Insecure Direct Object Reference attack via predictable secret ID in Juju
Description
In Juju from version 3.0.0 through 3.6.18, when a secret owner grants permissions on a secret to a grantee, the secret owner relies exclusively on a predictable XID of the secret to verify ownership. This allows a malicious grantee that can request secrets to predict past secrets granted by the same secret owner to different grantees, allowing it to use the resources granted by those past secrets. Successful exploitation relies on a very specific configuration, specific data semantics, and the administrator having the need to deploy at least two different applications, one of them controlled by the attacker.
Affected packages
Versions sourced from the GitHub Security Advisory.
| Package | Affected versions | Patched versions |
|---|---|---|
| github.com/juju/juju (Go) | >= 0.0.0-20221021155847-35c560704ee2, < 0.0.0-20260319091847-d06919eb03ec | 0.0.0-20260319091847-d06919eb03ec |
Affected products
1. Range: >= 3.0.0, <= 3.6.18
Patches
1. d06919eb03ec — Merge pull request #22040 from wallyworld/merge-juju-private
238 files changed · +10206 −3093
api/client/resources/client.go+15 −1 modified@@ -6,11 +6,13 @@ package resources import ( "context" "io" + "slices" "strings" charmresource "github.com/juju/charm/v12/resource" "github.com/juju/errors" "github.com/juju/names/v5" + "gopkg.in/errgo.v1" "github.com/juju/juju/api/base" apicharm "github.com/juju/juju/api/common/charm" @@ -100,6 +102,18 @@ func newListResourcesArgs(applications []string) (params.ListResourcesArgs, erro return args, nil } +func translateUploadError(err error) error { + permissionCodes := []string{params.CodeForbidden, params.CodeUnauthorized} + var uploadErr *errgo.Err + if errors.As(err, &uploadErr) { + if slices.Contains(permissionCodes, params.ErrCode(uploadErr.Cause())) || + slices.Contains(permissionCodes, params.ErrCode(uploadErr.Underlying())) { + return apiservererrors.ErrPerm + } + } + return err +} + // Upload sends the provided resource blob up to Juju. func (c Client) Upload(application, name, filename, pendingID string, reader io.ReadSeeker) error { uReq, err := NewUploadRequest(application, name, filename, reader) @@ -117,7 +131,7 @@ func (c Client) Upload(application, name, filename, pendingID string, reader io. var response params.UploadResult // ignored ctx := context.TODO() if err := c.httpClient.Do(ctx, req, &response); err != nil { - return errors.Trace(err) + return errors.Trace(translateUploadError(err)) } return nil
api/client/resources/client_upload_test.go+31 −6 modified@@ -19,6 +19,7 @@ import ( "github.com/kr/pretty" "go.uber.org/mock/gomock" gc "gopkg.in/check.v1" + "gopkg.in/errgo.v1" "github.com/juju/juju/api/base/mocks" "github.com/juju/juju/api/client/resources" @@ -113,7 +114,7 @@ func (s *UploadSuite) TestUploadBadApplication(c *gc.C) { c.Check(err, gc.ErrorMatches, `.*invalid application.*`) } -func (s *UploadSuite) TestUploadFailed(c *gc.C) { +func (s *UploadSuite) assertUploadFailed(c *gc.C, uploadErr error, expectedMsg string) { defer s.setup(c).Finish() data := "<data>" @@ -128,9 +129,21 @@ func (s *UploadSuite) TestUploadFailed(c *gc.C) { req.ContentLength = int64(len(data)) ctx := context.TODO() - s.mockHTTPClient.EXPECT().Do(ctx, reqMatcher{c, req}, gomock.Any()).Return(errors.New("boom")) + s.mockHTTPClient.EXPECT().Do(ctx, reqMatcher{c, req}, gomock.Any()).Return(uploadErr) err = s.client.Upload("a-application", "spam", "foo.zip", "", strings.NewReader(data)) - c.Assert(err, gc.ErrorMatches, "boom") + c.Assert(err, gc.ErrorMatches, expectedMsg) +} + +func (s *UploadSuite) TestUploadFailed(c *gc.C) { + s.assertUploadFailed(c, errors.New("upload failed"), "upload failed") +} + +func (s *UploadSuite) TestUploadAuthError(c *gc.C) { + authErr := errgo.Mask(params.Error{ + Code: params.CodeUnauthorized, + Message: "user unauthorized", + }) + s.assertUploadFailed(c, authErr, "permission denied") } func (s *UploadSuite) TestAddPendingResources(c *gc.C) { @@ -243,7 +256,7 @@ func (s *UploadSuite) TestUploadPendingResourceBadApplication(c *gc.C) { c.Assert(err, gc.ErrorMatches, `.*invalid application.*`) } -func (s *UploadSuite) TestUploadPendingResourceFailed(c *gc.C) { +func (s *UploadSuite) assertUploadPendingResourceFailed(c *gc.C, uploadErr error, expectedMsg string) { defer s.setup(c).Finish() res, apiResult := newResourceResult(c, "spam") @@ -271,10 +284,22 @@ func (s *UploadSuite) TestUploadPendingResourceFailed(c *gc.C) { ctx := context.TODO() 
s.mockFacadeCaller.EXPECT().FacadeCall("AddPendingResources", &args, gomock.Any()).SetArg(2, results).Return(nil) - s.mockHTTPClient.EXPECT().Do(ctx, reqMatcher{c, req}, gomock.Any()).Return(errors.New("boom")) + s.mockHTTPClient.EXPECT().Do(ctx, reqMatcher{c, req}, gomock.Any()).Return(uploadErr) _, err = s.client.UploadPendingResource("a-application", res[0].Resource, "file.zip", strings.NewReader(data)) - c.Assert(err, gc.ErrorMatches, "boom") + c.Assert(err, gc.ErrorMatches, expectedMsg) +} + +func (s *UploadSuite) TestUploadPendingResourceFailed(c *gc.C) { + s.assertUploadPendingResourceFailed(c, errors.New("upload failed"), "upload failed") +} + +func (s *UploadSuite) TestUploadPendingResourceAuthError(c *gc.C) { + authErr := errgo.Mask(params.Error{ + Code: params.CodeUnauthorized, + Message: "user unauthorized", + }) + s.assertUploadPendingResourceFailed(c, authErr, "permission denied") } func newResourceResult(c *gc.C, names ...string) ([]coreresources.Resource, params.ResourcesResult) {
api/controller/secretsrevoker/client.go+60 −0 added@@ -0,0 +1,60 @@ +// Copyright 2025 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package secretsrevoker + +import ( + "time" + + "github.com/juju/errors" + + "github.com/juju/juju/api/base" + apiwatcher "github.com/juju/juju/api/watcher" + apiservererrors "github.com/juju/juju/apiserver/errors" + "github.com/juju/juju/core/watcher" + "github.com/juju/juju/rpc/params" +) + +// Client is the api client for the SecretRevoker facade. +type Client struct { + facade base.FacadeCaller +} + +// NewClient creates a secret revoker API client. +func NewClient(caller base.APICaller) *Client { + return &Client{ + facade: base.NewFacadeCaller(caller, "SecretsRevoker"), + } +} + +// WatchIssuedTokenExpiry calls the SecretsRevoker facade to create a secret +// backends issued token expiry watcher. The watcher fires when a secret backend +// issued token is created, sending the RFC3339 encoded timestamp when it will +// expire. +func (c *Client) WatchIssuedTokenExpiry() (watcher.StringsWatcher, error) { + var result params.StringsWatchResult + err := c.facade.FacadeCall("WatchIssuedTokenExpiry", nil, &result) + if err != nil { + return nil, err + } + if result.Error != nil { + return nil, apiservererrors.RestoreError(result.Error) + } + w := apiwatcher.NewStringsWatcher(c.facade.RawAPICaller(), result) + return w, nil +} + +// RevokeIssuedTokens calls the SecretsRevoker facade to revoke all issued +// tokens up until the specified time and returns the time for the next +// revocation. +func (c *Client) RevokeIssuedTokens(until time.Time) (time.Time, error) { + var result params.RevokeIssuedTokensResult + err := c.facade.FacadeCall("RevokeIssuedTokens", until, &result) + if err != nil { + return time.Time{}, errors.Trace(err) + } + if result.Error != nil { + return time.Time{}, result.Error + } + return result.Next, nil +}
api/controller/secretsrevoker/client_test.go+88 −0 added@@ -0,0 +1,88 @@ +// Copyright 2025 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package secretsrevoker_test + +import ( + "time" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/api/base/testing" + "github.com/juju/juju/api/controller/secretsrevoker" + "github.com/juju/juju/rpc/params" + coretesting "github.com/juju/juju/testing" +) + +var _ = gc.Suite(&SecretRevokerSuite{}) + +type SecretRevokerSuite struct { + coretesting.BaseSuite +} + +func (s *SecretRevokerSuite) TestNewClient(c *gc.C) { + apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, result any) error { + return nil + }) + client := secretsrevoker.NewClient(apiCaller) + c.Assert(client, gc.NotNil) +} + +func (s *SecretRevokerSuite) TestWatchIssuedTokenExpiry(c *gc.C) { + apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, result any) error { + c.Check(objType, gc.Equals, "SecretsRevoker") + c.Check(version, gc.Equals, 0) + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, "WatchIssuedTokenExpiry") + c.Check(arg, gc.IsNil) + c.Assert(result, gc.FitsTypeOf, &params.StringsWatchResult{}) + *(result.(*params.StringsWatchResult)) = params.StringsWatchResult{ + Error: &params.Error{Message: "FAIL"}, + } + return nil + }) + client := secretsrevoker.NewClient(apiCaller) + _, err := client.WatchIssuedTokenExpiry() + c.Assert(err, gc.ErrorMatches, "FAIL") +} + +func (s *SecretRevokerSuite) TestRevokeIssuedTokens(c *gc.C) { + now := time.Now() + apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, result any) error { + c.Check(objType, gc.Equals, "SecretsRevoker") + c.Check(version, gc.Equals, 0) + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, "RevokeIssuedTokens") + c.Check(arg, jc.DeepEquals, now) + c.Assert(result, gc.FitsTypeOf, 
&params.RevokeIssuedTokensResult{}) + *(result.(*params.RevokeIssuedTokensResult)) = params.RevokeIssuedTokensResult{ + Error: &params.Error{Message: "boom"}, + } + return nil + }) + client := secretsrevoker.NewClient(apiCaller) + next, err := client.RevokeIssuedTokens(now) + c.Assert(err, gc.ErrorMatches, "boom") + c.Check(next, gc.Equals, time.Time{}) +} + +func (s *SecretRevokerSuite) TestRevokeIssuedTokensWithResult(c *gc.C) { + now := time.Now() + apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, result any) error { + c.Check(objType, gc.Equals, "SecretsRevoker") + c.Check(version, gc.Equals, 0) + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, "RevokeIssuedTokens") + c.Check(arg, jc.DeepEquals, now) + c.Assert(result, gc.FitsTypeOf, &params.RevokeIssuedTokensResult{}) + *(result.(*params.RevokeIssuedTokensResult)) = params.RevokeIssuedTokensResult{ + Next: now, + } + return nil + }) + client := secretsrevoker.NewClient(apiCaller) + next, err := client.RevokeIssuedTokens(now) + c.Assert(err, gc.IsNil) + c.Check(next, gc.Equals, now) +}
api/controller/secretsrevoker/doc.go+5 −0 added@@ -0,0 +1,5 @@ +// Copyright 2025 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// Package secretsrevoker provides the api client for the secretsrevoker facade. +package secretsrevoker
api/controller/secretsrevoker/package_test.go+14 −0 added@@ -0,0 +1,14 @@ +// Copyright 2025 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package secretsrevoker_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +}
api/facadeversions.go+1 −0 modified@@ -120,6 +120,7 @@ var facadeVersions = facades.FacadeVersions{ "Secrets": {1, 2}, "SecretsManager": {1, 2, 3, 4}, "SecretsDrain": {1}, + "SecretsRevoker": {1}, "UserSecretsDrain": {1}, "UserSecretsManager": {1}, "Singular": {2},
api/httpclient_test.go+7 −0 modified@@ -69,6 +69,13 @@ var httpClientTests = []struct { handler: http.NotFound, expectError: `(?m)Get http://.*/: 404 page not found.*`, expectErrorIs: errors.NotFound, +}, { + about: "non-JSON auth error response", + handler: func(w http.ResponseWriter, req *http.Request) { + http.Error(w, "some unauth error", http.StatusUnauthorized) + }, + expectError: `(?m)Get http://.*/: some unauth error.*`, + expectErrorIs: errors.Unauthorized, }, { about: "bad error response", handler: func(w http.ResponseWriter, req *http.Request) {
apiserver/allfacades.go+5 −3 modified@@ -100,6 +100,7 @@ import ( "github.com/juju/juju/apiserver/facades/controller/migrationtarget" "github.com/juju/juju/apiserver/facades/controller/remoterelations" "github.com/juju/juju/apiserver/facades/controller/secretbackendmanager" + "github.com/juju/juju/apiserver/facades/controller/secretsrevoker" "github.com/juju/juju/apiserver/facades/controller/singular" "github.com/juju/juju/apiserver/facades/controller/sshserver" "github.com/juju/juju/apiserver/facades/controller/sshtunneler" @@ -267,11 +268,12 @@ func AllFacades() *facade.Registry { resourceshookcontext.Register(registry) retrystrategy.Register(registry) singular.Register(registry) - secrets.Register(registry) - secretbackends.Register(registry) secretbackendmanager.Register(registry) - secretsmanager.Register(registry) + secretbackends.Register(registry) + secrets.Register(registry) secretsdrain.Register(registry) + secretsmanager.Register(registry) + secretsrevoker.Register(registry) usersecrets.Register(registry) usersecretsdrain.Register(registry) sshclient.Register(registry)
apiserver/apiserver.go+37 −8 modified@@ -723,7 +723,8 @@ func (srv *Server) endpoints() ([]apihttp.Endpoint, error) { controllerTag: systemState.ControllerTag(), } var debuglogAuth httpcontext.CompositeAuthorizer = []authentication.Authorizer{ - tagKindAuthorizer{names.MachineTagKind, names.ControllerAgentTagKind}, + tagKindAuthorizer{names.ControllerAgentTagKind}, + controllerAuthorizer{}, controllerAdminAuthorizer, modelPermissionAuthorizer{ perm: permission.ReadAccess, @@ -796,14 +797,18 @@ func (srv *Server) endpoints() ([]apihttp.Endpoint, error) { }, } modelToolsDownloadHandler := srv.monitoredHandler(newToolsDownloadHandler(httpCtxt), "tools") - resourcesHandler := srv.monitoredHandler(&ResourcesHandler{ - StateAuthFunc: func(req *http.Request, tagKinds ...string) (ResourcesBackend, state.PoolHelper, names.Tag, - error) { - st, entity, err := httpCtxt.stateForRequestAuthenticatedTag(req, tagKinds...) + var resourcesUploadAuthorizer httpcontext.CompositeAuthorizer = []authentication.Authorizer{ + controllerAdminAuthorizer, + modelPermissionAuthorizer{ + perm: permission.WriteAccess, + }, + } + resourceUploadHandler := srv.monitoredHandler(&ResourcesUploadHandler{ + StateFunc: func(req *http.Request) (ResourcesBackend, state.PoolHelper, names.Tag, error) { + st, entity, err := httpCtxt.stateForRequestAuthenticated(req) if err != nil { return nil, nil, nil, errors.Trace(err) } - rst := st.Resources() return rst, st, entity.Tag(), nil }, @@ -821,6 +826,23 @@ func (srv *Server) endpoints() ([]apihttp.Endpoint, error) { return nil }, }, "applications") + var resourcesDownloadAuthorizer httpcontext.CompositeAuthorizer = []authentication.Authorizer{ + controllerAdminAuthorizer, + modelPermissionAuthorizer{ + perm: permission.ReadAccess, + }, + tagKindAuthorizer{names.ControllerAgentTagKind, names.MachineTagKind, names.ApplicationTagKind}, + } + resourceDownloadHandler := srv.monitoredHandler(&ResourcesDownloadHandler{ + StateFunc: func(req *http.Request) 
(ResourcesBackend, state.PoolHelper, names.Tag, error) { + st, entity, err := httpCtxt.stateForRequestAuthenticated(req) + if err != nil { + return nil, nil, nil, errors.Trace(err) + } + rst := st.Resources() + return rst, st, entity.Tag(), nil + }, + }, "applications") unitResourcesHandler := srv.monitoredHandler(&UnitResourcesHandler{ NewOpener: func(req *http.Request, tagKinds ...string) (resources.Opener, state.PoolHelper, error) { st, _, err := httpCtxt.stateForRequestAuthenticatedTag(req, tagKinds...) @@ -931,8 +953,15 @@ func (srv *Server) endpoints() ([]apihttp.Endpoint, error) { handler: modelToolsDownloadHandler, unauthenticated: true, }, { - pattern: modelRoutePrefix + "/applications/:application/resources/:resource", - handler: resourcesHandler, + pattern: modelRoutePrefix + "/applications/:application/resources/:resource", + methods: []string{"GET"}, + handler: resourceDownloadHandler, + authorizer: resourcesDownloadAuthorizer, + }, { + pattern: modelRoutePrefix + "/applications/:application/resources/:resource", + methods: []string{"PUT"}, + handler: resourceUploadHandler, + authorizer: resourcesUploadAuthorizer, }, { pattern: modelRoutePrefix + "/units/:unit/resources/:resource", handler: unitResourcesHandler,
apiserver/common/secrets/mocks/provider_mock.go+33 −4 modified@@ -42,6 +42,21 @@ func (m *MockSecretBackendProvider) EXPECT() *MockSecretBackendProviderMockRecor return m.recorder } +// CleanupIssuedTokens mocks base method. +func (m *MockSecretBackendProvider) CleanupIssuedTokens(arg0 *provider.ModelBackendConfig, arg1 []string) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CleanupIssuedTokens", arg0, arg1) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CleanupIssuedTokens indicates an expected call of CleanupIssuedTokens. +func (mr *MockSecretBackendProviderMockRecorder) CleanupIssuedTokens(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupIssuedTokens", reflect.TypeOf((*MockSecretBackendProvider)(nil).CleanupIssuedTokens), arg0, arg1) +} + // CleanupModel mocks base method. func (m *MockSecretBackendProvider) CleanupModel(arg0 *provider.ModelBackendConfig) error { m.ctrl.T.Helper() @@ -84,6 +99,20 @@ func (mr *MockSecretBackendProviderMockRecorder) Initialise(arg0 any) *gomock.Ca return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialise", reflect.TypeOf((*MockSecretBackendProvider)(nil).Initialise), arg0) } +// IssuesTokens mocks base method. +func (m *MockSecretBackendProvider) IssuesTokens() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IssuesTokens") + ret0, _ := ret[0].(bool) + return ret0 +} + +// IssuesTokens indicates an expected call of IssuesTokens. +func (mr *MockSecretBackendProviderMockRecorder) IssuesTokens() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IssuesTokens", reflect.TypeOf((*MockSecretBackendProvider)(nil).IssuesTokens)) +} + // NewBackend mocks base method. 
func (m *MockSecretBackendProvider) NewBackend(arg0 *provider.ModelBackendConfig) (provider.SecretsBackend, error) { m.ctrl.T.Helper() @@ -100,18 +129,18 @@ func (mr *MockSecretBackendProviderMockRecorder) NewBackend(arg0 any) *gomock.Ca } // RestrictedConfig mocks base method. -func (m *MockSecretBackendProvider) RestrictedConfig(arg0 *provider.ModelBackendConfig, arg1, arg2 bool, arg3 names.Tag, arg4, arg5 provider.SecretRevisions) (*provider.BackendConfig, error) { +func (m *MockSecretBackendProvider) RestrictedConfig(arg0 *provider.ModelBackendConfig, arg1, arg2 bool, arg3 string, arg4 names.Tag, arg5 []string, arg6, arg7 provider.SecretRevisions) (*provider.BackendConfig, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RestrictedConfig", arg0, arg1, arg2, arg3, arg4, arg5) + ret := m.ctrl.Call(m, "RestrictedConfig", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) ret0, _ := ret[0].(*provider.BackendConfig) ret1, _ := ret[1].(error) return ret0, ret1 } // RestrictedConfig indicates an expected call of RestrictedConfig. -func (mr *MockSecretBackendProviderMockRecorder) RestrictedConfig(arg0, arg1, arg2, arg3, arg4, arg5 any) *gomock.Call { +func (mr *MockSecretBackendProviderMockRecorder) RestrictedConfig(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestrictedConfig", reflect.TypeOf((*MockSecretBackendProvider)(nil).RestrictedConfig), arg0, arg1, arg2, arg3, arg4, arg5) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestrictedConfig", reflect.TypeOf((*MockSecretBackendProvider)(nil).RestrictedConfig), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) } // Type mocks base method.
apiserver/common/secrets/mocks/state_mock.go+144 −0 modified@@ -72,6 +72,20 @@ func (mr *MockSecretsStoreMockRecorder) CreateSecret(arg0, arg1 any) *gomock.Cal return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSecret", reflect.TypeOf((*MockSecretsStore)(nil).CreateSecret), arg0, arg1) } +// CreateSecretBackendIssuedToken mocks base method. +func (m *MockSecretsStore) CreateSecretBackendIssuedToken(arg0 state.SecretBackendIssuedToken) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateSecretBackendIssuedToken", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateSecretBackendIssuedToken indicates an expected call of CreateSecretBackendIssuedToken. +func (mr *MockSecretsStoreMockRecorder) CreateSecretBackendIssuedToken(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSecretBackendIssuedToken", reflect.TypeOf((*MockSecretsStore)(nil).CreateSecretBackendIssuedToken), arg0) +} + // DeleteSecret mocks base method. func (m *MockSecretsStore) DeleteSecret(arg0 *secrets.URI, arg1 ...int) ([]secrets.ValueRef, error) { m.ctrl.T.Helper() @@ -92,6 +106,20 @@ func (mr *MockSecretsStoreMockRecorder) DeleteSecret(arg0 any, arg1 ...any) *gom return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSecret", reflect.TypeOf((*MockSecretsStore)(nil).DeleteSecret), varargs...) } +// ExpireSecretBackendIssuedTokensForConsumer mocks base method. +func (m *MockSecretsStore) ExpireSecretBackendIssuedTokensForConsumer(arg0 names.Tag) state.ModelOperation { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExpireSecretBackendIssuedTokensForConsumer", arg0) + ret0, _ := ret[0].(state.ModelOperation) + return ret0 +} + +// ExpireSecretBackendIssuedTokensForConsumer indicates an expected call of ExpireSecretBackendIssuedTokensForConsumer. 
+func (mr *MockSecretsStoreMockRecorder) ExpireSecretBackendIssuedTokensForConsumer(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExpireSecretBackendIssuedTokensForConsumer", reflect.TypeOf((*MockSecretsStore)(nil).ExpireSecretBackendIssuedTokensForConsumer), arg0) +} + // GetOwnedSecretMetadataAsApp mocks base method. func (m *MockSecretsStore) GetOwnedSecretMetadataAsApp(arg0 names.ApplicationTag, arg1 *secrets.URI) (*secrets.SecretMetadataOwnerIdent, error) { m.ctrl.T.Helper() @@ -258,6 +286,51 @@ func (mr *MockSecretsStoreMockRecorder) ListModelSecrets(arg0 any) *gomock.Call return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListModelSecrets", reflect.TypeOf((*MockSecretsStore)(nil).ListModelSecrets), arg0) } +// ListReservedSecrets mocks base method. +func (m *MockSecretsStore) ListReservedSecrets(arg0 []names.Tag) ([]*secrets.URI, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListReservedSecrets", arg0) + ret0, _ := ret[0].([]*secrets.URI) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListReservedSecrets indicates an expected call of ListReservedSecrets. +func (mr *MockSecretsStoreMockRecorder) ListReservedSecrets(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListReservedSecrets", reflect.TypeOf((*MockSecretsStore)(nil).ListReservedSecrets), arg0) +} + +// ListSecretBackendIssuedTokenUntil mocks base method. +func (m *MockSecretsStore) ListSecretBackendIssuedTokenUntil(arg0 time.Time) ([]state.SecretBackendIssuedToken, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListSecretBackendIssuedTokenUntil", arg0) + ret0, _ := ret[0].([]state.SecretBackendIssuedToken) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListSecretBackendIssuedTokenUntil indicates an expected call of ListSecretBackendIssuedTokenUntil. 
+func (mr *MockSecretsStoreMockRecorder) ListSecretBackendIssuedTokenUntil(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSecretBackendIssuedTokenUntil", reflect.TypeOf((*MockSecretsStore)(nil).ListSecretBackendIssuedTokenUntil), arg0) +} + +// ListSecretBackendIssuedTokenUntilForConsumer mocks base method. +func (m *MockSecretsStore) ListSecretBackendIssuedTokenUntilForConsumer(arg0 time.Time, arg1 names.Tag) ([]state.SecretBackendIssuedToken, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListSecretBackendIssuedTokenUntilForConsumer", arg0, arg1) + ret0, _ := ret[0].([]state.SecretBackendIssuedToken) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListSecretBackendIssuedTokenUntilForConsumer indicates an expected call of ListSecretBackendIssuedTokenUntilForConsumer. +func (mr *MockSecretsStoreMockRecorder) ListSecretBackendIssuedTokenUntilForConsumer(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSecretBackendIssuedTokenUntilForConsumer", reflect.TypeOf((*MockSecretsStore)(nil).ListSecretBackendIssuedTokenUntilForConsumer), arg0, arg1) +} + // ListSecretRevisions mocks base method. func (m *MockSecretsStore) ListSecretRevisions(arg0 *secrets.URI) ([]*secrets.SecretRevisionMetadata, error) { m.ctrl.T.Helper() @@ -303,6 +376,63 @@ func (mr *MockSecretsStoreMockRecorder) ListUnusedSecretRevisions(arg0 any) *gom return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUnusedSecretRevisions", reflect.TypeOf((*MockSecretsStore)(nil).ListUnusedSecretRevisions), arg0) } +// NextSecretBackendIssuedTokenExpiry mocks base method. 
+func (m *MockSecretsStore) NextSecretBackendIssuedTokenExpiry() (time.Time, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NextSecretBackendIssuedTokenExpiry") + ret0, _ := ret[0].(time.Time) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NextSecretBackendIssuedTokenExpiry indicates an expected call of NextSecretBackendIssuedTokenExpiry. +func (mr *MockSecretsStoreMockRecorder) NextSecretBackendIssuedTokenExpiry() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextSecretBackendIssuedTokenExpiry", reflect.TypeOf((*MockSecretsStore)(nil).NextSecretBackendIssuedTokenExpiry)) +} + +// RemoveSecretBackendIssuedTokens mocks base method. +func (m *MockSecretsStore) RemoveSecretBackendIssuedTokens(arg0 []string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveSecretBackendIssuedTokens", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveSecretBackendIssuedTokens indicates an expected call of RemoveSecretBackendIssuedTokens. +func (mr *MockSecretsStoreMockRecorder) RemoveSecretBackendIssuedTokens(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveSecretBackendIssuedTokens", reflect.TypeOf((*MockSecretsStore)(nil).RemoveSecretBackendIssuedTokens), arg0) +} + +// RemoveSecretReservations mocks base method. +func (m *MockSecretsStore) RemoveSecretReservations(arg0 names.Tag) state.ModelOperation { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveSecretReservations", arg0) + ret0, _ := ret[0].(state.ModelOperation) + return ret0 +} + +// RemoveSecretReservations indicates an expected call of RemoveSecretReservations. 
+func (mr *MockSecretsStoreMockRecorder) RemoveSecretReservations(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveSecretReservations", reflect.TypeOf((*MockSecretsStore)(nil).RemoveSecretReservations), arg0) +} + +// ReserveSecret mocks base method. +func (m *MockSecretsStore) ReserveSecret(arg0 *secrets.URI, arg1 names.Tag) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReserveSecret", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ReserveSecret indicates an expected call of ReserveSecret. +func (mr *MockSecretsStoreMockRecorder) ReserveSecret(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReserveSecret", reflect.TypeOf((*MockSecretsStore)(nil).ReserveSecret), arg0, arg1) +} + // SecretGrants mocks base method. func (m *MockSecretsStore) SecretGrants(arg0 *secrets.URI, arg1 secrets.SecretRole) ([]secrets.AccessInfo, error) { m.ctrl.T.Helper() @@ -378,6 +508,20 @@ func (mr *MockSecretsStoreMockRecorder) WatchRevisionsToPrune(arg0 any) *gomock. return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WatchRevisionsToPrune", reflect.TypeOf((*MockSecretsStore)(nil).WatchRevisionsToPrune), arg0) } +// WatchSecretBackendIssuedTokenExpiry mocks base method. +func (m *MockSecretsStore) WatchSecretBackendIssuedTokenExpiry() state.StringsWatcher { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WatchSecretBackendIssuedTokenExpiry") + ret0, _ := ret[0].(state.StringsWatcher) + return ret0 +} + +// WatchSecretBackendIssuedTokenExpiry indicates an expected call of WatchSecretBackendIssuedTokenExpiry. 
+func (mr *MockSecretsStoreMockRecorder) WatchSecretBackendIssuedTokenExpiry() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WatchSecretBackendIssuedTokenExpiry", reflect.TypeOf((*MockSecretsStore)(nil).WatchSecretBackendIssuedTokenExpiry)) +} + // MockSecretBackendsStorage is a mock of SecretBackendsStorage interface. type MockSecretBackendsStorage struct { ctrl *gomock.Controller
apiserver/common/secrets/secrets.go+110 −27 modified@@ -7,8 +7,11 @@ import ( "context" "encoding/json" "sort" + "time" + "github.com/google/uuid" "github.com/juju/collections/set" + "github.com/juju/collections/transform" "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/names/v5" @@ -146,7 +149,7 @@ func DrainBackendConfigInfo(backendID string, model Model, authTag names.Tag, le if !ok { return nil, errors.Errorf("missing secret backend %q", backendID) } - backendCfg, err := backendConfigInfo(model, backendID, &cfg, authTag, leadershipChecker, true, true) + backendCfg, err := backendConfigInfo(model, backendID, &cfg, authTag, leadershipChecker, true, true, nil) if err != nil { return nil, errors.Trace(err) } @@ -180,8 +183,13 @@ func SecretCleanupBackendConfigInfo(model Model, backendID string) (*provider.Mo // The client is expected to be restricted to write only those secrets // owned by the agent, and read only those secrets shared with the agent. // The result includes config for all relevant backends, including the id -// of the current active backend. -func BackendConfigInfo(model Model, sameController bool, backendIDs []string, wantAll bool, authTag names.Tag, leadershipChecker leadership.Checker) (*provider.ModelBackendConfigInfo, error) { +// of the current active backend. If [only] is passed, then the config may be +// restricted down to just that URI, if the auth tag has access to it. 
+func BackendConfigInfo( + model Model, sameController bool, backendIDs []string, wantAll bool, + authTag names.Tag, leadershipChecker leadership.Checker, + only []*coresecrets.URI, +) (*provider.ModelBackendConfigInfo, error) { adminModelCfg, err := AdminBackendConfigInfo(model) if err != nil { return nil, errors.Annotate(err, "getting configured secrets providers") @@ -204,7 +212,16 @@ func BackendConfigInfo(model Model, sameController bool, backendIDs []string, wa if !ok { return nil, errors.Errorf("missing secret backend %q", backendID) } - backendCfg, err := backendConfigInfo(model, backendID, &cfg, authTag, leadershipChecker, sameController, false) + backendCfg, err := backendConfigInfo( + model, + backendID, + &cfg, + authTag, + leadershipChecker, + sameController, + false, + only, + ) if err != nil { return nil, errors.Trace(err) } @@ -214,8 +231,13 @@ func BackendConfigInfo(model Model, sameController bool, backendIDs []string, wa } func backendConfigInfo( - model Model, backendID string, adminCfg *provider.ModelBackendConfig, - authTag names.Tag, leadershipChecker leadership.Checker, sameController, forDrain bool, + model Model, + backendID string, + adminCfg *provider.ModelBackendConfig, + authTag names.Tag, + leadershipChecker leadership.Checker, + sameController, forDrain bool, + only []*coresecrets.URI, ) (*provider.ModelBackendConfig, error) { p, err := GetProvider(adminCfg.BackendType) if err != nil { @@ -231,11 +253,13 @@ func backendConfigInfo( // (or its app if the agent is a leader). ownedFilter := state.SecretsFilter{ OwnerTags: []names.Tag{authTag}, + URIs: only, } // Find secrets shared with the agent. // We include secrets shared with the app or just the specified unit. readFilter := state.SecretsFilter{ ConsumerTags: []names.Tag{authTag}, + URIs: only, } // Find secrets owned by the application that should be readable for non leader units. 
readAppOwnedFilter := state.SecretsFilter{} @@ -259,30 +283,79 @@ func backendConfigInfo( // Granted secrets can be consumed in application level for all units. readFilter.ConsumerTags = append(readFilter.ConsumerTags, authApp) case names.ApplicationTag: + // App Tag has access to application secrets. case names.ModelTag: - // Model Tag is validate for user secrets. + // Model Tag has access to user secrets. default: return nil, errors.NotSupportedf("login as %q", authTag) } - ownedRevisions := map[string]provider.SecretRevisions{} - if err := getExternalRevisions(secretsState, backendID, ownedFilter, ownedRevisions); err != nil { + ownedIDs, ownedRevs, err := getExternalRevisions( + secretsState, backendID, ownedFilter) + if err != nil { return nil, errors.Trace(err) } - readRevisions := map[string]provider.SecretRevisions{} - if err := getExternalRevisions(secretsState, backendID, readFilter, readRevisions); err != nil { + ownedReservedIDs, err := secretsState.ListReservedSecrets(ownedFilter.OwnerTags) + if err != nil { + return nil, errors.Trace(err) + } + if len(only) > 0 { + onlySet := set.NewStrings(transform.Slice(only, func(uri *coresecrets.URI) string { + return uri.ID + })...) 
+ for _, reservedSecretID := range ownedReservedIDs { + if onlySet.Contains(reservedSecretID.ID) { + ownedIDs = append(ownedIDs, reservedSecretID.ID) + } + } + } else { + for _, reservedSecretID := range ownedReservedIDs { + ownedIDs = append(ownedIDs, reservedSecretID.ID) + } + } + + _, readRevs, err := getExternalRevisions( + secretsState, backendID, readFilter) + if err != nil { return nil, errors.Trace(err) } if len(readAppOwnedFilter.OwnerTags) > 0 { - if err := getExternalRevisions(secretsState, backendID, readAppOwnedFilter, readRevisions); err != nil { + _, appOwnedReadRevs, err := getExternalRevisions( + secretsState, backendID, readAppOwnedFilter) + if err != nil { return nil, errors.Trace(err) } + readRevs.Insert(appOwnedReadRevs) } - logger.Debugf("secrets for %v:\nowned: %v\nconsumed:%v", authTag.String(), ownedRevisions, readRevisions) - cfg, err := p.RestrictedConfig(adminCfg, sameController, forDrain, authTag, ownedRevisions[backendID], readRevisions[backendID]) + issuedTokenUUID := "" + if p.IssuesTokens() { + v, err := uuid.NewRandom() + if err != nil { + return nil, errors.Trace(err) + } + issuedTokenUUID = v.String() + + args := state.SecretBackendIssuedToken{ + UUID: issuedTokenUUID, + ExpireTime: time.Now().Add(coresecrets.IssuedTokenValidity), + BackendID: backendID, + Consumer: authTag, + } + err = secretsState.CreateSecretBackendIssuedToken(args) + if err != nil { + return nil, errors.Trace(err) + } + } + + logger.Debugf( + "secrets for %v:\nowned: %v\nowned revs: %v\nconsumed revs:%v", + authTag.String(), ownedIDs, ownedRevs, readRevs) + cfg, err := p.RestrictedConfig( + adminCfg, sameController, forDrain, issuedTokenUUID, authTag, ownedIDs, + ownedRevs, readRevs) if err != nil { return nil, errors.Trace(err) } @@ -295,29 +368,39 @@ func backendConfigInfo( return info, nil } -func getExternalRevisions(backend state.SecretsStore, backendID string, filter state.SecretsFilter, revisions map[string]provider.SecretRevisions) error { +func 
getExternalRevisions( + backend state.SecretsStore, + backendID string, + filter state.SecretsFilter, +) ([]string, provider.SecretRevisions, error) { secrets, err := backend.ListSecrets(filter) if err != nil { - return errors.Trace(err) + return nil, nil, errors.Trace(err) } + + revs := provider.SecretRevisions{} + ids := []string{} for _, md := range secrets { - revs, err := backend.ListSecretRevisions(md.URI) + secretRevs, err := backend.ListSecretRevisions(md.URI) if err != nil { - return errors.Annotatef(err, "cannot get revisions for secret %q", md.URI) + return nil, nil, errors.Annotatef(err, + "cannot get revisions for secret %q", md.URI) } - for _, rev := range revs { - if rev.ValueRef == nil || rev.ValueRef.BackendID != backendID { + + for _, secretRev := range secretRevs { + if secretRev.ValueRef == nil { continue } - revs, ok := revisions[rev.ValueRef.BackendID] - if !ok { - revs = provider.SecretRevisions{} + if secretRev.ValueRef.BackendID != backendID { + continue } - revs.Add(md.URI, rev.ValueRef.RevisionID) - revisions[rev.ValueRef.BackendID] = revs + revs.Add(md.URI, secretRev.ValueRef.RevisionID) } + + ids = append(ids, md.URI.ID) } - return nil + + return ids, revs, nil } func cloudSpecForModel(m Model) (cloudspec.CloudSpec, error) { @@ -683,7 +766,7 @@ func secretDeletionPreflightCheck(uriStr string, label string, removeState Secre // there are multiple secrets with the same label. func getSecretURIForLabel(secretsState ListSecretsState, modelUUID string, label string) (*coresecrets.URI, error) { results, err := secretsState.ListSecrets(state.SecretsFilter{ - Label: &label, + Labels: []string{label}, OwnerTags: []names.Tag{names.NewModelTag(modelUUID)}, }) if err != nil {
apiserver/common/secrets/secrets_test.go+446 −205 modified@@ -86,7 +86,7 @@ var ( func (s *secretsSuite) TestMarshallLegacyBackendConfig(c *gc.C) { cfg := params.SecretBackendConfig{ BackendType: kubernetes.BackendType, - Params: map[string]interface{}{ + Params: map[string]any{ "endpoint": "http://nowhere", "ca-certs": []string{"cert-data"}, "namespace": "fred", @@ -98,7 +98,7 @@ func (s *secretsSuite) TestMarshallLegacyBackendConfig(c *gc.C) { c.Assert(err, jc.ErrorIsNil) c.Assert(cfg, jc.DeepEquals, params.SecretBackendConfig{ BackendType: kubernetes.BackendType, - Params: map[string]interface{}{ + Params: map[string]any{ "endpoint": "http://nowhere", "ca-certs": []string{"cert-data"}, "credential": `{"auth-type":"oauth2","Attributes":{"Token":"bar"}}`, @@ -220,7 +220,7 @@ func (s *secretsSuite) assertAdminBackendConfigInfoDefault( ID: vaultBackendID, Name: "myvault", BackendType: vault.BackendType, - Config: map[string]interface{}{ + Config: map[string]any{ "endpoint": "http://vault", }, }}, nil) @@ -231,14 +231,6 @@ func (s *secretsSuite) assertAdminBackendConfigInfoDefault( } func (s *secretsSuite) TestBackendConfigInfoLeaderUnit(c *gc.C) { - s.assertBackendConfigInfoLeaderUnit(c, []string{"backend-id"}) -} - -func (s *secretsSuite) TestBackendConfigInfoDefaultAdmin(c *gc.C) { - s.assertBackendConfigInfoLeaderUnit(c, nil) -} - -func (s *secretsSuite) assertBackendConfigInfoLeaderUnit(c *gc.C, wanted []string) { ctrl := gomock.NewController(c) defer ctrl.Finish() @@ -254,9 +246,14 @@ func (s *secretsSuite) assertBackendConfigInfoLeaderUnit(c *gc.C, wanted []strin s.PatchValue(&secrets.GetSecretsState, func(secrets.Model) state.SecretsStore { return secretsState }) s.PatchValue(&secrets.GetSecretBackendsState, func(secrets.Model) state.SecretBackendsStorage { return backendState }) + backendIDs := []string{"backend-id"} + issuedTokenUUID := "" owned := []*coresecrets.SecretMetadata{ {URI: &coresecrets.URI{ID: "owned-1"}}, } + ownedIDs := []string{ + "owned-1", 
+ } ownedRevs := map[string]set.Strings{ "owned-1": set.NewStrings("owned-rev-1", "owned-rev-2"), } @@ -280,47 +277,52 @@ func (s *secretsSuite) assertBackendConfigInfoLeaderUnit(c *gc.C, wanted []strin model.EXPECT().ControllerUUID().Return(coretesting.ControllerTag.Id()).AnyTimes() model.EXPECT().UUID().Return(coretesting.ModelTag.Id()).AnyTimes() model.EXPECT().Name().Return("fred").AnyTimes() - gomock.InOrder( - model.EXPECT().Config().Return(modelCfg, nil), - model.EXPECT().Type().Return(state.ModelTypeIAAS), - backendState.EXPECT().ListSecretBackends().Return([]*coresecrets.SecretBackend{{ - ID: "backend-id", - Name: "backend-name", - BackendType: "some-backend", - }, { - ID: "backend-id2", - Name: "backend-name2", - BackendType: "some-backend2", - }}, nil), - p.EXPECT().Initialise(gomock.Any()).Return(nil), - leadershipChecker.EXPECT().LeadershipCheck("gitlab", "gitlab/0").Return(token), - token.EXPECT().Check().Return(nil), - - secretsState.EXPECT().ListSecrets(state.SecretsFilter{ - OwnerTags: []names.Tag{ - unitTag, names.NewApplicationTag("gitlab"), - }, - }).Return(owned, nil), - secretsState.EXPECT().ListSecretRevisions(&coresecrets.URI{ID: "owned-1"}). - Return([]*coresecrets.SecretRevisionMetadata{{ - Revision: 1, - ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "owned-rev-1"}, - }, { - Revision: 2, - ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "owned-rev-2"}, - }}, nil), - secretsState.EXPECT().ListSecrets(state.SecretsFilter{ - ConsumerTags: []names.Tag{unitTag, names.NewApplicationTag("gitlab")}, - }).Return(read, nil), - secretsState.EXPECT().ListSecretRevisions(&coresecrets.URI{ID: "read-1"}). 
- Return([]*coresecrets.SecretRevisionMetadata{{ - Revision: 1, - ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "read-rev-1"}, - }}, nil), - ) - p.EXPECT().RestrictedConfig(&adminCfg, true, false, unitTag, ownedRevs, readRevs).Return(&adminCfg.BackendConfig, nil) + model.EXPECT().Config().Return(modelCfg, nil) + model.EXPECT().Type().Return(state.ModelTypeIAAS) + secretsState.EXPECT().ListReservedSecrets( + []names.Tag{unitTag, names.NewApplicationTag("gitlab")}, + ).Return(nil, nil) + backendState.EXPECT().ListSecretBackends().Return([]*coresecrets.SecretBackend{{ + ID: "backend-id", + Name: "backend-name", + BackendType: "some-backend", + }, { + ID: "backend-id2", + Name: "backend-name2", + BackendType: "some-backend2", + }}, nil) + p.EXPECT().Initialise(gomock.Any()).Return(nil) + p.EXPECT().IssuesTokens().Return(false) + leadershipChecker.EXPECT().LeadershipCheck("gitlab", "gitlab/0").Return(token) + token.EXPECT().Check().Return(nil) - info, err := secrets.BackendConfigInfo(model, true, wanted, false, unitTag, leadershipChecker) + secretsState.EXPECT().ListSecrets(state.SecretsFilter{ + OwnerTags: []names.Tag{ + unitTag, names.NewApplicationTag("gitlab"), + }, + }).Return(owned, nil) + secretsState.EXPECT().ListSecretRevisions(&coresecrets.URI{ID: "owned-1"}). + Return([]*coresecrets.SecretRevisionMetadata{{ + Revision: 1, + ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "owned-rev-1"}, + }, { + Revision: 2, + ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "owned-rev-2"}, + }}, nil) + secretsState.EXPECT().ListSecrets(state.SecretsFilter{ + ConsumerTags: []names.Tag{unitTag, names.NewApplicationTag("gitlab")}, + }).Return(read, nil) + secretsState.EXPECT().ListSecretRevisions(&coresecrets.URI{ID: "read-1"}). 
+ Return([]*coresecrets.SecretRevisionMetadata{{ + Revision: 1, + ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "read-rev-1"}, + }}, nil) + p.EXPECT().RestrictedConfig( + &adminCfg, true, false, issuedTokenUUID, unitTag, + ownedIDs, ownedRevs, readRevs, + ).Return(&adminCfg.BackendConfig, nil) + + info, err := secrets.BackendConfigInfo(model, true, backendIDs, false, unitTag, leadershipChecker, nil) c.Assert(err, jc.ErrorIsNil) c.Assert(info, jc.DeepEquals, &provider.ModelBackendConfigInfo{ ActiveID: "backend-id", @@ -353,6 +355,8 @@ func (s *secretsSuite) TestBackendConfigInfoNonLeaderUnit(c *gc.C) { s.PatchValue(&secrets.GetSecretsState, func(secrets.Model) state.SecretsStore { return secretsState }) s.PatchValue(&secrets.GetSecretBackendsState, func(secrets.Model) state.SecretBackendsStorage { return backendState }) + issuedTokenUUID := "" + ownedIDs := []string{"owned-1"} unitOwned := []*coresecrets.SecretMetadata{ {URI: &coresecrets.URI{ID: "owned-1"}}, } @@ -383,55 +387,59 @@ func (s *secretsSuite) TestBackendConfigInfoNonLeaderUnit(c *gc.C) { model.EXPECT().ControllerUUID().Return(coretesting.ControllerTag.Id()).AnyTimes() model.EXPECT().UUID().Return(coretesting.ModelTag.Id()).AnyTimes() model.EXPECT().Name().Return("fred").AnyTimes() - gomock.InOrder( - model.EXPECT().Config().Return(modelCfg, nil), - model.EXPECT().Type().Return(state.ModelTypeIAAS), - backendState.EXPECT().ListSecretBackends().Return([]*coresecrets.SecretBackend{{ - ID: "backend-id", - Name: "backend-name", - BackendType: "some-backend", - }}, nil), - p.EXPECT().Initialise(gomock.Any()).Return(nil), - leadershipChecker.EXPECT().LeadershipCheck("gitlab", "gitlab/0").Return(token), - token.EXPECT().Check().Return(leadership.NewNotLeaderError("", "")), - - secretsState.EXPECT().ListSecrets(state.SecretsFilter{ - OwnerTags: []names.Tag{unitTag}, - }).Return(unitOwned, nil), - secretsState.EXPECT().ListSecretRevisions(&coresecrets.URI{ID: "owned-1"}). 
- Return([]*coresecrets.SecretRevisionMetadata{{ - Revision: 1, - ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "owned-rev-1"}, - }, { - Revision: 2, - ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "owned-rev-2"}, - }}, nil), - secretsState.EXPECT().ListSecrets(state.SecretsFilter{ - ConsumerTags: []names.Tag{unitTag, names.NewApplicationTag("gitlab")}, - }).Return(read, nil), - secretsState.EXPECT().ListSecretRevisions(&coresecrets.URI{ID: "read-1"}). - Return([]*coresecrets.SecretRevisionMetadata{{ - Revision: 1, - ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "read-rev-1"}, - }}, nil), - secretsState.EXPECT().ListSecrets(state.SecretsFilter{ - OwnerTags: []names.Tag{names.NewApplicationTag("gitlab")}, - }).Return(appOwned, nil), - secretsState.EXPECT().ListSecretRevisions(&coresecrets.URI{ID: "app-owned-1"}). - Return([]*coresecrets.SecretRevisionMetadata{{ - Revision: 1, - ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "app-owned-rev-1"}, - }, { - Revision: 2, - ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "app-owned-rev-2"}, - }, { - Revision: 3, - ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "app-owned-rev-3"}, - }}, nil), - ) - p.EXPECT().RestrictedConfig(&adminCfg, true, false, unitTag, ownedRevs, readRevs).Return(&adminCfg.BackendConfig, nil) - - info, err := secrets.BackendConfigInfo(model, true, []string{"backend-id"}, false, unitTag, leadershipChecker) + model.EXPECT().Config().Return(modelCfg, nil) + model.EXPECT().Type().Return(state.ModelTypeIAAS) + backendState.EXPECT().ListSecretBackends().Return([]*coresecrets.SecretBackend{{ + ID: "backend-id", + Name: "backend-name", + BackendType: "some-backend", + }}, nil) + p.EXPECT().Initialise(gomock.Any()).Return(nil) + p.EXPECT().IssuesTokens().Return(false) + leadershipChecker.EXPECT().LeadershipCheck("gitlab", "gitlab/0").Return(token) + 
token.EXPECT().Check().Return(leadership.NewNotLeaderError("", "")) + + secretsState.EXPECT().ListReservedSecrets( + []names.Tag{unitTag}).Return(nil, nil) + secretsState.EXPECT().ListSecrets(state.SecretsFilter{ + OwnerTags: []names.Tag{unitTag}, + }).Return(unitOwned, nil) + secretsState.EXPECT().ListSecretRevisions(&coresecrets.URI{ID: "owned-1"}). + Return([]*coresecrets.SecretRevisionMetadata{{ + Revision: 1, + ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "owned-rev-1"}, + }, { + Revision: 2, + ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "owned-rev-2"}, + }}, nil) + secretsState.EXPECT().ListSecrets(state.SecretsFilter{ + ConsumerTags: []names.Tag{unitTag, names.NewApplicationTag("gitlab")}, + }).Return(read, nil) + secretsState.EXPECT().ListSecretRevisions(&coresecrets.URI{ID: "read-1"}). + Return([]*coresecrets.SecretRevisionMetadata{{ + Revision: 1, + ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "read-rev-1"}, + }}, nil) + secretsState.EXPECT().ListSecrets(state.SecretsFilter{ + OwnerTags: []names.Tag{names.NewApplicationTag("gitlab")}, + }).Return(appOwned, nil) + secretsState.EXPECT().ListSecretRevisions(&coresecrets.URI{ID: "app-owned-1"}). 
+ Return([]*coresecrets.SecretRevisionMetadata{{ + Revision: 1, + ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "app-owned-rev-1"}, + }, { + Revision: 2, + ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "app-owned-rev-2"}, + }, { + Revision: 3, + ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "app-owned-rev-3"}, + }}, nil) + p.EXPECT().RestrictedConfig( + &adminCfg, true, false, issuedTokenUUID, unitTag, + ownedIDs, ownedRevs, readRevs, + ).Return(&adminCfg.BackendConfig, nil) + + info, err := secrets.BackendConfigInfo(model, true, []string{"backend-id"}, false, unitTag, leadershipChecker, nil) c.Assert(err, jc.ErrorIsNil) c.Assert(info, jc.DeepEquals, &provider.ModelBackendConfigInfo{ ActiveID: "backend-id", @@ -464,6 +472,10 @@ func (s *secretsSuite) TestDrainBackendConfigInfo(c *gc.C) { s.PatchValue(&secrets.GetSecretsState, func(secrets.Model) state.SecretsStore { return secretsState }) s.PatchValue(&secrets.GetSecretBackendsState, func(secrets.Model) state.SecretBackendsStorage { return backendState }) + issuedTokenUUID := "" + ownedIDs := []string{ + "owned-1", + } unitOwned := []*coresecrets.SecretMetadata{ {URI: &coresecrets.URI{ID: "owned-1"}}, } @@ -494,53 +506,57 @@ func (s *secretsSuite) TestDrainBackendConfigInfo(c *gc.C) { model.EXPECT().ControllerUUID().Return(coretesting.ControllerTag.Id()).AnyTimes() model.EXPECT().UUID().Return(coretesting.ModelTag.Id()).AnyTimes() model.EXPECT().Name().Return("fred").AnyTimes() - gomock.InOrder( - model.EXPECT().Config().Return(modelCfg, nil), - model.EXPECT().Type().Return(state.ModelTypeIAAS), - backendState.EXPECT().ListSecretBackends().Return([]*coresecrets.SecretBackend{{ - ID: "backend-id", - Name: "backend-name", - BackendType: "some-backend", - }}, nil), - p.EXPECT().Initialise(gomock.Any()).Return(nil), - leadershipChecker.EXPECT().LeadershipCheck("gitlab", "gitlab/0").Return(token), - 
token.EXPECT().Check().Return(leadership.NewNotLeaderError("", "")), - - secretsState.EXPECT().ListSecrets(state.SecretsFilter{ - OwnerTags: []names.Tag{unitTag}, - }).Return(unitOwned, nil), - secretsState.EXPECT().ListSecretRevisions(&coresecrets.URI{ID: "owned-1"}). - Return([]*coresecrets.SecretRevisionMetadata{{ - Revision: 1, - ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "owned-rev-1"}, - }, { - Revision: 2, - ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "owned-rev-2"}, - }}, nil), - secretsState.EXPECT().ListSecrets(state.SecretsFilter{ - ConsumerTags: []names.Tag{unitTag, names.NewApplicationTag("gitlab")}, - }).Return(read, nil), - secretsState.EXPECT().ListSecretRevisions(&coresecrets.URI{ID: "read-1"}). - Return([]*coresecrets.SecretRevisionMetadata{{ - Revision: 1, - ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "read-rev-1"}, - }}, nil), - secretsState.EXPECT().ListSecrets(state.SecretsFilter{ - OwnerTags: []names.Tag{names.NewApplicationTag("gitlab")}, - }).Return(appOwned, nil), - secretsState.EXPECT().ListSecretRevisions(&coresecrets.URI{ID: "app-owned-1"}). 
- Return([]*coresecrets.SecretRevisionMetadata{{ - Revision: 1, - ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "app-owned-rev-1"}, - }, { - Revision: 2, - ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "app-owned-rev-2"}, - }, { - Revision: 3, - ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "app-owned-rev-3"}, - }}, nil), - ) - p.EXPECT().RestrictedConfig(&adminCfg, true, true, unitTag, ownedRevs, readRevs).Return(&adminCfg.BackendConfig, nil) + model.EXPECT().Config().Return(modelCfg, nil) + model.EXPECT().Type().Return(state.ModelTypeIAAS) + backendState.EXPECT().ListSecretBackends().Return([]*coresecrets.SecretBackend{{ + ID: "backend-id", + Name: "backend-name", + BackendType: "some-backend", + }}, nil) + p.EXPECT().Initialise(gomock.Any()).Return(nil) + p.EXPECT().IssuesTokens().Return(false) + leadershipChecker.EXPECT().LeadershipCheck("gitlab", "gitlab/0").Return(token) + token.EXPECT().Check().Return(leadership.NewNotLeaderError("", "")) + + secretsState.EXPECT().ListReservedSecrets( + []names.Tag{unitTag}).Return(nil, nil) + secretsState.EXPECT().ListSecrets(state.SecretsFilter{ + OwnerTags: []names.Tag{unitTag}, + }).Return(unitOwned, nil) + secretsState.EXPECT().ListSecretRevisions(&coresecrets.URI{ID: "owned-1"}). + Return([]*coresecrets.SecretRevisionMetadata{{ + Revision: 1, + ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "owned-rev-1"}, + }, { + Revision: 2, + ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "owned-rev-2"}, + }}, nil) + secretsState.EXPECT().ListSecrets(state.SecretsFilter{ + ConsumerTags: []names.Tag{unitTag, names.NewApplicationTag("gitlab")}, + }).Return(read, nil) + secretsState.EXPECT().ListSecretRevisions(&coresecrets.URI{ID: "read-1"}). 
+ Return([]*coresecrets.SecretRevisionMetadata{{ + Revision: 1, + ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "read-rev-1"}, + }}, nil) + secretsState.EXPECT().ListSecrets(state.SecretsFilter{ + OwnerTags: []names.Tag{names.NewApplicationTag("gitlab")}, + }).Return(appOwned, nil) + secretsState.EXPECT().ListSecretRevisions(&coresecrets.URI{ID: "app-owned-1"}). + Return([]*coresecrets.SecretRevisionMetadata{{ + Revision: 1, + ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "app-owned-rev-1"}, + }, { + Revision: 2, + ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "app-owned-rev-2"}, + }, { + Revision: 3, + ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "app-owned-rev-3"}, + }}, nil) + p.EXPECT().RestrictedConfig( + &adminCfg, true, true, issuedTokenUUID, unitTag, + ownedIDs, ownedRevs, readRevs, + ).Return(&adminCfg.BackendConfig, nil) info, err := secrets.DrainBackendConfigInfo("backend-id", model, unitTag, leadershipChecker) c.Assert(err, jc.ErrorIsNil) @@ -616,6 +632,8 @@ func (s *secretsSuite) TestBackendConfigInfoAppTagLogin(c *gc.C) { s.PatchValue(&secrets.GetSecretsState, func(secrets.Model) state.SecretsStore { return secretsState }) s.PatchValue(&secrets.GetSecretBackendsState, func(secrets.Model) state.SecretBackendsStorage { return backendState }) + issuedTokenUUID := "" + ownedIDs := []string{"owned-1"} owned := []*coresecrets.SecretMetadata{ {URI: &coresecrets.URI{ID: "owned-1"}}, } @@ -642,39 +660,41 @@ func (s *secretsSuite) TestBackendConfigInfoAppTagLogin(c *gc.C) { model.EXPECT().ControllerUUID().Return(coretesting.ControllerTag.Id()).AnyTimes() model.EXPECT().UUID().Return(coretesting.ModelTag.Id()).AnyTimes() model.EXPECT().Name().Return("fred").AnyTimes() - gomock.InOrder( - model.EXPECT().Config().Return(modelCfg, nil), - model.EXPECT().Type().Return(state.ModelTypeIAAS), - backendState.EXPECT().ListSecretBackends().Return([]*coresecrets.SecretBackend{{ - 
ID: "backend-id", - Name: "backend-name", - BackendType: "some-backend", - }}, nil), - p.EXPECT().Initialise(gomock.Any()).Return(nil), - - secretsState.EXPECT().ListSecrets(state.SecretsFilter{ - OwnerTags: []names.Tag{appTag}, - }).Return(owned, nil), - secretsState.EXPECT().ListSecretRevisions(&coresecrets.URI{ID: "owned-1"}). - Return([]*coresecrets.SecretRevisionMetadata{{ - Revision: 1, - ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "owned-rev-1"}, - }, { - Revision: 2, - ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "owned-rev-2"}, - }}, nil), - secretsState.EXPECT().ListSecrets(state.SecretsFilter{ - ConsumerTags: []names.Tag{appTag}, - }).Return(read, nil), - secretsState.EXPECT().ListSecretRevisions(&coresecrets.URI{ID: "read-1"}). - Return([]*coresecrets.SecretRevisionMetadata{{ - Revision: 1, - ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "read-rev-1"}, - }}, nil), - ) - p.EXPECT().RestrictedConfig(&adminCfg, true, false, appTag, ownedRevs, readRevs).Return(&adminCfg.BackendConfig, nil) - - info, err := secrets.BackendConfigInfo(model, true, []string{"backend-id"}, false, appTag, leadershipChecker) + model.EXPECT().Config().Return(modelCfg, nil) + model.EXPECT().Type().Return(state.ModelTypeIAAS) + backendState.EXPECT().ListSecretBackends().Return([]*coresecrets.SecretBackend{{ + ID: "backend-id", + Name: "backend-name", + BackendType: "some-backend", + }}, nil) + p.EXPECT().Initialise(gomock.Any()).Return(nil) + p.EXPECT().IssuesTokens().Return(false) + secretsState.EXPECT().ListReservedSecrets([]names.Tag{appTag}).Return(nil, nil) + secretsState.EXPECT().ListSecrets(state.SecretsFilter{ + OwnerTags: []names.Tag{appTag}, + }).Return(owned, nil) + secretsState.EXPECT().ListSecretRevisions(&coresecrets.URI{ID: "owned-1"}). 
+ Return([]*coresecrets.SecretRevisionMetadata{{ + Revision: 1, + ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "owned-rev-1"}, + }, { + Revision: 2, + ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "owned-rev-2"}, + }}, nil) + secretsState.EXPECT().ListSecrets(state.SecretsFilter{ + ConsumerTags: []names.Tag{appTag}, + }).Return(read, nil) + secretsState.EXPECT().ListSecretRevisions(&coresecrets.URI{ID: "read-1"}). + Return([]*coresecrets.SecretRevisionMetadata{{ + Revision: 1, + ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "read-rev-1"}, + }}, nil) + p.EXPECT().RestrictedConfig( + &adminCfg, true, false, issuedTokenUUID, appTag, + ownedIDs, ownedRevs, readRevs, + ).Return(&adminCfg.BackendConfig, nil) + + info, err := secrets.BackendConfigInfo(model, true, []string{"backend-id"}, false, appTag, leadershipChecker, nil) c.Assert(err, jc.ErrorIsNil) c.Assert(info, jc.DeepEquals, &provider.ModelBackendConfigInfo{ ActiveID: "backend-id", @@ -712,21 +732,19 @@ func (s *secretsSuite) TestBackendConfigInfoFailedInvalidAuthTag(c *gc.C) { model.EXPECT().ControllerUUID().Return(coretesting.ControllerTag.Id()).AnyTimes() model.EXPECT().UUID().Return(coretesting.ModelTag.Id()).AnyTimes() model.EXPECT().Name().Return("fred").AnyTimes() - gomock.InOrder( - model.EXPECT().Config().Return(cfg, nil), - model.EXPECT().Type().Return(state.ModelTypeIAAS), - backendState.EXPECT().ListSecretBackends().Return([]*coresecrets.SecretBackend{{ - ID: "some-id", - Name: "myvault", - BackendType: vault.BackendType, - Config: map[string]interface{}{ - "endpoint": "http://vault", - }, - }}, nil), - p.EXPECT().Initialise(gomock.Any()).Return(nil), - ) + model.EXPECT().Config().Return(cfg, nil) + model.EXPECT().Type().Return(state.ModelTypeIAAS) + backendState.EXPECT().ListSecretBackends().Return([]*coresecrets.SecretBackend{{ + ID: "some-id", + Name: "myvault", + BackendType: vault.BackendType, + Config: map[string]any{ + 
"endpoint": "http://vault", + }, + }}, nil) + p.EXPECT().Initialise(gomock.Any()).Return(nil) - _, err := secrets.BackendConfigInfo(model, true, []string{"some-id"}, false, badTag, leadershipChecker) + _, err := secrets.BackendConfigInfo(model, true, []string{"some-id"}, false, badTag, leadershipChecker, nil) c.Assert(err, gc.ErrorMatches, `login as "user-foo" not supported`) } @@ -925,10 +943,6 @@ func (s *secretsSuite) TestRemoveSecretsForSecretOwners(c *gc.C) { }) } -func ptr[T any](v T) *T { - return &v -} - func (s *secretsSuite) TestRemoveSecretsByLabel(c *gc.C) { ctrl := gomock.NewController(c) defer ctrl.Finish() @@ -940,7 +954,7 @@ func (s *secretsSuite) TestRemoveSecretsByLabel(c *gc.C) { s.PatchValue(&secrets.GetProvider, func(string) (provider.SecretBackendProvider, error) { return mockprovider, nil }) removeState.EXPECT().ListSecrets(state.SecretsFilter{ - Label: ptr("my-secret"), + Labels: []string{"my-secret"}, OwnerTags: []names.Tag{coretesting.ModelTag}, }).Return([]*coresecrets.SecretMetadata{{ URI: uri, @@ -1004,7 +1018,6 @@ func (s *secretsSuite) TestRemoveSecretsForModelAdminFromJujuBackend(c *gc.C) { c.Assert(results, jc.DeepEquals, params.ErrorResults{ Results: []params.ErrorResult{{}}, }) - } func (s *secretsSuite) TestRemoveSecretsForModelAdminWithRevisions(c *gc.C) { @@ -1034,7 +1047,7 @@ func (s *secretsSuite) TestRemoveSecretsForModelAdminWithRevisions(c *gc.C) { ModelName: "fred", BackendConfig: provider.BackendConfig{ BackendType: "some-backend", - Config: map[string]interface{}{"foo": "admin"}, + Config: map[string]any{"foo": "admin"}, }, } mockprovider.EXPECT().NewBackend(cfg).Return(backend, nil) @@ -1102,7 +1115,7 @@ func (s *secretsSuite) TestRemoveSecretsForModelAdmin(c *gc.C) { ModelName: "fred", BackendConfig: provider.BackendConfig{ BackendType: "some-backend", - Config: map[string]interface{}{"foo": "admin"}, + Config: map[string]any{"foo": "admin"}, }, } mockprovider.EXPECT().NewBackend(cfg).Return(backend, nil) @@ -1177,7 
+1190,7 @@ func (s *secretsSuite) TestRemoveSecretsForModelAdminDuringBackendMigration(c *g ModelName: "fred", BackendConfig: provider.BackendConfig{ BackendType: "some-backend", - Config: map[string]interface{}{"before": "migration"}, + Config: map[string]any{"before": "migration"}, }, } backendAfterMigrationCfg := &provider.ModelBackendConfig{ @@ -1186,7 +1199,7 @@ func (s *secretsSuite) TestRemoveSecretsForModelAdminDuringBackendMigration(c *g ModelName: "fred", BackendConfig: provider.BackendConfig{ BackendType: "some-backend", - Config: map[string]interface{}{"after": "migration"}, + Config: map[string]any{"after": "migration"}, }, } @@ -1253,7 +1266,6 @@ func (s *secretsSuite) TestRemoveSecretsForModelAdminDuringBackendMigration(c *g c.Assert(results, jc.DeepEquals, params.ErrorResults{ Results: []params.ErrorResult{{}}, }) - } func (s *secretsSuite) TestRemoveSecretNotFoundForModelAdmin(c *gc.C) { @@ -1333,5 +1345,234 @@ func (s *secretsSuite) TestRemoveSecretNotFoundForModelAdmin(c *gc.C) { c.Assert(results, jc.DeepEquals, params.ErrorResults{ Results: []params.ErrorResult{{}}, }) +} + +func (s *secretsSuite) TestBackendConfigInfoIssuesToken(c *gc.C) { + ctrl := gomock.NewController(c) + defer ctrl.Finish() + + unitTag := names.NewUnitTag("gitlab/0") + model := mocks.NewMockModel(ctrl) + leadershipChecker := mocks.NewMockChecker(ctrl) + token := mocks.NewMockToken(ctrl) + secretProvider := mocks.NewMockSecretBackendProvider(ctrl) + backendState := mocks.NewMockSecretBackendsStorage(ctrl) + secretsState := mocks.NewMockSecretsStore(ctrl) + + s.PatchValue(&secrets.GetProvider, func(string) (provider.SecretBackendProvider, error) { return secretProvider, nil }) + s.PatchValue(&secrets.GetSecretsState, func(secrets.Model) state.SecretsStore { return secretsState }) + s.PatchValue(&secrets.GetSecretBackendsState, func(secrets.Model) state.SecretBackendsStorage { return backendState }) + + backendIDs := []string{"backend-id"} + issuedTokenUUID := "" + owned := 
[]*coresecrets.SecretMetadata{ + {URI: &coresecrets.URI{ID: "owned-1"}}, + } + ownedIDs := []string{ + "owned-1", + } + ownedRevs := map[string]set.Strings{ + "owned-1": set.NewStrings("owned-rev-1", "owned-rev-2"), + } + read := []*coresecrets.SecretMetadata{ + {URI: &coresecrets.URI{ID: "read-1"}}, + } + readRevs := map[string]set.Strings{ + "read-1": set.NewStrings("read-rev-1"), + } + modelCfg := coretesting.CustomModelConfig(c, coretesting.Attrs{ + "secret-backend": "backend-name", + }) + adminCfg := provider.ModelBackendConfig{ + ControllerUUID: coretesting.ControllerTag.Id(), + ModelUUID: coretesting.ModelTag.Id(), + ModelName: "fred", + BackendConfig: provider.BackendConfig{ + BackendType: "some-backend", + }, + } + model.EXPECT().ControllerUUID().Return(coretesting.ControllerTag.Id()).AnyTimes() + model.EXPECT().UUID().Return(coretesting.ModelTag.Id()).AnyTimes() + model.EXPECT().Name().Return("fred").AnyTimes() + model.EXPECT().Config().Return(modelCfg, nil) + model.EXPECT().Type().Return(state.ModelTypeIAAS) + secretsState.EXPECT().ListReservedSecrets( + []names.Tag{unitTag, names.NewApplicationTag("gitlab")}, + ).Return(nil, nil) + backendState.EXPECT().ListSecretBackends().Return([]*coresecrets.SecretBackend{{ + ID: "backend-id", + Name: "backend-name", + BackendType: "some-backend", + }}, nil) + secretProvider.EXPECT().Initialise(gomock.Any()).Return(nil) + secretProvider.EXPECT().IssuesTokens().Return(true) + secretsState.EXPECT().CreateSecretBackendIssuedToken( + gomock.Any(), + ).DoAndReturn(func(sbit state.SecretBackendIssuedToken) error { + issuedTokenUUID = sbit.UUID + c.Check(sbit.Consumer, gc.Equals, unitTag) + c.Check(sbit.BackendID, gc.Equals, "backend-id") + c.Check(sbit.ExpireTime, jc.After, time.Now()) + return nil + }) + leadershipChecker.EXPECT().LeadershipCheck("gitlab", "gitlab/0").Return(token) + token.EXPECT().Check().Return(nil) + secretsState.EXPECT().ListSecrets(state.SecretsFilter{ + OwnerTags: []names.Tag{ + unitTag, 
names.NewApplicationTag("gitlab"), + }, + }).Return(owned, nil) + secretsState.EXPECT().ListSecretRevisions(&coresecrets.URI{ID: "owned-1"}). + Return([]*coresecrets.SecretRevisionMetadata{{ + Revision: 1, + ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "owned-rev-1"}, + }, { + Revision: 2, + ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "owned-rev-2"}, + }}, nil) + secretsState.EXPECT().ListSecrets(state.SecretsFilter{ + ConsumerTags: []names.Tag{unitTag, names.NewApplicationTag("gitlab")}, + }).Return(read, nil) + secretsState.EXPECT().ListSecretRevisions(&coresecrets.URI{ID: "read-1"}). + Return([]*coresecrets.SecretRevisionMetadata{{ + Revision: 1, + ValueRef: &coresecrets.ValueRef{BackendID: "backend-id", RevisionID: "read-rev-1"}, + }}, nil) + secretProvider.EXPECT().RestrictedConfig( + &adminCfg, true, false, gomock.Any(), unitTag, + ownedIDs, ownedRevs, readRevs, + ).DoAndReturn(func( + _ *provider.ModelBackendConfig, + _, _ bool, + gotIssuedTokenUUID string, + _ names.Tag, + _ []string, + _, _ provider.SecretRevisions, + ) (*provider.BackendConfig, error) { + c.Check(gotIssuedTokenUUID, gc.Equals, issuedTokenUUID) + return &adminCfg.BackendConfig, nil + }) + + info, err := secrets.BackendConfigInfo(model, true, backendIDs, false, unitTag, leadershipChecker, nil) + c.Assert(err, jc.ErrorIsNil) + c.Assert(info, jc.DeepEquals, &provider.ModelBackendConfigInfo{ + ActiveID: "backend-id", + Configs: map[string]provider.ModelBackendConfig{ + "backend-id": { + ControllerUUID: coretesting.ControllerTag.Id(), + ModelUUID: coretesting.ModelTag.Id(), + ModelName: "fred", + BackendConfig: provider.BackendConfig{ + BackendType: "some-backend", + }, + }, + }, + }) +} + +func (s *secretsSuite) TestBackendConfigInfoIssuesTokenWithReservedSecrets(c *gc.C) { + ctrl := gomock.NewController(c) + defer ctrl.Finish() + + unitTag := names.NewUnitTag("gitlab/0") + model := mocks.NewMockModel(ctrl) + leadershipChecker := 
mocks.NewMockChecker(ctrl) + token := mocks.NewMockToken(ctrl) + secretProvider := mocks.NewMockSecretBackendProvider(ctrl) + backendState := mocks.NewMockSecretBackendsStorage(ctrl) + secretsState := mocks.NewMockSecretsStore(ctrl) + + s.PatchValue(&secrets.GetProvider, func(string) (provider.SecretBackendProvider, error) { return secretProvider, nil }) + s.PatchValue(&secrets.GetSecretsState, func(secrets.Model) state.SecretsStore { return secretsState }) + s.PatchValue(&secrets.GetSecretBackendsState, func(secrets.Model) state.SecretBackendsStorage { return backendState }) + + backendIDs := []string{"backend-id"} + issuedTokenUUID := "" + reserved := []*coresecrets.URI{ + {ID: "reserved-1"}, + } + owned := []*coresecrets.SecretMetadata{} + ownedIDs := []string{ + "reserved-1", + } + ownedRevs := map[string]set.Strings{} + read := []*coresecrets.SecretMetadata{} + readRevs := map[string]set.Strings{} + modelCfg := coretesting.CustomModelConfig(c, coretesting.Attrs{ + "secret-backend": "backend-name", + }) + adminCfg := provider.ModelBackendConfig{ + ControllerUUID: coretesting.ControllerTag.Id(), + ModelUUID: coretesting.ModelTag.Id(), + ModelName: "fred", + BackendConfig: provider.BackendConfig{ + BackendType: "some-backend", + }, + } + model.EXPECT().ControllerUUID().Return(coretesting.ControllerTag.Id()).AnyTimes() + model.EXPECT().UUID().Return(coretesting.ModelTag.Id()).AnyTimes() + model.EXPECT().Name().Return("fred").AnyTimes() + model.EXPECT().Config().Return(modelCfg, nil) + model.EXPECT().Type().Return(state.ModelTypeIAAS) + secretsState.EXPECT().ListReservedSecrets( + []names.Tag{unitTag, names.NewApplicationTag("gitlab")}, + ).Return(reserved, nil) + backendState.EXPECT().ListSecretBackends().Return([]*coresecrets.SecretBackend{{ + ID: "backend-id", + Name: "backend-name", + BackendType: "some-backend", + }}, nil) + secretProvider.EXPECT().Initialise(gomock.Any()).Return(nil) + secretProvider.EXPECT().IssuesTokens().Return(true) + 
secretsState.EXPECT().CreateSecretBackendIssuedToken( + gomock.Any(), + ).DoAndReturn(func(sbit state.SecretBackendIssuedToken) error { + issuedTokenUUID = sbit.UUID + c.Check(sbit.Consumer, gc.Equals, unitTag) + c.Check(sbit.BackendID, gc.Equals, "backend-id") + c.Check(sbit.ExpireTime, jc.After, time.Now()) + return nil + }) + leadershipChecker.EXPECT().LeadershipCheck("gitlab", "gitlab/0").Return(token) + token.EXPECT().Check().Return(nil) + + secretsState.EXPECT().ListSecrets(state.SecretsFilter{ + OwnerTags: []names.Tag{ + unitTag, names.NewApplicationTag("gitlab"), + }, + }).Return(owned, nil) + secretsState.EXPECT().ListSecrets(state.SecretsFilter{ + ConsumerTags: []names.Tag{unitTag, names.NewApplicationTag("gitlab")}, + }).Return(read, nil) + secretProvider.EXPECT().RestrictedConfig( + &adminCfg, true, false, gomock.Any(), unitTag, + ownedIDs, ownedRevs, readRevs, + ).DoAndReturn(func( + _ *provider.ModelBackendConfig, + _, _ bool, + gotIssuedTokenUUID string, + _ names.Tag, + _ []string, + _, _ provider.SecretRevisions, + ) (*provider.BackendConfig, error) { + c.Check(gotIssuedTokenUUID, gc.Equals, issuedTokenUUID) + return &adminCfg.BackendConfig, nil + }) + + info, err := secrets.BackendConfigInfo(model, true, backendIDs, false, unitTag, leadershipChecker, nil) + c.Assert(err, jc.ErrorIsNil) + c.Assert(info, jc.DeepEquals, &provider.ModelBackendConfigInfo{ + ActiveID: "backend-id", + Configs: map[string]provider.ModelBackendConfig{ + "backend-id": { + ControllerUUID: coretesting.ControllerTag.Id(), + ModelUUID: coretesting.ModelTag.Id(), + ModelName: "fred", + BackendConfig: provider.BackendConfig{ + BackendType: "some-backend", + }, + }, + }, + }) }
apiserver/debuglog_db_test.go+17 −1 modified@@ -16,6 +16,7 @@ import ( "github.com/juju/juju/apiserver/websocket/websockettest" "github.com/juju/juju/core/permission" "github.com/juju/juju/rpc/params" + "github.com/juju/juju/state" "github.com/juju/juju/testing/factory" ) @@ -97,9 +98,10 @@ func (s *debugLogDBSuite) TestUserLoginRejected(c *gc.C) { websockettest.AssertWebsocketClosed(c, conn) } -func (s *debugLogDBSuite) TestMachineLoginsAccepted(c *gc.C) { +func (s *debugLogDBSuite) TestControllerMachineLoginsAccepted(c *gc.C) { m, password := s.Factory.MakeMachineReturningPassword(c, &factory.MachineParams{ Nonce: "foo-nonce", + Jobs: []state.MachineJob{state.JobManageModel}, }) header := jujuhttp.BasicAuthHeader(m.Tag().String(), password) header.Add(params.MachineNonceHeader, "foo-nonce") @@ -111,6 +113,20 @@ func (s *debugLogDBSuite) TestMachineLoginsAccepted(c *gc.C) { c.Assert(result.Error, gc.IsNil) } +func (s *debugLogDBSuite) TestNonControllerMachineLoginsRejected(c *gc.C) { + m, password := s.Factory.MakeMachineReturningPassword(c, &factory.MachineParams{ + Nonce: "foo-nonce", + }) + header := jujuhttp.BasicAuthHeader(m.Tag().String(), password) + header.Add(params.MachineNonceHeader, "foo-nonce") + conn, _, err := s.dialWebsocketInternal(c, noResultsPlease, header) //nolint:bodyclose // WebSocket library handles response body closure + c.Assert(err, jc.ErrorIsNil) + defer conn.Close() + + websockettest.AssertJSONError(c, conn, "authorization failed: permission denied") + websockettest.AssertWebsocketClosed(c, conn) +} + func (s *debugLogDBSuite) logURL(scheme string, queryParams url.Values) *url.URL { url := s.URL("/log", queryParams) url.Scheme = scheme
apiserver/facades/agent/secretsmanager/mocks/secretsprovider.go+33 −4 modified@@ -40,6 +40,21 @@ func (m *MockSecretBackendProvider) EXPECT() *MockSecretBackendProviderMockRecor return m.recorder } +// CleanupIssuedTokens mocks base method. +func (m *MockSecretBackendProvider) CleanupIssuedTokens(arg0 *provider.ModelBackendConfig, arg1 []string) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CleanupIssuedTokens", arg0, arg1) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CleanupIssuedTokens indicates an expected call of CleanupIssuedTokens. +func (mr *MockSecretBackendProviderMockRecorder) CleanupIssuedTokens(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupIssuedTokens", reflect.TypeOf((*MockSecretBackendProvider)(nil).CleanupIssuedTokens), arg0, arg1) +} + // CleanupModel mocks base method. func (m *MockSecretBackendProvider) CleanupModel(arg0 *provider.ModelBackendConfig) error { m.ctrl.T.Helper() @@ -82,6 +97,20 @@ func (mr *MockSecretBackendProviderMockRecorder) Initialise(arg0 any) *gomock.Ca return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialise", reflect.TypeOf((*MockSecretBackendProvider)(nil).Initialise), arg0) } +// IssuesTokens mocks base method. +func (m *MockSecretBackendProvider) IssuesTokens() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IssuesTokens") + ret0, _ := ret[0].(bool) + return ret0 +} + +// IssuesTokens indicates an expected call of IssuesTokens. +func (mr *MockSecretBackendProviderMockRecorder) IssuesTokens() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IssuesTokens", reflect.TypeOf((*MockSecretBackendProvider)(nil).IssuesTokens)) +} + // NewBackend mocks base method. 
func (m *MockSecretBackendProvider) NewBackend(arg0 *provider.ModelBackendConfig) (provider.SecretsBackend, error) { m.ctrl.T.Helper() @@ -98,18 +127,18 @@ func (mr *MockSecretBackendProviderMockRecorder) NewBackend(arg0 any) *gomock.Ca } // RestrictedConfig mocks base method. -func (m *MockSecretBackendProvider) RestrictedConfig(arg0 *provider.ModelBackendConfig, arg1, arg2 bool, arg3 names.Tag, arg4, arg5 provider.SecretRevisions) (*provider.BackendConfig, error) { +func (m *MockSecretBackendProvider) RestrictedConfig(arg0 *provider.ModelBackendConfig, arg1, arg2 bool, arg3 string, arg4 names.Tag, arg5 []string, arg6, arg7 provider.SecretRevisions) (*provider.BackendConfig, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RestrictedConfig", arg0, arg1, arg2, arg3, arg4, arg5) + ret := m.ctrl.Call(m, "RestrictedConfig", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) ret0, _ := ret[0].(*provider.BackendConfig) ret1, _ := ret[1].(error) return ret0, ret1 } // RestrictedConfig indicates an expected call of RestrictedConfig. -func (mr *MockSecretBackendProviderMockRecorder) RestrictedConfig(arg0, arg1, arg2, arg3, arg4, arg5 any) *gomock.Call { +func (mr *MockSecretBackendProviderMockRecorder) RestrictedConfig(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestrictedConfig", reflect.TypeOf((*MockSecretBackendProvider)(nil).RestrictedConfig), arg0, arg1, arg2, arg3, arg4, arg5) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestrictedConfig", reflect.TypeOf((*MockSecretBackendProvider)(nil).RestrictedConfig), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) } // Type mocks base method.
apiserver/facades/agent/secretsmanager/mocks/secretsstate.go+14 −0 modified@@ -271,6 +271,20 @@ func (mr *MockSecretsStateMockRecorder) ListSecrets(arg0 any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSecrets", reflect.TypeOf((*MockSecretsState)(nil).ListSecrets), arg0) } +// ReserveSecret mocks base method. +func (m *MockSecretsState) ReserveSecret(arg0 *secrets.URI, arg1 names.Tag) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReserveSecret", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ReserveSecret indicates an expected call of ReserveSecret. +func (mr *MockSecretsStateMockRecorder) ReserveSecret(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReserveSecret", reflect.TypeOf((*MockSecretsState)(nil).ReserveSecret), arg0, arg1) +} + // SecretGrants mocks base method. func (m *MockSecretsState) SecretGrants(arg0 *secrets.URI, arg1 secrets.SecretRole) ([]secrets.AccessInfo, error) { m.ctrl.T.Helper()
apiserver/facades/agent/secretsmanager/register.go+1 −1 modified@@ -82,7 +82,7 @@ func NewSecretManagerAPI(context facade.Context) (*SecretsManagerAPI, error) { return nil, errors.Trace(err) } secretBackendConfigGetter := func(backendIDs []string, wantAll bool) (*provider.ModelBackendConfigInfo, error) { - return secrets.BackendConfigInfo(secrets.SecretsModel(model), true, backendIDs, wantAll, context.Auth().GetAuthTag(), leadershipChecker) + return secrets.BackendConfigInfo(secrets.SecretsModel(model), true, backendIDs, wantAll, context.Auth().GetAuthTag(), leadershipChecker, nil) } secretBackendAdminConfigGetter := func() (*provider.ModelBackendConfigInfo, error) { return secrets.AdminBackendConfigInfo(secrets.SecretsModel(model))
apiserver/facades/agent/secretsmanager/secrets.go+7 −2 modified@@ -240,14 +240,19 @@ func (s *SecretsManagerAPI) getBackend(backendID string) (*secretsprovider.Model // CreateSecretURIs creates new secret URIs. func (s *SecretsManagerAPI) CreateSecretURIs(arg params.CreateSecretURIsArg) (params.StringResults, error) { if arg.Count <= 0 { - return params.StringResults{}, errors.NotValidf("secret URi count %d", arg.Count) + return params.StringResults{}, errors.NotValidf("secret URI count %d", arg.Count) } result := params.StringResults{ Results: make([]params.StringResult, arg.Count), } for i := 0; i < arg.Count; i++ { uri := coresecrets.NewURI().WithSource(s.modelUUID) - result.Results[i] = params.StringResult{Result: uri.String()} + err := s.secretsState.ReserveSecret(uri, s.authTag) + if err != nil { + result.Results[i].Error = apiservererrors.ServerError(err) + } else { + result.Results[i] = params.StringResult{Result: uri.String()} + } } return result, nil }
apiserver/facades/agent/secretsmanager/secrets_test.go+3 −0 modified@@ -228,6 +228,9 @@ func (s *SecretsManagerSuite) TestGetSecretBackendConfigsForDrain(c *gc.C) { func (s *SecretsManagerSuite) TestCreateSecretURIs(c *gc.C) { defer s.setup(c).Finish() + s.secretsState.EXPECT().ReserveSecret( + gomock.Any(), s.authTag).Return(nil).Times(2) + results, err := s.facade.CreateSecretURIs(params.CreateSecretURIsArg{ Count: 2, })
apiserver/facades/agent/secretsmanager/state.go+2 −0 modified@@ -31,7 +31,9 @@ type SecretsConsumer interface { SecretAccess(uri *secrets.URI, subject names.Tag) (secrets.SecretRole, error) } +// SecretsState provides the secrets manager agent facade access to secrets. type SecretsState interface { + ReserveSecret(*secrets.URI, names.Tag) error CreateSecret(*secrets.URI, state.CreateSecretParams) (*secrets.SecretMetadata, error) UpdateSecret(*secrets.URI, state.UpdateSecretParams) (*secrets.SecretMetadata, error) DeleteSecret(*secrets.URI, ...int) ([]secrets.ValueRef, error)
apiserver/facades/agent/uniter/register.go+2 −0 modified@@ -15,6 +15,7 @@ import ( "github.com/juju/juju/apiserver/facade" "github.com/juju/juju/apiserver/facades/agent/meterstatus" "github.com/juju/juju/apiserver/facades/agent/secretsmanager" + "github.com/juju/juju/state" ) // Register is called to expose a package of facades onto a given registry. @@ -130,6 +131,7 @@ func newUniterAPI(context facade.Context) (*UniterAPI, error) { m: m, st: st, + secrets: state.NewSecrets(st), clock: aClock, cancel: context.Cancel(), cacheModel: cacheModel,
apiserver/facades/agent/uniter/uniter.go+12 −0 modified@@ -55,6 +55,7 @@ type UniterAPI struct { lxdProfileAPI *LXDProfileAPIv2 m *state.Model st *state.State + secrets state.SecretsStore clock clock.Clock cancel <-chan struct{} auth facade.Authorizer @@ -2913,6 +2914,17 @@ func (u *UniterAPI) commitHookChangesForOneUnit(unitTag names.UnitTag, changes p } } + // Remove any secret reservations that, by this point, have not been + // turned into a secret. Secrets are reserved by the uniter in a call to + // CreateSecretURIs. + modelOps = append(modelOps, u.secrets.RemoveSecretReservations(unitTag)) + + // Expire any secret issued backend tokens that, by this point, should no + // longer be used by this agent. + modelOps = append( + modelOps, u.secrets.ExpireSecretBackendIssuedTokensForConsumer(unitTag), + ) + // Apply all changes in a single transaction. return u.st.ApplyOperation(state.ComposeModelOperations(modelOps...)) }
apiserver/facades/agent/uniter/uniter_test.go+78 −0 modified@@ -4762,6 +4762,84 @@ func (s *uniterSuite) TestCommitHookChangesWithSecrets(c *gc.C) { c.Assert(info.LatestRevision, gc.Equals, 2) } +func (s *uniterSuite) TestCommitHookChangesRemovesSecretReservations(c *gc.C) { + store := state.NewSecrets(s.State) + + // Reserve some secrets for the unit. + for range 10 { + u := secrets.NewURI() + err := store.ReserveSecret(u, s.wordpressUnit.Tag()) + c.Assert(err, jc.ErrorIsNil) + } + reserved, err := store.ListReservedSecrets([]names.Tag{ + s.wordpressUnit.Tag(), + }) + c.Assert(err, jc.ErrorIsNil) + c.Check(reserved, gc.HasLen, 10) + + // Commit the hook changes. + b := apiuniter.NewCommitHookParamsBuilder(s.wordpressUnit.UnitTag()) + b.UpdateCharmState(map[string]string{"charm-key": "charm-value"}) + req, _ := b.Build() + + result, err := s.uniter.CommitHookChanges(req) + c.Assert(err, jc.ErrorIsNil) + c.Check(result, gc.DeepEquals, params.ErrorResults{ + Results: []params.ErrorResult{ + {Error: nil}, + }, + }) + + // Check all the secret reservations were removed. + reserved, err = store.ListReservedSecrets([]names.Tag{s.wordpressUnit.Tag()}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(reserved, gc.HasLen, 0) +} + +func (s *uniterSuite) TestCommitHookChangesExpiresSecretBackendTokens(c *gc.C) { + store := state.NewSecrets(s.State) + now := time.Now() + + // Create some secret backend issued tokens that are not expired yet. 
+ for range 10 { + issuedToken := state.SecretBackendIssuedToken{ + UUID: utils.MustNewUUID().String(), + ExpireTime: now.Add(time.Hour), + BackendID: "backend-id", + Consumer: s.wordpressUnit.Tag(), + } + err := store.CreateSecretBackendIssuedToken(issuedToken) + c.Assert(err, jc.ErrorIsNil) + } + tokens, err := store.ListSecretBackendIssuedTokenUntilForConsumer( + now, s.wordpressUnit.Tag()) + c.Assert(err, jc.ErrorIsNil) + c.Check(tokens, gc.HasLen, 0) + tokens, err = store.ListSecretBackendIssuedTokenUntilForConsumer( + now.Add(time.Hour).Add(time.Minute), s.wordpressUnit.Tag()) + c.Assert(err, jc.ErrorIsNil) + c.Check(tokens, gc.HasLen, 10) + + // Commit the hook changes. + b := apiuniter.NewCommitHookParamsBuilder(s.wordpressUnit.UnitTag()) + b.UpdateCharmState(map[string]string{"charm-key": "charm-value"}) + req, _ := b.Build() + + result, err := s.uniter.CommitHookChanges(req) + c.Assert(err, jc.ErrorIsNil) + c.Check(result, gc.DeepEquals, params.ErrorResults{ + Results: []params.ErrorResult{ + {Error: nil}, + }, + }) + + // Check that the secret backend issued tokens are all expired now. + tokens, err = store.ListSecretBackendIssuedTokenUntilForConsumer( + now, s.wordpressUnit.Tag()) + c.Assert(err, jc.ErrorIsNil) + c.Check(tokens, gc.HasLen, 10) +} + func (s *uniterSuite) TestCommitHookChangesWithStorage(c *gc.C) { // We need to set up a unit that has storage metadata defined. ch := s.AddTestingCharm(c, "storage-block2") // supports multiple storage instances
apiserver/facades/client/application/mocks/state_mock.go+145 −0 modified@@ -11,6 +11,7 @@ package mocks import ( reflect "reflect" + time "time" set "github.com/juju/collections/set" network "github.com/juju/juju/core/network" @@ -548,6 +549,20 @@ func (mr *MockSecretsStoreMockRecorder) CreateSecret(arg0, arg1 any) *gomock.Cal return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSecret", reflect.TypeOf((*MockSecretsStore)(nil).CreateSecret), arg0, arg1) } +// CreateSecretBackendIssuedToken mocks base method. +func (m *MockSecretsStore) CreateSecretBackendIssuedToken(arg0 state.SecretBackendIssuedToken) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateSecretBackendIssuedToken", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateSecretBackendIssuedToken indicates an expected call of CreateSecretBackendIssuedToken. +func (mr *MockSecretsStoreMockRecorder) CreateSecretBackendIssuedToken(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSecretBackendIssuedToken", reflect.TypeOf((*MockSecretsStore)(nil).CreateSecretBackendIssuedToken), arg0) +} + // DeleteSecret mocks base method. func (m *MockSecretsStore) DeleteSecret(arg0 *secrets.URI, arg1 ...int) ([]secrets.ValueRef, error) { m.ctrl.T.Helper() @@ -568,6 +583,20 @@ func (mr *MockSecretsStoreMockRecorder) DeleteSecret(arg0 any, arg1 ...any) *gom return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSecret", reflect.TypeOf((*MockSecretsStore)(nil).DeleteSecret), varargs...) } +// ExpireSecretBackendIssuedTokensForConsumer mocks base method. 
+func (m *MockSecretsStore) ExpireSecretBackendIssuedTokensForConsumer(arg0 names.Tag) state.ModelOperation { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExpireSecretBackendIssuedTokensForConsumer", arg0) + ret0, _ := ret[0].(state.ModelOperation) + return ret0 +} + +// ExpireSecretBackendIssuedTokensForConsumer indicates an expected call of ExpireSecretBackendIssuedTokensForConsumer. +func (mr *MockSecretsStoreMockRecorder) ExpireSecretBackendIssuedTokensForConsumer(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExpireSecretBackendIssuedTokensForConsumer", reflect.TypeOf((*MockSecretsStore)(nil).ExpireSecretBackendIssuedTokensForConsumer), arg0) +} + // GetOwnedSecretMetadataAsApp mocks base method. func (m *MockSecretsStore) GetOwnedSecretMetadataAsApp(arg0 names.ApplicationTag, arg1 *secrets.URI) (*secrets.SecretMetadataOwnerIdent, error) { m.ctrl.T.Helper() @@ -734,6 +763,51 @@ func (mr *MockSecretsStoreMockRecorder) ListModelSecrets(arg0 any) *gomock.Call return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListModelSecrets", reflect.TypeOf((*MockSecretsStore)(nil).ListModelSecrets), arg0) } +// ListReservedSecrets mocks base method. +func (m *MockSecretsStore) ListReservedSecrets(arg0 []names.Tag) ([]*secrets.URI, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListReservedSecrets", arg0) + ret0, _ := ret[0].([]*secrets.URI) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListReservedSecrets indicates an expected call of ListReservedSecrets. +func (mr *MockSecretsStoreMockRecorder) ListReservedSecrets(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListReservedSecrets", reflect.TypeOf((*MockSecretsStore)(nil).ListReservedSecrets), arg0) +} + +// ListSecretBackendIssuedTokenUntil mocks base method. 
+func (m *MockSecretsStore) ListSecretBackendIssuedTokenUntil(arg0 time.Time) ([]state.SecretBackendIssuedToken, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListSecretBackendIssuedTokenUntil", arg0) + ret0, _ := ret[0].([]state.SecretBackendIssuedToken) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListSecretBackendIssuedTokenUntil indicates an expected call of ListSecretBackendIssuedTokenUntil. +func (mr *MockSecretsStoreMockRecorder) ListSecretBackendIssuedTokenUntil(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSecretBackendIssuedTokenUntil", reflect.TypeOf((*MockSecretsStore)(nil).ListSecretBackendIssuedTokenUntil), arg0) +} + +// ListSecretBackendIssuedTokenUntilForConsumer mocks base method. +func (m *MockSecretsStore) ListSecretBackendIssuedTokenUntilForConsumer(arg0 time.Time, arg1 names.Tag) ([]state.SecretBackendIssuedToken, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListSecretBackendIssuedTokenUntilForConsumer", arg0, arg1) + ret0, _ := ret[0].([]state.SecretBackendIssuedToken) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListSecretBackendIssuedTokenUntilForConsumer indicates an expected call of ListSecretBackendIssuedTokenUntilForConsumer. +func (mr *MockSecretsStoreMockRecorder) ListSecretBackendIssuedTokenUntilForConsumer(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSecretBackendIssuedTokenUntilForConsumer", reflect.TypeOf((*MockSecretsStore)(nil).ListSecretBackendIssuedTokenUntilForConsumer), arg0, arg1) +} + // ListSecretRevisions mocks base method. 
func (m *MockSecretsStore) ListSecretRevisions(arg0 *secrets.URI) ([]*secrets.SecretRevisionMetadata, error) { m.ctrl.T.Helper() @@ -779,6 +853,63 @@ func (mr *MockSecretsStoreMockRecorder) ListUnusedSecretRevisions(arg0 any) *gom return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUnusedSecretRevisions", reflect.TypeOf((*MockSecretsStore)(nil).ListUnusedSecretRevisions), arg0) } +// NextSecretBackendIssuedTokenExpiry mocks base method. +func (m *MockSecretsStore) NextSecretBackendIssuedTokenExpiry() (time.Time, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NextSecretBackendIssuedTokenExpiry") + ret0, _ := ret[0].(time.Time) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NextSecretBackendIssuedTokenExpiry indicates an expected call of NextSecretBackendIssuedTokenExpiry. +func (mr *MockSecretsStoreMockRecorder) NextSecretBackendIssuedTokenExpiry() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextSecretBackendIssuedTokenExpiry", reflect.TypeOf((*MockSecretsStore)(nil).NextSecretBackendIssuedTokenExpiry)) +} + +// RemoveSecretBackendIssuedTokens mocks base method. +func (m *MockSecretsStore) RemoveSecretBackendIssuedTokens(arg0 []string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveSecretBackendIssuedTokens", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveSecretBackendIssuedTokens indicates an expected call of RemoveSecretBackendIssuedTokens. +func (mr *MockSecretsStoreMockRecorder) RemoveSecretBackendIssuedTokens(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveSecretBackendIssuedTokens", reflect.TypeOf((*MockSecretsStore)(nil).RemoveSecretBackendIssuedTokens), arg0) +} + +// RemoveSecretReservations mocks base method. 
+func (m *MockSecretsStore) RemoveSecretReservations(arg0 names.Tag) state.ModelOperation { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveSecretReservations", arg0) + ret0, _ := ret[0].(state.ModelOperation) + return ret0 +} + +// RemoveSecretReservations indicates an expected call of RemoveSecretReservations. +func (mr *MockSecretsStoreMockRecorder) RemoveSecretReservations(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveSecretReservations", reflect.TypeOf((*MockSecretsStore)(nil).RemoveSecretReservations), arg0) +} + +// ReserveSecret mocks base method. +func (m *MockSecretsStore) ReserveSecret(arg0 *secrets.URI, arg1 names.Tag) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReserveSecret", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ReserveSecret indicates an expected call of ReserveSecret. +func (mr *MockSecretsStoreMockRecorder) ReserveSecret(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReserveSecret", reflect.TypeOf((*MockSecretsStore)(nil).ReserveSecret), arg0, arg1) +} + // SecretGrants mocks base method. func (m *MockSecretsStore) SecretGrants(arg0 *secrets.URI, arg1 secrets.SecretRole) ([]secrets.AccessInfo, error) { m.ctrl.T.Helper() @@ -853,3 +984,17 @@ func (mr *MockSecretsStoreMockRecorder) WatchRevisionsToPrune(arg0 any) *gomock. mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WatchRevisionsToPrune", reflect.TypeOf((*MockSecretsStore)(nil).WatchRevisionsToPrune), arg0) } + +// WatchSecretBackendIssuedTokenExpiry mocks base method. 
+func (m *MockSecretsStore) WatchSecretBackendIssuedTokenExpiry() state.StringsWatcher { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WatchSecretBackendIssuedTokenExpiry") + ret0, _ := ret[0].(state.StringsWatcher) + return ret0 +} + +// WatchSecretBackendIssuedTokenExpiry indicates an expected call of WatchSecretBackendIssuedTokenExpiry. +func (mr *MockSecretsStoreMockRecorder) WatchSecretBackendIssuedTokenExpiry() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WatchSecretBackendIssuedTokenExpiry", reflect.TypeOf((*MockSecretsStore)(nil).WatchSecretBackendIssuedTokenExpiry)) +}
apiserver/facades/client/applicationoffers/base.go+15 −5 modified@@ -72,6 +72,19 @@ func (api *BaseAPI) modelForName(modelName, ownerName string) (Model, string, bo return model, modelPath, model != nil, nil } +// resolveUserDisplayName resolves the display name of a user. +// External users return an empty display name. +func (api *BaseAPI) resolveUserDisplayName(backend Backend, user names.UserTag) (string, error) { + u, err := backend.User(user) + if errors.Is(err, errors.NotFound) { + // External users don't have display names. + return "", nil + } else if err != nil { + return "", errors.Trace(err) + } + return u.DisplayName(), nil +} + // applicationOffersFromModel gets details about remote applications that match given filters. func (api *BaseAPI) applicationOffersFromModel( modelUUID string, @@ -103,12 +116,9 @@ func (api *BaseAPI) applicationOffersFromModel( return nil, errors.Trace(err) } - var apiUserDisplayName string - u, err := backend.User(user) - if err != nil && !errors.Is(err, errors.NotFound) { + apiUserDisplayName, err := api.resolveUserDisplayName(backend, user) + if err != nil { return nil, errors.Trace(err) - } else if err == nil { - apiUserDisplayName = u.DisplayName() } var results []params.ApplicationOfferAdminDetailsV5
apiserver/facades/client/client/status.go+53 −12 modified@@ -228,7 +228,6 @@ func (c *Client) FullStatus(args params.StatusParams) (params.FullStatus, error) var noStatus params.FullStatus var context statusContext - context.cachedModel = c.modelCache context.appCharmCache = map[string]string{} m, err := c.stateAccessor.Model() @@ -678,7 +677,6 @@ type applicationStatusInfo struct { type statusContext struct { providerType string - cachedModel *cache.Model model *state.Model status *state.ModelStatus presence common.ModelPresenceContext @@ -1417,21 +1415,64 @@ func (context *statusContext) processApplication(application *state.Application) return processedStatus } units := context.allAppsUnitsCharmBindings.units[application.Name()] + + expectWorkload := false + if context.model.Type() == state.ModelTypeCAAS { + if charm.MetaFormat(applicationCharm) == charm.FormatV1 { + cm, err := context.model.CAASModel() + if err != nil { + processedStatus.Err = apiservererrors.ServerError(err) + return processedStatus + } + _, err = cm.PodSpec(application.ApplicationTag()) + if err != nil && !errors.Is(err, errors.NotFound) { + processedStatus.Err = apiservererrors.ServerError(err) + return processedStatus + } + expectWorkload = err == nil + } + } if application.IsPrincipal() { - expectWorkload, err := state.CheckApplicationExpectsWorkload(context.model, application.Name()) - if err != nil { - return params.ApplicationStatus{Err: apiservererrors.ServerError(err)} + processedStatus.Units = context.processUnits( + units, applicationCharm.URL(), expectWorkload) + } + + applicationStatus, err := application.Status() + if err != nil && !errors.Is(err, errors.NotFound) { + processedStatus.Err = apiservererrors.ServerError(err) + return processedStatus + } + if applicationStatus.Status == status.Unset { + // Derive the application status from the non-presence affected unit + // workload status + statuses := make([]status.StatusInfo, 0, len(units)) + for unitName := range units { + 
status, err := context.status.UnitWorkload(unitName, expectWorkload) + if errors.Is(err, errors.NotFound) { + continue + } else if err != nil { + processedStatus.Err = apiservererrors.ServerError(err) + return processedStatus + } + statuses = append(statuses, status) + } + derivedApplicationStatus := status.DeriveStatus(statuses) + if derivedApplicationStatus.Since == nil { + derivedApplicationStatus.Since = applicationStatus.Since } - processedStatus.Units = context.processUnits(units, applicationCharm.URL(), expectWorkload) + applicationStatus = derivedApplicationStatus } - // If for whatever reason the application isn't yet in the cache, - // we have an unknown status. - applicationStatus := status.StatusInfo{Status: status.Unknown} - cachedApp, err := context.cachedModel.Application(application.Name()) - if err == nil { - applicationStatus = cachedApp.DisplayStatus() + if context.model.Type() == state.ModelTypeCAAS { + operatorStatus, err := application.OperatorStatus() + if err != nil && !errors.Is(err, errors.NotFound) { + processedStatus.Err = apiservererrors.ServerError(err) + return processedStatus + } + applicationStatus = status.ApplicationDisplayStatus( + applicationStatus, operatorStatus, expectWorkload) } + processedStatus.Status.Status = applicationStatus.Status.String() processedStatus.Status.Info = applicationStatus.Message processedStatus.Status.Data = applicationStatus.Data
apiserver/facades/client/modelconfig/mocks/secretsprovider.go+33 −4 modified@@ -42,6 +42,21 @@ func (m *MockSecretBackendProvider) EXPECT() *MockSecretBackendProviderMockRecor return m.recorder } +// CleanupIssuedTokens mocks base method. +func (m *MockSecretBackendProvider) CleanupIssuedTokens(arg0 *provider.ModelBackendConfig, arg1 []string) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CleanupIssuedTokens", arg0, arg1) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CleanupIssuedTokens indicates an expected call of CleanupIssuedTokens. +func (mr *MockSecretBackendProviderMockRecorder) CleanupIssuedTokens(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupIssuedTokens", reflect.TypeOf((*MockSecretBackendProvider)(nil).CleanupIssuedTokens), arg0, arg1) +} + // CleanupModel mocks base method. func (m *MockSecretBackendProvider) CleanupModel(arg0 *provider.ModelBackendConfig) error { m.ctrl.T.Helper() @@ -84,6 +99,20 @@ func (mr *MockSecretBackendProviderMockRecorder) Initialise(arg0 any) *gomock.Ca return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialise", reflect.TypeOf((*MockSecretBackendProvider)(nil).Initialise), arg0) } +// IssuesTokens mocks base method. +func (m *MockSecretBackendProvider) IssuesTokens() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IssuesTokens") + ret0, _ := ret[0].(bool) + return ret0 +} + +// IssuesTokens indicates an expected call of IssuesTokens. +func (mr *MockSecretBackendProviderMockRecorder) IssuesTokens() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IssuesTokens", reflect.TypeOf((*MockSecretBackendProvider)(nil).IssuesTokens)) +} + // NewBackend mocks base method. 
func (m *MockSecretBackendProvider) NewBackend(arg0 *provider.ModelBackendConfig) (provider.SecretsBackend, error) { m.ctrl.T.Helper() @@ -100,18 +129,18 @@ func (mr *MockSecretBackendProviderMockRecorder) NewBackend(arg0 any) *gomock.Ca } // RestrictedConfig mocks base method. -func (m *MockSecretBackendProvider) RestrictedConfig(arg0 *provider.ModelBackendConfig, arg1, arg2 bool, arg3 names.Tag, arg4, arg5 provider.SecretRevisions) (*provider.BackendConfig, error) { +func (m *MockSecretBackendProvider) RestrictedConfig(arg0 *provider.ModelBackendConfig, arg1, arg2 bool, arg3 string, arg4 names.Tag, arg5 []string, arg6, arg7 provider.SecretRevisions) (*provider.BackendConfig, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RestrictedConfig", arg0, arg1, arg2, arg3, arg4, arg5) + ret := m.ctrl.Call(m, "RestrictedConfig", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) ret0, _ := ret[0].(*provider.BackendConfig) ret1, _ := ret[1].(error) return ret0, ret1 } // RestrictedConfig indicates an expected call of RestrictedConfig. -func (mr *MockSecretBackendProviderMockRecorder) RestrictedConfig(arg0, arg1, arg2, arg3, arg4, arg5 any) *gomock.Call { +func (mr *MockSecretBackendProviderMockRecorder) RestrictedConfig(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestrictedConfig", reflect.TypeOf((*MockSecretBackendProvider)(nil).RestrictedConfig), arg0, arg1, arg2, arg3, arg4, arg5) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestrictedConfig", reflect.TypeOf((*MockSecretBackendProvider)(nil).RestrictedConfig), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) } // Type mocks base method.
apiserver/facades/client/secretbackends/mocks/provider_mock.go+33 −4 modified@@ -42,6 +42,21 @@ func (m *MockSecretBackendProvider) EXPECT() *MockSecretBackendProviderMockRecor return m.recorder } +// CleanupIssuedTokens mocks base method. +func (m *MockSecretBackendProvider) CleanupIssuedTokens(arg0 *provider.ModelBackendConfig, arg1 []string) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CleanupIssuedTokens", arg0, arg1) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CleanupIssuedTokens indicates an expected call of CleanupIssuedTokens. +func (mr *MockSecretBackendProviderMockRecorder) CleanupIssuedTokens(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupIssuedTokens", reflect.TypeOf((*MockSecretBackendProvider)(nil).CleanupIssuedTokens), arg0, arg1) +} + // CleanupModel mocks base method. func (m *MockSecretBackendProvider) CleanupModel(arg0 *provider.ModelBackendConfig) error { m.ctrl.T.Helper() @@ -84,6 +99,20 @@ func (mr *MockSecretBackendProviderMockRecorder) Initialise(arg0 any) *gomock.Ca return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialise", reflect.TypeOf((*MockSecretBackendProvider)(nil).Initialise), arg0) } +// IssuesTokens mocks base method. +func (m *MockSecretBackendProvider) IssuesTokens() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IssuesTokens") + ret0, _ := ret[0].(bool) + return ret0 +} + +// IssuesTokens indicates an expected call of IssuesTokens. +func (mr *MockSecretBackendProviderMockRecorder) IssuesTokens() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IssuesTokens", reflect.TypeOf((*MockSecretBackendProvider)(nil).IssuesTokens)) +} + // NewBackend mocks base method. 
func (m *MockSecretBackendProvider) NewBackend(arg0 *provider.ModelBackendConfig) (provider.SecretsBackend, error) { m.ctrl.T.Helper() @@ -100,18 +129,18 @@ func (mr *MockSecretBackendProviderMockRecorder) NewBackend(arg0 any) *gomock.Ca } // RestrictedConfig mocks base method. -func (m *MockSecretBackendProvider) RestrictedConfig(arg0 *provider.ModelBackendConfig, arg1, arg2 bool, arg3 names.Tag, arg4, arg5 provider.SecretRevisions) (*provider.BackendConfig, error) { +func (m *MockSecretBackendProvider) RestrictedConfig(arg0 *provider.ModelBackendConfig, arg1, arg2 bool, arg3 string, arg4 names.Tag, arg5 []string, arg6, arg7 provider.SecretRevisions) (*provider.BackendConfig, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RestrictedConfig", arg0, arg1, arg2, arg3, arg4, arg5) + ret := m.ctrl.Call(m, "RestrictedConfig", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) ret0, _ := ret[0].(*provider.BackendConfig) ret1, _ := ret[1].(error) return ret0, ret1 } // RestrictedConfig indicates an expected call of RestrictedConfig. -func (mr *MockSecretBackendProviderMockRecorder) RestrictedConfig(arg0, arg1, arg2, arg3, arg4, arg5 any) *gomock.Call { +func (mr *MockSecretBackendProviderMockRecorder) RestrictedConfig(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestrictedConfig", reflect.TypeOf((*MockSecretBackendProvider)(nil).RestrictedConfig), arg0, arg1, arg2, arg3, arg4, arg5) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestrictedConfig", reflect.TypeOf((*MockSecretBackendProvider)(nil).RestrictedConfig), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) } // Type mocks base method.
apiserver/facades/client/secretbackends/state.go+2 −0 modified@@ -22,6 +22,8 @@ type SecretsBackendState interface { GetSecretBackendByID(ID string) (*secrets.SecretBackend, error) } +// SecretsState provides the secrets backends client facade access to list model +// secrets. type SecretsState interface { ListModelSecrets(all bool) (map[string]set.Strings, error) }
apiserver/facades/client/secrets/mocks/secretsbackend.go+33 −4 modified@@ -123,6 +123,21 @@ func (m *MockSecretBackendProvider) EXPECT() *MockSecretBackendProviderMockRecor return m.recorder } +// CleanupIssuedTokens mocks base method. +func (m *MockSecretBackendProvider) CleanupIssuedTokens(arg0 *provider.ModelBackendConfig, arg1 []string) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CleanupIssuedTokens", arg0, arg1) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CleanupIssuedTokens indicates an expected call of CleanupIssuedTokens. +func (mr *MockSecretBackendProviderMockRecorder) CleanupIssuedTokens(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupIssuedTokens", reflect.TypeOf((*MockSecretBackendProvider)(nil).CleanupIssuedTokens), arg0, arg1) +} + // CleanupModel mocks base method. func (m *MockSecretBackendProvider) CleanupModel(arg0 *provider.ModelBackendConfig) error { m.ctrl.T.Helper() @@ -165,6 +180,20 @@ func (mr *MockSecretBackendProviderMockRecorder) Initialise(arg0 any) *gomock.Ca return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialise", reflect.TypeOf((*MockSecretBackendProvider)(nil).Initialise), arg0) } +// IssuesTokens mocks base method. +func (m *MockSecretBackendProvider) IssuesTokens() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IssuesTokens") + ret0, _ := ret[0].(bool) + return ret0 +} + +// IssuesTokens indicates an expected call of IssuesTokens. +func (mr *MockSecretBackendProviderMockRecorder) IssuesTokens() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IssuesTokens", reflect.TypeOf((*MockSecretBackendProvider)(nil).IssuesTokens)) +} + // NewBackend mocks base method. 
func (m *MockSecretBackendProvider) NewBackend(arg0 *provider.ModelBackendConfig) (provider.SecretsBackend, error) { m.ctrl.T.Helper() @@ -181,18 +210,18 @@ func (mr *MockSecretBackendProviderMockRecorder) NewBackend(arg0 any) *gomock.Ca } // RestrictedConfig mocks base method. -func (m *MockSecretBackendProvider) RestrictedConfig(arg0 *provider.ModelBackendConfig, arg1, arg2 bool, arg3 names.Tag, arg4, arg5 provider.SecretRevisions) (*provider.BackendConfig, error) { +func (m *MockSecretBackendProvider) RestrictedConfig(arg0 *provider.ModelBackendConfig, arg1, arg2 bool, arg3 string, arg4 names.Tag, arg5 []string, arg6, arg7 provider.SecretRevisions) (*provider.BackendConfig, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RestrictedConfig", arg0, arg1, arg2, arg3, arg4, arg5) + ret := m.ctrl.Call(m, "RestrictedConfig", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) ret0, _ := ret[0].(*provider.BackendConfig) ret1, _ := ret[1].(error) return ret0, ret1 } // RestrictedConfig indicates an expected call of RestrictedConfig. -func (mr *MockSecretBackendProviderMockRecorder) RestrictedConfig(arg0, arg1, arg2, arg3, arg4, arg5 any) *gomock.Call { +func (mr *MockSecretBackendProviderMockRecorder) RestrictedConfig(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestrictedConfig", reflect.TypeOf((*MockSecretBackendProvider)(nil).RestrictedConfig), arg0, arg1, arg2, arg3, arg4, arg5) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestrictedConfig", reflect.TypeOf((*MockSecretBackendProvider)(nil).RestrictedConfig), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) } // Type mocks base method.
apiserver/facades/client/secrets/mocks/secretsstate.go+14 −0 modified@@ -167,6 +167,20 @@ func (mr *MockSecretsStateMockRecorder) ListUnusedSecretRevisions(arg0 any) *gom return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUnusedSecretRevisions", reflect.TypeOf((*MockSecretsState)(nil).ListUnusedSecretRevisions), arg0) } +// ReserveSecret mocks base method. +func (m *MockSecretsState) ReserveSecret(arg0 *secrets.URI, arg1 names.Tag) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReserveSecret", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ReserveSecret indicates an expected call of ReserveSecret. +func (mr *MockSecretsStateMockRecorder) ReserveSecret(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReserveSecret", reflect.TypeOf((*MockSecretsState)(nil).ReserveSecret), arg0, arg1) +} + // SecretGrants mocks base method. func (m *MockSecretsState) SecretGrants(arg0 *secrets.URI, arg1 secrets.SecretRole) ([]secrets.AccessInfo, error) { m.ctrl.T.Helper()
apiserver/facades/client/secrets/package_test.go+2 −1 modified@@ -11,6 +11,7 @@ import ( apiservererrors "github.com/juju/juju/apiserver/errors" "github.com/juju/juju/apiserver/facade" + coresecrets "github.com/juju/juju/core/secrets" "github.com/juju/juju/secrets/provider" coretesting "github.com/juju/juju/testing" ) @@ -27,7 +28,7 @@ func NewTestAPI( secretsState SecretsState, secretsConsumer SecretsConsumer, adminBackendConfigGetter func() (*provider.ModelBackendConfigInfo, error), - backendConfigGetterForUserSecretsWrite func(backendID string) (*provider.ModelBackendConfigInfo, error), + backendConfigGetterForUserSecretsWrite func(string, []*coresecrets.URI) (*provider.ModelBackendConfigInfo, error), backendGetter func(*provider.ModelBackendConfig) (provider.SecretsBackend, error), ) (*SecretsAPI, error) { if !authorizer.AuthClient() {
apiserver/facades/client/secrets/register.go+8 −2 modified@@ -11,6 +11,7 @@ import ( "github.com/juju/juju/apiserver/common/secrets" apiservererrors "github.com/juju/juju/apiserver/errors" "github.com/juju/juju/apiserver/facade" + coresecrets "github.com/juju/juju/core/secrets" "github.com/juju/juju/secrets/provider" "github.com/juju/juju/state" ) @@ -50,10 +51,15 @@ func newSecretsAPI(context facade.Context) (*SecretsAPI, error) { adminBackendConfigGetter := func() (*provider.ModelBackendConfigInfo, error) { return secrets.AdminBackendConfigInfo(secrets.SecretsModel(model)) } - backendConfigGetterForUserSecretsWrite := func(backendID string) (*provider.ModelBackendConfigInfo, error) { + backendConfigGetterForUserSecretsWrite := func( + backendID string, only []*coresecrets.URI, + ) (*provider.ModelBackendConfigInfo, error) { // User secrets are owned by the model. authTag := model.ModelTag() - return secrets.BackendConfigInfo(secrets.SecretsModel(model), true, []string{backendID}, false, authTag, leadershipChecker) + return secrets.BackendConfigInfo( + secrets.SecretsModel(model), true, []string{backendID}, false, + authTag, leadershipChecker, only, + ) } backendGetter := func(cfg *provider.ModelBackendConfig) (provider.SecretsBackend, error) {
apiserver/facades/client/secrets/secrets.go+89 −51 modified@@ -41,7 +41,7 @@ type SecretsAPI struct { secretsConsumer SecretsConsumer adminBackendConfigGetter func() (*provider.ModelBackendConfigInfo, error) - backendConfigGetterForUserSecretsWrite func(backendID string) (*provider.ModelBackendConfigInfo, error) + backendConfigGetterForUserSecretsWrite func(backendID string, only []*coresecrets.URI) (*provider.ModelBackendConfigInfo, error) backendGetter func(*provider.ModelBackendConfig) (provider.SecretsBackend, error) } @@ -81,17 +81,16 @@ func (s *SecretsAPI) ListSecrets(arg params.ListSecretsArgs) (params.ListSecretR return result, errors.Trace(err) } } - var uri *coresecrets.URI + filter := state.SecretsFilter{} if arg.Filter.URI != nil { - var err error - uri, err = coresecrets.ParseURI(*arg.Filter.URI) + uri, err := coresecrets.ParseURI(*arg.Filter.URI) if err != nil { return params.ListSecretResults{}, errors.Trace(err) } + filter.URIs = append(filter.URIs, uri) } - filter := state.SecretsFilter{ - URI: uri, - Label: arg.Filter.Label, + if arg.Filter.Label != nil { + filter.Labels = append(filter.Labels, *arg.Filter.Label) } if arg.Filter.OwnerTag != nil { tag, err := names.ParseTag(*arg.Filter.OwnerTag) @@ -241,13 +240,18 @@ func (s *SecretsAPI) secretContentFromBackend(uri *coresecrets.URI, rev int) (co } } -func (s *SecretsAPI) getBackendForUserSecretsWrite() (provider.SecretsBackend, error) { +// getBackendForUserSecretsWrite returns the secret backend for user secrets, +// optionally limited to the list of secrets if a non-zero number is supplied. 
+func (s *SecretsAPI) getBackendForUserSecretsWrite( + only []*coresecrets.URI, +) (provider.SecretsBackend, error) { if s.activeBackendID == "" { if err := s.getBackendInfo(); err != nil { return nil, errors.Trace(err) } } - cfgInfo, err := s.backendConfigGetterForUserSecretsWrite(s.activeBackendID) + cfgInfo, err := s.backendConfigGetterForUserSecretsWrite( + s.activeBackendID, only) if err != nil { return nil, errors.Trace(err) } @@ -270,13 +274,53 @@ func (s *SecretsAPI) CreateSecrets(args params.CreateSecretArgs) (params.StringR if err := s.checkCanWrite(); err != nil { return result, errors.Trace(err) } - backend, err := s.getBackendForUserSecretsWrite() + + // Validate secrets before generating a secret URI. + for i, arg := range args.Args { + var err error + if arg.URI != nil { + err = errors.NotValidf( + "secret uri cannot be set on user secret create", + ) + } + if arg.OwnerTag != "" && arg.OwnerTag != s.modelUUID { + err = errors.NotValidf("owner tag %q", arg.OwnerTag) + } + if len(arg.Content.Data) == 0 { + err = errors.NotValidf("empty secret value") + } + if err != nil { + result.Results[i].Error = apiservererrors.ServerError(err) + } + } + + secretOwner := names.NewModelTag(s.modelUUID) + uris := make([]*coresecrets.URI, len(args.Args)) + // Generate secret URIs + for i := range args.Args { + if result.Results[i].Error != nil { + continue + } + + uri := coresecrets.NewURI() + err := s.secretsState.ReserveSecret(uri, secretOwner) + if err != nil { + result.Results[i].Error = apiservererrors.ServerError(err) + continue + } + uris[i] = uri + } + + backend, err := s.getBackendForUserSecretsWrite(uris) if err != nil { return result, errors.Trace(err) } for i, arg := range args.Args { - ID, err := s.createSecret(backend, arg) - result.Results[i].Result = ID + if result.Results[i].Error != nil { + continue + } + var err error + result.Results[i].Result, err = s.createSecret(backend, arg, uris[i]) if errors.Is(err, state.LabelExists) { err = 
errors.AlreadyExistsf("secret with name %q", *arg.Label) } @@ -292,44 +336,32 @@ func (t successfulToken) Check() error { return nil } -func (s *SecretsAPI) createSecret(backend provider.SecretsBackend, arg params.CreateSecretArg) (_ string, errOut error) { - if arg.OwnerTag != "" && arg.OwnerTag != s.modelUUID { - return "", errors.NotValidf("owner tag %q", arg.OwnerTag) - } +func (s *SecretsAPI) createSecret( + backend provider.SecretsBackend, + arg params.CreateSecretArg, + uri *coresecrets.URI, +) (_ string, errOut error) { secretOwner := names.NewModelTag(s.modelUUID) - var uri *coresecrets.URI - var err error - if arg.URI != nil { - uri, err = coresecrets.ParseURI(*arg.URI) - if err != nil { - return "", errors.Trace(err) - } - } else { - uri = coresecrets.NewURI() - } - if len(arg.Content.Data) == 0 { - return "", errors.NotValidf("empty secret value") - } v := coresecrets.NewSecretValue(arg.Content.Data) checksum, err := v.Checksum() if err != nil { return "", errors.Annotate(err, "calculating secret checksum") } arg.UpsertSecretArg.Content.Checksum = checksum + revId, err := backend.SaveContent(context.TODO(), uri, 1, coresecrets.NewSecretValue(arg.Content.Data)) if err != nil && !errors.Is(err, errors.NotSupported) { return "", errors.Trace(err) - } - if err == nil { + } else if err == nil { defer func() { if errOut != nil { // If we failed to create the secret, we should delete the // secret value from the backend. 
- if err2 := backend.DeleteContent(context.TODO(), revId); err2 != nil && - !errors.Is(err2, errors.NotSupported) && - !errors.Is(err2, errors.NotFound) { - logger.Errorf("failed to delete secret %q: %v", revId, err2) + if err := backend.DeleteContent(context.TODO(), revId); err != nil && + !errors.Is(err, errors.NotSupported) && + !errors.Is(err, errors.NotFound) { + logger.Errorf("failed to delete secret %q: %v", revId, err) } } }() @@ -400,12 +432,26 @@ func (s *SecretsAPI) UpdateSecrets(args params.UpdateUserSecretArgs) (params.Err if err := s.checkCanWrite(); err != nil { return result, errors.Trace(err) } - backend, err := s.getBackendForUserSecretsWrite() + + uris := make([]*coresecrets.URI, len(args.Args)) + for i, arg := range args.Args { + var err error + if arg.URI != "" { + uris[i], err = coresecrets.ParseURI(arg.URI) + } else { + uris[i], err = s.getSecretURI(s.modelUUID, arg.ExistingLabel) + } + if err != nil { + result.Results[i].Error = apiservererrors.ServerError(err) + } + } + + backend, err := s.getBackendForUserSecretsWrite(uris) if err != nil { return result, errors.Trace(err) } for i, arg := range args.Args { - err := s.updateSecret(backend, arg) + err := s.updateSecret(backend, arg, uris[i]) if errors.Is(err, state.LabelExists) { err = errors.AlreadyExistsf("secret with name %q", *arg.Label) } @@ -414,22 +460,14 @@ func (s *SecretsAPI) UpdateSecrets(args params.UpdateUserSecretArgs) (params.Err return result, nil } -func (s *SecretsAPI) updateSecret(backend provider.SecretsBackend, arg params.UpdateUserSecretArg) (errOut error) { +func (s *SecretsAPI) updateSecret( + backend provider.SecretsBackend, + arg params.UpdateUserSecretArg, + uri *coresecrets.URI, +) (errOut error) { if err := arg.Validate(); err != nil { return errors.Trace(err) } - var ( - uri *coresecrets.URI - err error - ) - if arg.URI != "" { - uri, err = coresecrets.ParseURI(arg.URI) - } else { - uri, err = s.getSecretURI(s.modelUUID, arg.ExistingLabel) - } - if err != nil { - 
return errors.Trace(err) - } md, err := s.secretsState.GetSecret(uri) if err != nil { @@ -557,7 +595,7 @@ type grantRevokeFunc func(*coresecrets.URI, state.SecretAccessParams) error func (s *SecretsAPI) getSecretURI(modelUUID, label string) (*coresecrets.URI, error) { results, err := s.secretsState.ListSecrets(state.SecretsFilter{ - Label: &label, + Labels: []string{label}, OwnerTags: []names.Tag{names.NewModelTag(modelUUID)}, }) if err != nil {
apiserver/facades/client/secrets/secrets_test.go+14 −9 modified@@ -68,8 +68,8 @@ func adminBackendConfigGetter() (*provider.ModelBackendConfigInfo, error) { }, nil } -func backendConfigGetterForUserSecretsWrite(c *gc.C) func(backendID string) (*provider.ModelBackendConfigInfo, error) { - return func(backendID string) (*provider.ModelBackendConfigInfo, error) { +func backendConfigGetterForUserSecretsWrite(c *gc.C) func(string, []*coresecrets.URI) (*provider.ModelBackendConfigInfo, error) { + return func(backendID string, _ []*coresecrets.URI) (*provider.ModelBackendConfigInfo, error) { c.Assert(backendID, gc.Equals, "backend-id") return &provider.ModelBackendConfigInfo{ ActiveID: "backend-id", @@ -326,13 +326,19 @@ func (s *SecretsSuite) assertCreateSecrets(c *gc.C, isInternal bool, finalStepFa s.expectAuthClient() s.authorizer.EXPECT().HasPermission(permission.WriteAccess, coretesting.ModelTag).Return(nil) - uri := coresecrets.NewURI() - uriStrPtr := ptr(uri.String()) + var uri *coresecrets.URI + s.secretsState.EXPECT().ReserveSecret( + gomock.Any(), coretesting.ModelTag, + ).DoAndReturn(func(arg1 *coresecrets.URI, owner names.Tag) error { + uri = arg1 + return nil + }) + if isInternal { - s.secretsBackend.EXPECT().SaveContent(gomock.Any(), uri, 1, coresecrets.NewSecretValue(map[string]string{"foo": "bar"})). + s.secretsBackend.EXPECT().SaveContent(gomock.Any(), gomock.Any(), 1, coresecrets.NewSecretValue(map[string]string{"foo": "bar"})). Return("", errors.NotSupportedf("not supported")) } else { - s.secretsBackend.EXPECT().SaveContent(gomock.Any(), uri, 1, coresecrets.NewSecretValue(map[string]string{"foo": "bar"})). + s.secretsBackend.EXPECT().SaveContent(gomock.Any(), gomock.Any(), 1, coresecrets.NewSecretValue(map[string]string{"foo": "bar"})). 
Return("rev-id", nil) } s.secretsState.EXPECT().CreateSecret(gomock.Any(), gomock.Any()).DoAndReturn(func(arg1 *coresecrets.URI, params state.CreateSecretParams) (*coresecrets.SecretMetadata, error) { @@ -381,7 +387,6 @@ func (s *SecretsSuite) assertCreateSecrets(c *gc.C, isInternal bool, finalStepFa Args: []params.CreateSecretArg{ { OwnerTag: coretesting.ModelTag.Id(), - URI: uriStrPtr, UpsertSecretArg: params.UpsertSecretArg{ Description: ptr("this is a user secret."), Label: ptr("label"), @@ -425,7 +430,7 @@ func (s *SecretsSuite) assertUpdateSecrets(c *gc.C, uri *coresecrets.URI, isInte existingLabel = "my-secret" uri = coresecrets.NewURI() s.secretsState.EXPECT().ListSecrets(state.SecretsFilter{ - Label: ptr("my-secret"), + Labels: []string{"my-secret"}, OwnerTags: []names.Tag{coretesting.ModelTag}, }).Return([]*coresecrets.SecretMetadata{{ URI: uri, @@ -847,7 +852,7 @@ func (s *SecretsSuite) TestGrantSecretByName(c *gc.C) { uri := coresecrets.NewURI() s.secretsState.EXPECT().ListSecrets(state.SecretsFilter{ - Label: ptr("my-secret"), + Labels: []string{"my-secret"}, OwnerTags: []names.Tag{coretesting.ModelTag}, }).Return([]*coresecrets.SecretMetadata{{ URI: uri,
apiserver/facades/client/secrets/state.go+1 −0 modified@@ -12,6 +12,7 @@ import ( // SecretsState instances provide secret apis. type SecretsState interface { + ReserveSecret(*secrets.URI, names.Tag) error CreateSecret(*secrets.URI, state.CreateSecretParams) (*secrets.SecretMetadata, error) UpdateSecret(*secrets.URI, state.UpdateSecretParams) (*secrets.SecretMetadata, error) GetSecret(uri *secrets.URI) (*secrets.SecretMetadata, error)
apiserver/facades/controller/crossmodelsecrets/register.go+1 −1 modified@@ -52,7 +52,7 @@ func newStateCrossModelSecretsAPI(ctx facade.Context) (*CrossModelSecretsAPI, er return nil, errors.Trace(err) } defer closer.Release() - return secrets.BackendConfigInfo(secrets.SecretsModel(model), sameController, []string{backendID}, false, consumer, leadershipChecker) + return secrets.BackendConfigInfo(secrets.SecretsModel(model), sameController, []string{backendID}, false, consumer, leadershipChecker, nil) } secretInfoGetter := func(modelUUID string) (SecretsState, SecretsConsumer, func() bool, error) { st, err := ctx.StatePool().Get(modelUUID)
apiserver/facades/controller/secretbackendmanager/mocks/secretsprovider.go+33 −4 modified@@ -40,6 +40,21 @@ func (m *MockSecretBackendProvider) EXPECT() *MockSecretBackendProviderMockRecor return m.recorder } +// CleanupIssuedTokens mocks base method. +func (m *MockSecretBackendProvider) CleanupIssuedTokens(arg0 *provider.ModelBackendConfig, arg1 []string) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CleanupIssuedTokens", arg0, arg1) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CleanupIssuedTokens indicates an expected call of CleanupIssuedTokens. +func (mr *MockSecretBackendProviderMockRecorder) CleanupIssuedTokens(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupIssuedTokens", reflect.TypeOf((*MockSecretBackendProvider)(nil).CleanupIssuedTokens), arg0, arg1) +} + // CleanupModel mocks base method. func (m *MockSecretBackendProvider) CleanupModel(arg0 *provider.ModelBackendConfig) error { m.ctrl.T.Helper() @@ -82,6 +97,20 @@ func (mr *MockSecretBackendProviderMockRecorder) Initialise(arg0 any) *gomock.Ca return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialise", reflect.TypeOf((*MockSecretBackendProvider)(nil).Initialise), arg0) } +// IssuesTokens mocks base method. +func (m *MockSecretBackendProvider) IssuesTokens() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IssuesTokens") + ret0, _ := ret[0].(bool) + return ret0 +} + +// IssuesTokens indicates an expected call of IssuesTokens. +func (mr *MockSecretBackendProviderMockRecorder) IssuesTokens() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IssuesTokens", reflect.TypeOf((*MockSecretBackendProvider)(nil).IssuesTokens)) +} + // NewBackend mocks base method. 
func (m *MockSecretBackendProvider) NewBackend(arg0 *provider.ModelBackendConfig) (provider.SecretsBackend, error) { m.ctrl.T.Helper() @@ -98,18 +127,18 @@ func (mr *MockSecretBackendProviderMockRecorder) NewBackend(arg0 any) *gomock.Ca } // RestrictedConfig mocks base method. -func (m *MockSecretBackendProvider) RestrictedConfig(arg0 *provider.ModelBackendConfig, arg1, arg2 bool, arg3 names.Tag, arg4, arg5 provider.SecretRevisions) (*provider.BackendConfig, error) { +func (m *MockSecretBackendProvider) RestrictedConfig(arg0 *provider.ModelBackendConfig, arg1, arg2 bool, arg3 string, arg4 names.Tag, arg5 []string, arg6, arg7 provider.SecretRevisions) (*provider.BackendConfig, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RestrictedConfig", arg0, arg1, arg2, arg3, arg4, arg5) + ret := m.ctrl.Call(m, "RestrictedConfig", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) ret0, _ := ret[0].(*provider.BackendConfig) ret1, _ := ret[1].(error) return ret0, ret1 } // RestrictedConfig indicates an expected call of RestrictedConfig. -func (mr *MockSecretBackendProviderMockRecorder) RestrictedConfig(arg0, arg1, arg2, arg3, arg4, arg5 any) *gomock.Call { +func (mr *MockSecretBackendProviderMockRecorder) RestrictedConfig(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestrictedConfig", reflect.TypeOf((*MockSecretBackendProvider)(nil).RestrictedConfig), arg0, arg1, arg2, arg3, arg4, arg5) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestrictedConfig", reflect.TypeOf((*MockSecretBackendProvider)(nil).RestrictedConfig), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) } // Type mocks base method.
apiserver/facades/controller/secretsrevoker/doc.go+6 −0 added@@ -0,0 +1,6 @@ +// Copyright 2025 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// Package secretsrevoker provides the facade for revoking secret backend issued +// tokens. This facade can be removed in Juju 4.0, replaced with a removal job. +package secretsrevoker
apiserver/facades/controller/secretsrevoker/getters.go+14 −0 added@@ -0,0 +1,14 @@ +// Copyright 2025 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package secretsrevoker + +import ( + secretsprovider "github.com/juju/juju/secrets/provider" +) + +// Getters is used for mocks only. +type Getters interface { + BackendConfigGetter() (*secretsprovider.ModelBackendConfigInfo, error) + ProviderGetter(backendType string) (secretsprovider.SecretBackendProvider, error) +}
apiserver/facades/controller/secretsrevoker/mocks_test.go+297 −0 added@@ -0,0 +1,297 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/juju/juju/apiserver/facades/controller/secretsrevoker (interfaces: SecretsState,Getters) +// +// Generated by this command: +// +// mockgen -typed -package secretsrevoker -destination mocks_test.go github.com/juju/juju/apiserver/facades/controller/secretsrevoker SecretsState,Getters +// + +// Package secretsrevoker is a generated GoMock package. +package secretsrevoker + +import ( + reflect "reflect" + time "time" + + provider "github.com/juju/juju/secrets/provider" + state "github.com/juju/juju/state" + gomock "go.uber.org/mock/gomock" +) + +// MockSecretsState is a mock of SecretsState interface. +type MockSecretsState struct { + ctrl *gomock.Controller + recorder *MockSecretsStateMockRecorder +} + +// MockSecretsStateMockRecorder is the mock recorder for MockSecretsState. +type MockSecretsStateMockRecorder struct { + mock *MockSecretsState +} + +// NewMockSecretsState creates a new mock instance. +func NewMockSecretsState(ctrl *gomock.Controller) *MockSecretsState { + mock := &MockSecretsState{ctrl: ctrl} + mock.recorder = &MockSecretsStateMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSecretsState) EXPECT() *MockSecretsStateMockRecorder { + return m.recorder +} + +// ListSecretBackendIssuedTokenUntil mocks base method. +func (m *MockSecretsState) ListSecretBackendIssuedTokenUntil(arg0 time.Time) ([]state.SecretBackendIssuedToken, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListSecretBackendIssuedTokenUntil", arg0) + ret0, _ := ret[0].([]state.SecretBackendIssuedToken) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListSecretBackendIssuedTokenUntil indicates an expected call of ListSecretBackendIssuedTokenUntil. 
+func (mr *MockSecretsStateMockRecorder) ListSecretBackendIssuedTokenUntil(arg0 any) *MockSecretsStateListSecretBackendIssuedTokenUntilCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSecretBackendIssuedTokenUntil", reflect.TypeOf((*MockSecretsState)(nil).ListSecretBackendIssuedTokenUntil), arg0) + return &MockSecretsStateListSecretBackendIssuedTokenUntilCall{Call: call} +} + +// MockSecretsStateListSecretBackendIssuedTokenUntilCall wrap *gomock.Call +type MockSecretsStateListSecretBackendIssuedTokenUntilCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockSecretsStateListSecretBackendIssuedTokenUntilCall) Return(arg0 []state.SecretBackendIssuedToken, arg1 error) *MockSecretsStateListSecretBackendIssuedTokenUntilCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockSecretsStateListSecretBackendIssuedTokenUntilCall) Do(f func(time.Time) ([]state.SecretBackendIssuedToken, error)) *MockSecretsStateListSecretBackendIssuedTokenUntilCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockSecretsStateListSecretBackendIssuedTokenUntilCall) DoAndReturn(f func(time.Time) ([]state.SecretBackendIssuedToken, error)) *MockSecretsStateListSecretBackendIssuedTokenUntilCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// NextSecretBackendIssuedTokenExpiry mocks base method. +func (m *MockSecretsState) NextSecretBackendIssuedTokenExpiry() (time.Time, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NextSecretBackendIssuedTokenExpiry") + ret0, _ := ret[0].(time.Time) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NextSecretBackendIssuedTokenExpiry indicates an expected call of NextSecretBackendIssuedTokenExpiry. 
+func (mr *MockSecretsStateMockRecorder) NextSecretBackendIssuedTokenExpiry() *MockSecretsStateNextSecretBackendIssuedTokenExpiryCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextSecretBackendIssuedTokenExpiry", reflect.TypeOf((*MockSecretsState)(nil).NextSecretBackendIssuedTokenExpiry)) + return &MockSecretsStateNextSecretBackendIssuedTokenExpiryCall{Call: call} +} + +// MockSecretsStateNextSecretBackendIssuedTokenExpiryCall wrap *gomock.Call +type MockSecretsStateNextSecretBackendIssuedTokenExpiryCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockSecretsStateNextSecretBackendIssuedTokenExpiryCall) Return(arg0 time.Time, arg1 error) *MockSecretsStateNextSecretBackendIssuedTokenExpiryCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockSecretsStateNextSecretBackendIssuedTokenExpiryCall) Do(f func() (time.Time, error)) *MockSecretsStateNextSecretBackendIssuedTokenExpiryCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockSecretsStateNextSecretBackendIssuedTokenExpiryCall) DoAndReturn(f func() (time.Time, error)) *MockSecretsStateNextSecretBackendIssuedTokenExpiryCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// RemoveSecretBackendIssuedTokens mocks base method. +func (m *MockSecretsState) RemoveSecretBackendIssuedTokens(arg0 []string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveSecretBackendIssuedTokens", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveSecretBackendIssuedTokens indicates an expected call of RemoveSecretBackendIssuedTokens. 
+func (mr *MockSecretsStateMockRecorder) RemoveSecretBackendIssuedTokens(arg0 any) *MockSecretsStateRemoveSecretBackendIssuedTokensCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveSecretBackendIssuedTokens", reflect.TypeOf((*MockSecretsState)(nil).RemoveSecretBackendIssuedTokens), arg0) + return &MockSecretsStateRemoveSecretBackendIssuedTokensCall{Call: call} +} + +// MockSecretsStateRemoveSecretBackendIssuedTokensCall wrap *gomock.Call +type MockSecretsStateRemoveSecretBackendIssuedTokensCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockSecretsStateRemoveSecretBackendIssuedTokensCall) Return(arg0 error) *MockSecretsStateRemoveSecretBackendIssuedTokensCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockSecretsStateRemoveSecretBackendIssuedTokensCall) Do(f func([]string) error) *MockSecretsStateRemoveSecretBackendIssuedTokensCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockSecretsStateRemoveSecretBackendIssuedTokensCall) DoAndReturn(f func([]string) error) *MockSecretsStateRemoveSecretBackendIssuedTokensCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// WatchSecretBackendIssuedTokenExpiry mocks base method. +func (m *MockSecretsState) WatchSecretBackendIssuedTokenExpiry() state.StringsWatcher { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WatchSecretBackendIssuedTokenExpiry") + ret0, _ := ret[0].(state.StringsWatcher) + return ret0 +} + +// WatchSecretBackendIssuedTokenExpiry indicates an expected call of WatchSecretBackendIssuedTokenExpiry. 
+func (mr *MockSecretsStateMockRecorder) WatchSecretBackendIssuedTokenExpiry() *MockSecretsStateWatchSecretBackendIssuedTokenExpiryCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WatchSecretBackendIssuedTokenExpiry", reflect.TypeOf((*MockSecretsState)(nil).WatchSecretBackendIssuedTokenExpiry)) + return &MockSecretsStateWatchSecretBackendIssuedTokenExpiryCall{Call: call} +} + +// MockSecretsStateWatchSecretBackendIssuedTokenExpiryCall wrap *gomock.Call +type MockSecretsStateWatchSecretBackendIssuedTokenExpiryCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockSecretsStateWatchSecretBackendIssuedTokenExpiryCall) Return(arg0 state.StringsWatcher) *MockSecretsStateWatchSecretBackendIssuedTokenExpiryCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockSecretsStateWatchSecretBackendIssuedTokenExpiryCall) Do(f func() state.StringsWatcher) *MockSecretsStateWatchSecretBackendIssuedTokenExpiryCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockSecretsStateWatchSecretBackendIssuedTokenExpiryCall) DoAndReturn(f func() state.StringsWatcher) *MockSecretsStateWatchSecretBackendIssuedTokenExpiryCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// MockGetters is a mock of Getters interface. +type MockGetters struct { + ctrl *gomock.Controller + recorder *MockGettersMockRecorder +} + +// MockGettersMockRecorder is the mock recorder for MockGetters. +type MockGettersMockRecorder struct { + mock *MockGetters +} + +// NewMockGetters creates a new mock instance. +func NewMockGetters(ctrl *gomock.Controller) *MockGetters { + mock := &MockGetters{ctrl: ctrl} + mock.recorder = &MockGettersMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockGetters) EXPECT() *MockGettersMockRecorder { + return m.recorder +} + +// BackendConfigGetter mocks base method. +func (m *MockGetters) BackendConfigGetter() (*provider.ModelBackendConfigInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BackendConfigGetter") + ret0, _ := ret[0].(*provider.ModelBackendConfigInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BackendConfigGetter indicates an expected call of BackendConfigGetter. +func (mr *MockGettersMockRecorder) BackendConfigGetter() *MockGettersBackendConfigGetterCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BackendConfigGetter", reflect.TypeOf((*MockGetters)(nil).BackendConfigGetter)) + return &MockGettersBackendConfigGetterCall{Call: call} +} + +// MockGettersBackendConfigGetterCall wrap *gomock.Call +type MockGettersBackendConfigGetterCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockGettersBackendConfigGetterCall) Return(arg0 *provider.ModelBackendConfigInfo, arg1 error) *MockGettersBackendConfigGetterCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockGettersBackendConfigGetterCall) Do(f func() (*provider.ModelBackendConfigInfo, error)) *MockGettersBackendConfigGetterCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockGettersBackendConfigGetterCall) DoAndReturn(f func() (*provider.ModelBackendConfigInfo, error)) *MockGettersBackendConfigGetterCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// ProviderGetter mocks base method. +func (m *MockGetters) ProviderGetter(arg0 string) (provider.SecretBackendProvider, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ProviderGetter", arg0) + ret0, _ := ret[0].(provider.SecretBackendProvider) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ProviderGetter indicates an expected call of ProviderGetter. 
+func (mr *MockGettersMockRecorder) ProviderGetter(arg0 any) *MockGettersProviderGetterCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProviderGetter", reflect.TypeOf((*MockGetters)(nil).ProviderGetter), arg0) + return &MockGettersProviderGetterCall{Call: call} +} + +// MockGettersProviderGetterCall wrap *gomock.Call +type MockGettersProviderGetterCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockGettersProviderGetterCall) Return(arg0 provider.SecretBackendProvider, arg1 error) *MockGettersProviderGetterCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockGettersProviderGetterCall) Do(f func(string) (provider.SecretBackendProvider, error)) *MockGettersProviderGetterCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockGettersProviderGetterCall) DoAndReturn(f func(string) (provider.SecretBackendProvider, error)) *MockGettersProviderGetterCall { + c.Call = c.Call.DoAndReturn(f) + return c +}
apiserver/facades/controller/secretsrevoker/package_test.go+17 −0 added@@ -0,0 +1,17 @@ +// Copyright 2025 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package secretsrevoker + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +//go:generate go run go.uber.org/mock/mockgen -typed -package secretsrevoker -destination mocks_test.go github.com/juju/juju/apiserver/facades/controller/secretsrevoker SecretsState,Getters +//go:generate go run go.uber.org/mock/mockgen -typed -package secretsrevoker -destination secretsprovider_mocks_test.go github.com/juju/juju/secrets/provider SecretBackendProvider + +func TestPackage(t *testing.T) { + gc.TestingT(t) +}
apiserver/facades/controller/secretsrevoker/register.go+47 −0 added@@ -0,0 +1,47 @@ +// Copyright 2025 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package secretsrevoker + +import ( + "reflect" + + "github.com/juju/errors" + + commonsecrets "github.com/juju/juju/apiserver/common/secrets" + apiservererrors "github.com/juju/juju/apiserver/errors" + "github.com/juju/juju/apiserver/facade" + secretsprovider "github.com/juju/juju/secrets/provider" + "github.com/juju/juju/state" +) + +// Register is called to expose a package of facades onto a given registry. +func Register(registry facade.FacadeRegistry) { + registry.MustRegister("SecretsRevoker", 1, func(ctx facade.Context) (facade.Facade, error) { + return newSecretsRevokerAPI(ctx) + }, reflect.TypeOf((*SecretsRevokerAPI)(nil))) +} + +// newSecretsRevokerAPI creates a SecretsRevokerAPI for revoking secret backend +// tokens. +func newSecretsRevokerAPI(context facade.Context) (*SecretsRevokerAPI, error) { + if !context.Auth().AuthController() { + return nil, apiservererrors.ErrPerm + } + model, err := context.State().Model() + if err != nil { + return nil, errors.Trace(err) + } + + secretBackendConfigGetter := func() (*secretsprovider.ModelBackendConfigInfo, error) { + return commonsecrets.AdminBackendConfigInfo(commonsecrets.SecretsModel(model)) + } + + return &SecretsRevokerAPI{ + resources: context.Resources(), + state: state.NewSecrets(context.State()), + + backendConfigGetter: secretBackendConfigGetter, + providerGetter: secretsprovider.Provider, + }, nil +}
apiserver/facades/controller/secretsrevoker/revoker.go+122 −0 added@@ -0,0 +1,122 @@ +// Copyright 2025 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package secretsrevoker + +import ( + "time" + + "github.com/juju/errors" + + commonsecrets "github.com/juju/juju/apiserver/common/secrets" + apiservererrors "github.com/juju/juju/apiserver/errors" + "github.com/juju/juju/apiserver/facade" + "github.com/juju/juju/rpc/params" + secretsprovider "github.com/juju/juju/secrets/provider" +) + +// SecretsRevokerAPI is the implementation for the SecretsRevoker facade. +type SecretsRevokerAPI struct { + state SecretsState + + resources facade.Resources + + backendConfigGetter commonsecrets.BackendAdminConfigGetter + providerGetter func(string) (secretsprovider.SecretBackendProvider, error) +} + +// WatchIssuedTokenExpiry creates a secret backends issued token expiry watcher. +// The watcher fires when a secret backend issued token is created, sending the +// RFC3339 encoded timestamp when it will expire. +func (api *SecretsRevokerAPI) WatchIssuedTokenExpiry() (params.StringsWatchResult, error) { + result := params.StringsWatchResult{} + watch := api.state.WatchSecretBackendIssuedTokenExpiry() + if changes, ok := <-watch.Changes(); ok { + result.StringsWatcherId = api.resources.Register(watch) + result.Changes = changes + } else { + return result, errors.Errorf("cannot obtain token expiry times") + } + return result, nil +} + +// RevokeIssuedTokens revokes all issued tokens up until the specified time and +// returning the time for the next revocation. 
+func (api *SecretsRevokerAPI) RevokeIssuedTokens( + until time.Time, +) (params.RevokeIssuedTokensResult, error) { + result := params.RevokeIssuedTokensResult{} + + var err error + result.Next, err = api.revokeIssuedTokens(until) + if err != nil { + result.Error = apiservererrors.ServerError(err) + } + + return result, nil +} + +func (api *SecretsRevokerAPI) revokeIssuedTokens( + until time.Time, +) (time.Time, error) { + issuedTokens, err := api.state.ListSecretBackendIssuedTokenUntil(until) + if err != nil { + return time.Time{}, errors.Trace(err) + } + + if len(issuedTokens) == 0 { + next, err := api.state.NextSecretBackendIssuedTokenExpiry() + if err != nil { + return time.Time{}, errors.Trace(err) + } + return next, nil + } + + issuedTokensToBackend := map[string][]string{} + for _, ik := range issuedTokens { + b := issuedTokensToBackend[ik.BackendID] + b = append(b, ik.UUID) + issuedTokensToBackend[ik.BackendID] = b + } + + adminCfg, err := api.backendConfigGetter() + if err != nil { + return time.Time{}, errors.Trace(err) + } + + for backendID, issuedTokenUUIDs := range issuedTokensToBackend { + backendCfg, ok := adminCfg.Configs[backendID] + if !ok { + // If the backend doesn't exist. Discard the tokens. + err = api.state.RemoveSecretBackendIssuedTokens(issuedTokenUUIDs) + if err != nil { + return time.Time{}, errors.Trace(err) + } + continue + } + + p, err := api.providerGetter(backendCfg.BackendType) + if err != nil { + return time.Time{}, errors.Trace(err) + } + + removedUUIDs, cleanUpErr := p.CleanupIssuedTokens( + &backendCfg, issuedTokenUUIDs) + if len(removedUUIDs) > 0 { + err = api.state.RemoveSecretBackendIssuedTokens(removedUUIDs) + if err != nil { + return time.Time{}, errors.Trace(err) + } + } + if cleanUpErr != nil { + return time.Time{}, errors.Trace(cleanUpErr) + } + } + + next, err := api.state.NextSecretBackendIssuedTokenExpiry() + if err != nil { + return time.Time{}, errors.Trace(err) + } + + return next, nil +}
apiserver/facades/controller/secretsrevoker/revoker_test.go+120 −0 added@@ -0,0 +1,120 @@ +// Copyright 2025 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package secretsrevoker + +import ( + "time" + + "github.com/google/uuid" + "github.com/juju/names/v5" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "go.uber.org/mock/gomock" + gc "gopkg.in/check.v1" + + facademocks "github.com/juju/juju/apiserver/facade/mocks" + "github.com/juju/juju/rpc/params" + secretsprovider "github.com/juju/juju/secrets/provider" + "github.com/juju/juju/state" + "github.com/juju/juju/state/watcher/watchertest" +) + +type facadeSuite struct { + testing.IsolationSuite + + authorizer *facademocks.MockAuthorizer + resources *facademocks.MockResources + state *MockSecretsState + getters *MockGetters + provider *MockSecretBackendProvider + facade *SecretsRevokerAPI +} + +var _ = gc.Suite(&facadeSuite{}) + +func (s *facadeSuite) setup(c *gc.C) *gomock.Controller { + ctrl := gomock.NewController(c) + + s.authorizer = facademocks.NewMockAuthorizer(ctrl) + s.resources = facademocks.NewMockResources(ctrl) + s.state = NewMockSecretsState(ctrl) + s.getters = NewMockGetters(ctrl) + s.provider = NewMockSecretBackendProvider(ctrl) + + s.facade = &SecretsRevokerAPI{ + state: s.state, + resources: s.resources, + + backendConfigGetter: s.getters.BackendConfigGetter, + providerGetter: s.getters.ProviderGetter, + } + + s.authorizer.EXPECT().AuthController().Return(true).AnyTimes() + s.getters.EXPECT().ProviderGetter( + "my-backend-type").Return(s.provider, nil).AnyTimes() + + return ctrl +} + +func (s *facadeSuite) TestRevokeIssuedTokens(c *gc.C) { + defer s.setup(c).Finish() + + now := time.Now() + next := now.Add(time.Hour) + uuids := []string{uuid.NewString(), uuid.NewString()} + tokens := []state.SecretBackendIssuedToken{{ + UUID: uuids[0], + ExpireTime: now.Add(-time.Second), + BackendID: "some-backend", + Consumer: names.NewUnitTag("app/0"), + }, { 
+ UUID: uuids[1], + ExpireTime: now.Add(-time.Second), + BackendID: "some-backend", + Consumer: names.NewUnitTag("app/0"), + }} + s.state.EXPECT().ListSecretBackendIssuedTokenUntil(now).Return(tokens, nil) + s.state.EXPECT().NextSecretBackendIssuedTokenExpiry().Return(next, nil) + + backends := &secretsprovider.ModelBackendConfigInfo{ + ActiveID: "some-backend", + Configs: map[string]secretsprovider.ModelBackendConfig{ + "some-backend": { + BackendConfig: secretsprovider.BackendConfig{ + BackendType: "my-backend-type", + }, + }, + }, + } + s.getters.EXPECT().BackendConfigGetter().Return(backends, nil) + + s.provider.EXPECT().CleanupIssuedTokens( + gomock.Any(), uuids).Return(uuids[:1], nil) + s.state.EXPECT().RemoveSecretBackendIssuedTokens(uuids[:1]).Return(nil) + + res, err := s.facade.RevokeIssuedTokens(now) + c.Assert(err, jc.ErrorIsNil) + c.Assert(res, gc.DeepEquals, params.RevokeIssuedTokensResult{ + Next: next, + }) +} + +func (s *facadeSuite) TestWatchIssuedTokenExpiry(c *gc.C) { + defer s.setup(c).Finish() + + ch := make(chan []string, 1) + ch <- []string{"something"} + w := watchertest.NewStringsWatcher(ch) + defer w.Kill() + + s.state.EXPECT().WatchSecretBackendIssuedTokenExpiry().Return(w) + s.resources.EXPECT().Register(w).Return("abc") + + res, err := s.facade.WatchIssuedTokenExpiry() + c.Assert(err, jc.ErrorIsNil) + c.Assert(res, gc.DeepEquals, params.StringsWatchResult{ + StringsWatcherId: "abc", + Changes: []string{"something"}, + }) +}
apiserver/facades/controller/secretsrevoker/secretsprovider_mocks_test.go+348 −0 added@@ -0,0 +1,348 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/juju/juju/secrets/provider (interfaces: SecretBackendProvider) +// +// Generated by this command: +// +// mockgen -typed -package secretsrevoker -destination secretsprovider_mocks_test.go github.com/juju/juju/secrets/provider SecretBackendProvider +// + +// Package secretsrevoker is a generated GoMock package. +package secretsrevoker + +import ( + reflect "reflect" + + provider "github.com/juju/juju/secrets/provider" + names "github.com/juju/names/v5" + gomock "go.uber.org/mock/gomock" +) + +// MockSecretBackendProvider is a mock of SecretBackendProvider interface. +type MockSecretBackendProvider struct { + ctrl *gomock.Controller + recorder *MockSecretBackendProviderMockRecorder +} + +// MockSecretBackendProviderMockRecorder is the mock recorder for MockSecretBackendProvider. +type MockSecretBackendProviderMockRecorder struct { + mock *MockSecretBackendProvider +} + +// NewMockSecretBackendProvider creates a new mock instance. +func NewMockSecretBackendProvider(ctrl *gomock.Controller) *MockSecretBackendProvider { + mock := &MockSecretBackendProvider{ctrl: ctrl} + mock.recorder = &MockSecretBackendProviderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSecretBackendProvider) EXPECT() *MockSecretBackendProviderMockRecorder { + return m.recorder +} + +// CleanupIssuedTokens mocks base method. +func (m *MockSecretBackendProvider) CleanupIssuedTokens(arg0 *provider.ModelBackendConfig, arg1 []string) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CleanupIssuedTokens", arg0, arg1) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CleanupIssuedTokens indicates an expected call of CleanupIssuedTokens. 
+func (mr *MockSecretBackendProviderMockRecorder) CleanupIssuedTokens(arg0, arg1 any) *MockSecretBackendProviderCleanupIssuedTokensCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupIssuedTokens", reflect.TypeOf((*MockSecretBackendProvider)(nil).CleanupIssuedTokens), arg0, arg1) + return &MockSecretBackendProviderCleanupIssuedTokensCall{Call: call} +} + +// MockSecretBackendProviderCleanupIssuedTokensCall wrap *gomock.Call +type MockSecretBackendProviderCleanupIssuedTokensCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockSecretBackendProviderCleanupIssuedTokensCall) Return(arg0 []string, arg1 error) *MockSecretBackendProviderCleanupIssuedTokensCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockSecretBackendProviderCleanupIssuedTokensCall) Do(f func(*provider.ModelBackendConfig, []string) ([]string, error)) *MockSecretBackendProviderCleanupIssuedTokensCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockSecretBackendProviderCleanupIssuedTokensCall) DoAndReturn(f func(*provider.ModelBackendConfig, []string) ([]string, error)) *MockSecretBackendProviderCleanupIssuedTokensCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// CleanupModel mocks base method. +func (m *MockSecretBackendProvider) CleanupModel(arg0 *provider.ModelBackendConfig) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CleanupModel", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// CleanupModel indicates an expected call of CleanupModel. 
+func (mr *MockSecretBackendProviderMockRecorder) CleanupModel(arg0 any) *MockSecretBackendProviderCleanupModelCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupModel", reflect.TypeOf((*MockSecretBackendProvider)(nil).CleanupModel), arg0) + return &MockSecretBackendProviderCleanupModelCall{Call: call} +} + +// MockSecretBackendProviderCleanupModelCall wrap *gomock.Call +type MockSecretBackendProviderCleanupModelCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockSecretBackendProviderCleanupModelCall) Return(arg0 error) *MockSecretBackendProviderCleanupModelCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockSecretBackendProviderCleanupModelCall) Do(f func(*provider.ModelBackendConfig) error) *MockSecretBackendProviderCleanupModelCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockSecretBackendProviderCleanupModelCall) DoAndReturn(f func(*provider.ModelBackendConfig) error) *MockSecretBackendProviderCleanupModelCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// CleanupSecrets mocks base method. +func (m *MockSecretBackendProvider) CleanupSecrets(arg0 *provider.ModelBackendConfig, arg1 names.Tag, arg2 provider.SecretRevisions) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CleanupSecrets", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// CleanupSecrets indicates an expected call of CleanupSecrets. 
+func (mr *MockSecretBackendProviderMockRecorder) CleanupSecrets(arg0, arg1, arg2 any) *MockSecretBackendProviderCleanupSecretsCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupSecrets", reflect.TypeOf((*MockSecretBackendProvider)(nil).CleanupSecrets), arg0, arg1, arg2) + return &MockSecretBackendProviderCleanupSecretsCall{Call: call} +} + +// MockSecretBackendProviderCleanupSecretsCall wrap *gomock.Call +type MockSecretBackendProviderCleanupSecretsCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockSecretBackendProviderCleanupSecretsCall) Return(arg0 error) *MockSecretBackendProviderCleanupSecretsCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockSecretBackendProviderCleanupSecretsCall) Do(f func(*provider.ModelBackendConfig, names.Tag, provider.SecretRevisions) error) *MockSecretBackendProviderCleanupSecretsCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockSecretBackendProviderCleanupSecretsCall) DoAndReturn(f func(*provider.ModelBackendConfig, names.Tag, provider.SecretRevisions) error) *MockSecretBackendProviderCleanupSecretsCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// Initialise mocks base method. +func (m *MockSecretBackendProvider) Initialise(arg0 *provider.ModelBackendConfig) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Initialise", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Initialise indicates an expected call of Initialise. 
+func (mr *MockSecretBackendProviderMockRecorder) Initialise(arg0 any) *MockSecretBackendProviderInitialiseCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialise", reflect.TypeOf((*MockSecretBackendProvider)(nil).Initialise), arg0) + return &MockSecretBackendProviderInitialiseCall{Call: call} +} + +// MockSecretBackendProviderInitialiseCall wrap *gomock.Call +type MockSecretBackendProviderInitialiseCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockSecretBackendProviderInitialiseCall) Return(arg0 error) *MockSecretBackendProviderInitialiseCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockSecretBackendProviderInitialiseCall) Do(f func(*provider.ModelBackendConfig) error) *MockSecretBackendProviderInitialiseCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockSecretBackendProviderInitialiseCall) DoAndReturn(f func(*provider.ModelBackendConfig) error) *MockSecretBackendProviderInitialiseCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// IssuesTokens mocks base method. +func (m *MockSecretBackendProvider) IssuesTokens() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IssuesTokens") + ret0, _ := ret[0].(bool) + return ret0 +} + +// IssuesTokens indicates an expected call of IssuesTokens. 
+func (mr *MockSecretBackendProviderMockRecorder) IssuesTokens() *MockSecretBackendProviderIssuesTokensCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IssuesTokens", reflect.TypeOf((*MockSecretBackendProvider)(nil).IssuesTokens)) + return &MockSecretBackendProviderIssuesTokensCall{Call: call} +} + +// MockSecretBackendProviderIssuesTokensCall wrap *gomock.Call +type MockSecretBackendProviderIssuesTokensCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockSecretBackendProviderIssuesTokensCall) Return(arg0 bool) *MockSecretBackendProviderIssuesTokensCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockSecretBackendProviderIssuesTokensCall) Do(f func() bool) *MockSecretBackendProviderIssuesTokensCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockSecretBackendProviderIssuesTokensCall) DoAndReturn(f func() bool) *MockSecretBackendProviderIssuesTokensCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// NewBackend mocks base method. +func (m *MockSecretBackendProvider) NewBackend(arg0 *provider.ModelBackendConfig) (provider.SecretsBackend, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewBackend", arg0) + ret0, _ := ret[0].(provider.SecretsBackend) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NewBackend indicates an expected call of NewBackend. 
+func (mr *MockSecretBackendProviderMockRecorder) NewBackend(arg0 any) *MockSecretBackendProviderNewBackendCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBackend", reflect.TypeOf((*MockSecretBackendProvider)(nil).NewBackend), arg0) + return &MockSecretBackendProviderNewBackendCall{Call: call} +} + +// MockSecretBackendProviderNewBackendCall wrap *gomock.Call +type MockSecretBackendProviderNewBackendCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockSecretBackendProviderNewBackendCall) Return(arg0 provider.SecretsBackend, arg1 error) *MockSecretBackendProviderNewBackendCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockSecretBackendProviderNewBackendCall) Do(f func(*provider.ModelBackendConfig) (provider.SecretsBackend, error)) *MockSecretBackendProviderNewBackendCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockSecretBackendProviderNewBackendCall) DoAndReturn(f func(*provider.ModelBackendConfig) (provider.SecretsBackend, error)) *MockSecretBackendProviderNewBackendCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// RestrictedConfig mocks base method. +func (m *MockSecretBackendProvider) RestrictedConfig(arg0 *provider.ModelBackendConfig, arg1, arg2 bool, arg3 string, arg4 names.Tag, arg5 []string, arg6, arg7 provider.SecretRevisions) (*provider.BackendConfig, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RestrictedConfig", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + ret0, _ := ret[0].(*provider.BackendConfig) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RestrictedConfig indicates an expected call of RestrictedConfig. 
+func (mr *MockSecretBackendProviderMockRecorder) RestrictedConfig(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 any) *MockSecretBackendProviderRestrictedConfigCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestrictedConfig", reflect.TypeOf((*MockSecretBackendProvider)(nil).RestrictedConfig), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + return &MockSecretBackendProviderRestrictedConfigCall{Call: call} +} + +// MockSecretBackendProviderRestrictedConfigCall wrap *gomock.Call +type MockSecretBackendProviderRestrictedConfigCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockSecretBackendProviderRestrictedConfigCall) Return(arg0 *provider.BackendConfig, arg1 error) *MockSecretBackendProviderRestrictedConfigCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockSecretBackendProviderRestrictedConfigCall) Do(f func(*provider.ModelBackendConfig, bool, bool, string, names.Tag, []string, provider.SecretRevisions, provider.SecretRevisions) (*provider.BackendConfig, error)) *MockSecretBackendProviderRestrictedConfigCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockSecretBackendProviderRestrictedConfigCall) DoAndReturn(f func(*provider.ModelBackendConfig, bool, bool, string, names.Tag, []string, provider.SecretRevisions, provider.SecretRevisions) (*provider.BackendConfig, error)) *MockSecretBackendProviderRestrictedConfigCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// Type mocks base method. +func (m *MockSecretBackendProvider) Type() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Type") + ret0, _ := ret[0].(string) + return ret0 +} + +// Type indicates an expected call of Type. 
+func (mr *MockSecretBackendProviderMockRecorder) Type() *MockSecretBackendProviderTypeCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Type", reflect.TypeOf((*MockSecretBackendProvider)(nil).Type)) + return &MockSecretBackendProviderTypeCall{Call: call} +} + +// MockSecretBackendProviderTypeCall wrap *gomock.Call +type MockSecretBackendProviderTypeCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockSecretBackendProviderTypeCall) Return(arg0 string) *MockSecretBackendProviderTypeCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockSecretBackendProviderTypeCall) Do(f func() string) *MockSecretBackendProviderTypeCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockSecretBackendProviderTypeCall) DoAndReturn(f func() string) *MockSecretBackendProviderTypeCall { + c.Call = c.Call.DoAndReturn(f) + return c +}
apiserver/facades/controller/secretsrevoker/state.go+21 −0 added@@ -0,0 +1,21 @@ +// Copyright 2025 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package secretsrevoker + +import ( + "time" + + "github.com/juju/juju/state" +) + +// SecretsState provides the secrets revoker controller facade access to secret +// backend issued tokens. +type SecretsState interface { + WatchSecretBackendIssuedTokenExpiry() state.StringsWatcher + ListSecretBackendIssuedTokenUntil( + until time.Time, + ) ([]state.SecretBackendIssuedToken, error) + RemoveSecretBackendIssuedTokens(uuids []string) error + NextSecretBackendIssuedTokenExpiry() (time.Time, error) +}
apiserver/facades/controller/undertaker/generatedmocks_test.go+793 −0 added@@ -0,0 +1,793 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/juju/juju/apiserver/facades/controller/undertaker (interfaces: State,Model) +// +// Generated by this command: +// +// mockgen -typed -package undertaker_test -destination generatedmocks_test.go github.com/juju/juju/apiserver/facades/controller/undertaker State,Model +// + +// Package undertaker_test is a generated GoMock package. +package undertaker_test + +import ( + reflect "reflect" + time "time" + + undertaker "github.com/juju/juju/apiserver/facades/controller/undertaker" + config "github.com/juju/juju/environs/config" + state "github.com/juju/juju/state" + names "github.com/juju/names/v5" + gomock "go.uber.org/mock/gomock" +) + +// MockState is a mock of State interface. +type MockState struct { + ctrl *gomock.Controller + recorder *MockStateMockRecorder +} + +// MockStateMockRecorder is the mock recorder for MockState. +type MockStateMockRecorder struct { + mock *MockState +} + +// NewMockState creates a new mock instance. +func NewMockState(ctrl *gomock.Controller) *MockState { + mock := &MockState{ctrl: ctrl} + mock.recorder = &MockStateMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockState) EXPECT() *MockStateMockRecorder { + return m.recorder +} + +// ControllerUUID mocks base method. +func (m *MockState) ControllerUUID() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ControllerUUID") + ret0, _ := ret[0].(string) + return ret0 +} + +// ControllerUUID indicates an expected call of ControllerUUID. 
+func (mr *MockStateMockRecorder) ControllerUUID() *MockStateControllerUUIDCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerUUID", reflect.TypeOf((*MockState)(nil).ControllerUUID)) + return &MockStateControllerUUIDCall{Call: call} +} + +// MockStateControllerUUIDCall wrap *gomock.Call +type MockStateControllerUUIDCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStateControllerUUIDCall) Return(arg0 string) *MockStateControllerUUIDCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStateControllerUUIDCall) Do(f func() string) *MockStateControllerUUIDCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStateControllerUUIDCall) DoAndReturn(f func() string) *MockStateControllerUUIDCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// FindEntity mocks base method. +func (m *MockState) FindEntity(arg0 names.Tag) (state.Entity, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FindEntity", arg0) + ret0, _ := ret[0].(state.Entity) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FindEntity indicates an expected call of FindEntity. 
+func (mr *MockStateMockRecorder) FindEntity(arg0 any) *MockStateFindEntityCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindEntity", reflect.TypeOf((*MockState)(nil).FindEntity), arg0) + return &MockStateFindEntityCall{Call: call} +} + +// MockStateFindEntityCall wrap *gomock.Call +type MockStateFindEntityCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStateFindEntityCall) Return(arg0 state.Entity, arg1 error) *MockStateFindEntityCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStateFindEntityCall) Do(f func(names.Tag) (state.Entity, error)) *MockStateFindEntityCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStateFindEntityCall) DoAndReturn(f func(names.Tag) (state.Entity, error)) *MockStateFindEntityCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// IsController mocks base method. +func (m *MockState) IsController() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsController") + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsController indicates an expected call of IsController. 
+func (mr *MockStateMockRecorder) IsController() *MockStateIsControllerCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsController", reflect.TypeOf((*MockState)(nil).IsController)) + return &MockStateIsControllerCall{Call: call} +} + +// MockStateIsControllerCall wrap *gomock.Call +type MockStateIsControllerCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStateIsControllerCall) Return(arg0 bool) *MockStateIsControllerCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStateIsControllerCall) Do(f func() bool) *MockStateIsControllerCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStateIsControllerCall) DoAndReturn(f func() bool) *MockStateIsControllerCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// ListSecretBackendIssuedTokenUntil mocks base method. +func (m *MockState) ListSecretBackendIssuedTokenUntil(arg0 time.Time) ([]state.SecretBackendIssuedToken, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListSecretBackendIssuedTokenUntil", arg0) + ret0, _ := ret[0].([]state.SecretBackendIssuedToken) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListSecretBackendIssuedTokenUntil indicates an expected call of ListSecretBackendIssuedTokenUntil. 
+func (mr *MockStateMockRecorder) ListSecretBackendIssuedTokenUntil(arg0 any) *MockStateListSecretBackendIssuedTokenUntilCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSecretBackendIssuedTokenUntil", reflect.TypeOf((*MockState)(nil).ListSecretBackendIssuedTokenUntil), arg0) + return &MockStateListSecretBackendIssuedTokenUntilCall{Call: call} +} + +// MockStateListSecretBackendIssuedTokenUntilCall wrap *gomock.Call +type MockStateListSecretBackendIssuedTokenUntilCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStateListSecretBackendIssuedTokenUntilCall) Return(arg0 []state.SecretBackendIssuedToken, arg1 error) *MockStateListSecretBackendIssuedTokenUntilCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStateListSecretBackendIssuedTokenUntilCall) Do(f func(time.Time) ([]state.SecretBackendIssuedToken, error)) *MockStateListSecretBackendIssuedTokenUntilCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStateListSecretBackendIssuedTokenUntilCall) DoAndReturn(f func(time.Time) ([]state.SecretBackendIssuedToken, error)) *MockStateListSecretBackendIssuedTokenUntilCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// Model mocks base method. +func (m *MockState) Model() (undertaker.Model, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Model") + ret0, _ := ret[0].(undertaker.Model) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Model indicates an expected call of Model. 
+func (mr *MockStateMockRecorder) Model() *MockStateModelCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Model", reflect.TypeOf((*MockState)(nil).Model)) + return &MockStateModelCall{Call: call} +} + +// MockStateModelCall wrap *gomock.Call +type MockStateModelCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStateModelCall) Return(arg0 undertaker.Model, arg1 error) *MockStateModelCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStateModelCall) Do(f func() (undertaker.Model, error)) *MockStateModelCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStateModelCall) DoAndReturn(f func() (undertaker.Model, error)) *MockStateModelCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// ModelUUID mocks base method. +func (m *MockState) ModelUUID() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ModelUUID") + ret0, _ := ret[0].(string) + return ret0 +} + +// ModelUUID indicates an expected call of ModelUUID. 
+func (mr *MockStateMockRecorder) ModelUUID() *MockStateModelUUIDCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ModelUUID", reflect.TypeOf((*MockState)(nil).ModelUUID)) + return &MockStateModelUUIDCall{Call: call} +} + +// MockStateModelUUIDCall wrap *gomock.Call +type MockStateModelUUIDCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStateModelUUIDCall) Return(arg0 string) *MockStateModelUUIDCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStateModelUUIDCall) Do(f func() string) *MockStateModelUUIDCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStateModelUUIDCall) DoAndReturn(f func() string) *MockStateModelUUIDCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// ProcessDyingModel mocks base method. +func (m *MockState) ProcessDyingModel() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ProcessDyingModel") + ret0, _ := ret[0].(error) + return ret0 +} + +// ProcessDyingModel indicates an expected call of ProcessDyingModel. 
+func (mr *MockStateMockRecorder) ProcessDyingModel() *MockStateProcessDyingModelCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProcessDyingModel", reflect.TypeOf((*MockState)(nil).ProcessDyingModel)) + return &MockStateProcessDyingModelCall{Call: call} +} + +// MockStateProcessDyingModelCall wrap *gomock.Call +type MockStateProcessDyingModelCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStateProcessDyingModelCall) Return(arg0 error) *MockStateProcessDyingModelCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStateProcessDyingModelCall) Do(f func() error) *MockStateProcessDyingModelCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStateProcessDyingModelCall) DoAndReturn(f func() error) *MockStateProcessDyingModelCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// RemoveDyingModel mocks base method. +func (m *MockState) RemoveDyingModel() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveDyingModel") + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveDyingModel indicates an expected call of RemoveDyingModel. 
+func (mr *MockStateMockRecorder) RemoveDyingModel() *MockStateRemoveDyingModelCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveDyingModel", reflect.TypeOf((*MockState)(nil).RemoveDyingModel)) + return &MockStateRemoveDyingModelCall{Call: call} +} + +// MockStateRemoveDyingModelCall wrap *gomock.Call +type MockStateRemoveDyingModelCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStateRemoveDyingModelCall) Return(arg0 error) *MockStateRemoveDyingModelCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStateRemoveDyingModelCall) Do(f func() error) *MockStateRemoveDyingModelCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStateRemoveDyingModelCall) DoAndReturn(f func() error) *MockStateRemoveDyingModelCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// RemoveSecretBackendIssuedTokens mocks base method. +func (m *MockState) RemoveSecretBackendIssuedTokens(arg0 []string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveSecretBackendIssuedTokens", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveSecretBackendIssuedTokens indicates an expected call of RemoveSecretBackendIssuedTokens. 
+func (mr *MockStateMockRecorder) RemoveSecretBackendIssuedTokens(arg0 any) *MockStateRemoveSecretBackendIssuedTokensCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveSecretBackendIssuedTokens", reflect.TypeOf((*MockState)(nil).RemoveSecretBackendIssuedTokens), arg0) + return &MockStateRemoveSecretBackendIssuedTokensCall{Call: call} +} + +// MockStateRemoveSecretBackendIssuedTokensCall wrap *gomock.Call +type MockStateRemoveSecretBackendIssuedTokensCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStateRemoveSecretBackendIssuedTokensCall) Return(arg0 error) *MockStateRemoveSecretBackendIssuedTokensCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStateRemoveSecretBackendIssuedTokensCall) Do(f func([]string) error) *MockStateRemoveSecretBackendIssuedTokensCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStateRemoveSecretBackendIssuedTokensCall) DoAndReturn(f func([]string) error) *MockStateRemoveSecretBackendIssuedTokensCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// WatchModelEntityReferences mocks base method. +func (m *MockState) WatchModelEntityReferences(arg0 string) state.NotifyWatcher { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WatchModelEntityReferences", arg0) + ret0, _ := ret[0].(state.NotifyWatcher) + return ret0 +} + +// WatchModelEntityReferences indicates an expected call of WatchModelEntityReferences. 
+func (mr *MockStateMockRecorder) WatchModelEntityReferences(arg0 any) *MockStateWatchModelEntityReferencesCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WatchModelEntityReferences", reflect.TypeOf((*MockState)(nil).WatchModelEntityReferences), arg0) + return &MockStateWatchModelEntityReferencesCall{Call: call} +} + +// MockStateWatchModelEntityReferencesCall wrap *gomock.Call +type MockStateWatchModelEntityReferencesCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStateWatchModelEntityReferencesCall) Return(arg0 state.NotifyWatcher) *MockStateWatchModelEntityReferencesCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStateWatchModelEntityReferencesCall) Do(f func(string) state.NotifyWatcher) *MockStateWatchModelEntityReferencesCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStateWatchModelEntityReferencesCall) DoAndReturn(f func(string) state.NotifyWatcher) *MockStateWatchModelEntityReferencesCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// MockModel is a mock of Model interface. +type MockModel struct { + ctrl *gomock.Controller + recorder *MockModelMockRecorder +} + +// MockModelMockRecorder is the mock recorder for MockModel. +type MockModelMockRecorder struct { + mock *MockModel +} + +// NewMockModel creates a new mock instance. +func NewMockModel(ctrl *gomock.Controller) *MockModel { + mock := &MockModel{ctrl: ctrl} + mock.recorder = &MockModelMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockModel) EXPECT() *MockModelMockRecorder { + return m.recorder +} + +// DestroyTimeout mocks base method. 
+func (m *MockModel) DestroyTimeout() *time.Duration { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DestroyTimeout") + ret0, _ := ret[0].(*time.Duration) + return ret0 +} + +// DestroyTimeout indicates an expected call of DestroyTimeout. +func (mr *MockModelMockRecorder) DestroyTimeout() *MockModelDestroyTimeoutCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DestroyTimeout", reflect.TypeOf((*MockModel)(nil).DestroyTimeout)) + return &MockModelDestroyTimeoutCall{Call: call} +} + +// MockModelDestroyTimeoutCall wrap *gomock.Call +type MockModelDestroyTimeoutCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockModelDestroyTimeoutCall) Return(arg0 *time.Duration) *MockModelDestroyTimeoutCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockModelDestroyTimeoutCall) Do(f func() *time.Duration) *MockModelDestroyTimeoutCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockModelDestroyTimeoutCall) DoAndReturn(f func() *time.Duration) *MockModelDestroyTimeoutCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// ForceDestroyed mocks base method. +func (m *MockModel) ForceDestroyed() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ForceDestroyed") + ret0, _ := ret[0].(bool) + return ret0 +} + +// ForceDestroyed indicates an expected call of ForceDestroyed. 
+func (mr *MockModelMockRecorder) ForceDestroyed() *MockModelForceDestroyedCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForceDestroyed", reflect.TypeOf((*MockModel)(nil).ForceDestroyed)) + return &MockModelForceDestroyedCall{Call: call} +} + +// MockModelForceDestroyedCall wrap *gomock.Call +type MockModelForceDestroyedCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockModelForceDestroyedCall) Return(arg0 bool) *MockModelForceDestroyedCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockModelForceDestroyedCall) Do(f func() bool) *MockModelForceDestroyedCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockModelForceDestroyedCall) DoAndReturn(f func() bool) *MockModelForceDestroyedCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// Life mocks base method. +func (m *MockModel) Life() state.Life { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Life") + ret0, _ := ret[0].(state.Life) + return ret0 +} + +// Life indicates an expected call of Life. +func (mr *MockModelMockRecorder) Life() *MockModelLifeCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Life", reflect.TypeOf((*MockModel)(nil).Life)) + return &MockModelLifeCall{Call: call} +} + +// MockModelLifeCall wrap *gomock.Call +type MockModelLifeCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockModelLifeCall) Return(arg0 state.Life) *MockModelLifeCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockModelLifeCall) Do(f func() state.Life) *MockModelLifeCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockModelLifeCall) DoAndReturn(f func() state.Life) *MockModelLifeCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// ModelConfig mocks base method. 
+func (m *MockModel) ModelConfig() (*config.Config, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ModelConfig") + ret0, _ := ret[0].(*config.Config) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ModelConfig indicates an expected call of ModelConfig. +func (mr *MockModelMockRecorder) ModelConfig() *MockModelModelConfigCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ModelConfig", reflect.TypeOf((*MockModel)(nil).ModelConfig)) + return &MockModelModelConfigCall{Call: call} +} + +// MockModelModelConfigCall wrap *gomock.Call +type MockModelModelConfigCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockModelModelConfigCall) Return(arg0 *config.Config, arg1 error) *MockModelModelConfigCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockModelModelConfigCall) Do(f func() (*config.Config, error)) *MockModelModelConfigCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockModelModelConfigCall) DoAndReturn(f func() (*config.Config, error)) *MockModelModelConfigCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// Name mocks base method. +func (m *MockModel) Name() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Name") + ret0, _ := ret[0].(string) + return ret0 +} + +// Name indicates an expected call of Name. 
+func (mr *MockModelMockRecorder) Name() *MockModelNameCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Name", reflect.TypeOf((*MockModel)(nil).Name)) + return &MockModelNameCall{Call: call} +} + +// MockModelNameCall wrap *gomock.Call +type MockModelNameCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockModelNameCall) Return(arg0 string) *MockModelNameCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockModelNameCall) Do(f func() string) *MockModelNameCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockModelNameCall) DoAndReturn(f func() string) *MockModelNameCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// Owner mocks base method. +func (m *MockModel) Owner() names.UserTag { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Owner") + ret0, _ := ret[0].(names.UserTag) + return ret0 +} + +// Owner indicates an expected call of Owner. +func (mr *MockModelMockRecorder) Owner() *MockModelOwnerCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Owner", reflect.TypeOf((*MockModel)(nil).Owner)) + return &MockModelOwnerCall{Call: call} +} + +// MockModelOwnerCall wrap *gomock.Call +type MockModelOwnerCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockModelOwnerCall) Return(arg0 names.UserTag) *MockModelOwnerCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockModelOwnerCall) Do(f func() names.UserTag) *MockModelOwnerCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockModelOwnerCall) DoAndReturn(f func() names.UserTag) *MockModelOwnerCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// UUID mocks base method. 
+func (m *MockModel) UUID() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UUID") + ret0, _ := ret[0].(string) + return ret0 +} + +// UUID indicates an expected call of UUID. +func (mr *MockModelMockRecorder) UUID() *MockModelUUIDCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UUID", reflect.TypeOf((*MockModel)(nil).UUID)) + return &MockModelUUIDCall{Call: call} +} + +// MockModelUUIDCall wrap *gomock.Call +type MockModelUUIDCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockModelUUIDCall) Return(arg0 string) *MockModelUUIDCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockModelUUIDCall) Do(f func() string) *MockModelUUIDCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockModelUUIDCall) DoAndReturn(f func() string) *MockModelUUIDCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// Watch mocks base method. +func (m *MockModel) Watch() state.NotifyWatcher { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Watch") + ret0, _ := ret[0].(state.NotifyWatcher) + return ret0 +} + +// Watch indicates an expected call of Watch. 
+func (mr *MockModelMockRecorder) Watch() *MockModelWatchCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Watch", reflect.TypeOf((*MockModel)(nil).Watch)) + return &MockModelWatchCall{Call: call} +} + +// MockModelWatchCall wrap *gomock.Call +type MockModelWatchCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockModelWatchCall) Return(arg0 state.NotifyWatcher) *MockModelWatchCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockModelWatchCall) Do(f func() state.NotifyWatcher) *MockModelWatchCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockModelWatchCall) DoAndReturn(f func() state.NotifyWatcher) *MockModelWatchCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// WatchForModelConfigChanges mocks base method. +func (m *MockModel) WatchForModelConfigChanges() state.NotifyWatcher { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WatchForModelConfigChanges") + ret0, _ := ret[0].(state.NotifyWatcher) + return ret0 +} + +// WatchForModelConfigChanges indicates an expected call of WatchForModelConfigChanges. 
+func (mr *MockModelMockRecorder) WatchForModelConfigChanges() *MockModelWatchForModelConfigChangesCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WatchForModelConfigChanges", reflect.TypeOf((*MockModel)(nil).WatchForModelConfigChanges)) + return &MockModelWatchForModelConfigChangesCall{Call: call} +} + +// MockModelWatchForModelConfigChangesCall wrap *gomock.Call +type MockModelWatchForModelConfigChangesCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockModelWatchForModelConfigChangesCall) Return(arg0 state.NotifyWatcher) *MockModelWatchForModelConfigChangesCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockModelWatchForModelConfigChangesCall) Do(f func() state.NotifyWatcher) *MockModelWatchForModelConfigChangesCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockModelWatchForModelConfigChangesCall) DoAndReturn(f func() state.NotifyWatcher) *MockModelWatchForModelConfigChangesCall { + c.Call = c.Call.DoAndReturn(f) + return c +}
apiserver/facades/controller/undertaker/mock_test.go+90 −85 modified@@ -9,19 +9,24 @@ import ( "github.com/juju/errors" "github.com/juju/names/v5" "github.com/juju/utils/v3" + "go.uber.org/mock/gomock" "github.com/juju/juju/apiserver/facades/controller/undertaker" "github.com/juju/juju/cloud" "github.com/juju/juju/core/status" "github.com/juju/juju/environs/config" - "github.com/juju/juju/secrets/provider" "github.com/juju/juju/state" coretesting "github.com/juju/juju/testing" ) +//go:generate go run go.uber.org/mock/mockgen -typed -package undertaker_test -destination generatedmocks_test.go github.com/juju/juju/apiserver/facades/controller/undertaker State,Model +//go:generate go run go.uber.org/mock/mockgen -typed -package undertaker_test -destination secretsmocks_test.go github.com/juju/juju/secrets/provider SecretBackendProvider + // mockState implements State interface and allows inspection of called // methods. type mockState struct { + *MockState + model *mockModel removed bool isSystem bool @@ -32,24 +37,31 @@ type mockState struct { var _ undertaker.State = (*mockState)(nil) -func newMockState(modelOwner names.UserTag, modelName string, isSystem bool, modelCfg config.Config) *mockState { - +func newMockState( + ctrl *gomock.Controller, modelOwner names.UserTag, modelName string, + isSystem bool, modelCfg config.Config, +) *mockState { model := mockModel{ + MockModel: NewMockModel(ctrl), owner: modelOwner, name: modelName, uuid: "9d3d3b19-2b0c-4a3f-acde-0b1645586a72", life: state.Alive, modelConfig: modelCfg, } + model.legacy(ctrl) st := &mockState{ + MockState: NewMockState(ctrl), model: &model, isSystem: isSystem, controllerUUID: utils.MustNewUUID().String(), watcher: &mockWatcher{ changes: make(chan struct{}, 1), }, } + st.legacy(ctrl) + return st } @@ -60,52 +72,56 @@ func (m *mockState) EnsureModelRemoved() error { return nil } -func (m *mockState) RemoveDyingModel() error { - if m.model.life == state.Alive { - return errors.New("model not dying or 
dead") - } - m.removed = true - return nil -} - -func (m *mockState) ProcessDyingModel() error { - if m.model.life != state.Dying { - return errors.New("model is not dying") - } - m.model.life = state.Dead - return nil -} - -func (m *mockState) IsController() bool { - return m.isSystem -} - -func (m *mockState) Model() (undertaker.Model, error) { - return m.model, nil -} - -func (m *mockState) FindEntity(tag names.Tag) (state.Entity, error) { - if tag.Kind() == names.ModelTagKind && tag.Id() == m.model.UUID() { +func (m *mockState) legacy(ctrl *gomock.Controller) { + m.MockState.EXPECT().RemoveDyingModel().DoAndReturn(func() error { + if m.model.life == state.Alive { + return errors.New("model not dying or dead") + } + m.removed = true + return nil + }).AnyTimes() + + m.MockState.EXPECT().ProcessDyingModel().DoAndReturn(func() error { + if m.model.life != state.Dying { + return errors.New("model is not dying") + } + m.model.life = state.Dead + return nil + }).AnyTimes() + + m.MockState.EXPECT().IsController().DoAndReturn(func() bool { + return m.isSystem + }).AnyTimes() + + m.MockState.EXPECT().Model().DoAndReturn(func() (undertaker.Model, error) { return m.model, nil - } - return nil, errors.NotFoundf("entity with tag %q", tag.String()) -} + }).AnyTimes() -func (m *mockState) WatchModelEntityReferences(mUUID string) state.NotifyWatcher { - return m.watcher -} + m.MockState.EXPECT().FindEntity(gomock.Any()).DoAndReturn(func(tag names.Tag) (state.Entity, error) { + if tag.Kind() == names.ModelTagKind && tag.Id() == m.model.UUID() { + return m.model, nil + } + return nil, errors.NotFoundf("entity with tag %q", tag.String()) + }).AnyTimes() -func (m *mockState) ModelUUID() string { - return m.model.UUID() -} + m.MockState.EXPECT().WatchModelEntityReferences(gomock.Any()).DoAndReturn(func(mUUID string) state.NotifyWatcher { + return m.watcher + }).AnyTimes() + + m.MockState.EXPECT().ModelUUID().DoAndReturn(func() string { + return m.model.UUID() + }).AnyTimes() -func 
(m *mockState) ControllerUUID() string { - return m.controllerUUID + m.MockState.EXPECT().ControllerUUID().DoAndReturn(func() string { + return m.controllerUUID + }).AnyTimes() } // mockModel implements Model interface and allows inspection of called // methods. type mockModel struct { + *MockModel + owner names.UserTag life state.Life name string @@ -129,34 +145,10 @@ func (m *mockModel) Cloud() (cloud.Cloud, error) { return cloud.Cloud{}, errors.NotImplemented } -func (m *mockModel) Owner() names.UserTag { - return m.owner -} - -func (m *mockModel) Life() state.Life { - return m.life -} - -func (m *mockModel) ForceDestroyed() bool { - return m.forced -} - -func (m *mockModel) DestroyTimeout() *time.Duration { - return m.timeout -} - func (m *mockModel) Tag() names.Tag { return names.NewModelTag(m.uuid) } -func (m *mockModel) Name() string { - return m.name -} - -func (m *mockModel) UUID() string { - return m.uuid -} - func (m *mockModel) Destroy() error { m.life = state.Dying return nil @@ -169,16 +161,42 @@ func (m *mockModel) SetStatus(sInfo status.StatusInfo) error { return nil } -func (m *mockModel) WatchForModelConfigChanges() state.NotifyWatcher { - return nil -} +func (m *mockModel) legacy(ctrl *gomock.Controller) { + m.MockModel.EXPECT().Owner().DoAndReturn(func() names.UserTag { + return m.owner + }).AnyTimes() -func (m *mockModel) Watch() state.NotifyWatcher { - return nil -} + m.MockModel.EXPECT().Life().DoAndReturn(func() state.Life { + return m.life + }).AnyTimes() -func (m *mockModel) ModelConfig() (*config.Config, error) { - return &m.modelConfig, nil + m.MockModel.EXPECT().ForceDestroyed().DoAndReturn(func() bool { + return m.forced + }).AnyTimes() + + m.MockModel.EXPECT().DestroyTimeout().DoAndReturn(func() *time.Duration { + return m.timeout + }).AnyTimes() + + m.MockModel.EXPECT().Name().DoAndReturn(func() string { + return m.name + }).AnyTimes() + + m.MockModel.EXPECT().UUID().DoAndReturn(func() string { + return m.uuid + }).AnyTimes() + + 
m.MockModel.EXPECT().WatchForModelConfigChanges().DoAndReturn(func() state.NotifyWatcher { + return nil + }).AnyTimes() + + m.MockModel.EXPECT().Watch().DoAndReturn(func() state.NotifyWatcher { + return nil + }).AnyTimes() + + m.MockModel.EXPECT().ModelConfig().DoAndReturn(func() (*config.Config, error) { + return &m.modelConfig, nil + }).AnyTimes() } type mockWatcher struct { @@ -189,16 +207,3 @@ type mockWatcher struct { func (w *mockWatcher) Changes() <-chan struct{} { return w.changes } - -type mockSecrets struct { - provider.SecretBackendProvider - cleanedUUID string -} - -func (m *mockSecrets) CleanupModel(cfg *provider.ModelBackendConfig) error { - if cfg.BackendType != "some-backend" { - return errors.New("unknown backend " + cfg.BackendType) - } - m.cleanedUUID = cfg.ModelUUID - return nil -}
apiserver/facades/controller/undertaker/secretsmocks_test.go+348 −0 added@@ -0,0 +1,348 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/juju/juju/secrets/provider (interfaces: SecretBackendProvider) +// +// Generated by this command: +// +// mockgen -typed -package undertaker_test -destination secretsmocks_test.go github.com/juju/juju/secrets/provider SecretBackendProvider +// + +// Package undertaker_test is a generated GoMock package. +package undertaker_test + +import ( + reflect "reflect" + + provider "github.com/juju/juju/secrets/provider" + names "github.com/juju/names/v5" + gomock "go.uber.org/mock/gomock" +) + +// MockSecretBackendProvider is a mock of SecretBackendProvider interface. +type MockSecretBackendProvider struct { + ctrl *gomock.Controller + recorder *MockSecretBackendProviderMockRecorder +} + +// MockSecretBackendProviderMockRecorder is the mock recorder for MockSecretBackendProvider. +type MockSecretBackendProviderMockRecorder struct { + mock *MockSecretBackendProvider +} + +// NewMockSecretBackendProvider creates a new mock instance. +func NewMockSecretBackendProvider(ctrl *gomock.Controller) *MockSecretBackendProvider { + mock := &MockSecretBackendProvider{ctrl: ctrl} + mock.recorder = &MockSecretBackendProviderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSecretBackendProvider) EXPECT() *MockSecretBackendProviderMockRecorder { + return m.recorder +} + +// CleanupIssuedTokens mocks base method. +func (m *MockSecretBackendProvider) CleanupIssuedTokens(arg0 *provider.ModelBackendConfig, arg1 []string) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CleanupIssuedTokens", arg0, arg1) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CleanupIssuedTokens indicates an expected call of CleanupIssuedTokens. 
+func (mr *MockSecretBackendProviderMockRecorder) CleanupIssuedTokens(arg0, arg1 any) *MockSecretBackendProviderCleanupIssuedTokensCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupIssuedTokens", reflect.TypeOf((*MockSecretBackendProvider)(nil).CleanupIssuedTokens), arg0, arg1) + return &MockSecretBackendProviderCleanupIssuedTokensCall{Call: call} +} + +// MockSecretBackendProviderCleanupIssuedTokensCall wrap *gomock.Call +type MockSecretBackendProviderCleanupIssuedTokensCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockSecretBackendProviderCleanupIssuedTokensCall) Return(arg0 []string, arg1 error) *MockSecretBackendProviderCleanupIssuedTokensCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockSecretBackendProviderCleanupIssuedTokensCall) Do(f func(*provider.ModelBackendConfig, []string) ([]string, error)) *MockSecretBackendProviderCleanupIssuedTokensCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockSecretBackendProviderCleanupIssuedTokensCall) DoAndReturn(f func(*provider.ModelBackendConfig, []string) ([]string, error)) *MockSecretBackendProviderCleanupIssuedTokensCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// CleanupModel mocks base method. +func (m *MockSecretBackendProvider) CleanupModel(arg0 *provider.ModelBackendConfig) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CleanupModel", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// CleanupModel indicates an expected call of CleanupModel. 
+func (mr *MockSecretBackendProviderMockRecorder) CleanupModel(arg0 any) *MockSecretBackendProviderCleanupModelCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupModel", reflect.TypeOf((*MockSecretBackendProvider)(nil).CleanupModel), arg0) + return &MockSecretBackendProviderCleanupModelCall{Call: call} +} + +// MockSecretBackendProviderCleanupModelCall wrap *gomock.Call +type MockSecretBackendProviderCleanupModelCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockSecretBackendProviderCleanupModelCall) Return(arg0 error) *MockSecretBackendProviderCleanupModelCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockSecretBackendProviderCleanupModelCall) Do(f func(*provider.ModelBackendConfig) error) *MockSecretBackendProviderCleanupModelCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockSecretBackendProviderCleanupModelCall) DoAndReturn(f func(*provider.ModelBackendConfig) error) *MockSecretBackendProviderCleanupModelCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// CleanupSecrets mocks base method. +func (m *MockSecretBackendProvider) CleanupSecrets(arg0 *provider.ModelBackendConfig, arg1 names.Tag, arg2 provider.SecretRevisions) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CleanupSecrets", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// CleanupSecrets indicates an expected call of CleanupSecrets. 
+func (mr *MockSecretBackendProviderMockRecorder) CleanupSecrets(arg0, arg1, arg2 any) *MockSecretBackendProviderCleanupSecretsCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupSecrets", reflect.TypeOf((*MockSecretBackendProvider)(nil).CleanupSecrets), arg0, arg1, arg2) + return &MockSecretBackendProviderCleanupSecretsCall{Call: call} +} + +// MockSecretBackendProviderCleanupSecretsCall wrap *gomock.Call +type MockSecretBackendProviderCleanupSecretsCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockSecretBackendProviderCleanupSecretsCall) Return(arg0 error) *MockSecretBackendProviderCleanupSecretsCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockSecretBackendProviderCleanupSecretsCall) Do(f func(*provider.ModelBackendConfig, names.Tag, provider.SecretRevisions) error) *MockSecretBackendProviderCleanupSecretsCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockSecretBackendProviderCleanupSecretsCall) DoAndReturn(f func(*provider.ModelBackendConfig, names.Tag, provider.SecretRevisions) error) *MockSecretBackendProviderCleanupSecretsCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// Initialise mocks base method. +func (m *MockSecretBackendProvider) Initialise(arg0 *provider.ModelBackendConfig) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Initialise", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Initialise indicates an expected call of Initialise. 
+func (mr *MockSecretBackendProviderMockRecorder) Initialise(arg0 any) *MockSecretBackendProviderInitialiseCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialise", reflect.TypeOf((*MockSecretBackendProvider)(nil).Initialise), arg0) + return &MockSecretBackendProviderInitialiseCall{Call: call} +} + +// MockSecretBackendProviderInitialiseCall wrap *gomock.Call +type MockSecretBackendProviderInitialiseCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockSecretBackendProviderInitialiseCall) Return(arg0 error) *MockSecretBackendProviderInitialiseCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockSecretBackendProviderInitialiseCall) Do(f func(*provider.ModelBackendConfig) error) *MockSecretBackendProviderInitialiseCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockSecretBackendProviderInitialiseCall) DoAndReturn(f func(*provider.ModelBackendConfig) error) *MockSecretBackendProviderInitialiseCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// IssuesTokens mocks base method. +func (m *MockSecretBackendProvider) IssuesTokens() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IssuesTokens") + ret0, _ := ret[0].(bool) + return ret0 +} + +// IssuesTokens indicates an expected call of IssuesTokens. 
+func (mr *MockSecretBackendProviderMockRecorder) IssuesTokens() *MockSecretBackendProviderIssuesTokensCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IssuesTokens", reflect.TypeOf((*MockSecretBackendProvider)(nil).IssuesTokens)) + return &MockSecretBackendProviderIssuesTokensCall{Call: call} +} + +// MockSecretBackendProviderIssuesTokensCall wrap *gomock.Call +type MockSecretBackendProviderIssuesTokensCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockSecretBackendProviderIssuesTokensCall) Return(arg0 bool) *MockSecretBackendProviderIssuesTokensCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockSecretBackendProviderIssuesTokensCall) Do(f func() bool) *MockSecretBackendProviderIssuesTokensCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockSecretBackendProviderIssuesTokensCall) DoAndReturn(f func() bool) *MockSecretBackendProviderIssuesTokensCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// NewBackend mocks base method. +func (m *MockSecretBackendProvider) NewBackend(arg0 *provider.ModelBackendConfig) (provider.SecretsBackend, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewBackend", arg0) + ret0, _ := ret[0].(provider.SecretsBackend) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NewBackend indicates an expected call of NewBackend. 
+func (mr *MockSecretBackendProviderMockRecorder) NewBackend(arg0 any) *MockSecretBackendProviderNewBackendCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBackend", reflect.TypeOf((*MockSecretBackendProvider)(nil).NewBackend), arg0) + return &MockSecretBackendProviderNewBackendCall{Call: call} +} + +// MockSecretBackendProviderNewBackendCall wrap *gomock.Call +type MockSecretBackendProviderNewBackendCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockSecretBackendProviderNewBackendCall) Return(arg0 provider.SecretsBackend, arg1 error) *MockSecretBackendProviderNewBackendCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockSecretBackendProviderNewBackendCall) Do(f func(*provider.ModelBackendConfig) (provider.SecretsBackend, error)) *MockSecretBackendProviderNewBackendCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockSecretBackendProviderNewBackendCall) DoAndReturn(f func(*provider.ModelBackendConfig) (provider.SecretsBackend, error)) *MockSecretBackendProviderNewBackendCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// RestrictedConfig mocks base method. +func (m *MockSecretBackendProvider) RestrictedConfig(arg0 *provider.ModelBackendConfig, arg1, arg2 bool, arg3 string, arg4 names.Tag, arg5 []string, arg6, arg7 provider.SecretRevisions) (*provider.BackendConfig, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RestrictedConfig", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + ret0, _ := ret[0].(*provider.BackendConfig) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RestrictedConfig indicates an expected call of RestrictedConfig. 
+func (mr *MockSecretBackendProviderMockRecorder) RestrictedConfig(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 any) *MockSecretBackendProviderRestrictedConfigCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestrictedConfig", reflect.TypeOf((*MockSecretBackendProvider)(nil).RestrictedConfig), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + return &MockSecretBackendProviderRestrictedConfigCall{Call: call} +} + +// MockSecretBackendProviderRestrictedConfigCall wrap *gomock.Call +type MockSecretBackendProviderRestrictedConfigCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockSecretBackendProviderRestrictedConfigCall) Return(arg0 *provider.BackendConfig, arg1 error) *MockSecretBackendProviderRestrictedConfigCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockSecretBackendProviderRestrictedConfigCall) Do(f func(*provider.ModelBackendConfig, bool, bool, string, names.Tag, []string, provider.SecretRevisions, provider.SecretRevisions) (*provider.BackendConfig, error)) *MockSecretBackendProviderRestrictedConfigCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockSecretBackendProviderRestrictedConfigCall) DoAndReturn(f func(*provider.ModelBackendConfig, bool, bool, string, names.Tag, []string, provider.SecretRevisions, provider.SecretRevisions) (*provider.BackendConfig, error)) *MockSecretBackendProviderRestrictedConfigCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// Type mocks base method. +func (m *MockSecretBackendProvider) Type() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Type") + ret0, _ := ret[0].(string) + return ret0 +} + +// Type indicates an expected call of Type. 
+func (mr *MockSecretBackendProviderMockRecorder) Type() *MockSecretBackendProviderTypeCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Type", reflect.TypeOf((*MockSecretBackendProvider)(nil).Type)) + return &MockSecretBackendProviderTypeCall{Call: call} +} + +// MockSecretBackendProviderTypeCall wrap *gomock.Call +type MockSecretBackendProviderTypeCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockSecretBackendProviderTypeCall) Return(arg0 string) *MockSecretBackendProviderTypeCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockSecretBackendProviderTypeCall) Do(f func() string) *MockSecretBackendProviderTypeCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockSecretBackendProviderTypeCall) DoAndReturn(f func() string) *MockSecretBackendProviderTypeCall { + c.Call = c.Call.DoAndReturn(f) + return c +}
apiserver/facades/controller/undertaker/state.go+16 −0 modified@@ -41,6 +41,12 @@ type State interface { // ControllerUUID returns the UUID of the controller. ControllerUUID() string + + ListSecretBackendIssuedTokenUntil( + until time.Time, + ) ([]state.SecretBackendIssuedToken, error) + + RemoveSecretBackendIssuedTokens(uuids []string) error } type stateShim struct { @@ -51,6 +57,16 @@ func (s *stateShim) Model() (Model, error) { return s.State.Model() } +func (s *stateShim) ListSecretBackendIssuedTokenUntil( + until time.Time, +) ([]state.SecretBackendIssuedToken, error) { + return state.NewSecrets(s.State).ListSecretBackendIssuedTokenUntil(until) +} + +func (s *stateShim) RemoveSecretBackendIssuedTokens(uuids []string) error { + return state.NewSecrets(s.State).RemoveSecretBackendIssuedTokens(uuids) +} + // Model defines the needed methods of state.Model for // the work of the undertaker API. type Model interface {
apiserver/facades/controller/undertaker/undertaker.go+48 −0 modified@@ -4,6 +4,8 @@ package undertaker import ( + "time" + "github.com/juju/errors" "github.com/juju/names/v5" @@ -110,6 +112,34 @@ func (u *UndertakerAPI) RemoveModelSecrets() error { if err != nil { return errors.Annotate(err, "getting secrets backends config") } + + issuedTokens, err := u.st.ListSecretBackendIssuedTokenUntil(time.Now()) + if err != nil { + return errors.Annotate(err, "getting secrets issued tokens") + } + if len(issuedTokens) > 0 { + issuedTokensToBackend := map[string][]string{} + for _, ik := range issuedTokens { + b := issuedTokensToBackend[ik.BackendID] + b = append(b, ik.UUID) + issuedTokensToBackend[ik.BackendID] = b + } + for backendID, issuedTokenUUIDs := range issuedTokensToBackend { + cfg, ok := secretBackendCfg.Configs[backendID] + if !ok { + err := u.st.RemoveSecretBackendIssuedTokens(issuedTokenUUIDs) + if err != nil { + return errors.Trace(err) + } + continue + } + err := u.revokeSecretIssuedTokensForBackend(&cfg, issuedTokenUUIDs) + if err != nil { + return errors.Trace(err) + } + } + } + for _, cfg := range secretBackendCfg.Configs { if err := u.removeModelSecretsForBackend(&cfg); err != nil { return errors.Annotatef(err, "cleaning model from inactive secrets provider %q", cfg.BackendType) @@ -126,6 +156,24 @@ func (u *UndertakerAPI) removeModelSecretsForBackend(cfg *provider.ModelBackendC return p.CleanupModel(cfg) } +func (u *UndertakerAPI) revokeSecretIssuedTokensForBackend( + cfg *provider.ModelBackendConfig, + issuedTokenUUIDs []string, +) error { + p, err := GetProvider(cfg.BackendType) + if err != nil { + return errors.Trace(err) + } + removed, cleanupErr := p.CleanupIssuedTokens(cfg, issuedTokenUUIDs) + if len(removed) > 0 { + err := u.st.RemoveSecretBackendIssuedTokens(removed) + if err != nil { + return errors.Trace(err) + } + } + return errors.Trace(cleanupErr) +} + func (u *UndertakerAPI) modelEntitiesWatcher() params.NotifyWatchResult { var nothing 
params.NotifyWatchResult watch := u.st.WatchModelEntityReferences(u.st.ModelUUID())
apiserver/facades/controller/undertaker/undertaker_test.go+77 −19 modified@@ -9,6 +9,7 @@ import ( "github.com/juju/errors" "github.com/juju/names/v5" jc "github.com/juju/testing/checkers" + "go.uber.org/mock/gomock" gc "gopkg.in/check.v1" "github.com/juju/juju/apiserver/facades/controller/undertaker" @@ -25,12 +26,12 @@ import ( type undertakerSuite struct { coretesting.BaseSuite - secrets *mockSecrets + secrets *MockSecretBackendProvider } var _ = gc.Suite(&undertakerSuite{}) -func (s *undertakerSuite) setupStateAndAPI(c *gc.C, isSystem bool, modelName string, secretsConfigError error) (*mockState, *undertaker.UndertakerAPI) { +func (s *undertakerSuite) setupStateAndAPI(c *gc.C, ctrl *gomock.Controller, isSystem bool, modelName string, secretsConfigError error) (*mockState, *undertaker.UndertakerAPI) { machineNo := "1" if isSystem { machineNo = "0" @@ -43,8 +44,8 @@ func (s *undertakerSuite) setupStateAndAPI(c *gc.C, isSystem bool, modelName str modelCfg, err := config.New(config.NoDefaults, coretesting.FakeConfig()) c.Assert(err, gc.IsNil) - st := newMockState(names.NewUserTag("admin"), modelName, isSystem, *modelCfg) - s.secrets = &mockSecrets{} + st := newMockState(ctrl, names.NewUserTag("admin"), modelName, isSystem, *modelCfg) + s.secrets = NewMockSecretBackendProvider(ctrl) s.PatchValue(&undertaker.GetProvider, func(string) (provider.SecretBackendProvider, error) { return s.secrets, nil }) secretBackendConfigGetter := func() (*provider.ModelBackendConfigInfo, error) { @@ -67,14 +68,17 @@ func (s *undertakerSuite) setupStateAndAPI(c *gc.C, isSystem bool, modelName str } func (s *undertakerSuite) TestNoPerms(c *gc.C) { + ctrl := gomock.NewController(c) + defer ctrl.Finish() + modelCfg, err := config.New(config.NoDefaults, coretesting.FakeConfig()) c.Assert(err, gc.IsNil) for _, authorizer := range []apiservertesting.FakeAuthorizer{{ Tag: names.NewMachineTag("0"), }, { Tag: names.NewUserTag("bob"), }} { - st := newMockState(names.NewUserTag("admin"), "admin", 
true, *modelCfg) + st := newMockState(ctrl, names.NewUserTag("admin"), "admin", true, *modelCfg) _, err := undertaker.NewUndertaker( st, nil, @@ -89,8 +93,11 @@ func (s *undertakerSuite) TestNoPerms(c *gc.C) { } func (s *undertakerSuite) TestModelInfo(c *gc.C) { - otherSt, hostedAPI := s.setupStateAndAPI(c, false, "hostedmodel", nil) - st, api := s.setupStateAndAPI(c, true, "admin", nil) + ctrl := gomock.NewController(c) + defer ctrl.Finish() + + otherSt, hostedAPI := s.setupStateAndAPI(c, ctrl, false, "hostedmodel", nil) + st, api := s.setupStateAndAPI(c, ctrl, true, "admin", nil) for _, test := range []struct { st *mockState api *undertaker.UndertakerAPI @@ -125,7 +132,10 @@ func (s *undertakerSuite) TestModelInfo(c *gc.C) { } func (s *undertakerSuite) TestProcessDyingModel(c *gc.C) { - otherSt, hostedAPI := s.setupStateAndAPI(c, false, "hostedmodel", nil) + ctrl := gomock.NewController(c) + defer ctrl.Finish() + + otherSt, hostedAPI := s.setupStateAndAPI(c, ctrl, false, "hostedmodel", nil) model, err := otherSt.Model() c.Assert(err, jc.ErrorIsNil) @@ -140,7 +150,10 @@ func (s *undertakerSuite) TestProcessDyingModel(c *gc.C) { } func (s *undertakerSuite) TestRemoveAliveModel(c *gc.C) { - otherSt, hostedAPI := s.setupStateAndAPI(c, false, "hostedmodel", nil) + ctrl := gomock.NewController(c) + defer ctrl.Finish() + + otherSt, hostedAPI := s.setupStateAndAPI(c, ctrl, false, "hostedmodel", nil) _, err := otherSt.Model() c.Assert(err, jc.ErrorIsNil) @@ -149,7 +162,10 @@ func (s *undertakerSuite) TestRemoveAliveModel(c *gc.C) { } func (s *undertakerSuite) TestRemoveDyingModel(c *gc.C) { - otherSt, hostedAPI := s.setupStateAndAPI(c, false, "hostedmodel", nil) + ctrl := gomock.NewController(c) + defer ctrl.Finish() + + otherSt, hostedAPI := s.setupStateAndAPI(c, ctrl, false, "hostedmodel", nil) // Set model to dying otherSt.model.life = state.Dying @@ -158,7 +174,10 @@ func (s *undertakerSuite) TestRemoveDyingModel(c *gc.C) { } func (s *undertakerSuite) 
TestDeadRemoveModel(c *gc.C) { - otherSt, hostedAPI := s.setupStateAndAPI(c, false, "hostedmodel", nil) + ctrl := gomock.NewController(c) + defer ctrl.Finish() + + otherSt, hostedAPI := s.setupStateAndAPI(c, ctrl, false, "hostedmodel", nil) // Set model to dead otherSt.model.life = state.Dying @@ -172,31 +191,64 @@ func (s *undertakerSuite) TestDeadRemoveModel(c *gc.C) { } func (s *undertakerSuite) TestRemoveModelSecrets(c *gc.C) { - otherSt, hostedAPI := s.setupStateAndAPI(c, false, "hostedmodel", nil) + ctrl := gomock.NewController(c) + defer ctrl.Finish() + + st, hostedAPI := s.setupStateAndAPI(c, ctrl, false, "hostedmodel", nil) + + cleanedUUID := "" + s.secrets.EXPECT().CleanupModel(gomock.Any()).DoAndReturn(func(cfg *provider.ModelBackendConfig) error { + if cfg.BackendType != "some-backend" { + return errors.New("unknown backend " + cfg.BackendType) + } + cleanedUUID = cfg.ModelUUID + return nil + }) + + tokenUUIDs := []string{"uuid-one", "uuid-two"} + tokens := []state.SecretBackendIssuedToken{{ + UUID: tokenUUIDs[0], + BackendID: "backend-id", + }, { + UUID: tokenUUIDs[1], + BackendID: "backend-id", + }} + st.EXPECT().ListSecretBackendIssuedTokenUntil( + gomock.Any()).Return(tokens, nil) + s.secrets.EXPECT().CleanupIssuedTokens( + gomock.Any(), tokenUUIDs).Return(tokenUUIDs, nil) + st.EXPECT().RemoveSecretBackendIssuedTokens(tokenUUIDs).Return(nil) err := hostedAPI.RemoveModelSecrets() c.Assert(err, jc.ErrorIsNil) - c.Assert(s.secrets.cleanedUUID, gc.Equals, otherSt.model.uuid) + c.Assert(cleanedUUID, gc.Equals, st.model.uuid) } func (s *undertakerSuite) TestRemoveModelSecretsConfigNotFound(c *gc.C) { - _, hostedAPI := s.setupStateAndAPI(c, false, "hostedmodel", errors.NotFound) + ctrl := gomock.NewController(c) + defer ctrl.Finish() + _, hostedAPI := s.setupStateAndAPI(c, ctrl, false, "hostedmodel", errors.NotFound) err := hostedAPI.RemoveModelSecrets() c.Assert(err, jc.ErrorIsNil) - c.Assert(s.secrets.cleanedUUID, gc.Equals, "") } func (s 
*undertakerSuite) TestModelConfig(c *gc.C) { - _, hostedAPI := s.setupStateAndAPI(c, false, "hostedmodel", nil) + ctrl := gomock.NewController(c) + defer ctrl.Finish() + + _, hostedAPI := s.setupStateAndAPI(c, ctrl, false, "hostedmodel", nil) cfg, err := hostedAPI.ModelConfig() c.Assert(err, jc.ErrorIsNil) c.Assert(cfg, gc.NotNil) } func (s *undertakerSuite) TestSetStatus(c *gc.C) { - mock, hostedAPI := s.setupStateAndAPI(c, false, "hostedmodel", nil) + ctrl := gomock.NewController(c) + defer ctrl.Finish() + + mock, hostedAPI := s.setupStateAndAPI(c, ctrl, false, "hostedmodel", nil) results, err := hostedAPI.SetStatus(params.SetStatus{ Entities: []params.EntityStatusArgs{{ @@ -213,7 +265,10 @@ func (s *undertakerSuite) TestSetStatus(c *gc.C) { } func (s *undertakerSuite) TestSetStatusControllerPermissions(c *gc.C) { - _, hostedAPI := s.setupStateAndAPI(c, true, "hostedmodel", nil) + ctrl := gomock.NewController(c) + defer ctrl.Finish() + + _, hostedAPI := s.setupStateAndAPI(c, ctrl, true, "hostedmodel", nil) results, err := hostedAPI.SetStatus(params.SetStatus{ Entities: []params.EntityStatusArgs{{ "model-6ada782f-bcd4-454b-a6da-d1793fbcb35e", status.Destroying.String(), @@ -226,7 +281,10 @@ func (s *undertakerSuite) TestSetStatusControllerPermissions(c *gc.C) { } func (s *undertakerSuite) TestSetStatusNonControllerPermissions(c *gc.C) { - _, hostedAPI := s.setupStateAndAPI(c, false, "hostedmodel", nil) + ctrl := gomock.NewController(c) + defer ctrl.Finish() + + _, hostedAPI := s.setupStateAndAPI(c, ctrl, false, "hostedmodel", nil) results, err := hostedAPI.SetStatus(params.SetStatus{ Entities: []params.EntityStatusArgs{{ "model-6ada782f-bcd4-454b-a6da-d1793fbcb35e", status.Destroying.String(),
apiserver/facades/controller/usersecretsdrain/register.go+1 −1 modified@@ -50,7 +50,7 @@ func newUserSecretsDrainAPI(context facade.Context) (*SecretsDrainAPI, error) { } secretBackendConfigGetter := func(backendIDs []string, wantAll bool) (*provider.ModelBackendConfigInfo, error) { - return commonsecrets.BackendConfigInfo(commonsecrets.SecretsModel(model), true, backendIDs, wantAll, authTag, leadershipChecker) + return commonsecrets.BackendConfigInfo(commonsecrets.SecretsModel(model), true, backendIDs, wantAll, authTag, leadershipChecker, nil) } secretBackendDrainConfigGetter := func(backendID string) (*provider.ModelBackendConfigInfo, error) { return commonsecrets.DrainBackendConfigInfo(backendID, commonsecrets.SecretsModel(model), authTag, leadershipChecker)
apiserver/facades/controller/usersecrets/mocks/secretsbackend.go+33 −4 modified@@ -123,6 +123,21 @@ func (m *MockSecretBackendProvider) EXPECT() *MockSecretBackendProviderMockRecor return m.recorder } +// CleanupIssuedTokens mocks base method. +func (m *MockSecretBackendProvider) CleanupIssuedTokens(arg0 *provider.ModelBackendConfig, arg1 []string) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CleanupIssuedTokens", arg0, arg1) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CleanupIssuedTokens indicates an expected call of CleanupIssuedTokens. +func (mr *MockSecretBackendProviderMockRecorder) CleanupIssuedTokens(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupIssuedTokens", reflect.TypeOf((*MockSecretBackendProvider)(nil).CleanupIssuedTokens), arg0, arg1) +} + // CleanupModel mocks base method. func (m *MockSecretBackendProvider) CleanupModel(arg0 *provider.ModelBackendConfig) error { m.ctrl.T.Helper() @@ -165,6 +180,20 @@ func (mr *MockSecretBackendProviderMockRecorder) Initialise(arg0 any) *gomock.Ca return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialise", reflect.TypeOf((*MockSecretBackendProvider)(nil).Initialise), arg0) } +// IssuesTokens mocks base method. +func (m *MockSecretBackendProvider) IssuesTokens() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IssuesTokens") + ret0, _ := ret[0].(bool) + return ret0 +} + +// IssuesTokens indicates an expected call of IssuesTokens. +func (mr *MockSecretBackendProviderMockRecorder) IssuesTokens() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IssuesTokens", reflect.TypeOf((*MockSecretBackendProvider)(nil).IssuesTokens)) +} + // NewBackend mocks base method. 
func (m *MockSecretBackendProvider) NewBackend(arg0 *provider.ModelBackendConfig) (provider.SecretsBackend, error) { m.ctrl.T.Helper() @@ -181,18 +210,18 @@ func (mr *MockSecretBackendProviderMockRecorder) NewBackend(arg0 any) *gomock.Ca } // RestrictedConfig mocks base method. -func (m *MockSecretBackendProvider) RestrictedConfig(arg0 *provider.ModelBackendConfig, arg1, arg2 bool, arg3 names.Tag, arg4, arg5 provider.SecretRevisions) (*provider.BackendConfig, error) { +func (m *MockSecretBackendProvider) RestrictedConfig(arg0 *provider.ModelBackendConfig, arg1, arg2 bool, arg3 string, arg4 names.Tag, arg5 []string, arg6, arg7 provider.SecretRevisions) (*provider.BackendConfig, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RestrictedConfig", arg0, arg1, arg2, arg3, arg4, arg5) + ret := m.ctrl.Call(m, "RestrictedConfig", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) ret0, _ := ret[0].(*provider.BackendConfig) ret1, _ := ret[1].(error) return ret0, ret1 } // RestrictedConfig indicates an expected call of RestrictedConfig. -func (mr *MockSecretBackendProviderMockRecorder) RestrictedConfig(arg0, arg1, arg2, arg3, arg4, arg5 any) *gomock.Call { +func (mr *MockSecretBackendProviderMockRecorder) RestrictedConfig(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestrictedConfig", reflect.TypeOf((*MockSecretBackendProvider)(nil).RestrictedConfig), arg0, arg1, arg2, arg3, arg4, arg5) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestrictedConfig", reflect.TypeOf((*MockSecretBackendProvider)(nil).RestrictedConfig), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) } // Type mocks base method.
apiserver/resources_auth_test.go+152 −0 added@@ -0,0 +1,152 @@ +// Copyright 2025 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package apiserver_test + +import ( + "encoding/json" + "fmt" + "mime" + "net/http" + "net/url" + "strings" + + jc "github.com/juju/testing/checkers" + "github.com/juju/utils/v3" + gc "gopkg.in/check.v1" + + apitesting "github.com/juju/juju/apiserver/testing" + "github.com/juju/juju/core/permission" + "github.com/juju/juju/core/resources" + "github.com/juju/juju/rpc/params" + "github.com/juju/juju/state" + "github.com/juju/juju/testing/factory" +) + +type resourcesAuthSuite struct { + apiserverBaseSuite +} + +func (s *resourcesAuthSuite) resourcesURL(app, res string) *url.URL { + u := s.URL(fmt.Sprintf("/model/%s/applications/%s/resources/%s", s.Model.UUID(), app, res), nil) + return u +} + +func (s *resourcesAuthSuite) assertJSONErrorResponse(c *gc.C, resp *http.Response, expCode int, expError string) { + uploadResponse := s.assertResponse(c, resp, expCode) + c.Check(uploadResponse.Error, gc.NotNil) + c.Check(uploadResponse.Error.Message, gc.Matches, expError) +} + +func (s *resourcesAuthSuite) assertPlainErrorResponse(c *gc.C, resp *http.Response, expCode int, expError string) { + body := apitesting.AssertResponse(c, resp, expCode, "text/plain; charset=utf-8") + c.Assert(string(body), gc.Matches, expError+"\n") +} + +func (s *resourcesAuthSuite) assertResponse(c *gc.C, resp *http.Response, expStatus int) params.UploadResult { + body := apitesting.AssertResponse(c, resp, expStatus, params.ContentTypeJSON) + var uploadResult params.UploadResult + err := json.Unmarshal(body, &uploadResult) + c.Assert(err, jc.ErrorIsNil, gc.Commentf("Body: %s", body)) + return uploadResult +} + +var _ = gc.Suite(&resourcesAuthSuite{}) + +func (s *resourcesAuthSuite) TestResourcesUploadedSecurely(c *gc.C) { + url := s.resourcesURL("tomcat", "jdk") + url.Scheme = "http" + resp := apitesting.SendHTTPRequest(c, 
apitesting.HTTPRequestParams{ + Method: "PUT", + URL: url.String(), + ExpectStatus: http.StatusBadRequest, + }) + defer resp.Body.Close() +} + +func (s *resourcesAuthSuite) TestRequiresAuth(c *gc.C) { + resp := apitesting.SendHTTPRequest(c, apitesting.HTTPRequestParams{Method: "GET", URL: s.resourcesURL("tomcat", "jdk").String()}) + defer resp.Body.Close() + s.assertPlainErrorResponse(c, resp, http.StatusUnauthorized, "authentication failed: no credentials provided") +} + +func (s *resourcesAuthSuite) TestAuthRejectsNonsUser(c *gc.C) { + // Add a machine and try to login. + machine, err := s.State.AddMachine(state.UbuntuBase("12.10"), state.JobHostUnits) + c.Assert(err, jc.ErrorIsNil) + err = machine.SetProvisioned("foo", "", "fake_nonce", nil) + c.Assert(err, jc.ErrorIsNil) + password, err := utils.RandomPassword() + c.Assert(err, jc.ErrorIsNil) + err = machine.SetPassword(password) + c.Assert(err, jc.ErrorIsNil) + + resp := apitesting.SendHTTPRequest(c, apitesting.HTTPRequestParams{ + Tag: machine.Tag().String(), + Password: password, + Method: "PUT", + URL: s.resourcesURL("tomcat", "jdk").String(), + Nonce: "fake_nonce", + }) + s.assertPlainErrorResponse( + c, resp, http.StatusForbidden, + "authorization failed: permission denied", + ) + resp.Body.Close() + + // Now try a user login. 
+ content, err := resources.GenerateContent(strings.NewReader("resource")) + c.Assert(err, jc.ErrorIsNil) + filename := mime.BEncoding.Encode("utf-8", "foo.txt") + disp := mime.FormatMediaType( + "form-data", + map[string]string{"filename": filename}, + ) + + resp = s.sendHTTPRequest(c, apitesting.HTTPRequestParams{ + Method: "PUT", + URL: s.resourcesURL("tomcat", "jdk").String(), + ContentType: "application/octet-stream", + ExtraHeaders: map[string]string{ + "Content-Sha384": content.Fingerprint.String(), + "Content-Length": fmt.Sprintf("%d", content.Size), + "Content-Disposition": disp, + }, + Body: strings.NewReader("fake_nonce"), + }) + s.assertJSONErrorResponse(c, resp, http.StatusNotFound, `application "tomcat" not found`) + resp.Body.Close() +} + +func (s *resourcesAuthSuite) TestUploadAuthRejectsUserWithoutPermission(c *gc.C) { + s.Factory.MakeUser(c, &factory.UserParams{ + Name: "oryx", + Password: "gardener", + Access: permission.ReadAccess, + }) + s.assertAuthRejectsUserWithoutPermission(c, "PUT") +} + +func (s *resourcesAuthSuite) TestDownloadAuthRejectsUserWithoutPermission(c *gc.C) { + s.Factory.MakeUser(c, &factory.UserParams{ + Name: "oryx", + Password: "gardener", + NoModelUser: true, + }) + s.assertAuthRejectsUserWithoutPermission(c, "GET") +} + +func (s *resourcesAuthSuite) assertAuthRejectsUserWithoutPermission(c *gc.C, method string) { + + resp := apitesting.SendHTTPRequest(c, apitesting.HTTPRequestParams{ + Tag: "user-oryx", + Password: "gardener", + Method: method, + URL: s.resourcesURL("tomcat", "jdk").String(), + }) + defer resp.Body.Close() + s.assertPlainErrorResponse( + c, resp, http.StatusForbidden, + "authorization failed: permission denied", + ) +}
apiserver/resources.go+43 −18 modified@@ -40,16 +40,22 @@ type ResourcesBackend interface { UpdatePendingResource(applicationID, pendingID, userID string, res charmresource.Resource, r io.Reader) (resources.Resource, error) } -// ResourcesHandler is the HTTP handler for client downloads and +// ResourcesUploadHandler is the HTTP handler for client // uploads of resources. -type ResourcesHandler struct { - StateAuthFunc func(*http.Request, ...string) (ResourcesBackend, state.PoolHelper, names.Tag, error) +type ResourcesUploadHandler struct { ChangeAllowedFunc func(*http.Request) error + StateFunc func(*http.Request) (ResourcesBackend, state.PoolHelper, names.Tag, error) +} + +// ResourcesDownloadHandler is the HTTP handler for client +// downloads of resources. +type ResourcesDownloadHandler struct { + StateFunc func(*http.Request) (ResourcesBackend, state.PoolHelper, names.Tag, error) } // ServeHTTP implements http.Handler. -func (h *ResourcesHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { - backend, poolhelper, tag, err := h.StateAuthFunc(req, names.UserTagKind, names.MachineTagKind, names.ControllerAgentTagKind, names.ApplicationTagKind) +func (h *ResourcesDownloadHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + backend, poolhelper, _, err := h.StateFunc(req) if err != nil { if err := sendError(resp, err); err != nil { logger.Errorf("%v", err) @@ -75,6 +81,36 @@ func (h *ResourcesHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request if _, err := io.Copy(resp, reader); err != nil { logger.Errorf("resource download failed: %v", err) } + default: + if err := sendError(resp, errors.MethodNotAllowedf("unsupported method: %q", req.Method)); err != nil { + logger.Errorf("%v", err) + } + } +} + +func (h *ResourcesDownloadHandler) download(backend ResourcesBackend, req *http.Request) (io.ReadCloser, int64, error) { + defer req.Body.Close() + + query := req.URL.Query() + application := query.Get(":application") + name := 
query.Get(":resource") + + resource, reader, err := backend.OpenResource(application, name) + return reader, resource.Size, errors.Trace(err) +} + +// ServeHTTP implements http.Handler. +func (h *ResourcesUploadHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + backend, closer, tag, err := h.StateFunc(req) + if err != nil { + if err := sendError(resp, err); err != nil { + logger.Errorf("%v", err) + } + return + } + defer closer.Release() + + switch req.Method { case "PUT": if err := h.ChangeAllowedFunc(req); err != nil { if err := sendError(resp, err); err != nil { @@ -99,18 +135,7 @@ func (h *ResourcesHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request } } -func (h *ResourcesHandler) download(backend ResourcesBackend, req *http.Request) (io.ReadCloser, int64, error) { - defer req.Body.Close() - - query := req.URL.Query() - application := query.Get(":application") - name := query.Get(":resource") - - resource, reader, err := backend.OpenResource(application, name) - return reader, resource.Size, errors.Trace(err) -} - -func (h *ResourcesHandler) upload(backend ResourcesBackend, req *http.Request, username string) (*params.UploadResult, error) { +func (h *ResourcesUploadHandler) upload(backend ResourcesBackend, req *http.Request, username string) (*params.UploadResult, error) { defer req.Body.Close() uploaded, err := h.readResource(backend, req) @@ -153,7 +178,7 @@ type uploadedResource struct { } // readResource extracts the relevant info from the request. -func (h *ResourcesHandler) readResource(backend ResourcesBackend, req *http.Request) (*uploadedResource, error) { +func (h *ResourcesUploadHandler) readResource(backend ResourcesBackend, req *http.Request) (*uploadedResource, error) { uReq, err := extractUploadRequest(req) if err != nil { return nil, errors.Trace(err)
apiserver/resources_test.go+43 −40 modified@@ -14,7 +14,6 @@ import ( "time" charmresource "github.com/juju/charm/v12/resource" - "github.com/juju/collections/set" "github.com/juju/errors" "github.com/juju/names/v5" "github.com/juju/testing" @@ -34,12 +33,13 @@ import ( type ResourcesHandlerSuite struct { testing.IsolationSuite - stateAuthErr error - backend *fakeBackend - username string - req *http.Request - recorder *httptest.ResponseRecorder - handler *apiserver.ResourcesHandler + stateAuthErr error + backend *fakeBackend + username string + req *http.Request + recorder *httptest.ResponseRecorder + uploadHandler *apiserver.ResourcesUploadHandler + downloadHandler *apiserver.ResourcesDownloadHandler } var _ = gc.Suite(&ResourcesHandlerSuite{}) @@ -58,13 +58,16 @@ func (s *ResourcesHandlerSuite) SetUpTest(c *gc.C) { c.Assert(err, jc.ErrorIsNil) s.req = req s.recorder = httptest.NewRecorder() - s.handler = &apiserver.ResourcesHandler{ - StateAuthFunc: s.authState, + s.uploadHandler = &apiserver.ResourcesUploadHandler{ + StateFunc: s.stateFunc, ChangeAllowedFunc: func(*http.Request) error { return nil }, } + s.downloadHandler = &apiserver.ResourcesDownloadHandler{ + StateFunc: s.stateFunc, + } } -func (s *ResourcesHandlerSuite) authState(req *http.Request, tagKinds ...string) ( +func (s *ResourcesHandlerSuite) stateFunc(req *http.Request) ( apiserver.ResourcesBackend, state.PoolHelper, names.Tag, error, ) { if s.stateAuthErr != nil { @@ -76,45 +79,45 @@ func (s *ResourcesHandlerSuite) authState(req *http.Request, tagKinds ...string) return s.backend, ph, tag, nil } -func (s *ResourcesHandlerSuite) TestExpectedAuthTags(c *gc.C) { - expectedTags := set.NewStrings(names.UserTagKind, names.MachineTagKind, names.ControllerAgentTagKind, names.ApplicationTagKind) - - s.handler.StateAuthFunc = func(req *http.Request, tagKinds ...string) (apiserver.ResourcesBackend, state.PoolHelper, names.Tag, error) { - gotTags := set.NewStrings(tagKinds...) 
- if gotTags.Difference(expectedTags).Size() != 0 || expectedTags.Difference(gotTags).Size() != 0 { - c.Fatalf("unexpected tag kinds %v", tagKinds) - return nil, nil, nil, errors.NotValidf("tag kinds %v", tagKinds) - } - ph := apiservertesting.StubPoolHelper{StubRelease: func() bool { return false }} - tag := names.NewUserTag(s.username) - return s.backend, ph, tag, nil - } - s.req.Method = "GET" - s.handler.ServeHTTP(s.recorder, s.req) - s.checkResp(c, http.StatusOK, "application/octet-stream", resourceBody) +func (s *ResourcesHandlerSuite) TestDownloadStateAuthFailure(c *gc.C) { + failure, expected := apiFailure("<failure>", "") + s.stateAuthErr = failure + + s.downloadHandler.ServeHTTP(s.recorder, s.req) + + s.checkResp(c, http.StatusInternalServerError, "application/json", expected) } -func (s *ResourcesHandlerSuite) TestStateAuthFailure(c *gc.C) { +func (s *ResourcesHandlerSuite) TestUploadStateAuthFailure(c *gc.C) { failure, expected := apiFailure("<failure>", "") s.stateAuthErr = failure - s.handler.ServeHTTP(s.recorder, s.req) + s.uploadHandler.ServeHTTP(s.recorder, s.req) s.checkResp(c, http.StatusInternalServerError, "application/json", expected) } -func (s *ResourcesHandlerSuite) TestUnsupportedMethod(c *gc.C) { - s.req.Method = "POST" +func (s *ResourcesHandlerSuite) TestDownloadUnsupportedMethod(c *gc.C) { + s.req.Method = "PUT" + + s.downloadHandler.ServeHTTP(s.recorder, s.req) + + _, expected := apiFailure(`unsupported method: "PUT"`, params.CodeMethodNotAllowed) + s.checkResp(c, http.StatusMethodNotAllowed, "application/json", expected) +} + +func (s *ResourcesHandlerSuite) TestUploadUnsupportedMethod(c *gc.C) { + s.req.Method = "GET" - s.handler.ServeHTTP(s.recorder, s.req) + s.uploadHandler.ServeHTTP(s.recorder, s.req) - _, expected := apiFailure(`unsupported method: "POST"`, params.CodeMethodNotAllowed) + _, expected := apiFailure(`unsupported method: "GET"`, params.CodeMethodNotAllowed) s.checkResp(c, http.StatusMethodNotAllowed, 
"application/json", expected) } func (s *ResourcesHandlerSuite) TestGetSuccess(c *gc.C) { s.req.Method = "GET" - s.handler.ServeHTTP(s.recorder, s.req) + s.downloadHandler.ServeHTTP(s.recorder, s.req) s.checkResp(c, http.StatusOK, "application/octet-stream", resourceBody) } @@ -126,7 +129,7 @@ func (s *ResourcesHandlerSuite) TestPutSuccess(c *gc.C) { s.backend.ReturnSetResource = res req, _ := newUploadRequest(c, "spam", "a-application", uploadContent) - s.handler.ServeHTTP(s.recorder, req) + s.uploadHandler.ServeHTTP(s.recorder, req) expected := mustMarshalJSON(¶ms.UploadResult{ Resource: api.Resource2API(res), @@ -142,12 +145,12 @@ func (s *ResourcesHandlerSuite) TestPutChangeBlocked(c *gc.C) { s.backend.ReturnSetResource = res expectedError := apiservererrors.OperationBlockedError("test block") - s.handler.ChangeAllowedFunc = func(*http.Request) error { + s.uploadHandler.ChangeAllowedFunc = func(*http.Request) error { return expectedError } req, _ := newUploadRequest(c, "spam", "a-application", uploadContent) - s.handler.ServeHTTP(s.recorder, req) + s.uploadHandler.ServeHTTP(s.recorder, req) expected := mustMarshalJSON(¶ms.ErrorResult{apiservererrors.ServerError(expectedError)}) s.checkResp(c, http.StatusBadRequest, "application/json", string(expected)) @@ -161,7 +164,7 @@ func (s *ResourcesHandlerSuite) TestPutSuccessDockerResource(c *gc.C) { s.backend.ReturnSetResource = res req, _ := newUploadRequest(c, "spam", "a-application", uploadContent) - s.handler.ServeHTTP(s.recorder, req) + s.uploadHandler.ServeHTTP(s.recorder, req) expected := mustMarshalJSON(¶ms.UploadResult{ Resource: api.Resource2API(res), @@ -180,7 +183,7 @@ func (s *ResourcesHandlerSuite) TestPutExtensionMismatch(c *gc.C) { req, _ := newUploadRequest(c, "spam", "a-application", content) req.Header.Set("Content-Disposition", "form-data; filename=different.ext") - s.handler.ServeHTTP(s.recorder, req) + s.uploadHandler.ServeHTTP(s.recorder, req) _, expected := apiFailure(`incorrect extension on 
resource upload "different.ext", expected ".tgz"`, "") @@ -198,7 +201,7 @@ func (s *ResourcesHandlerSuite) TestPutWithPending(c *gc.C) { req, _ := newUploadRequest(c, "spam", "a-application", content) req.URL.RawQuery += "&pendingid=some-unique-id" - s.handler.ServeHTTP(s.recorder, req) + s.uploadHandler.ServeHTTP(s.recorder, req) expected := mustMarshalJSON(¶ms.UploadResult{ Resource: api.Resource2API(res), @@ -214,7 +217,7 @@ func (s *ResourcesHandlerSuite) TestPutSetResourceFailure(c *gc.C) { s.backend.SetResourceErr = failure req, _ := newUploadRequest(c, "spam", "a-application", content) - s.handler.ServeHTTP(s.recorder, req) + s.uploadHandler.ServeHTTP(s.recorder, req) s.checkResp(c, http.StatusInternalServerError, "application/json", expected) }
apiserver/restrict_caasmodel.go+5 −4 modified@@ -60,14 +60,15 @@ var commonModelFacadeNames = set.NewStrings( "RemoteRelations", "Resumer", "RetryStrategy", - "Secrets", - "SecretsManager", - "SecretsDrain", - "UserSecretsDrain", "SecretBackendsManager", "SecretBackendsRotateWatcher", + "Secrets", + "SecretsDrain", + "SecretsManager", "SecretsRevisionWatcher", + "SecretsRevoker", "SecretsTriggerWatcher", + "UserSecretsDrain", "UserSecretsManager", "Singular", "StatusHistory",
caas/Dockerfile+2 −2 modified@@ -13,8 +13,8 @@ RUN useradd --uid 171 --gid 171 --no-create-home --shell /usr/bin/bash sjuju RUN mkdir -p /etc/sudoers.d && echo "sjuju ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/sjuju # Some Python dependencies. -RUN apt-get update \ - && apt-get install -y --no-install-recommends \ +RUN DEBIAN_FRONTEND=noninteractive apt-get update \ + && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ sudo \ python3-yaml \ python3-pip \
cmd/jujud/agent/agenttest/engine.go+2 −2 modified@@ -201,9 +201,9 @@ func AssertManifoldsDependencies(c *gc.C, manifolds dependency.Manifolds, expect names := set.NewStrings(keys(dependencies)...) expectedNames := set.NewStrings(keys(expected)...) // Unexpected names... - c.Assert(names.Difference(expectedNames), gc.DeepEquals, empty) + c.Check(names.Difference(expectedNames), gc.DeepEquals, empty) // Missing names... - c.Assert(expectedNames.Difference(names), gc.DeepEquals, empty) + c.Check(expectedNames.Difference(names), gc.DeepEquals, empty) for _, n := range manifoldNames.SortedValues() { c.Check(dependencies[n], jc.SameContents, expected[n], gc.Commentf("mismatched dependencies for worker %q", n))
cmd/jujud/agent/engine_test.go+2 −0 modified@@ -54,6 +54,7 @@ var ( "undertaker", "unit-assigner", // tertiary dependency: will be inactive because migration workers will be inactive "secrets-pruner", + "secrets-revoker", "user-secrets-drain-worker", } aliveModelWorkers = []string{ @@ -78,6 +79,7 @@ var ( "storage-provisioner", "unit-assigner", "secrets-pruner", + "secrets-revoker", "user-secrets-drain-worker", } migratingModelWorkers = []string{
cmd/jujud/agent/model/manifolds.go+12 −0 modified@@ -63,6 +63,7 @@ import ( "github.com/juju/juju/internal/worker/remoterelations" "github.com/juju/juju/internal/worker/secretsdrainworker" "github.com/juju/juju/internal/worker/secretspruner" + "github.com/juju/juju/internal/worker/secretsrevoker" "github.com/juju/juju/internal/worker/singular" "github.com/juju/juju/internal/worker/statushistorypruner" "github.com/juju/juju/internal/worker/storageprovisioner" @@ -344,6 +345,16 @@ func commonManifolds(config ManifoldsConfig) dependency.Manifolds { NewWorker: secretsdrainworker.NewWorker, NewBackendsClient: secretsdrainworker.NewUserSecretBackendsClient, })), + // secrets revoker worker deals with revoking authentication tokens from + // secret backends used by this model. In 4.0, this could probably be + // replaced with a cleanup job that schedules cleanups in the future. + secretsRevokerName: ifNotMigrating(secretsrevoker.Manifold(secretsrevoker.ManifoldConfig{ + APICallerName: apiCallerName, + Clock: config.Clock, + Logger: config.LoggingContext.GetLogger("juju.worker.secretsrevoker"), + NewSecretsFacade: secretsrevoker.NewSecretsFacade, + NewWorker: secretsrevoker.NewWorker, + })), } return result } @@ -744,6 +755,7 @@ const ( secretsPrunerName = "secrets-pruner" userSecretsDrainWorker = "user-secrets-drain-worker" + secretsRevokerName = "secrets-revoker" validCredentialFlagName = "valid-credential-flag" )
cmd/jujud/agent/model/manifolds_test.go+24 −0 modified@@ -61,6 +61,7 @@ func (s *ManifoldsSuite) TestIAASNames(c *gc.C) { "not-dead-flag", "remote-relations", "secrets-pruner", + "secrets-revoker", "state-cleaner", "status-history-pruner", "storage-provisioner", @@ -112,6 +113,7 @@ func (s *ManifoldsSuite) TestCAASNames(c *gc.C) { "not-dead-flag", "remote-relations", "secrets-pruner", + "secrets-revoker", "state-cleaner", "status-history-pruner", "undertaker", @@ -229,6 +231,17 @@ var expectedCAASModelManifoldsWithDependencies = map[string][]string{ "not-dead-flag", }, + "secrets-revoker": { + "agent", + "api-caller", + "environ-upgrade-gate", + "environ-upgraded-flag", + "is-responsible-flag", + "migration-fortress", + "migration-inactive-flag", + "not-dead-flag", + }, + "user-secrets-drain-worker": { "agent", "api-caller", @@ -474,6 +487,17 @@ var expectedIAASModelManifoldsWithDependencies = map[string][]string{ "not-dead-flag", }, + "secrets-revoker": { + "agent", + "api-caller", + "environ-upgrade-gate", + "environ-upgraded-flag", + "is-responsible-flag", + "migration-fortress", + "migration-inactive-flag", + "not-dead-flag", + }, + "user-secrets-drain-worker": { "agent", "api-caller",
cmd/juju/secrets/list_test.go+22 −8 modified@@ -5,6 +5,8 @@ package secrets_test import ( "fmt" + "slices" + "strings" "github.com/juju/cmd/v3/cmdtesting" jujutesting "github.com/juju/testing" @@ -81,13 +83,19 @@ ID Name Owner Rotation Revision Last updated func (s *ListSuite) TestListYAML(c *gc.C) { defer s.setup(c).Finish() - uri := coresecrets.NewURI() - uri2 := coresecrets.NewURI() - uri3 := coresecrets.NewURI() + // Sorted list for test output stability. + uris := []*coresecrets.URI{ + coresecrets.NewURI(), + coresecrets.NewURI(), + coresecrets.NewURI(), + } + slices.SortFunc(uris, func(a *coresecrets.URI, b *coresecrets.URI) int { + return strings.Compare(a.ID, b.ID) + }) s.secretsAPI.EXPECT().ListSecrets(false, coresecrets.Filter{}).Return( []apisecrets.SecretDetails{{ Metadata: coresecrets.SecretMetadata{ - URI: uri, RotatePolicy: coresecrets.RotateHourly, + URI: uris[0], RotatePolicy: coresecrets.RotateHourly, Version: 1, LatestRevision: 2, Description: "my secret", OwnerTag: "application-mysql", @@ -96,12 +104,12 @@ func (s *ListSuite) TestListYAML(c *gc.C) { Value: coresecrets.NewSecretValue(map[string]string{"foo": "YmFy"}), }, { Metadata: coresecrets.SecretMetadata{ - URI: uri2, Version: 1, LatestRevision: 1, OwnerTag: "application-mariadb", + URI: uris[1], Version: 1, LatestRevision: 1, OwnerTag: "application-mariadb", }, Error: "boom", }, { Metadata: coresecrets.SecretMetadata{ - URI: uri3, Version: 1, LatestRevision: 1, + URI: uris[2], Version: 1, LatestRevision: 1, Label: "my-secret", OwnerTag: coretesting.ModelTag.String(), }, }}, nil) @@ -110,7 +118,7 @@ func (s *ListSuite) TestListYAML(c *gc.C) { ctx, err := cmdtesting.RunCommand(c, secrets.NewListCommandForTest(s.store, s.secretsAPI), "--format", "yaml") c.Assert(err, jc.ErrorIsNil) out := cmdtesting.Stdout(ctx) - c.Assert(out, gc.Equals, fmt.Sprintf(` + c.Assert(out, jc.Contains, fmt.Sprintf(` %s: revision: 2 rotation: hourly @@ -119,19 +127,25 @@ func (s *ListSuite) TestListYAML(c *gc.C) { 
label: foobar created: 0001-01-01T00:00:00Z updated: 0001-01-01T00:00:00Z +`[1:], uris[0].ID)) + + c.Assert(out, jc.Contains, fmt.Sprintf(` %s: revision: 1 owner: mariadb created: 0001-01-01T00:00:00Z updated: 0001-01-01T00:00:00Z error: boom +`[1:], uris[1].ID)) + + c.Assert(out, jc.Contains, fmt.Sprintf(` %s: revision: 1 owner: <model> name: my-secret created: 0001-01-01T00:00:00Z updated: 0001-01-01T00:00:00Z -`[1:], uri.ID, uri2.ID, uri3.ID)) +`[1:], uris[2].ID)) } func (s *ListSuite) TestListJSON(c *gc.C) {
core/secrets/secretbackend.go+6 −0 modified@@ -11,6 +11,12 @@ import ( "github.com/juju/utils/v3" ) +const ( + // IssuedTokenValidity is the expected minimum duration for a token issued + // from a secret backend. + IssuedTokenValidity = 10 * time.Minute +) + // IsInternalSecretBackendID returns true if the supplied backend ID is the internal backend ID. func IsInternalSecretBackendID(backendID string) bool { return utils.IsValidUUIDString(backendID)
core/secrets/secret.go+54 −11 modified@@ -4,9 +4,12 @@ package secrets import ( + "crypto/rand" + "encoding/base32" "fmt" "net/url" "regexp" + "strconv" "strings" "time" @@ -47,16 +50,27 @@ type URI struct { const ( idSnippet = `[0-9a-z]{20}` uuidSnippet = `[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}` + revSnippet = `[0-9]+` // SecretScheme is the URL prefix for a secret. SecretScheme = "secret" ) -var validUUID = regexp.MustCompile(uuidSnippet) +var ( + validUUID = regexp.MustCompile(uuidSnippet) -var secretURIParse = regexp.MustCompile(`^` + - fmt.Sprintf(`((?P<source>%s)/)?(?P<id>%s)`, uuidSnippet, idSnippet) + - `$`) + secretURI = regexp.MustCompile(fmt.Sprintf( + `^((?P<source>%s)/)?(?P<id>%s)$`, uuidSnippet, idSnippet, + )) + secretURISourceIdx = secretURI.SubexpIndex("source") + secretURIIdIdx = secretURI.SubexpIndex("id") + + secretRevision = regexp.MustCompile(fmt.Sprintf( + `^(?P<id>%s)-(?P<rev>%s)$`, idSnippet, revSnippet, + )) + secretRevisionIdIdx = secretRevision.SubexpIndex("id") + secretRevisionRevIdx = secretRevision.SubexpIndex("rev") +) // ParseURI parses the specified string into a URI. 
func ParseURI(str string) (*URI, error) { @@ -77,15 +91,16 @@ func ParseURI(str string) (*URI, error) { if idStr == "" { idStr = u.Opaque } - valid := secretURIParse.MatchString(idStr) - if !valid { + + matched := secretURI.FindStringSubmatch(idStr) + if len(matched) <= max(secretURIIdIdx, secretURISourceIdx) { return nil, errors.NotValidf("secret URI %q", str) } - sourceUUID := secretURIParse.ReplaceAllString(idStr, "$source") + sourceUUID := matched[secretURISourceIdx] if sourceUUID == "" { sourceUUID = u.Host } - idPart := secretURIParse.ReplaceAllString(idStr, "$id") + idPart := matched[secretURIIdIdx] id, err := xid.FromString(idPart) if err != nil { return nil, errors.NotValidf("secret URI %q", str) @@ -97,10 +112,17 @@ func ParseURI(str string) (*URI, error) { return result, nil } +var xidEncoding = base32.NewEncoding( + "0123456789abcdefghijklmnopqrstuv", +).WithPadding(base32.NoPadding) + // NewURI returns a new secret URI. func NewURI() *URI { + var r [12]byte + _, _ = rand.Read(r[:]) + id := xidEncoding.EncodeToString(r[:]) return &URI{ - ID: xid.New().String(), + ID: id, } } @@ -116,9 +138,30 @@ func (u *URI) IsLocal(sourceUUID string) bool { return u.SourceUUID == "" || u.SourceUUID == sourceUUID } -// Name generates the secret name. +// Name generates the secret revision name. func (u URI) Name(revision int) string { - return fmt.Sprintf("%s-%d", u.ID, revision) + return RevisionName(u.ID, revision) +} + +// RevisionName generates the secret revision name. +func RevisionName(secretID string, revision int) string { + return fmt.Sprintf("%s-%d", secretID, revision) +} + +// ParseRevisionName parses the provided revision name, returning the secret ID +// and the revision number, or an error. 
+func ParseRevisionName(revisionName string) (string, int, error) { + matched := secretRevision.FindStringSubmatch(revisionName) + if len(matched) <= max(secretRevisionIdIdx, secretRevisionRevIdx) { + return "", 0, errors.NotValidf("secret revision %q", revisionName) + } + id := matched[secretRevisionIdIdx] + rev, err := strconv.Atoi(matched[secretRevisionRevIdx]) + if err != nil { + return "", 0, errors.NotValidf("secret revision %q: %v", + revisionName, err) + } + return id, rev, nil } // String prints the URI as a string.
docs/releasenotes/juju_2.9.x.md+26 −0 modified@@ -13,6 +13,32 @@ myst: ```{note} Juju 2.9 series is LTS ``` +### 🔸 **Juju 2.9.56** +🗓️ 18 Mar 2026 + +This is a security release for Juju 2.9, which includes fixes for several CVEs, +including Mongo Bleed and other vulnerabilities. + +**Important**: upgrading to this release will not update the MongoDB version used +by existing controllers.<br>To benefit from the Mongo Bleed fixes, you will need to +bootstrap a new controller with this release and migrate your models to it. + +🛠️ Fixes: + +#### Mongo Bleed +- fix: [CVE-2025-14847](https://github.com/juju/juju/security/advisories/GHSA-29v7-rr38-wf32) +- fix: mongodb accepts unauthenticated connection https://github.com/juju/juju/security/advisories/GHSA-9j5v-49f8-cpp8 + +#### Other CVEs +- fix: [CVE-2025-68152](https://github.com/juju/juju/security/advisories/GHSA-j6f6-jp3p-53mw) +- fix: [CVE-2025-68153](https://github.com/juju/juju/security/advisories/GHSA-245v-p8fj-vwm2) + +#### Other fixes +- fix: upgrade broken on k8s @wallyworld in https://github.com/juju/juju/issues/21979 +- fix: allow noble charms to be deployed with `--force` by @wallyworld in https://github.com/juju/juju/issues/21562 + +See the full list on the [Github release](https://github.com/juju/juju/releases/tag/v2.9.56). + ### 🔸 **Juju 2.9.53** 🗓️ 11 Dec 2025
docs/releasenotes/juju_3.6.x.md+88 −0 modified@@ -14,6 +14,94 @@ myst: Juju 3.6 series is LTS ``` +## 🔸 **Juju 3.6.19** +🗓️ 19 Mar 2026 + +This is a security and critical bug fix release for Juju 3.6. +Included are fixes for several CVEs, including Mongo Bleed and other vulnerabilities. +In addition, there are 2 critical bug fixes worth highlighting: +1. a fix to handle concurrent secret updates correctly +2. a fix to ensure MongoDB consistency in HA controllers + +We recommend that all users running Juju 3.6 upgrade to this release as soon +as possible to benefit from the security fixes and critical bug fixes. + +**Important**: upgrading to this release will not update the MongoDB version used +by existing controllers.<br>To benefit from the Mongo Bleed fixes, you will need to +bootstrap a new controller with this release and migrate your models to it. + +⚙️ **Features:** + +### Better management of Juju controller API port ingress + +Previously, the bootstrap process created a hardcoded global firewall rule +to open the controller API port `17070` to `0.0.0.0/0`. + +Juju now removes this hard coded rule and instead models the API port as an +asset of the controller charm, which is opened by the controller charm itself. +The result is that the user can now use the normal expose mechanism to limit +access to the API port, for example by specifying CIDR restrictions. + +eg `juju expose -m controller controller --to-cidrs 10.0.0.0/24` + +* feat: limit api port access for the controller using juju expose @adglkh in https://github.com/juju/juju/pull/20682 + +### Support for Ubuntu 26.04 workloads + +Ubuntu 26.04 workloads are now supported for charms with a compatible base. +This means that deploying a charm with `--base=ubuntu@26.04` will now succeed. 
+ +* chore: add support for ubuntu 26.04 @wallyworld in https://github.com/juju/juju/pull/21875 + +🛠️ **Fixes**: + +### Mongo Bleed +- fix: [CVE-2025-14847](https://github.com/juju/juju/security/advisories/GHSA-29v7-rr38-wf32) +- fix: mongodb accepts unauthenticated connection https://github.com/juju/juju/security/advisories/GHSA-9j5v-49f8-cpp8 + +### Other CVEs +- fix: [CVE-2026-32691](https://github.com/juju/juju/security/advisories/GHSA-gfgr-6hrj-85ww) +- fix: [CVE-2026-32692](https://github.com/juju/juju/security/advisories/GHSA-89x7-5m5m-mcmm) +- fix: [CVE-2026-32694](https://github.com/juju/juju/security/advisories/GHSA-5cj2-rqqf-hx9p) +- fix: [CVE-2026-32693](https://github.com/juju/juju/security/advisories/GHSA-439w-v2p7-pggc) + +### Handle concurrent secret updates correctly +When a secret owner adds a new revision to a secret at precisely the same moment as a secret consumer +is refreshing their secret content, it was possible that the latest secret revision could be considered +as obsolete and deleted. + +- fix: handle concurrent updates when marking obsolete secret revisions @wallyworld in https://github.com/juju/juju/pull/21779 + +### MongoDB consistency in HA controllers +When a Juju controller opens a mongodb connection, it was querying the db version to determine +whether the database supports server side transactions. An error doing this check was ignored +with a fallback to using client side transactions. This could lead to a mismatch in transaction +handling between controllers, and potentially bugs like [intermittent disconnects](https://github.com/juju/juju/issues/21664). + +Juju now always uses server side transactions so the entire pre-flight check is removed. 
+
+- chore: update juju/txn dep for sstxn checking @wallyworld in https://github.com/juju/juju/pull/21939
+- fix: error in critical version preflight check ignored @hpidcock in https://github.com/juju/txn/pull/70
+- fix: do not feed state watcher from txn stash @wallyworld in https://github.com/juju/juju/pull/21885
+
+### Handling deleted users showing offers
+When displaying application offers with `show-offer`, if there were users who had previously
+been granted access to the offer but have since been deleted, the result would include an incomplete
+list of users.
+
+- fix(cmr): handle deleted users in show-offer @iyiguncevik in https://github.com/juju/juju/pull/21763
+
+### Other fixes
+- fix: upgrade broken on k8s @wallyworld in https://github.com/juju/juju/issues/21979
+- fix(ssh): disable PTY allocation when remote command is provided @kooltuoehias in https://github.com/juju/juju/pull/21716
+- fix: deduplicate DNS in container fallback path @goldberl in https://github.com/juju/juju/pull/21738
+- fix: retry caas provisioning if charm not ready @wallyworld in https://github.com/juju/juju/pull/21759
+- feat: always log offending ops on transaction errors @manadart in https://github.com/juju/juju/pull/21782
+- fix(k8s): delete orphaned StatefulSets before recreating to avoid PVC mismatch @marceloneppel in https://github.com/juju/juju/pull/21786
+- fix: retry writing agent config during migration @SimonRichardson in https://github.com/juju/juju/pull/21821
+
+See the full list on the [GitHub release](https://github.com/juju/juju/releases/tag/v3.6.19).
+
 ## 🔸 **Juju 3.6.14**
 🗓️ 28 Jan 2026
environs/manual/sshprovisioner/provisioner_test.go+2 −2 modified@@ -9,7 +9,6 @@ import ( "os" jc "github.com/juju/testing/checkers" - "github.com/juju/utils/v3/shell" "github.com/juju/version/v2" gc "gopkg.in/check.v1" @@ -23,6 +22,7 @@ import ( "github.com/juju/juju/environs/manual/sshprovisioner" envtesting "github.com/juju/juju/environs/testing" envtools "github.com/juju/juju/environs/tools" + "github.com/juju/juju/internal/provider/common" "github.com/juju/juju/juju/testing" "github.com/juju/juju/rpc/params" jujuversion "github.com/juju/juju/version" @@ -193,6 +193,6 @@ func (s *provisionerSuite) TestProvisioningScript(c *gc.C) { c.Assert(err, jc.ErrorIsNil) removeLogFile := "rm -f '/var/log/cloud-init-output.log'\n" - expectedScript := removeLogFile + shell.DumpFileOnErrorScript("/var/log/cloud-init-output.log") + provisioningScript + expectedScript := removeLogFile + common.DumpOnErrorScript("/var/log/cloud-init-output.log") + provisioningScript c.Assert(script, gc.Equals, expectedScript) }
environs/manual/sshprovisioner/sshprovisioner.go+2 −2 modified@@ -13,7 +13,6 @@ import ( "github.com/juju/errors" "github.com/juju/utils/v3" - "github.com/juju/utils/v3/shell" "github.com/juju/utils/v3/ssh" "github.com/juju/juju/cloudconfig" @@ -25,6 +24,7 @@ import ( "github.com/juju/juju/core/instance" "github.com/juju/juju/core/model" "github.com/juju/juju/environs/manual" + "github.com/juju/juju/internal/provider/common" "github.com/juju/juju/rpc/params" "github.com/juju/juju/service" ) @@ -334,7 +334,7 @@ func ProvisioningScript(icfg *instancecfg.InstanceConfig) (string, error) { // Always remove the cloud-init-output.log file first, if it exists. fmt.Fprintf(&buf, "rm -f %s\n", utils.ShQuote(icfg.CloudInitOutputLog)) // If something goes wrong, dump cloud-init-output.log to stderr. - buf.WriteString(shell.DumpFileOnErrorScript(icfg.CloudInitOutputLog)) + buf.WriteString(common.DumpOnErrorScript(icfg.CloudInitOutputLog)) buf.WriteString(configScript) return buf.String(), nil }
.github/microk8s-launch-config-mirror.yaml+15 −0 added@@ -0,0 +1,15 @@ +--- +version: 0.1.0 +extraKubeletArgs: + --cluster-domain: cluster.local + --cluster-dns: 10.152.183.10 +addons: + - name: dns +containerdRegistryConfigs: + docker.io: | + [host."${DOCKERHUB_MIRROR}"] + capabilities = ["pull", "resolve"] + 10.152.183.69: | + [host."https://10.152.183.69:443"] + capabilities = ["pull", "resolve", "push"] + skip_verify = true
.github/microk8s-launch-config.yaml+12 −0 added@@ -0,0 +1,12 @@ +--- +version: 0.1.0 +extraKubeletArgs: + --cluster-domain: cluster.local + --cluster-dns: 10.152.183.10 +addons: + - name: dns +containerdRegistryConfigs: + 10.152.183.69: | + [host."https://10.152.183.69:443"] + capabilities = ["pull", "resolve", "push"] + skip_verify = true
.github/squid.sh+119 −0 added@@ -0,0 +1,119 @@ +#!/bin/bash + + +sudo DEBIAN_FRONTEND=noninteractive apt install -y squid nginx || +(sudo DEBIAN_FRONTEND=noninteractive apt update -y && + sudo DEBIAN_FRONTEND=noninteractive apt install -y squid nginx) + +sudo tee /etc/netplan/90-squid.yaml <<'EOF' +network: + version: 2 + dummy-devices: + squid: + addresses: + - 10.255.255.1/32 +EOF +sudo chmod 644 /etc/netplan/90-squid.yaml +sudo netplan apply + +sudo tee /etc/squid/squid.conf <<SQUID_EOF +http_port 10.255.255.1:3128 +acl localhost src 127.0.0.0/8 ::1 10.255.255.1 +acl rfc1918 src 10.0.0.0/8 +acl rfc1918 src 172.16.0.0/12 +acl rfc1918 src 192.168.0.0/16 +acl ipv6_local src fc00::/7 +acl ipv6_local src fe80::/10 +acl SSL_ports port 443 +acl Safe_ports port 80 +acl Safe_ports port 443 +acl CONNECT method CONNECT +http_access deny !Safe_ports +http_access deny CONNECT !SSL_ports +http_access allow localhost +http_access allow rfc1918 +http_access allow ipv6_local +http_access deny all +forward_max_tries 20 +connect_timeout 60 seconds +read_timeout 120 seconds +request_timeout 120 seconds +server_persistent_connections on +client_persistent_connections on +dns_retransmit_interval 2 seconds +dns_timeout 60 seconds +access_log stdio:/tmp/squid-access.log squid +cache_log /tmp/squid-cache.log +cache deny all +pid_filename /tmp/squid.pid +SQUID_EOF +sudo chmod 644 /etc/squid/squid.conf + +sudo squid -k parse + +sudo systemctl enable squid +sudo systemctl restart squid + +echo "HTTP_PROXY=http://10.255.255.1:3128" >> "$GITHUB_ENV" +echo "HTTPS_PROXY=http://10.255.255.1:3128" >> "$GITHUB_ENV" +echo "http_proxy=http://10.255.255.1:3128" >> "$GITHUB_ENV" +echo "https_proxy=http://10.255.255.1:3128" >> "$GITHUB_ENV" +echo "NO_PROXY=localhost,127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,::1" >> "$GITHUB_ENV" +echo "no_proxy=localhost,127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,::1" >> "$GITHUB_ENV" +sudo snap set system proxy.http="http://10.255.255.1:3128" +sudo 
snap set system proxy.https="http://10.255.255.1:3128" +sudo snap set system proxy.no-proxy="localhost,127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,::1" + +sudo tee /etc/nginx/nginx.conf <<NGINX_EOF +worker_processes auto; +events { + worker_connections 1024; +} +http { + proxy_buffering on; + proxy_buffer_size 16k; + proxy_buffers 8193 64k; + proxy_busy_buffers_size 512m; + proxy_max_temp_file_size 1024m; + proxy_temp_path /var/cache/nginx/proxy_temp; + proxy_connect_timeout 10s; + proxy_read_timeout 300s; + proxy_send_timeout 60s; + upstream goproxy { + server proxy.golang.org:443; + keepalive 120; + } + server { + listen 10.255.255.1:8999; + server_name _; + error_page 500 502 503 504 =410 /410; + location /410 { + internal; + default_type text/plain; + return 410 "Gone"; + } + location / { + proxy_pass https://goproxy; + proxy_ssl_server_name on; + proxy_ssl_name proxy.golang.org; + proxy_set_header Host proxy.golang.org; + proxy_http_version 1.1; + proxy_set_header Connection ""; + proxy_intercept_errors on; + proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504; + proxy_next_upstream_tries 15; + proxy_next_upstream_timeout 600s; + } + } +} +NGINX_EOF +sudo chmod 644 /etc/nginx/nginx.conf +sudo mkdir -p /var/cache/nginx/proxy_temp +sudo chown www-data:www-data /var/cache/nginx/proxy_temp + +sudo nginx -t + +sudo systemctl enable nginx +sudo systemctl restart nginx + +echo "GOPROXY=http://10.255.255.1:8999/cached-only,direct" >> "$GITHUB_ENV"
.github/workflows/build.yml+13 −4 modified@@ -6,22 +6,31 @@ on: jobs: build: name: Build - runs-on: [self-hosted, linux, "${{ matrix.platform.host_arch }}", aws, large] + runs-on: + - self-hosted + - linux + - ${{ matrix.platform.host_arch }} + - ${{ (github.repository_owner == 'juju' && 'aws') || (github.repository_owner == 'canonical' && 'noble') }} + - large strategy: fail-fast: false matrix: platform: - { os: linux, arch: amd64, host_arch: x64 } - { os: linux, arch: arm64, host_arch: arm64 } -# Until we get rid of musl, lets just disable these to save build time. -# - { os: linux, arch: s390x } -# - { os: linux, arch: ppc64le } + # Until we get rid of musl, lets just disable these to save build time. + # - { os: linux, arch: s390x } + # - { os: linux, arch: ppc64le } - { os: darwin, arch: arm64, host_arch: arm64 } steps: - name: "Checkout" uses: actions/checkout@v4 + - name: "Squid" + if: github.repository_owner != 'juju' + run: ./.github/squid.sh + - name: "Set up Go" uses: actions/setup-go@v5 with:
.github/workflows/cla.yml+1 −1 modified@@ -1,5 +1,5 @@ name: "CLA check" -on: [pull_request, workflow_dispatch] +on: [pull_request, workflow_dispatch, merge_group] jobs: cla-check:
.github/workflows/context-tests.yml+64 −21 modified@@ -7,27 +7,55 @@ on: pull_request: types: [opened, synchronize, reopened, ready_for_review] workflow_dispatch: + merge_group: permissions: contents: read + pull-requests: read jobs: changed-files: name: "Collate changed files" runs-on: ubuntu-latest outputs: check-build: ${{ steps.filter-files.outputs.check-build }} + check-unit-tests: ${{ steps.filter-files.outputs.check-unit-tests }} check-migrate: ${{ steps.filter-files.outputs.check-migrate }} check-upgrade: ${{ steps.filter-files.outputs.check-upgrade }} check-terraform: ${{ steps.filter-files.outputs.check-terraform }} check-snap: ${{ steps.filter-files.outputs.check-snap }} check-generate: ${{ steps.filter-files.outputs.check-generate }} check-docs: ${{ steps.filter-files.outputs.check-docs }} + ref: ${{ steps.refs.outputs.head_ref }} + base: ${{ steps.refs.outputs.base_ref }} steps: + - name: Checkout + if: github.event_name != 'pull_request' + uses: actions/checkout@v4 + + - name: Calculate Refs + id: refs + run: | + if [ "${{ github.event_name }}" = "pull_request" ]; then + echo "head_ref=${{ github.event.pull_request.head.sha }}" >> "$GITHUB_OUTPUT" + echo "base_ref=${{ github.event.pull_request.base.sha }}" >> "$GITHUB_OUTPUT" + elif [ "${{ github.event_name }}" = "push" ]; then + echo "head_ref=${{ github.event.push.after }}" >> "$GITHUB_OUTPUT" + echo "base_ref=${{ github.event.push.before }}" >> "$GITHUB_OUTPUT" + elif [ "${{ github.event_name }}" = "merge_group" ]; then + echo "head_ref=${{ github.event.merge_group.head_sha }}" >> "$GITHUB_OUTPUT" + echo "base_ref=${{ github.event.merge_group.base_sha }}" >> "$GITHUB_OUTPUT" + elif [ "${{ github.event_name }}" = "workflow_dispatch" ]; then + echo "head_ref=${{ github.event.ref }}" >> "$GITHUB_OUTPUT" + echo "base_ref=$(git log --max-parents=0 --first-parent --format=%H)" >> "$GITHUB_OUTPUT" + fi + - name: Filter Files id: filter-files uses: dorny/paths-filter@v3 with: + ref: ${{ 
steps.refs.outputs.head_ref }} + base: ${{ steps.refs.outputs.base_ref }} filters: | check-build: - '**.go' @@ -36,6 +64,13 @@ jobs: - 'scripts/dqlite/**' - 'Makefile' - 'make_functions.sh' + check-unit-tests: + - '**.go' + - 'go.mod' + - '.github/workflows/build.yml' + - 'scripts/dqlite/**' + - 'Makefile' + - 'make_functions.sh' check-migrate: - '**.go' - 'go.mod' @@ -80,60 +115,68 @@ jobs: if: github.event.pull_request.draft == false && needs.changed-files.outputs.check-build == 'true' uses: ./.github/workflows/build.yml + unit-tests: + needs: [changed-files] + name: Unit Tests + if: github.event.pull_request.draft == false && needs.changed-files.outputs.check-unit-tests == 'true' + uses: ./.github/workflows/unit-tests.yml + with: + base: ${{ needs.changed-files.outputs.base }} + snap: needs: [changed-files] name: Snap - if: github.event.pull_request.draft == false && needs.changed-files.outputs.check-snap == 'true' + if: github.event.pull_request.draft == false && needs.changed-files.outputs.check-snap == 'true' && github.event_name != 'merge_group' uses: ./.github/workflows/snap.yml generate: needs: [changed-files] name: Generate - if: github.event.pull_request.draft == false && needs.changed-files.outputs.check-generate == 'true' + if: github.event.pull_request.draft == false && needs.changed-files.outputs.check-generate == 'true' && github.event_name != 'merge_group' uses: ./.github/workflows/gen.yml docs: needs: [changed-files] name: Documentation - if: github.event.pull_request.draft == false && needs.changed-files.outputs.check-docs == 'true' + if: github.event.pull_request.draft == false && needs.changed-files.outputs.check-docs == 'true' && github.event_name != 'merge_group' uses: ./.github/workflows/docs.yml terraform: needs: [changed-files] name: Terraform Smoke # TODO - always skip terraform tests until they are made reliable on 3.x branches. 
- if: false && github.event.pull_request.draft == false && github.base_ref != 'main' && needs.changed-files.outputs.check-terraform == 'true' + if: false && github.event.pull_request.draft == false && github.base_ref != 'main' && needs.changed-files.outputs.check-terraform == 'true' && github.event_name != 'merge_group' uses: ./.github/workflows/terraform-smoke.yml migrate: needs: [changed-files] name: Migrate - if: github.event.pull_request.draft == false && needs.changed-files.outputs.check-migrate == 'true' + if: github.event.pull_request.draft == false && needs.changed-files.outputs.check-migrate == 'true' && github.event_name != 'merge_group' uses: ./.github/workflows/migrate.yml upgrade: needs: [changed-files] name: Upgrade - if: github.event.pull_request.draft == false && github.base_ref != 'main' && needs.changed-files.outputs.check-upgrade == 'true' + if: github.event.pull_request.draft == false && github.base_ref != 'main' && needs.changed-files.outputs.check-upgrade == 'true' && github.event_name != 'merge_group' uses: ./.github/workflows/upgrade.yml result-check: - needs: [build,snap,generate,docs,terraform,migrate,upgrade] + needs: [build, snap, generate, docs, terraform, migrate, upgrade] runs-on: ubuntu-latest name: Check Tests Passed if: always() && !cancelled() steps: - - name: Check Results - shell: bash - run: | - # TODO - add terraform once ready. 
- if ${{ needs.build.result == 'success' || needs.build.result == 'skipped' }} \ - && ${{ needs.snap.result == 'success' || needs.snap.result == 'skipped' }} \ - && ${{ needs.generate.result == 'success' || needs.generate.result == 'skipped' }} \ - && ${{ needs.docs.result == 'success' || needs.docs.result == 'skipped' }} \ - && ${{ needs.terraform.result != 'fix me' || needs.terraform.result == 'skipped' }} \ - && ${{ needs.migrate.result == 'success' || needs.migrate.result == 'skipped' }} \ - && ${{ needs.upgrade.result == 'success' || needs.upgrade.result == 'skipped' }}; then - exit 0 - fi - exit 1 + - name: Check Results + shell: bash + run: | + # TODO - add terraform once ready. + if ${{ needs.build.result == 'success' || needs.build.result == 'skipped' }} \ + && ${{ needs.snap.result == 'success' || needs.snap.result == 'skipped' }} \ + && ${{ needs.generate.result == 'success' || needs.generate.result == 'skipped' }} \ + && ${{ needs.docs.result == 'success' || needs.docs.result == 'skipped' }} \ + && ${{ needs.terraform.result != 'fix me' || needs.terraform.result == 'skipped' }} \ + && ${{ needs.migrate.result == 'success' || needs.migrate.result == 'skipped' }} \ + && ${{ needs.upgrade.result == 'success' || needs.upgrade.result == 'skipped' }}; then + exit 0 + fi + exit 1
.github/workflows/docs-sphinx-python-dependency-build-checks.yml+10 −2 modified@@ -15,19 +15,27 @@ on: - cron: "0 2 * * 1,5" # Runs at 02:00 AM on every Monday and Friday. workflow_dispatch: - concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true jobs: build: name: build - runs-on: [linux, self-hosted, x64, large, jammy] + runs-on: + - linux + - self-hosted + - x64 + - large + - ${{ (github.repository_owner == 'juju' && 'aws') || (github.repository_owner == 'canonical' && 'noble') }} steps: - name: Checkout code uses: actions/checkout@v4 + - name: "Squid" + if: github.repository_owner != 'juju' + run: ./.github/squid.sh + - name: Install dependencies run: | set -ex
.github/workflows/docs.yml+13 −4 modified@@ -7,21 +7,30 @@ on: jobs: docs: name: Check Autogenerated Documentation - runs-on: [linux, self-hosted, x64, large] + runs-on: + - linux + - self-hosted + - x64 + - large + - ${{ (github.repository_owner == 'juju' && 'aws') || (github.repository_owner == 'canonical' && 'noble') }} steps: - name: Checkout code uses: actions/checkout@v4 + - name: "Squid" + if: github.repository_owner != 'juju' + run: ./.github/squid.sh + - name: Set up Go uses: actions/setup-go@v5 with: - go-version-file: 'go.mod' + go-version-file: "go.mod" cache: true - name: Set up Python uses: actions/setup-python@v5 with: - python-version: '3.10' + python-version: "3.10" - name: Set up Node.js uses: actions/setup-node@v4 @@ -69,4 +78,4 @@ jobs: echo "" echo "*****" exit 1 - fi \ No newline at end of file + fi
.github/workflows/gen.yml+10 −1 modified@@ -6,12 +6,21 @@ on: jobs: generate: name: Check Generated Mocks - runs-on: [self-hosted, linux, arm64, aws, xxlarge] + runs-on: + - self-hosted + - linux + - arm64 + - ${{ (github.repository_owner == 'juju' && 'aws') || (github.repository_owner == 'canonical' && 'noble') }} + - ${{ (github.repository_owner == 'juju' && 'xxlarge') || (github.repository_owner == 'canonical' && 'xlarge') }} steps: - name: "Checkout" uses: actions/checkout@v4 + - name: "Squid" + if: github.repository_owner != 'juju' + run: ./.github/squid.sh + - name: "Set up Go" uses: actions/setup-go@v5 with:
.github/workflows/jaas-smoke.yml+13 −4 modified@@ -12,13 +12,22 @@ permissions: jobs: smoke: name: JAAS Smoke Test - runs-on: [self-hosted, linux, x64, aws, large] + runs-on: + - self-hosted + - linux + - x64 + - ${{ (github.repository_owner == 'juju' && 'aws') || (github.repository_owner == 'canonical' && 'noble') }} + - large steps: - uses: actions/checkout@v4 - name: Set up Docker Compose uses: docker/setup-compose-action@v1 + - name: "Squid" + if: github.repository_owner != 'juju' + run: ./.github/squid.sh + - name: Install Dependencies shell: bash run: | @@ -32,7 +41,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v5 with: - go-version-file: 'go.mod' + go-version-file: "go.mod" cache: true - name: Setup env @@ -45,7 +54,7 @@ jobs: shell: bash run: | make go-install - + - name: Start JIMM, bootstrap a Juju controller and add it to JIMM. uses: canonical/jimm/.github/actions/test-server@v3 id: jaas @@ -54,7 +63,7 @@ jobs: ghcr-pat: ${{ secrets.GITHUB_TOKEN }} dump-logs: true use-charmed-k8s-action: "false" - + - name: Create a model, deploy an application and run juju status run: | juju add-model foo && \
.github/workflows/merge.yml+0 −98 removed@@ -1,98 +0,0 @@ -name: Merge -on: - push: - branches: ['2.9', '3.1', '3.3', '3.4', '3.5', '3.6'] - -jobs: - check-merge: - name: Check for conflicts - runs-on: ubuntu-latest - env: - MERGE_TARGETS: | - 2.9: 3.1 - 3.1: 3.3 - 3.3: 3.4 - 3.4: 3.5 - 3.5: 3.6 - 3.6: main - - steps: - - name: Determine source/target branches - id: branch - run: | - set -x - SOURCE_BRANCH=${{ github.ref_name }} - echo "source=$SOURCE_BRANCH" >> "$GITHUB_OUTPUT" - TARGET_BRANCH=$(echo "$MERGE_TARGETS" | yq ".\"$SOURCE_BRANCH\"") - echo "target=$TARGET_BRANCH" >> "$GITHUB_OUTPUT" - - - name: Checkout repository - uses: actions/checkout@v4 - with: - fetch-depth: 0 - ref: ${{ steps.branch.outputs.source }} - - - name: Set up Go - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - cache: false - - # We need to do this before trying to merge, because if the merge - # creates conflicts in go.mod, we won't be able to use the go command. - - name: Pre-compile try-merge script - run: | - go install ./scripts/try-merge - - - name: Attempt to merge - id: merge - env: - SOURCE_BRANCH: ${{ steps.branch.outputs.source }} - TARGET_BRANCH: ${{ steps.branch.outputs.target }} - run: | - set -x - git fetch origin "$TARGET_BRANCH" - git branch "$TARGET_BRANCH" "origin/$TARGET_BRANCH" - # Need to set Git username/email to do the merge (yawn) - git config user.name 'jujubot' - git config user.email 'fake@address.me' - - set +e - git switch "$TARGET_BRANCH" - git merge "$SOURCE_BRANCH" - case $? in - 0) - echo "conflicts=false" >> "$GITHUB_OUTPUT" - ;; - 1) - echo "conflicts=true" >> "$GITHUB_OUTPUT" - ;; - *) - exit $? 
- ;; - esac - - - name: Generate notification message - if: steps.merge.outputs.conflicts == 'true' - id: message - env: - SOURCE_BRANCH: ${{ steps.branch.outputs.source }} - TARGET_BRANCH: ${{ steps.branch.outputs.target }} - EMAIL_TO_MM_USER: ${{ secrets.EMAIL_TO_MM_USER }} - IGNORE_EMAILS: ${{ secrets.MERGE_NOTIFY_IGNORE_EMAILS }} - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - MESSAGE=$(try-merge errmsg) - echo "message=$MESSAGE" >> "$GITHUB_OUTPUT" - - - name: Notify if merge has conflicts - if: steps.merge.outputs.conflicts == 'true' && steps.message.outputs.message != '' - env: - MM_TOKEN: ${{ secrets.MM_TOKEN }} - MESSAGE: ${{ steps.message.outputs.message }} - run: | - # install mmctl - curl https://github.com/mattermost/mmctl/releases/download/v7.8.5/linux_amd64.tar -Lo mmctl.tar - tar -xvf mmctl.tar - ./mmctl auth login 'https://chat.charmhub.io' --name Charmhub --access-token $MM_TOKEN - ./mmctl post create Charmhub:juju-bot --message "$MESSAGE"
.github/workflows/microk8s-tests.yml+14 −2 modified@@ -14,7 +14,12 @@ permissions: jobs: build: name: Test Kubeflow - runs-on: [self-hosted, linux, arm64, aws, large] + runs-on: + - self-hosted + - linux + - arm64 + - ${{ (github.repository_owner == 'juju' && 'aws') || (github.repository_owner == 'canonical' && 'noble') }} + - large if: github.event.pull_request.draft == false strategy: fail-fast: false @@ -25,6 +30,10 @@ jobs: - name: Checking out repo uses: actions/checkout@v4 + - name: "Squid" + if: github.repository_owner != 'juju' + run: ./.github/squid.sh + - name: Set up Go uses: actions/setup-go@v5 with: @@ -39,6 +48,7 @@ jobs: - name: Setup Docker Mirror shell: bash + if: github.repository_owner == 'juju' run: | (cat /etc/docker/daemon.json 2> /dev/null || echo "{}") | yq -o json '.registry-mirrors += ["https://docker-cache.us-west-2.aws.jujuqa.com:443"]' | sudo tee /etc/docker/daemon.json sudo systemctl restart docker @@ -49,7 +59,7 @@ jobs: channel: '${{ matrix.microk8s }}' # enable now to give microk8s more time to settle down. addons: '["dns", "hostpath-storage", "dashboard", "ingress", "metallb:10.64.140.43-10.64.140.49"]' - launch-configuration: "$GITHUB_WORKSPACE/.github/microk8s-launch-config-aws.yaml" + launch-configuration: ${{ github.repository_owner == 'juju' && '$GITHUB_WORKSPACE/.github/microk8s-launch-config-aws.yaml' || '' }} - name: Install Dependencies run: | @@ -60,6 +70,8 @@ jobs: sudo DEBIAN_FRONTEND=noninteractive apt update sudo DEBIAN_FRONTEND=noninteractive apt install -y libssl-dev python3-setuptools + sudo DEBIAN_FRONTEND=noninteractive apt install -y docker-buildx-plugin >/dev/null 2>&1 || \ + sudo DEBIAN_FRONTEND=noninteractive apt install -y docker-buildx >/dev/null 2>&1 sudo usermod -a -G microk8s $USER - name: Build juju and operator image
.github/workflows/migrate.yml+20 −11 modified@@ -10,25 +10,34 @@ jobs: migrate: name: migrate from ${{ matrix.channel }} via ${{ matrix.client }} client timeout-minutes: 30 - runs-on: [self-hosted, linux, arm64, aws, xlarge] + runs-on: + - self-hosted + - linux + - arm64 + - ${{ (github.repository_owner == 'juju' && 'aws') || (github.repository_owner == 'canonical' && 'noble') }} + - xlarge strategy: fail-fast: false matrix: # TODO: add microk8s tests cloud: ["lxd"] # TODO: migration from 2.9 is broken due to a bug in the series pre-checks - # needs a 2.9 fix - channel: ["3.6/stable"] - client: ['source', 'target'] + # needs a 2.9 fix + channel: ["3.6/stable"] + client: ["source", "target"] steps: - name: Checkout code uses: actions/checkout@v4 + - name: "Squid" + if: github.repository_owner != 'juju' + run: ./.github/squid.sh + - name: Set up Go uses: actions/setup-go@v5 with: - go-version-file: 'go.mod' + go-version-file: "go.mod" cache: true - name: Set up Go env @@ -77,17 +86,17 @@ jobs: --model-default enable-os-upgrade=false juju switch controller juju wait-for application controller - + - name: Migrate model to target controller run: | # Determine which Juju client to use JUJU='juju' if [[ ${{ matrix.client }} == 'source' ]]; then JUJU='/snap/bin/juju' fi - + $JUJU switch source-controller - + # Ensure application is fully deployed # We have to use the old client to speak to the new controller, as # this is blocked otherwise. @@ -104,7 +113,7 @@ jobs: run: | set -x juju switch target-controller - + # Wait for 'test-migrate' model to come through attempt=0 while true; do @@ -119,9 +128,9 @@ jobs: exit 1 fi done - + juju switch test-migrate juju wait-for application ubuntu - + juju deploy ubuntu yet-another-ubuntu --base ubuntu@20.04 juju wait-for application yet-another-ubuntu
.github/workflows/smoke.yml+14 −3 modified@@ -9,24 +9,34 @@ on: jobs: smoke: name: Smoke - runs-on: [self-hosted, linux, arm64, aws, quad-xlarge] + runs-on: + - self-hosted + - linux + - arm64 + - ${{ (github.repository_owner == 'juju' && 'aws') || (github.repository_owner == 'canonical' && 'noble') }} + - ${{ (github.repository_owner == 'juju' && 'quad-xlarge') || (github.repository_owner == 'canonical' && 'xlarge') }} if: github.event.pull_request.draft == false strategy: fail-fast: false matrix: cloud: ["localhost", "microk8s"] steps: - - name: Install Dependencies shell: bash run: | set -euxo pipefail echo "/snap/bin" >> $GITHUB_PATH sudo DEBIAN_FRONTEND=noninteractive apt install -y expect + sudo DEBIAN_FRONTEND=noninteractive apt install -y docker-buildx-plugin >/dev/null 2>&1 || \ + sudo DEBIAN_FRONTEND=noninteractive apt install -y docker-buildx >/dev/null 2>&1 - name: Checkout uses: actions/checkout@v4 + - name: "Squid" + if: github.repository_owner != 'juju' + run: ./.github/squid.sh + - name: Setup LXD if: matrix.cloud == 'localhost' uses: canonical/setup-lxd@2fa6235ef2dfd3288e0de09edac03f2ebf922968 @@ -41,6 +51,7 @@ jobs: - name: Setup Docker Mirror shell: bash + if: github.repository_owner == 'juju' run: | (cat /etc/docker/daemon.json 2> /dev/null || echo "{}") | yq -o json '.registry-mirrors += ["https://docker-cache.us-west-2.aws.jujuqa.com:443"]' | sudo tee /etc/docker/daemon.json sudo systemctl restart docker @@ -52,7 +63,7 @@ jobs: with: channel: "1.34-strict/stable" addons: '["dns", "hostpath-storage", "rbac"]' - launch-configuration: "$GITHUB_WORKSPACE/.github/microk8s-launch-config-aws.yaml" + launch-configuration: ${{ github.repository_owner == 'juju' && '$GITHUB_WORKSPACE/.github/microk8s-launch-config-aws.yaml' || '' }} - name: Set up Go uses: actions/setup-go@v5
.github/workflows/snap.yml+10 −1 modified@@ -9,7 +9,12 @@ permissions: jobs: snap: name: Build linux-arm64 - runs-on: [self-hosted, linux, arm64, aws, quad-xlarge] + runs-on: + - self-hosted + - linux + - arm64 + - ${{ (github.repository_owner == 'juju' && 'aws') || (github.repository_owner == 'canonical' && 'noble') }} + - ${{ (github.repository_owner == 'juju' && 'quad-xlarge') || (github.repository_owner == 'canonical' && 'xlarge') }} steps: - name: Install Dependencies @@ -22,6 +27,10 @@ jobs: - name: Checkout uses: actions/checkout@v4 + - name: "Squid" + if: github.repository_owner != 'juju' + run: ./.github/squid.sh + - name: Setup LXD uses: canonical/setup-lxd@4e959f8e0d9c5feb27d44c5e4d9a330a782edee0
.github/workflows/static-analysis.yml+14 −5 modified@@ -4,23 +4,32 @@ on: branches: ["[0-9].[0-9]+", "[0-9].[0-9]+.[0-9]+", main] pull_request: types: [opened, synchronize, reopened, ready_for_review] -# paths: -# DON'T SET - these are "required" so they need to run on every PR + # paths: + # DON'T SET - these are "required" so they need to run on every PR workflow_dispatch: jobs: checks: name: Checks - runs-on: [self-hosted, linux, arm64, aws, xxlarge] + runs-on: + - self-hosted + - linux + - arm64 + - ${{ (github.repository_owner == 'juju' && 'aws') || (github.repository_owner == 'canonical' && 'noble') }} + - ${{ (github.repository_owner == 'juju' && 'xxlarge') || (github.repository_owner == 'canonical' && 'xlarge') }} if: github.event.pull_request.draft == false steps: - name: Checkout uses: actions/checkout@v4 + - name: "Squid" + if: github.repository_owner != 'juju' + run: ./.github/squid.sh + - name: Set up Go uses: actions/setup-go@v5 with: - go-version-file: 'go.mod' + go-version-file: "go.mod" - name: Install Dependencies run: | @@ -38,7 +47,7 @@ jobs: curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v2.6.1 sudo curl -sSfL https://github.com/mvdan/sh/releases/download/v3.7.0/shfmt_v3.7.0_linux_$(go env GOARCH) -o /usr/bin/shfmt sudo chmod +x /usr/bin/shfmt - + go install github.com/google/go-licenses@latest - name: Download Go Dependencies
.github/workflows/terraform-smoke.yml+12 −3 modified@@ -6,7 +6,12 @@ on: jobs: smoke: name: Terraform Smoke - runs-on: [self-hosted, linux, x64, aws, quad-xlarge] + runs-on: + - self-hosted + - linux + - x64 + - ${{ (github.repository_owner == 'juju' && 'aws') || (github.repository_owner == 'canonical' && 'noble') }} + - ${{ (github.repository_owner == 'juju' && 'quad-xlarge') || (github.repository_owner == 'canonical' && 'xlarge') }} steps: - name: Install Dependencies @@ -19,10 +24,14 @@ jobs: - name: Checkout juju uses: actions/checkout@v4 + - name: "Squid" + if: github.repository_owner != 'juju' + run: ./.github/squid.sh + - name: Set up Go uses: actions/setup-go@v5 with: - go-version-file: 'go.mod' + go-version-file: "go.mod" cache: true - name: setup env @@ -57,7 +66,7 @@ jobs: - name: Find terraform provider for juju latest release uses: actions/checkout@v4 with: - repository: 'juju/terraform-provider-juju' + repository: "juju/terraform-provider-juju" #path: terraform-provider-juju fetch-depth: 0
.github/workflows/unit-tests.yml+60 −0 added@@ -0,0 +1,60 @@ +name: "Unit Tests" +on: + workflow_call: + inputs: + base: + required: true + type: string + +jobs: + test: + name: Test + runs-on: + - self-hosted + - linux + - x64 + - ${{ (github.repository_owner == 'juju' && 'aws') || (github.repository_owner == 'canonical' && 'noble') }} + - ${{ (github.repository_owner == 'juju' && 'quad-xlarge') || (github.repository_owner == 'canonical' && 'xlarge') }} + + steps: + - name: "Checkout" + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: "Squid" + if: github.repository_owner != 'juju' + run: ./.github/squid.sh + + - name: "Set up Go" + uses: actions/setup-go@v5 + with: + go-version-file: "go.mod" + cache: false + + - name: Setup LXD + uses: canonical/setup-lxd@4e959f8e0d9c5feb27d44c5e4d9a330a782edee0 + + - name: "Set up MongoDB" + run: | + sudo snap install juju-db --channel=4.4.30/stable + echo "/snap/bin" >> $GITHUB_PATH + + - name: "Setup Gochanged" + if: inputs.base + run: | + go install github.com/hpidcock/gochanged@latest + echo "$(go env GOPATH)/bin" >> $GITHUB_PATH + + - name: "Calculate Test Packages" + if: inputs.base + run: | + TEST_PACKAGE_LIST=$(mktemp) + gochanged -b "${{ inputs.base }}" | sort | tee "${TEST_PACKAGE_LIST}" + if [ -z "$(grep -F './...' ${TEST_PACKAGE_LIST})" ]; then + echo "TEST_PACKAGE_LIST=${TEST_PACKAGE_LIST}" >> $GITHUB_ENV + fi + + - name: "Test" + run: | + make race-test TEST_PACKAGE_LIST="${TEST_PACKAGE_LIST}"
.github/workflows/upgrade.yml+15 −4 modified@@ -26,7 +26,12 @@ jobs: upgrade: name: Upgrade Smoke - runs-on: [self-hosted, linux, x64, aws, quad-xlarge] + runs-on: + - self-hosted + - linux + - x64 + - ${{ (github.repository_owner == 'juju' && 'aws') || (github.repository_owner == 'canonical' && 'noble') }} + - ${{ (github.repository_owner == 'juju' && 'quad-xlarge') || (github.repository_owner == 'canonical' && 'xlarge') }} timeout-minutes: 30 needs: setup if: needs.setup.outputs.channel != '' @@ -47,6 +52,10 @@ jobs: - name: Checkout uses: actions/checkout@v4 + - name: "Squid" + if: github.repository_owner != 'juju' + run: ./.github/squid.sh + - name: Remove LXD if: env.RUN_TEST == 'RUN' run: | @@ -96,20 +105,21 @@ jobs: echo "/snap/bin" >> $GITHUB_PATH - name: Setup Docker Mirror - if: matrix.cloud == 'microk8s' + if: matrix.cloud == 'microk8s' && env.DOCKERHUB_MIRROR != '' shell: bash run: | - (cat /etc/docker/daemon.json 2> /dev/null || echo "{}") | yq -o json '.registry-mirrors += ["https://docker-cache.us-west-2.aws.jujuqa.com:443"]' | sudo tee /etc/docker/daemon.json + (cat /etc/docker/daemon.json 2> /dev/null || echo "{}") | yq -o json ".registry-mirrors += [\"${DOCKERHUB_MIRROR}\"]" | sudo tee /etc/docker/daemon.json sudo systemctl restart docker docker system info + envsubst < $GITHUB_WORKSPACE/.github/microk8s-launch-config-mirror.yaml > $GITHUB_WORKSPACE/.github/microk8s-launch-config.yaml - name: Setup k8s if: matrix.cloud == 'microk8s' uses: balchua/microk8s-actions@13f73436011eb4925c22526f64fb3ecdd81289a9 with: channel: "1.34-strict/stable" addons: '["dns", "hostpath-storage"]' - launch-configuration: "$GITHUB_WORKSPACE/.github/microk8s-launch-config-aws.yaml" + launch-configuration: $GITHUB_WORKSPACE/.github/microk8s-launch-config.yaml - name: Install k8s Dependencies if: matrix.cloud == 'microk8s' @@ -167,6 +177,7 @@ jobs: EOL podman build $BUILD_TEMP -t ${OCI_REGISTRY}/test-repo/jujud-operator:${SOURCE_JUJU_VERSION} podman push -f v2s2 
"${OCI_REGISTRY}/test-repo/jujud-operator:${SOURCE_JUJU_VERSION}" "docker://${OCI_REGISTRY}/test-repo/jujud-operator:${SOURCE_JUJU_VERSION}" + OCI_REGISTRY_USERNAME=${OCI_REGISTRY}/test-repo LEGACY_JUJU_DB_VERSION=4.4 make seed-repository - name: Bootstrap Juju - localhost
go.mod+6 −6 modified@@ -109,8 +109,8 @@ require ( github.com/vishvananda/netlink v1.3.1 github.com/vmware/govmomi v0.34.1 go.uber.org/mock v0.6.0 - golang.org/x/crypto v0.48.0 - golang.org/x/net v0.51.0 + golang.org/x/crypto v0.49.0 + golang.org/x/net v0.52.0 golang.org/x/oauth2 v0.36.0 golang.org/x/sync v0.20.0 golang.org/x/sys v0.42.0 @@ -119,6 +119,7 @@ require ( google.golang.org/api v0.256.0 google.golang.org/grpc v1.77.0 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c + gopkg.in/errgo.v1 v1.0.1 gopkg.in/httprequest.v1 v1.2.1 gopkg.in/ini.v1 v1.67.0 gopkg.in/juju/environschema.v1 v1.0.1 @@ -300,14 +301,13 @@ require ( go.opentelemetry.io/otel/trace v1.38.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/mod v0.33.0 // indirect - golang.org/x/term v0.40.0 // indirect - golang.org/x/text v0.34.0 // indirect + golang.org/x/mod v0.34.0 // indirect + golang.org/x/term v0.41.0 // indirect + golang.org/x/text v0.35.0 // indirect google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20251111163417-95abcf5c77ba // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20251111163417-95abcf5c77ba // indirect google.golang.org/protobuf v1.36.10 // indirect - gopkg.in/errgo.v1 v1.0.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/gobwas/glob.v0 v0.2.3 // indirect gopkg.in/inf.v0 v0.9.1 // indirect
go.sum+10 −10 modified@@ -834,8 +834,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= -golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= +golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4= +golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6 h1:zfMcR1Cs4KNuomFFgGefv5N0czO2XZpUbxGUy8i8ug0= golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6/go.mod h1:46edojNIoXTNOhySWIWdix628clX9ODXwPsQuG6hsK0= @@ -846,8 +846,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= +golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= golang.org/x/net v0.0.0-20150829230318-ea47fc708ee3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180406214816-61147c48b25b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -869,8 +869,8 @@ golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.51.0 h1:94R/GTO7mt3/4wIKpcR5gkGmRLOuE/2hNGeWq/GBIFo= -golang.org/x/net v0.51.0/go.mod h1:aamm+2QF5ogm02fjy5Bb7CQ0WMt1/WVM7FtyaTLlA9Y= +golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= +golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.36.0 h1:peZ/1z27fi9hUOFCAZaHyrpWG5lwe0RJEEEeH0ThlIs= golang.org/x/oauth2 v0.36.0/go.mod h1:YDBUJMTkDnJS+A4BP4eZBjCqtokkg1hODuPjwiGPO7Q= @@ -925,8 +925,8 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg= -golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM= +golang.org/x/term v0.41.0 h1:QCgPso/Q3RTJx2Th4bDLqML4W6iJiaXFq2/ftQF13YU= +golang.org/x/term v0.41.0/go.mod h1:3pfBgksrReYfZ5lvYM0kSO0LIkAl4Yl2bXOkKP7Ec2A= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -936,8 +936,8 @@ golang.org/x/text v0.3.8/go.mod 
h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= -golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= +golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8= +golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA= golang.org/x/time v0.15.0 h1:bbrp8t3bGUeFOx08pvsMYRTCVSMk89u4tKbNOZbp88U= golang.org/x/time v0.15.0/go.mod h1:Y4YMaQmXwGQZoFaVFk4YpCt4FLQMYKZe9oeV/f4MSno= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
internal/provider/common/bootstrap.go+41 −2 modified@@ -20,7 +20,6 @@ import ( "github.com/juju/loggo" "github.com/juju/utils/v3" "github.com/juju/utils/v3/parallel" - "github.com/juju/utils/v3/shell" "github.com/juju/utils/v3/ssh" "github.com/juju/juju/cloudconfig" @@ -480,6 +479,43 @@ func GetCheckNonceCommand(instanceConfig *instancecfg.InstanceConfig) string { return checkNonceCommand } +// DumpOnErrorScript returns a bash script that may be used to dump the contents +// of the specified files to stderr when the shell exits with an error. +// Each file is preceded by a header line showing its path. +// It will also dump the whole journal if possible. +func DumpOnErrorScript(filenames ...string) string { + if len(filenames) == 0 { + return "" + } + lines := []string{ + "dump_logs() {", + " code=$?", + " if [ $code -ne 0 ]; then", + " echo \"<journal>\" >&2", + " sudo journalctl -m --no-pager --utc --no-hostname -o short-monotonic >&2", + " echo \"</journal>\" >&2", + } + for _, filename := range filenames { + quoted := utils.ShQuote(filename) + indent := " " + lines = append(lines, + fmt.Sprintf("%sif [ -e %s ]; then", indent, quoted), + fmt.Sprintf("%s echo \"<%s>\" >&2", indent, filename), + fmt.Sprintf("%s sudo cat %s >&2", indent, quoted), + fmt.Sprintf("%s echo \"</%s>\" >&2", indent, filename), + fmt.Sprintf("%sfi", indent), + ) + } + lines = append(lines, + " fi", + " exit $code", + "}", + "trap dump_logs EXIT", + "", + ) + return strings.Join(lines, "\n") +} + func ConfigureMachine( ctx environs.BootstrapContext, client ssh.Client, @@ -533,7 +569,10 @@ func ConfigureMachine( return errors.Annotate(err, "transporting files to machine") } - script := shell.DumpFileOnErrorScript(instanceConfig.CloudInitOutputLog) + configScript + script := DumpOnErrorScript( + "/var/snap/juju-db/common/logs/mongodb.log", + instanceConfig.CloudInitOutputLog, + ) + configScript ctx.Infof("Running machine configuration script...") // TODO(benhoyt) - plumb context through 
juju/utils/ssh? return sshinit.RunConfigureScript(script, sshinitConfig)
internal/provider/kubernetes/customresourcedefinitions.go+170 −8 modified@@ -5,6 +5,7 @@ package kubernetes import ( "context" + "encoding/json" "fmt" "strings" "time" @@ -239,30 +240,191 @@ func (k *kubernetesClient) deleteCustomResources(selectorGetter func(apiextensio return nil } -func (k *kubernetesClient) listCustomResources(selectorGetter func(apiextensionsv1.CustomResourceDefinition) k8slabels.Selector) (out []unstructured.Unstructured, err error) { - crds, err := k.extendedClient().ApiextensionsV1().CustomResourceDefinitions().List(context.TODO(), metav1.ListOptions{ - // CRDs might be provisioned by another application/charm from a different model. +// getAllNamespacesCustomResourceDefinitionClient returns a dynamic resource +// client for the given CRD and version that operates everywhere. For namespaced +// CRDs this returns the unscoped NamespaceableResourceInterface. +func (k *kubernetesClient) getAllNamespacesCustomResourceDefinitionClient( + crd *apiextensionsv1.CustomResourceDefinition, + version string, +) (dynamic.NamespaceableResourceInterface, error) { + if version == "" { + return nil, errors.NotValidf( + "empty version for custom resource definition %q", crd.GetName(), + ) + } + found := false + for _, v := range crd.Spec.Versions { + if !v.Served { + continue + } + if version == v.Name { + found = true + break + } + } + if !found { + return nil, errors.NotValidf( + "custom resource definition %s %s is not a supported and served version", + crd.GetName(), version, + ) + } + return k.dynamicClient().Resource(schema.GroupVersionResource{ + Group: crd.Spec.Group, + Version: version, + Resource: crd.Spec.Names.Plural, + }), nil +} + +// removeAllCustomResourceFinalizers lists all CRs everywhere that matches +// the selector, and patches each one to remove all finalisers. This must be +// done before deletion so that resources with finalisers are not left stuck +// in a terminating state. 
+func (k *kubernetesClient) removeAllCustomResourceFinalizers( + ctx context.Context, selector k8slabels.Selector, +) error { + client := k.extendedClient().ApiextensionsV1().CustomResourceDefinitions() + crds, err := client.List(ctx, metav1.ListOptions{ + LabelSelector: selector.String(), + }) + if err != nil { + return errors.Trace(err) + } + // finalizersPatch is the merge-patch payload that clears all finalizers. + finalizersPatch, err := json.Marshal(map[string]any{ + "metadata": map[string]any{ + "finalizers": []string{}, + }, + }) + if err != nil { + return errors.Trace(err) + } + patchAll := func( + crd *apiextensionsv1.CustomResourceDefinition, versionName string, + ) error { + crdClient, err := k.getAllNamespacesCustomResourceDefinitionClient( + crd, versionName) + if err != nil { + return errors.Trace(err) + } + list, err := crdClient.List(ctx, metav1.ListOptions{ + // CRs might be provisioned by another application/charm from a different model. + LabelSelector: "", + }) + if err != nil && !k8serrors.IsNotFound(err) { + return errors.Trace(err) + } + if list == nil { + return nil + } + for _, cr := range list.Items { + if len(cr.GetFinalizers()) == 0 { + continue + } + client := dynamic.ResourceInterface(crdClient) + if isCRDScopeNamespaced(crd.Spec.Scope) && cr.GetNamespace() != "" { + client = crdClient.Namespace(cr.GetNamespace()) + } + _, err = client.Patch( + context.TODO(), + cr.GetName(), + types.MergePatchType, + finalizersPatch, + metav1.PatchOptions{}, + ) + if err != nil && !k8serrors.IsNotFound(err) { + return errors.Annotatef( + err, "removing finalizers from custom resource %q (namespace %q)", + cr.GetName(), cr.GetNamespace(), + ) + } + } + return nil + } + for _, crd := range crds.Items { + if selector.Empty() { + continue + } + for _, version := range crd.Spec.Versions { + if !version.Served { + continue + } + err := patchAll(&crd, version.Name) + if err != nil { + return err + } + } + } + return nil +} + +// 
deleteAllCustomResourcesAllNamespaces deletes custom resources matching the +// supplied selector everywhere. +func (k *kubernetesClient) deleteAllCustomResourcesAllNamespaces( + ctx context.Context, selector k8slabels.Selector, +) error { + client := k.extendedClient().ApiextensionsV1().CustomResourceDefinitions() + crds, err := client.List(ctx, metav1.ListOptions{ + LabelSelector: selector.String(), + }) + if err != nil { + return errors.Trace(err) + } + for _, crd := range crds.Items { + if selector.Empty() { + continue + } + for _, version := range crd.Spec.Versions { + crdClient, err := k.getAllNamespacesCustomResourceDefinitionClient( + &crd, version.Name) + if err != nil { + return errors.Trace(err) + } + err = crdClient.DeleteCollection(ctx, metav1.DeleteOptions{ + PropagationPolicy: constants.DefaultPropagationPolicy(), + }, metav1.ListOptions{ + // CRs might be provisioned by another application/charm from a different model. + LabelSelector: "", + }) + if err != nil && !k8serrors.IsNotFound(err) { + return errors.Trace(err) + } + } + } + return nil +} + +// listAllCustomResourcesAllNamespaces lists custom resources matching the +// selector everywhere. 
+func (k *kubernetesClient) listAllCustomResourcesAllNamespaces( + ctx context.Context, selector k8slabels.Selector, +) (out []unstructured.Unstructured, err error) { + client := k.extendedClient().ApiextensionsV1().CustomResourceDefinitions() + crds, err := client.List(ctx, metav1.ListOptions{ + LabelSelector: selector.String(), }) if err != nil { return nil, errors.Trace(err) } for _, crd := range crds.Items { - selector := selectorGetter(crd) if selector.Empty() { continue } for _, version := range crd.Spec.Versions { - crdClient, err := k.getCustomResourceDefinitionClient(&crd, version.Name) + crdClient, err := k.getAllNamespacesCustomResourceDefinitionClient( + &crd, version.Name) if err != nil { return nil, errors.Trace(err) } - list, err := crdClient.List(context.TODO(), metav1.ListOptions{ - LabelSelector: selector.String(), + list, err := crdClient.List(ctx, metav1.ListOptions{ + // CRs might be provisioned by another application/charm from a different model. + LabelSelector: "", }) if err != nil && !k8serrors.IsNotFound(err) { return nil, errors.Trace(err) } - out = append(out, list.Items...) + if list != nil { + out = append(out, list.Items...) + } } } if len(out) == 0 {
internal/provider/kubernetes/export_test.go+7 −0 modified@@ -7,13 +7,15 @@ import ( "context" "sync" + jujuclock "github.com/juju/clock" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" apps "k8s.io/api/apps/v1" core "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + k8slabels "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/kubernetes" "github.com/juju/juju/caas" @@ -41,6 +43,7 @@ var ( CompileK8sCloudCheckers = compileK8sCloudCheckers CompileLifecycleApplicationRemovalSelector = compileLifecycleApplicationRemovalSelector CompileLifecycleModelTeardownSelector = compileLifecycleModelTeardownSelector + LabelSetToRequirements = labelSetToRequirements UpdateStrategyForDeployment = updateStrategyForDeployment UpdateStrategyForStatefulSet = updateStrategyForStatefulSet @@ -173,6 +176,10 @@ func (k *kubernetesClient) DeleteClusterScopeResourcesModelTeardown(ctx context. k.deleteClusterScopeResourcesModelTeardown(ctx, wg, errChan) } +func (k *kubernetesClient) DeleteClusterScopeAPIExtensionResourcesModelTeardown(ctx context.Context, selector k8slabels.Selector, clk jujuclock.Clock, wg *sync.WaitGroup, errChan chan<- error) { + k.deleteClusterScopeAPIExtensionResourcesModelTeardown(ctx, selector, clk, wg, errChan) +} + func (k *kubernetesClient) DeleteNamespaceModelTeardown(ctx context.Context, wg *sync.WaitGroup, errChan chan<- error) { k.deleteNamespaceModelTeardown(ctx, wg, errChan) }
internal/provider/kubernetes/k8s_test.go+30 −68 modified@@ -1695,7 +1695,7 @@ func (s *K8sBrokerSuite) assertDestroy(c *gc.C, isController bool, destroyFunc f }, }, } - // CRs of this namespaced scope CRD will be skipped. + // CRs of this namespaced scope CRD will also be deleted (across all namespaces). crdNamespacedScope := &apiextensionsv1.CustomResourceDefinition{ ObjectMeta: v1.ObjectMeta{ Name: "tfjobs.kubeflow.org", @@ -1779,80 +1779,42 @@ func (s *K8sBrokerSuite) assertDestroy(c *gc.C, isController bool, destroyFunc f ).Return(s.k8sNotFoundError()), ) - // timer +1. - s.mockNamespaceableResourceClient.EXPECT().List(gomock.Any(), - // list all custom resources for crd "v1alpha2". - v1.ListOptions{LabelSelector: "juju-resource-lifecycle notin (persistent),model.juju.is/id=deadbeef-0bad-400d-8000-4b1d0d06f00d,model.juju.is/name=test"}, - ).Return(&unstructured.UnstructuredList{}, nil).After( - s.mockDynamicClient.EXPECT().Resource( - schema.GroupVersionResource{ - Group: crdClusterScope.Spec.Group, - Version: "v1alpha2", - Resource: crdClusterScope.Spec.Names.Plural, - }, - ).Return(s.mockNamespaceableResourceClient), - ).After( - // list all custom resources for crd "v1". - s.mockNamespaceableResourceClient.EXPECT().List(gomock.Any(), - v1.ListOptions{LabelSelector: "juju-resource-lifecycle notin (persistent),model.juju.is/id=deadbeef-0bad-400d-8000-4b1d0d06f00d,model.juju.is/name=test"}, - ).Return(&unstructured.UnstructuredList{}, nil), - ).After( - s.mockDynamicClient.EXPECT().Resource( - schema.GroupVersionResource{ - Group: crdClusterScope.Spec.Group, - Version: "v1", - Resource: crdClusterScope.Spec.Names.Plural, - }, - ).Return(s.mockNamespaceableResourceClient), - ).After( - // list cluster wide all custom resource definitions for listing custom resources. - s.mockCustomResourceDefinitionV1.EXPECT().List(gomock.Any(), v1.ListOptions{}).AnyTimes(). 
- Return(&apiextensionsv1.CustomResourceDefinitionList{Items: []apiextensionsv1.CustomResourceDefinition{*crdClusterScope, *crdNamespacedScope}}, nil), - ).After( - // delete all custom resources for crd "v1alpha2". - s.mockNamespaceableResourceClient.EXPECT().DeleteCollection(gomock.Any(), - s.deleteOptions(v1.DeletePropagationForeground, ""), - v1.ListOptions{LabelSelector: "juju-resource-lifecycle notin (persistent),model.juju.is/id=deadbeef-0bad-400d-8000-4b1d0d06f00d,model.juju.is/name=test"}, - ).Return(nil), - ).After( - s.mockDynamicClient.EXPECT().Resource( - schema.GroupVersionResource{ - Group: crdClusterScope.Spec.Group, - Version: "v1alpha2", - Resource: crdClusterScope.Spec.Names.Plural, - }, - ).Return(s.mockNamespaceableResourceClient), - ).After( - // delete all custom resources for crd "v1". - s.mockNamespaceableResourceClient.EXPECT().DeleteCollection(gomock.Any(), - s.deleteOptions(v1.DeletePropagationForeground, ""), - v1.ListOptions{LabelSelector: "juju-resource-lifecycle notin (persistent),model.juju.is/id=deadbeef-0bad-400d-8000-4b1d0d06f00d,model.juju.is/name=test"}, - ).Return(nil), - ).After( - s.mockDynamicClient.EXPECT().Resource( - schema.GroupVersionResource{ - Group: crdClusterScope.Spec.Group, - Version: "v1", - Resource: crdClusterScope.Spec.Names.Plural, + crdLabelSelector := "juju-resource-lifecycle notin (persistent),model.juju.is/id=deadbeef-0bad-400d-8000-4b1d0d06f00d,model.juju.is/name=test" + + // list cluster wide all custom resource definitions (used by removeAllCustomResourceFinalizers, + // deleteAllCustomResourcesAllNamespaces and listAllCustomResourcesAllNamespaces). 
+ s.mockCustomResourceDefinitionV1.EXPECT().List(gomock.Any(), v1.ListOptions{ + LabelSelector: crdLabelSelector, + }).Times(1).Return( + &apiextensionsv1.CustomResourceDefinitionList{ + Items: []apiextensionsv1.CustomResourceDefinition{ + *crdClusterScope, *crdNamespacedScope, }, - ).Return(s.mockNamespaceableResourceClient), - ).After( - // list cluster wide all custom resource definitions for deleting custom resources. - s.mockCustomResourceDefinitionV1.EXPECT().List(gomock.Any(), v1.ListOptions{}).AnyTimes(). - Return(&apiextensionsv1.CustomResourceDefinitionList{Items: []apiextensionsv1.CustomResourceDefinition{*crdClusterScope, *crdNamespacedScope}}, nil), + }, + nil, ) + // timer +1. + s.mockDynamicClient.EXPECT().Resource( + gomock.Any(), + ).Return(s.mockNamespaceableResourceClient).AnyTimes() + s.mockNamespaceableResourceClient.EXPECT().List( + gomock.Any(), gomock.Any(), + ).Return(&unstructured.UnstructuredList{}, nil).AnyTimes() + s.mockNamespaceableResourceClient.EXPECT().DeleteCollection(gomock.Any(), + s.deleteOptions(v1.DeletePropagationForeground, ""), + gomock.Any(), + ).Return(nil).AnyTimes() + // timer +1. s.mockCustomResourceDefinitionV1.EXPECT().List(gomock.Any(), v1.ListOptions{ LabelSelector: "juju-resource-lifecycle notin (persistent),model.juju.is/id=deadbeef-0bad-400d-8000-4b1d0d06f00d,model.juju.is/name=test", }).AnyTimes(). - Return(&apiextensionsv1.CustomResourceDefinitionList{}, nil). 
- After( - s.mockCustomResourceDefinitionV1.EXPECT().DeleteCollection(gomock.Any(), - s.deleteOptions(v1.DeletePropagationForeground, ""), - v1.ListOptions{LabelSelector: "juju-resource-lifecycle notin (persistent),model.juju.is/id=deadbeef-0bad-400d-8000-4b1d0d06f00d,model.juju.is/name=test"}, - ).Return(s.k8sNotFoundError()), - ) + Return(&apiextensionsv1.CustomResourceDefinitionList{}, nil) + s.mockCustomResourceDefinitionV1.EXPECT().DeleteCollection(gomock.Any(), + s.deleteOptions(v1.DeletePropagationForeground, ""), + v1.ListOptions{LabelSelector: "juju-resource-lifecycle notin (persistent),model.juju.is/id=deadbeef-0bad-400d-8000-4b1d0d06f00d,model.juju.is/name=test"}, + ).Return(s.k8sNotFoundError()) // timer +1. s.mockMutatingWebhookConfigurationV1.EXPECT().List(gomock.Any(), v1.ListOptions{LabelSelector: "model.juju.is/id=deadbeef-0bad-400d-8000-4b1d0d06f00d,model.juju.is/name=test"}).
internal/provider/kubernetes/teardown.go+19 −15 modified@@ -10,7 +10,6 @@ import ( jujuclock "github.com/juju/clock" "github.com/juju/errors" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" k8slabels "k8s.io/apimachinery/pkg/labels" "github.com/juju/juju/core/watcher" @@ -95,31 +94,36 @@ func (k *kubernetesClient) deleteClusterScopeAPIExtensionResourcesModelTeardown( defer subwg.Wait() selector = mergeSelectors(selector, lifecycleModelTeardownSelector) - // Delete CRs first then CRDs. - k.deleteClusterScopeCustomResourcesModelTeardown(ctx, selector, clk, &subwg, errChan) + // Delete CRs everywhere first then CRDs. Finalizers are stripped from all + // matching CRs before deletion so that resources are not left stuck in a + // terminating state. + k.deleteAllNamespacesCustomResourcesModelTeardown(ctx, selector, clk, &subwg, errChan) k.deleteCustomResourceDefinitionsModelTeardown(ctx, selector, clk, &subwg, errChan) } -func (k *kubernetesClient) deleteClusterScopeCustomResourcesModelTeardown( +// deleteAllNamespacesCustomResourcesModelTeardown deletes custom resources +// matching the selector everywhere. Before issuing the delete it strips all +// finalizers from every matching CR so that nothing is left stuck in a +// terminating state. This must only ever be called during model teardown. +func (k *kubernetesClient) deleteAllNamespacesCustomResourcesModelTeardown( ctx context.Context, selector k8slabels.Selector, clk jujuclock.Clock, wg *sync.WaitGroup, errChan chan<- error, ) { - getSelector := func(crd apiextensionsv1.CustomResourceDefinition) k8slabels.Selector { - if !isCRDScopeNamespaced(crd.Spec.Scope) { - // We only delete cluster scope CRs here, namespaced CRs are deleted by namespace destroy process. 
- return selector - } - return k8slabels.NewSelector() - } ensureResourcesDeletedFunc(ctx, selector, clk, wg, errChan, - func(_ k8slabels.Selector) error { - return k.deleteCustomResources(getSelector) + func(selector k8slabels.Selector) error { + // Remove finalizers first so that the subsequent DeleteCollection + // is not blocked by termination hooks. + err := k.removeAllCustomResourceFinalizers(ctx, selector) + if err != nil { + return err + } + return k.deleteAllCustomResourcesAllNamespaces(ctx, selector) }, - func(_ k8slabels.Selector) error { - _, err := k.listCustomResources(getSelector) + func(selector k8slabels.Selector) error { + _, err := k.listAllCustomResourcesAllNamespaces(ctx, selector) return err }, )
internal/provider/kubernetes/teardown_test.go+264 −92 modified@@ -20,9 +20,13 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + k8slabels "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" "k8s.io/utils/pointer" + provider "github.com/juju/juju/internal/provider/kubernetes" + "github.com/juju/juju/internal/provider/kubernetes/utils" k8swatchertest "github.com/juju/juju/internal/provider/kubernetes/watcher/test" "github.com/juju/juju/testing" ) @@ -89,7 +93,7 @@ func (s *K8sBrokerSuite) TestDeleteClusterScopeResourcesModelTeardownSuccess(c * }, }, } - // CRs of this namespaced scope CRD will be skipped. + // CRs of this namespaced scope CRD will also be deleted (across all namespaces). crdNamespacedScope := &apiextensionsv1.CustomResourceDefinition{ ObjectMeta: v1.ObjectMeta{ Name: "tfjobs.kubeflow.org", @@ -168,68 +172,32 @@ func (s *K8sBrokerSuite) TestDeleteClusterScopeResourcesModelTeardownSuccess(c * ).Return(s.k8sNotFoundError()), ) - // timer +1. - s.mockNamespaceableResourceClient.EXPECT().List(gomock.Any(), - // list all custom resources for crd "v1alpha2". - v1.ListOptions{LabelSelector: "juju-resource-lifecycle notin (persistent),model.juju.is/id=deadbeef-0bad-400d-8000-4b1d0d06f00d,model.juju.is/name=test"}, - ).Return(&unstructured.UnstructuredList{}, nil).After( - s.mockDynamicClient.EXPECT().Resource( - schema.GroupVersionResource{ - Group: crdClusterScope.Spec.Group, - Version: "v1alpha2", - Resource: crdClusterScope.Spec.Names.Plural, - }, - ).Return(s.mockNamespaceableResourceClient), - ).After( - // list all custom resources for crd "v1". 
- s.mockNamespaceableResourceClient.EXPECT().List(gomock.Any(), - v1.ListOptions{LabelSelector: "juju-resource-lifecycle notin (persistent),model.juju.is/id=deadbeef-0bad-400d-8000-4b1d0d06f00d,model.juju.is/name=test"}, - ).Return(&unstructured.UnstructuredList{}, nil), - ).After( - s.mockDynamicClient.EXPECT().Resource( - schema.GroupVersionResource{ - Group: crdClusterScope.Spec.Group, - Version: "v1", - Resource: crdClusterScope.Spec.Names.Plural, - }, - ).Return(s.mockNamespaceableResourceClient), - ).After( - // list cluster wide all custom resource definitions for listing custom resources. - s.mockCustomResourceDefinitionV1.EXPECT().List(gomock.Any(), v1.ListOptions{}).AnyTimes(). - Return(&apiextensionsv1.CustomResourceDefinitionList{Items: []apiextensionsv1.CustomResourceDefinition{*crdClusterScope, *crdNamespacedScope}}, nil), - ).After( - // delete all custom resources for crd "v1alpha2". - s.mockNamespaceableResourceClient.EXPECT().DeleteCollection(gomock.Any(), - s.deleteOptions(v1.DeletePropagationForeground, ""), - v1.ListOptions{LabelSelector: "juju-resource-lifecycle notin (persistent),model.juju.is/id=deadbeef-0bad-400d-8000-4b1d0d06f00d,model.juju.is/name=test"}, - ).Return(nil), - ).After( - s.mockDynamicClient.EXPECT().Resource( - schema.GroupVersionResource{ - Group: crdClusterScope.Spec.Group, - Version: "v1alpha2", - Resource: crdClusterScope.Spec.Names.Plural, - }, - ).Return(s.mockNamespaceableResourceClient), - ).After( - // delete all custom resources for crd "v1". 
- s.mockNamespaceableResourceClient.EXPECT().DeleteCollection(gomock.Any(), - s.deleteOptions(v1.DeletePropagationForeground, ""), - v1.ListOptions{LabelSelector: "juju-resource-lifecycle notin (persistent),model.juju.is/id=deadbeef-0bad-400d-8000-4b1d0d06f00d,model.juju.is/name=test"}, - ).Return(nil), - ).After( - s.mockDynamicClient.EXPECT().Resource( - schema.GroupVersionResource{ - Group: crdClusterScope.Spec.Group, - Version: "v1", - Resource: crdClusterScope.Spec.Names.Plural, + crdLabelSelector := "juju-resource-lifecycle notin (persistent),model.juju.is/id=deadbeef-0bad-400d-8000-4b1d0d06f00d,model.juju.is/name=test" + + // list cluster wide all custom resource definitions (used by removeAllCustomResourceFinalizers, + // deleteAllCustomResourcesAllNamespaces and listAllCustomResourcesAllNamespaces). + s.mockCustomResourceDefinitionV1.EXPECT().List(gomock.Any(), v1.ListOptions{ + LabelSelector: crdLabelSelector, + }).AnyTimes().Return( + &apiextensionsv1.CustomResourceDefinitionList{ + Items: []apiextensionsv1.CustomResourceDefinition{ + *crdClusterScope, *crdNamespacedScope, }, - ).Return(s.mockNamespaceableResourceClient), - ).After( - // list cluster wide all custom resource definitions for deleting custom resources. - s.mockCustomResourceDefinitionV1.EXPECT().List(gomock.Any(), v1.ListOptions{}).AnyTimes(). - Return(&apiextensionsv1.CustomResourceDefinitionList{Items: []apiextensionsv1.CustomResourceDefinition{*crdClusterScope, *crdNamespacedScope}}, nil), - ) + }, + nil, + ).Times(3) + + // timer +1. + s.mockDynamicClient.EXPECT().Resource( + gomock.Any(), + ).Return(s.mockNamespaceableResourceClient).AnyTimes() + s.mockNamespaceableResourceClient.EXPECT().List( + gomock.Any(), gomock.Any(), + ).Return(&unstructured.UnstructuredList{}, nil).AnyTimes() + s.mockNamespaceableResourceClient.EXPECT().DeleteCollection(gomock.Any(), + s.deleteOptions(v1.DeletePropagationForeground, ""), + gomock.Any(), + ).Return(nil).AnyTimes() // timer +1. 
s.mockCustomResourceDefinitionV1.EXPECT().List(gomock.Any(), v1.ListOptions{LabelSelector: "juju-resource-lifecycle notin (persistent),model.juju.is/id=deadbeef-0bad-400d-8000-4b1d0d06f00d,model.juju.is/name=test"}).AnyTimes(). @@ -286,6 +254,7 @@ func (s *K8sBrokerSuite) TestDeleteClusterScopeResourcesModelTeardownSuccess(c * defer cancel() go s.broker.DeleteClusterScopeResourcesModelTeardown(ctx, &wg, errCh) + // 6 parallel tasks, then 1 more tick for the CR list checker. err := s.clock.WaitAdvance(time.Second, testing.ShortWait, 6) c.Assert(err, jc.ErrorIsNil) err = s.clock.WaitAdvance(time.Second, testing.ShortWait, 1) @@ -357,7 +326,7 @@ func (s *K8sBrokerSuite) TestDeleteClusterScopeResourcesModelTeardownTimeout(c * }, }, } - // CRs of this namespaced scope CRD will be skipped. + // CRs of this namespaced scope CRD will also be deleted (across all namespaces). crdNamespacedScope := &apiextensionsv1.CustomResourceDefinition{ ObjectMeta: v1.ObjectMeta{ Name: "tfjobs.kubeflow.org", @@ -424,38 +393,33 @@ func (s *K8sBrokerSuite) TestDeleteClusterScopeResourcesModelTeardownTimeout(c * v1.ListOptions{LabelSelector: "model.juju.is/id=deadbeef-0bad-400d-8000-4b1d0d06f00d,model.juju.is/name=test"}, ).Return(s.k8sNotFoundError()) - // delete all custom resources for crd "v1alpha2". - s.mockNamespaceableResourceClient.EXPECT().DeleteCollection(gomock.Any(), - s.deleteOptions(v1.DeletePropagationForeground, ""), - v1.ListOptions{LabelSelector: "juju-resource-lifecycle notin (persistent),model.juju.is/id=deadbeef-0bad-400d-8000-4b1d0d06f00d,model.juju.is/name=test"}, - ).Return(nil).After( - s.mockDynamicClient.EXPECT().Resource( - schema.GroupVersionResource{ - Group: crdClusterScope.Spec.Group, - Version: "v1alpha2", - Resource: crdClusterScope.Spec.Names.Plural, - }, - ).Return(s.mockNamespaceableResourceClient), - ).After( - // delete all custom resources for crd "v1". 
- s.mockNamespaceableResourceClient.EXPECT().DeleteCollection(gomock.Any(), - s.deleteOptions(v1.DeletePropagationForeground, ""), - v1.ListOptions{LabelSelector: "juju-resource-lifecycle notin (persistent),model.juju.is/id=deadbeef-0bad-400d-8000-4b1d0d06f00d,model.juju.is/name=test"}, - ).Return(nil), - ).After( - s.mockDynamicClient.EXPECT().Resource( - schema.GroupVersionResource{ - Group: crdClusterScope.Spec.Group, - Version: "v1", - Resource: crdClusterScope.Spec.Names.Plural, + crdLabelSelector := "juju-resource-lifecycle notin (persistent),model.juju.is/id=deadbeef-0bad-400d-8000-4b1d0d06f00d,model.juju.is/name=test" + + // list cluster wide all custom resource definitions (used by removeAllCustomResourceFinalizers, + // deleteAllCustomResourcesAllNamespaces and listAllCustomResourcesAllNamespaces). + s.mockCustomResourceDefinitionV1.EXPECT().List(gomock.Any(), v1.ListOptions{ + LabelSelector: crdLabelSelector, + }).AnyTimes().Return( + &apiextensionsv1.CustomResourceDefinitionList{ + Items: []apiextensionsv1.CustomResourceDefinition{ + *crdClusterScope, *crdNamespacedScope, }, - ).Return(s.mockNamespaceableResourceClient), - ).After( - // list cluster wide all custom resource definitions for deleting custom resources. - s.mockCustomResourceDefinitionV1.EXPECT().List(gomock.Any(), v1.ListOptions{}).AnyTimes(). - Return(&apiextensionsv1.CustomResourceDefinitionList{Items: []apiextensionsv1.CustomResourceDefinition{*crdClusterScope, *crdNamespacedScope}}, nil), + }, + nil, ) + // timer +1. 
+ s.mockDynamicClient.EXPECT().Resource( + gomock.Any(), + ).Return(s.mockNamespaceableResourceClient).AnyTimes() + s.mockNamespaceableResourceClient.EXPECT().List( + gomock.Any(), gomock.Any(), + ).Return(&unstructured.UnstructuredList{}, nil).AnyTimes() + s.mockNamespaceableResourceClient.EXPECT().DeleteCollection(gomock.Any(), + s.deleteOptions(v1.DeletePropagationForeground, ""), + gomock.Any(), + ).Return(nil).AnyTimes() + s.mockCustomResourceDefinitionV1.EXPECT().DeleteCollection(gomock.Any(), s.deleteOptions(v1.DeletePropagationForeground, ""), v1.ListOptions{LabelSelector: "juju-resource-lifecycle notin (persistent),model.juju.is/id=deadbeef-0bad-400d-8000-4b1d0d06f00d,model.juju.is/name=test"}, @@ -501,6 +465,214 @@ func (s *K8sBrokerSuite) TestDeleteClusterScopeResourcesModelTeardownTimeout(c * } } +// TestDeleteClusterScopeAPIExtensionResourcesNamespacedCRFinalizersStripped verifies +// that when a namespaced CRD has a CR with a finalizer in a namespace other than the +// model namespace, the finalizer is patched away (against the correct namespace) before +// the DeleteCollection is issued. 
+func (s *K8sBrokerSuite) TestDeleteClusterScopeAPIExtensionResourcesNamespacedCRFinalizersStripped(c *gc.C) { + ctrl := s.setupController(c) + defer ctrl.Finish() + + crdNamespaced := &apiextensionsv1.CustomResourceDefinition{ + ObjectMeta: v1.ObjectMeta{Name: "widgets.example.com"}, + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + {Name: "v1", Served: true, Storage: true}, + }, + Scope: apiextensionsv1.NamespaceScoped, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "widgets", Kind: "Widget", Singular: "widget", + }, + }, + } + + crWithFinalizer := unstructured.Unstructured{} + crWithFinalizer.SetName("my-widget") + crWithFinalizer.SetNamespace("other-model-ns") + crWithFinalizer.SetFinalizers([]string{"foregroundDeletion"}) + + crdLabelSelector := "juju-resource-lifecycle notin (persistent),model.juju.is/id=deadbeef-0bad-400d-8000-4b1d0d06f00d,model.juju.is/name=test" + gvr := schema.GroupVersionResource{Group: "example.com", Version: "v1", Resource: "widgets"} + + // CRD list is called three times: removeAllCustomResourceFinalizers, + // deleteAllCustomResourcesAllNamespaces, listAllCustomResourcesAllNamespaces. 
+ s.mockCustomResourceDefinitionV1.EXPECT().List( + gomock.Any(), + v1.ListOptions{ + LabelSelector: crdLabelSelector, + }, + ).Return(&apiextensionsv1.CustomResourceDefinitionList{ + Items: []apiextensionsv1.CustomResourceDefinition{*crdNamespaced}, + }, nil).Times(3) + + s.mockDynamicClient.EXPECT().Resource(gvr).Return(s.mockNamespaceableResourceClient) + s.mockNamespaceableResourceClient.EXPECT().List( + gomock.Any(), gomock.Any(), + ).Return(&unstructured.UnstructuredList{ + Items: []unstructured.Unstructured{crWithFinalizer}, + }, nil) + s.mockNamespaceableResourceClient.EXPECT().Namespace("other-model-ns").Return(s.mockResourceClient) + s.mockResourceClient.EXPECT().Patch( + gomock.Any(), + "my-widget", + types.MergePatchType, + gomock.Any(), + v1.PatchOptions{}, + ).Return(&crWithFinalizer, nil) + s.mockDynamicClient.EXPECT().Resource(gvr).Return(s.mockNamespaceableResourceClient) + s.mockNamespaceableResourceClient.EXPECT().DeleteCollection( + gomock.Any(), + s.deleteOptions(v1.DeletePropagationForeground, ""), + gomock.Any(), + ).Return(nil) + s.mockDynamicClient.EXPECT().Resource(gvr).Return(s.mockNamespaceableResourceClient) + s.mockNamespaceableResourceClient.EXPECT().List( + gomock.Any(), gomock.Any(), + ).Return(&unstructured.UnstructuredList{}, nil) + + // CRD deletion. + s.mockCustomResourceDefinitionV1.EXPECT().DeleteCollection(gomock.Any(), + s.deleteOptions(v1.DeletePropagationForeground, ""), + v1.ListOptions{LabelSelector: crdLabelSelector}, + ).Return(nil) + // CRD checker: empty list signals deletion complete. 
+ s.mockCustomResourceDefinitionV1.EXPECT().List(gomock.Any(), + v1.ListOptions{LabelSelector: crdLabelSelector}, + ).Return(&apiextensionsv1.CustomResourceDefinitionList{}, nil) + + var wg sync.WaitGroup + wg.Add(1) + errCh := make(chan error, 1) + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + modelSelector := k8slabels.NewSelector().Add( + provider.LabelSetToRequirements(utils.LabelsForModel( + s.broker.ModelName(), s.broker.ModelUUID(), s.broker.ControllerUUID(), s.broker.LabelVersion(), + ))..., + ) + go s.broker.DeleteClusterScopeAPIExtensionResourcesModelTeardown( + ctx, modelSelector, s.clock, &wg, errCh, + ) + + // The two sub-functions run sequentially, so we advance the clock twice: + // once for the CR deletion checker, then once for the CRD deletion checker. + err := s.clock.WaitAdvance(time.Second, testing.ShortWait, 1) + c.Assert(err, jc.ErrorIsNil) + err = s.clock.WaitAdvance(time.Second, testing.ShortWait, 1) + c.Assert(err, jc.ErrorIsNil) + + select { + case <-done: + case <-time.After(testing.LongWait): + c.Fatalf("timed out waiting for DeleteClusterScopeAPIExtensionResourcesModelTeardown to return") + } + select { + case err := <-errCh: + c.Fatalf("unexpected error: %v", err) + default: + } +} + +// TestDeleteClusterScopeAPIExtensionResourcesAllNamespacesDeleted verifies that CRs +// belonging to a namespaced CRD are deleted across ALL namespaces (via an unscoped +// DeleteCollection), not just the model's own namespace. 
+func (s *K8sBrokerSuite) TestDeleteClusterScopeAPIExtensionResourcesAllNamespacesDeleted(c *gc.C) { + ctrl := s.setupController(c) + defer ctrl.Finish() + + crdNamespaced := &apiextensionsv1.CustomResourceDefinition{ + ObjectMeta: v1.ObjectMeta{Name: "foos.example.com"}, + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + {Name: "v1", Served: true, Storage: true}, + }, + Scope: apiextensionsv1.NamespaceScoped, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "foos", Kind: "Foo", Singular: "foo", + }, + }, + } + + crdLabelSelector := "juju-resource-lifecycle notin (persistent),model.juju.is/id=deadbeef-0bad-400d-8000-4b1d0d06f00d,model.juju.is/name=test" + gvr := schema.GroupVersionResource{Group: "example.com", Version: "v1", Resource: "foos"} + + s.mockCustomResourceDefinitionV1.EXPECT().List(gomock.Any(), v1.ListOptions{ + LabelSelector: crdLabelSelector, + }).Return( + &apiextensionsv1.CustomResourceDefinitionList{ + Items: []apiextensionsv1.CustomResourceDefinition{*crdNamespaced}, + }, nil, + ).Times(3) + + s.mockDynamicClient.EXPECT().Resource(gvr).Return(s.mockNamespaceableResourceClient) + s.mockNamespaceableResourceClient.EXPECT().List(gomock.Any(), gomock.Any()). + Return(&unstructured.UnstructuredList{}, nil) + s.mockDynamicClient.EXPECT().Resource(gvr).Return(s.mockNamespaceableResourceClient) + s.mockNamespaceableResourceClient.EXPECT().DeleteCollection(gomock.Any(), + s.deleteOptions(v1.DeletePropagationForeground, ""), + gomock.Any(), + ).Return(nil) + s.mockDynamicClient.EXPECT().Resource(gvr).Return(s.mockNamespaceableResourceClient) + s.mockNamespaceableResourceClient.EXPECT().List(gomock.Any(), gomock.Any()). + Return(&unstructured.UnstructuredList{}, nil) + + // CRD deletion. 
+ s.mockCustomResourceDefinitionV1.EXPECT().DeleteCollection(gomock.Any(), + s.deleteOptions(v1.DeletePropagationForeground, ""), + v1.ListOptions{LabelSelector: crdLabelSelector}, + ).Return(nil) + // CRD checker: empty list signals deletion complete. + s.mockCustomResourceDefinitionV1.EXPECT().List(gomock.Any(), + v1.ListOptions{LabelSelector: crdLabelSelector}, + ).Return(&apiextensionsv1.CustomResourceDefinitionList{}, nil) + + var wg sync.WaitGroup + wg.Add(1) + errCh := make(chan error, 1) + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + modelSelector := k8slabels.NewSelector().Add( + provider.LabelSetToRequirements(utils.LabelsForModel( + s.broker.ModelName(), s.broker.ModelUUID(), s.broker.ControllerUUID(), s.broker.LabelVersion(), + ))..., + ) + go s.broker.DeleteClusterScopeAPIExtensionResourcesModelTeardown( + ctx, modelSelector, s.clock, &wg, errCh, + ) + + // The two sub-functions run sequentially, so we advance the clock twice: + // once for the CR deletion checker, then once for the CRD deletion checker. + err := s.clock.WaitAdvance(time.Second, testing.ShortWait, 1) + c.Assert(err, jc.ErrorIsNil) + err = s.clock.WaitAdvance(time.Second, testing.ShortWait, 1) + c.Assert(err, jc.ErrorIsNil) + + select { + case <-done: + case <-time.After(testing.LongWait): + c.Fatalf("timed out waiting for DeleteClusterScopeAPIExtensionResourcesModelTeardown to return") + } + select { + case err := <-errCh: + c.Fatalf("unexpected error: %v", err) + default: + } +} + func (s *K8sBrokerSuite) TestDeleteNamespaceModelTeardown(c *gc.C) { ctrl := s.setupController(c) defer ctrl.Finish()
internal/worker/peergrouper/initiate.go+5 −3 modified@@ -43,23 +43,25 @@ func InitiateMongoServer(p InitiateMongoParams) error { } p.DialInfo.Direct = true + const attempts = 3 // Initiate may fail while mongo is initialising, so we retry until // we successfully populate the replicaset config. retryCallArgs := retry.CallArgs{ Clock: clock.WallClock, - MaxDuration: 60 * time.Second, + Attempts: attempts, Delay: 5 * time.Second, + MaxDuration: attempts * (mongo.SocketTimeout + p.DialInfo.Timeout + p.DialInfo.SyncTimeout), Func: func() error { return attemptInitiateMongoServer(p.DialInfo, p.MemberHostPort) }, NotifyFunc: func(lastError error, attempt int) { - logger.Debugf("replica set initiation attempt %d failed: %v", attempt, lastError) + logger.Warningf("replica set initiation attempt %d failed: %v", attempt, lastError) }, } err := retry.Call(retryCallArgs) if retry.IsAttemptsExceeded(err) || retry.IsDurationExceeded(err) { err = retry.LastError(err) - logger.Debugf("replica set initiation failed: %v", err) + logger.Errorf("replica set initiation failed: %v", err) } if err == nil { logger.Infof("replica set initiated")
internal/worker/secretsrevoker/doc.go+7 −0 added@@ -0,0 +1,7 @@ +// Copyright 2025 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// Package secretsrevoker provides a worker for revoking issued backend tokens +// and cleaning them up when they expire. +// NOTE: In 4.0 this could be removed and become a cleanup job. +package secretsrevoker
internal/worker/secretsrevoker/manifold.go+82 −0 added@@ -0,0 +1,82 @@ +// Copyright 2025 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package secretsrevoker + +import ( + "github.com/juju/clock" + "github.com/juju/errors" + "github.com/juju/worker/v3" + "github.com/juju/worker/v3/dependency" + + "github.com/juju/juju/api/base" + secretsrevokerclient "github.com/juju/juju/api/controller/secretsrevoker" +) + +// ManifoldConfig describes the resources used by the secretsrevoker worker. +type ManifoldConfig struct { + APICallerName string + Logger Logger + Clock clock.Clock + + NewSecretsFacade func(base.APICaller) SecretsRevokerFacade + NewWorker func(Config) (worker.Worker, error) +} + +// NewSecretsFacade returns a new SecretsFacade. +func NewSecretsFacade(caller base.APICaller) SecretsRevokerFacade { + return secretsrevokerclient.NewClient(caller) +} + +// Manifold returns a Manifold that encapsulates the secretspruner worker. +func Manifold(config ManifoldConfig) dependency.Manifold { + return dependency.Manifold{ + Inputs: []string{ + config.APICallerName, + }, + Start: config.start, + } +} + +// Validate is called by start to check for bad configuration. +func (cfg ManifoldConfig) Validate() error { + if cfg.APICallerName == "" { + return errors.NotValidf("empty APICallerName") + } + if cfg.Logger == nil { + return errors.NotValidf("nil Logger") + } + if cfg.NewSecretsFacade == nil { + return errors.NotValidf("nil NewSecretsFacade") + } + if cfg.NewWorker == nil { + return errors.NotValidf("nil NewWorker") + } + if cfg.Clock == nil { + return errors.NotValidf("nil Clock") + } + return nil +} + +// start is a StartFunc for a Worker manifold. 
+func (cfg ManifoldConfig) start(context dependency.Context) (worker.Worker, error) { + if err := cfg.Validate(); err != nil { + return nil, errors.Trace(err) + } + + var apiCaller base.APICaller + if err := context.Get(cfg.APICallerName, &apiCaller); err != nil { + return nil, errors.Trace(err) + } + + worker, err := cfg.NewWorker(Config{ + Facade: cfg.NewSecretsFacade(apiCaller), + Logger: cfg.Logger, + Clock: cfg.Clock, + QuantiseTime: DefaultQuantiseTime, + }) + if err != nil { + return nil, errors.Trace(err) + } + return worker, nil +}
internal/worker/secretsrevoker/manifold_test.go+116 −0 added@@ -0,0 +1,116 @@ +// Copyright 2025 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package secretsrevoker_test + +import ( + "time" + + "github.com/juju/clock/testclock" + "github.com/juju/errors" + "github.com/juju/loggo" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/worker/v3" + dt "github.com/juju/worker/v3/dependency/testing" + "go.uber.org/mock/gomock" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/api/base" + "github.com/juju/juju/internal/worker/secretsrevoker" +) + +type manifoldSuite struct { + testing.IsolationSuite + config secretsrevoker.ManifoldConfig +} + +var _ = gc.Suite(&manifoldSuite{}) + +func (s *manifoldSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + s.config = s.validConfig() +} + +func (s *manifoldSuite) validConfig() secretsrevoker.ManifoldConfig { + return secretsrevoker.ManifoldConfig{ + Clock: testclock.NewDilatedWallClock(time.Millisecond), + APICallerName: "api-caller", + Logger: loggo.GetLogger("test"), + NewWorker: func(config secretsrevoker.Config) (worker.Worker, error) { + return nil, nil + }, + NewSecretsFacade: func(base.APICaller) secretsrevoker.SecretsRevokerFacade { return nil }, + } +} + +func (s *manifoldSuite) TestValid(c *gc.C) { + c.Check(s.config.Validate(), jc.ErrorIsNil) +} + +func (s *manifoldSuite) TestMissingAPICallerName(c *gc.C) { + s.config.APICallerName = "" + s.checkNotValid(c, "empty APICallerName not valid") +} + +func (s *manifoldSuite) TestMissingLogger(c *gc.C) { + s.config.Logger = nil + s.checkNotValid(c, "nil Logger not valid") +} + +func (s *manifoldSuite) TestMissingNewWorker(c *gc.C) { + s.config.NewWorker = nil + s.checkNotValid(c, "nil NewWorker not valid") +} + +func (s *manifoldSuite) TestMissingNewFacade(c *gc.C) { + s.config.NewSecretsFacade = nil + s.checkNotValid(c, "nil NewSecretsFacade not valid") +} + +func (s *manifoldSuite) 
TestMissingClock(c *gc.C) { + s.config.Clock = nil + s.checkNotValid(c, "nil Clock not valid") +} + +func (s *manifoldSuite) checkNotValid(c *gc.C, expect string) { + err := s.config.Validate() + c.Check(err, gc.ErrorMatches, expect) + c.Check(err, jc.ErrorIs, errors.NotValid) +} + +func (s *manifoldSuite) TestStart(c *gc.C) { + ctrl := gomock.NewController(c) + defer ctrl.Finish() + + facade := NewMockSecretsRevokerFacade(ctrl) + s.config.NewSecretsFacade = func(base.APICaller) secretsrevoker.SecretsRevokerFacade { + return facade + } + + called := false + s.config.NewWorker = func(config secretsrevoker.Config) (worker.Worker, error) { + called = true + mc := jc.NewMultiChecker() + mc.AddExpr(`_.Logger`, gc.NotNil) + mc.AddExpr(`_.Clock`, gc.NotNil) + mc.AddExpr(`_.QuantiseTime`, gc.NotNil) + c.Check(config, mc, secretsrevoker.Config{Facade: facade}) + return nil, nil + } + manifold := secretsrevoker.Manifold(s.config) + w, err := manifold.Start(dt.StubContext(nil, map[string]any{ + "api-caller": struct{ base.APICaller }{&mockAPICaller{}}, + })) + c.Assert(w, gc.IsNil) + c.Assert(err, jc.ErrorIsNil) + c.Assert(called, jc.IsTrue) +} + +type mockAPICaller struct { + base.APICaller +} + +func (*mockAPICaller) BestFacadeVersion(facade string) int { + return 1 +}
internal/worker/secretsrevoker/mocks_test.go+145 −0 added@@ -0,0 +1,145 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/juju/juju/internal/worker/secretsrevoker (interfaces: Logger,SecretsRevokerFacade) +// +// Generated by this command: +// +// mockgen -package secretsrevoker_test -destination mocks_test.go github.com/juju/juju/internal/worker/secretsrevoker Logger,SecretsRevokerFacade +// + +// Package secretsrevoker_test is a generated GoMock package. +package secretsrevoker_test + +import ( + reflect "reflect" + time "time" + + watcher "github.com/juju/juju/core/watcher" + gomock "go.uber.org/mock/gomock" +) + +// MockLogger is a mock of Logger interface. +type MockLogger struct { + ctrl *gomock.Controller + recorder *MockLoggerMockRecorder +} + +// MockLoggerMockRecorder is the mock recorder for MockLogger. +type MockLoggerMockRecorder struct { + mock *MockLogger +} + +// NewMockLogger creates a new mock instance. +func NewMockLogger(ctrl *gomock.Controller) *MockLogger { + mock := &MockLogger{ctrl: ctrl} + mock.recorder = &MockLoggerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockLogger) EXPECT() *MockLoggerMockRecorder { + return m.recorder +} + +// Debugf mocks base method. +func (m *MockLogger) Debugf(arg0 string, arg1 ...any) { + m.ctrl.T.Helper() + varargs := []any{arg0} + for _, a := range arg1 { + varargs = append(varargs, a) + } + m.ctrl.Call(m, "Debugf", varargs...) +} + +// Debugf indicates an expected call of Debugf. +func (mr *MockLoggerMockRecorder) Debugf(arg0 any, arg1 ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{arg0}, arg1...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Debugf", reflect.TypeOf((*MockLogger)(nil).Debugf), varargs...) +} + +// Infof mocks base method. 
+func (m *MockLogger) Infof(arg0 string, arg1 ...any) { + m.ctrl.T.Helper() + varargs := []any{arg0} + for _, a := range arg1 { + varargs = append(varargs, a) + } + m.ctrl.Call(m, "Infof", varargs...) +} + +// Infof indicates an expected call of Infof. +func (mr *MockLoggerMockRecorder) Infof(arg0 any, arg1 ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{arg0}, arg1...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Infof", reflect.TypeOf((*MockLogger)(nil).Infof), varargs...) +} + +// Warningf mocks base method. +func (m *MockLogger) Warningf(arg0 string, arg1 ...any) { + m.ctrl.T.Helper() + varargs := []any{arg0} + for _, a := range arg1 { + varargs = append(varargs, a) + } + m.ctrl.Call(m, "Warningf", varargs...) +} + +// Warningf indicates an expected call of Warningf. +func (mr *MockLoggerMockRecorder) Warningf(arg0 any, arg1 ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{arg0}, arg1...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Warningf", reflect.TypeOf((*MockLogger)(nil).Warningf), varargs...) +} + +// MockSecretsRevokerFacade is a mock of SecretsRevokerFacade interface. +type MockSecretsRevokerFacade struct { + ctrl *gomock.Controller + recorder *MockSecretsRevokerFacadeMockRecorder +} + +// MockSecretsRevokerFacadeMockRecorder is the mock recorder for MockSecretsRevokerFacade. +type MockSecretsRevokerFacadeMockRecorder struct { + mock *MockSecretsRevokerFacade +} + +// NewMockSecretsRevokerFacade creates a new mock instance. +func NewMockSecretsRevokerFacade(ctrl *gomock.Controller) *MockSecretsRevokerFacade { + mock := &MockSecretsRevokerFacade{ctrl: ctrl} + mock.recorder = &MockSecretsRevokerFacadeMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSecretsRevokerFacade) EXPECT() *MockSecretsRevokerFacadeMockRecorder { + return m.recorder +} + +// RevokeIssuedTokens mocks base method. 
+func (m *MockSecretsRevokerFacade) RevokeIssuedTokens(arg0 time.Time) (time.Time, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RevokeIssuedTokens", arg0) + ret0, _ := ret[0].(time.Time) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RevokeIssuedTokens indicates an expected call of RevokeIssuedTokens. +func (mr *MockSecretsRevokerFacadeMockRecorder) RevokeIssuedTokens(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RevokeIssuedTokens", reflect.TypeOf((*MockSecretsRevokerFacade)(nil).RevokeIssuedTokens), arg0) +} + +// WatchIssuedTokenExpiry mocks base method. +func (m *MockSecretsRevokerFacade) WatchIssuedTokenExpiry() (watcher.StringsWatcher, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WatchIssuedTokenExpiry") + ret0, _ := ret[0].(watcher.StringsWatcher) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WatchIssuedTokenExpiry indicates an expected call of WatchIssuedTokenExpiry. +func (mr *MockSecretsRevokerFacadeMockRecorder) WatchIssuedTokenExpiry() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WatchIssuedTokenExpiry", reflect.TypeOf((*MockSecretsRevokerFacade)(nil).WatchIssuedTokenExpiry)) +}
internal/worker/secretsrevoker/package_test.go+16 −0 added@@ -0,0 +1,16 @@ +// Copyright 2025 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package secretsrevoker + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +//go:generate go run go.uber.org/mock/mockgen -package secretsrevoker_test -destination mocks_test.go github.com/juju/juju/internal/worker/secretsrevoker Logger,SecretsRevokerFacade + +func TestPackage(t *stdtesting.T) { + gc.TestingT(t) +}
internal/worker/secretsrevoker/worker.go+179 −0 added@@ -0,0 +1,179 @@ +// Copyright 2025 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package secretsrevoker + +import ( + "time" + + "github.com/juju/clock" + "github.com/juju/errors" + "github.com/juju/worker/v3" + "github.com/juju/worker/v3/catacomb" + + "github.com/juju/juju/core/watcher" +) + +const ( + // quantTerm is the default quantisation term used in the default time + // quantisation function. + quantTerm = time.Minute +) + +// Logger is a logger interface. +type Logger interface { + Debugf(string, ...any) + Warningf(string, ...any) + Infof(string, ...any) +} + +// SecretsRevokerFacade is used by the secrets revoker to watch and act on the +// expiry of secret backend issued tokens. +type SecretsRevokerFacade interface { + WatchIssuedTokenExpiry() (watcher.StringsWatcher, error) + RevokeIssuedTokens(until time.Time) (time.Time, error) +} + +// QuantiseTimeFunc is used to pass the secrets revoker worker a quantisation +// function for time. +type QuantiseTimeFunc func(time.Time) time.Time + +// Config is the configuration for the secrets revoker worker. +type Config struct { + Facade SecretsRevokerFacade + Logger Logger + Clock clock.Clock + QuantiseTime QuantiseTimeFunc +} + +// Validate returns an error when the config is invalid. +func (config Config) Validate() error { + if config.Facade == nil { + return errors.NotValidf("nil Facade") + } + if config.Logger == nil { + return errors.NotValidf("nil Logger") + } + if config.Clock == nil { + return errors.NotValidf("nil Clock") + } + if config.QuantiseTime == nil { + return errors.NotValidf("nil QuantiseTime") + } + return nil +} + +// DefaultQuantiseTime is the default time quantisation function for the secret +// revoker worker's scheduler. 
+func DefaultQuantiseTime(t time.Time) time.Time { + return t.Truncate(quantTerm).Add(quantTerm) +} + +// NewWorker returns a new secrets revoker worker that is responsible for +// revoking secret backend issued tokens when they expire. +func NewWorker(config Config) (worker.Worker, error) { + if err := config.Validate(); err != nil { + return nil, errors.Trace(err) + } + + w := &revoker{config: config} + err := catacomb.Invoke(catacomb.Plan{ + Site: &w.catacomb, + Work: w.loop, + }) + return w, errors.Trace(err) +} + +// revoker is the secrets revoker worker. +type revoker struct { + catacomb catacomb.Catacomb + config Config +} + +// Kill fulfills worker.Worker. +func (w *revoker) Kill() { + w.catacomb.Kill(nil) +} + +// Wait fulfills worker.Worker. +func (w *revoker) Wait() error { + return w.catacomb.Wait() +} + +// loop handles watching for the expiry of secret backend issued tokens and +// scheduling in the future a time when to attempt to revoke those secret +// backend issued tokens. 
+func (w *revoker) loop() (err error) { + logger := w.config.Logger + clk := w.config.Clock + quantiseTime := w.config.QuantiseTime + + watcher, err := w.config.Facade.WatchIssuedTokenExpiry() + if err != nil { + return errors.Trace(err) + } + if err := w.catacomb.Add(watcher); err != nil { + return errors.Trace(err) + } + + var ( + alarm clock.Alarm + next time.Time + fire <-chan time.Time + ) + for { + select { + case <-w.catacomb.Dying(): + if !next.IsZero() { + logger.Warningf("revoker dying with scheduled token revocations") + } + return errors.Trace(w.catacomb.ErrDying()) + case changes, ok := <-watcher.Changes(): + if !ok { + return errors.New("secret issued token expiry watcher closed") + } + if len(changes) == 0 { + continue + } + earliest := next + for _, v := range changes { + ts, err := time.Parse(time.RFC3339, v) + if err != nil { + logger.Warningf("invalid issued token expiry time: %v", err) + continue + } + if earliest.IsZero() || earliest.After(ts) { + earliest = ts + } + } + if earliest.IsZero() { + continue + } + earliestQuantised := quantiseTime(earliest) + if !next.Equal(earliestQuantised) { + next = earliestQuantised + logger.Debugf("scheduling revoke at %v", next) + if alarm == nil { + alarm = clk.NewAlarm(next) + fire = alarm.Chan() + } else { + alarm.Reset(next) + } + } + case <-fire: + logger.Debugf("revoking issued tokens until %v", next) + nextRevoke, err := w.config.Facade.RevokeIssuedTokens(next) + if err != nil { + return errors.Annotate(err, "failed to revoke tokens") + } + if nextRevoke.IsZero() { + logger.Debugf("sleeping until token expiry trigger") + next = time.Time{} + continue + } + next = quantiseTime(nextRevoke) + logger.Debugf("scheduling revoke at %v", next) + alarm.Reset(next) + } + } +}
internal/worker/secretsrevoker/worker_test.go+246 −0 added@@ -0,0 +1,246 @@ +// Copyright 2025 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package secretsrevoker_test + +import ( + "math/rand" + "time" + + "github.com/juju/clock/testclock" + "github.com/juju/loggo" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/worker/v3/workertest" + "go.uber.org/mock/gomock" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/core/watcher/watchertest" + "github.com/juju/juju/internal/worker/secretsrevoker" +) + +type workerSuite struct { + testing.LoggingSuite + + facade *MockSecretsRevokerFacade +} + +var _ = gc.Suite(&workerSuite{}) + +func (s *workerSuite) setupMocks(c *gc.C) *gomock.Controller { + ctrl := gomock.NewController(c) + s.facade = NewMockSecretsRevokerFacade(ctrl) + return ctrl +} + +func (s *workerSuite) TestWorkerWithSingleRevoke(c *gc.C) { + defer s.setupMocks(c).Finish() + + clk := testclock.NewDilatedWallClock(time.Millisecond) + now := clk.Now() + last := now.Add(10 * time.Minute) + + ch := make(chan []string, 1) + ch <- []string(nil) + expiryWatcher := watchertest.NewMockStringsWatcher(ch) + defer workertest.CheckKilled(c, expiryWatcher) + s.facade.EXPECT().WatchIssuedTokenExpiry().Return(expiryWatcher, nil) + + done := make(chan struct{}) + s.facade.EXPECT().RevokeIssuedTokens( + gomock.Any(), + ).DoAndReturn(func(until time.Time) (time.Time, error) { + close(done) + c.Assert(until, jc.After, last) + return time.Time{}, nil + }) + + w, err := secretsrevoker.NewWorker(secretsrevoker.Config{ + Facade: s.facade, + Logger: loggo.GetLogger("test"), + Clock: clk, + QuantiseTime: secretsrevoker.DefaultQuantiseTime, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(w, gc.NotNil) + defer workertest.CleanKill(c, w) + + ch <- []string{last.Format(time.RFC3339)} + <-done +} + +func (s *workerSuite) TestWorkerWithMoreToRevoke(c *gc.C) { + defer s.setupMocks(c).Finish() + + clk := 
testclock.NewDilatedWallClock(time.Millisecond) + now := clk.Now().UTC() + first := now.Add(10 * time.Minute) + next := first.Add(10 * time.Minute) + + ch := make(chan []string, 1) + ch <- []string(nil) + expiryWatcher := watchertest.NewMockStringsWatcher(ch) + defer workertest.CheckKilled(c, expiryWatcher) + s.facade.EXPECT().WatchIssuedTokenExpiry().Return(expiryWatcher, nil) + + done := make(chan struct{}) + s.facade.EXPECT().RevokeIssuedTokens( + gomock.Any(), + ).DoAndReturn(func(until time.Time) (time.Time, error) { + if until.After(next) { + close(done) + return time.Time{}, nil + } + c.Assert(until, jc.After, first) + return next, nil + }).Times(2) + + w, err := secretsrevoker.NewWorker(secretsrevoker.Config{ + Facade: s.facade, + Logger: loggo.GetLogger("test"), + Clock: clk, + QuantiseTime: secretsrevoker.DefaultQuantiseTime, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(w, gc.NotNil) + defer workertest.CleanKill(c, w) + + ch <- []string{first.Format(time.RFC3339)} + <-done +} + +func (s *workerSuite) TestWorkerWithBreaks(c *gc.C) { + defer s.setupMocks(c).Finish() + + clk := testclock.NewDilatedWallClock(time.Millisecond) + + ch := make(chan []string, 1) + ch <- []string(nil) + expiryWatcher := watchertest.NewMockStringsWatcher(ch) + defer workertest.CheckKilled(c, expiryWatcher) + s.facade.EXPECT().WatchIssuedTokenExpiry().Return(expiryWatcher, nil) + + w, err := secretsrevoker.NewWorker(secretsrevoker.Config{ + Facade: s.facade, + Logger: loggo.GetLogger("test"), + Clock: clk, + QuantiseTime: secretsrevoker.DefaultQuantiseTime, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(w, gc.NotNil) + defer workertest.CleanKill(c, w) + + t0 := clk.Now().Add(30 * time.Second) + t1 := t0.Add(10 * time.Minute) + s.facade.EXPECT().RevokeIssuedTokens( + gomock.Any(), + ).DoAndReturn(func(until time.Time) (time.Time, error) { + c.Assert(until, jc.After, t0) + return t1, nil + }) + done := make(chan struct{}) + s.facade.EXPECT().RevokeIssuedTokens( + gomock.Any(), + 
).DoAndReturn(func(until time.Time) (time.Time, error) { + defer close(done) + c.Assert(until, jc.After, t1) + return time.Time{}, nil + }) + ch <- []string{t0.Format(time.RFC3339)} + <-done + + // Break until a new send on the watcher. + + t2 := clk.Now().Add(30 * time.Second) + t3 := t2.Add(10 * time.Minute) + s.facade.EXPECT().RevokeIssuedTokens( + gomock.Any(), + ).DoAndReturn(func(until time.Time) (time.Time, error) { + c.Assert(until, jc.After, t2) + return t3, nil + }) + done2 := make(chan struct{}) + s.facade.EXPECT().RevokeIssuedTokens( + gomock.Any(), + ).DoAndReturn(func(until time.Time) (time.Time, error) { + defer close(done2) + c.Assert(until, jc.After, t3) + return time.Time{}, nil + }) + ch <- []string{t2.Format(time.RFC3339)} + <-done2 +} + +func (s *workerSuite) TestWorkerQuantisedSchedule(c *gc.C) { + defer s.setupMocks(c).Finish() + + const iterations = 100 + clk := testclock.NewDilatedWallClock(50 * time.Microsecond) + now := clk.Now().UTC().Truncate(time.Second) + times := map[time.Time]time.Time{} + first := time.Time{} + for i := range iterations { + next := now.Add(time.Duration(rand.Intn(600)) * time.Second) + nextQ := secretsrevoker.DefaultQuantiseTime(next) + times[next] = nextQ + if i == 0 { + first = next + } else { + s.facade.EXPECT().RevokeIssuedTokens(times[now]).Return(next, nil) + } + now = next + } + + done := make(chan struct{}) + s.facade.EXPECT().RevokeIssuedTokens( + times[now], + ).DoAndReturn(func(_ time.Time) (time.Time, error) { + defer close(done) + return time.Time{}, nil + }) + + ch := make(chan []string, 1) + ch <- []string(nil) + expiryWatcher := watchertest.NewMockStringsWatcher(ch) + defer workertest.CheckKilled(c, expiryWatcher) + s.facade.EXPECT().WatchIssuedTokenExpiry().Return(expiryWatcher, nil) + + quantiseTime := func(x time.Time) time.Time { + for k, v := range times { + if k.Equal(x) { + return v + } + } + c.Errorf("unexpected time %q", x) + return x + } + w, err := 
secretsrevoker.NewWorker(secretsrevoker.Config{ + Facade: s.facade, + Logger: loggo.GetLogger("test"), + Clock: clk, + QuantiseTime: quantiseTime, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(w, gc.NotNil) + defer workertest.CleanKill(c, w) + + ch <- []string{first.Format(time.RFC3339)} + <-done +} + +// TestDefaultQuantiseTimeFunction checks that the default time quantisation +// function creates minute buckets. +func (s *workerSuite) TestDefaultQuantiseTimeFunction(c *gc.C) { + unique := 0 + last := time.Time{}.Add(time.Minute) + accum := time.Time{} + for range 60 { + accum = accum.Add(10 * time.Second) + accumQuant := secretsrevoker.DefaultQuantiseTime(accum) + if last != accumQuant { + unique++ + last = accumQuant + } + } + c.Assert(unique, gc.Equals, 10) +}
mongo/mongo.go+4 −4 modified@@ -312,7 +312,7 @@ func ensureServer(ctx context.Context, args EnsureServerParams, mongoKernelTweak return retry.Call(retry.CallArgs{ Func: func() error { if err := svc.Start(); err != nil { - logger.Debugf("cannot start mongo service: %v", err) + logger.Warningf("cannot start mongo service: %v", err) } return ensureMongoServiceRunning(ctx, svc) }, @@ -322,7 +322,7 @@ func ensureServer(ctx context.Context, args EnsureServerParams, mongoKernelTweak return errors.Cause(err) == ErrMongoServiceNotInstalled }, NotifyFunc: func(err error, attempt int) { - logger.Debugf("attempt %d to start mongo service: %v", attempt, err) + logger.Warningf("attempt %d to start mongo service: %v", attempt, err) }, Stop: ctx.Done(), Attempts: -1, @@ -389,7 +389,7 @@ func setupDataDirectory(args EnsureServerParams) error { func truncateAndWriteIfExists(procFile, value string) error { if _, err := os.Stat(procFile); os.IsNotExist(err) { - logger.Debugf("%q does not exist, will not set %q", procFile, value) + logger.Warningf("%q does not exist, will not set %q", procFile, value) return errors.Errorf("%q does not exist, will not set %q", procFile, value) } f, err := os.OpenFile(procFile, os.O_WRONLY|os.O_TRUNC, 0600) @@ -431,7 +431,7 @@ func logVersion(mongoPath string) { logger.Infof("failed to read the output from %s --version: %v", mongoPath, err) return } - logger.Debugf("using mongod: %s --version:\n%s", mongoPath, output) + logger.Infof("using mongod: %s --version:\n%s", mongoPath, output) } func mongoSnapService(dataDir, configDir, snapChannel string) (MongoSnapService, error) {
mongo/open.go+2 −2 modified@@ -254,7 +254,7 @@ func DialInfo(info Info, opts DialOpts) (*mgo.DialInfo, error) { addr := server.TCPAddr().String() c, err := net.DialTimeout("tcp", addr, opts.Timeout) if err != nil { - logger.Debugf("mongodb connection failed, will retry: %v", err) + logger.Warningf("mongodb connection failed, will retry: %v", err) return nil, err } if tlsConfig != nil { @@ -268,7 +268,7 @@ func DialInfo(info Info, opts DialOpts) (*mgo.DialInfo, error) { } c = cc } - logger.Debugf("dialed mongodb server at %q", addr) + logger.Infof("dialed mongodb server at %q", addr) return c, nil }
rpc/params/secrets.go+7 −0 modified@@ -612,3 +612,10 @@ type SecretRevisionArgs struct { Unit Entity `json:"entity"` SecretURIs []string `json:"secret-uris"` } + +// RevokeIssuedTokensResult holds the result of revoking issued tokens, along +// with an optional time for the next revocation. +type RevokeIssuedTokensResult struct { + Next time.Time `json:"next"` + Error *Error `json:"error,omitempty"` +}
scripts/win-installer/setup.iss+1 −1 modified@@ -4,7 +4,7 @@ #if GetEnv('JUJU_VERSION') != "" #define MyAppVersion=GetEnv('JUJU_VERSION') #else -#define MyAppVersion="3.6.16" +#define MyAppVersion="3.6.20" #endif #define MyAppName "Juju"
secrets/provider/juju/provider.go+22 −4 modified@@ -28,31 +28,49 @@ func (p jujuProvider) Type() string { return BackendType } -// Initialise is not used. +// Initialise is not used because this provider does not have any external +// interactions outside the model. func (p jujuProvider) Initialise(*provider.ModelBackendConfig) error { return nil } -// CleanupModel is not used. +// CleanupModel is not used because this provider does not have any resources +// that exist outside of the model. func (p jujuProvider) CleanupModel(*provider.ModelBackendConfig) error { return nil } -// CleanupSecrets is not used. +// CleanupSecrets is not used because this provider does not store secrets +// externally to the model. func (p jujuProvider) CleanupSecrets(cfg *provider.ModelBackendConfig, tag names.Tag, removed provider.SecretRevisions) error { return nil } +// CleanupIssuedTokens is not used because this provider does not issue backend +// tokens. +func (p jujuProvider) CleanupIssuedTokens( + _ *provider.ModelBackendConfig, issuedTokenUUIDs []string, +) ([]string, error) { + return issuedTokenUUIDs, nil +} + // BuiltInConfig returns a minimal config for the Juju backend. func BuiltInConfig() provider.BackendConfig { return provider.BackendConfig{BackendType: BackendType} } +// IssuesTokens returns false since this provider does not create tokens. +func (p jujuProvider) IssuesTokens() bool { + return false +} + // RestrictedConfig returns the config needed to create a // secrets backend client restricted to manage the specified // owned secrets and read shared secrets for the given entity tag. 
func (p jujuProvider) RestrictedConfig( - adminCfg *provider.ModelBackendConfig, sameController, forDrain bool, tag names.Tag, owned provider.SecretRevisions, read provider.SecretRevisions, + *provider.ModelBackendConfig, + bool, bool, string, names.Tag, + []string, provider.SecretRevisions, provider.SecretRevisions, ) (*provider.BackendConfig, error) { return &provider.BackendConfig{ BackendType: BackendType,
secrets/provider/kubernetes/labels.go+12 −1 modified@@ -4,6 +4,7 @@ package kubernetes import ( + "github.com/juju/names/v5" "k8s.io/apimachinery/pkg/labels" "github.com/juju/juju/internal/provider/kubernetes/constants" @@ -13,6 +14,11 @@ import ( const ( labelJujuSecretModelName = "secrets.juju.is/model-name" labelJujuSecretModelUUID = "secrets.juju.is/model-id" + labelJujuSecretConsumer = "secrets.juju.is/consumer" +) + +const ( + annotationJujuSecretExpireAt = "secrets.juju.is/expire-at" ) func labelsForSecretRevision(modelName string, modelUUID string) labels.Set { @@ -24,12 +30,17 @@ func labelsForSecretRevision(modelName string, modelUUID string) labels.Set { return utils.LabelsMerge(utils.LabelsJuju, secretLabels) } -func labelsForServiceAccount(modelName string, modelUUID string) labels.Set { +func labelsForServiceAccount( + modelName string, modelUUID string, consumer names.Tag, +) labels.Set { secretLabels := map[string]string{ constants.LabelJujuModelName: modelName, labelJujuSecretModelName: modelName, labelJujuSecretModelUUID: modelUUID, } + if consumer != nil { + secretLabels[labelJujuSecretConsumer] = consumer.String() + } return utils.LabelsMerge(utils.LabelsJuju, secretLabels) }
secrets/provider/kubernetes/provider.go+424 −417 modified@@ -9,6 +9,8 @@ import ( "fmt" "net" "os" + "slices" + "strconv" "strings" "time" @@ -23,13 +25,12 @@ import ( rbacv1 "k8s.io/api/rbac/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" k8scloud "github.com/juju/juju/caas/kubernetes/cloud" - "github.com/juju/juju/core/model" + coresecrets "github.com/juju/juju/core/secrets" "github.com/juju/juju/environs/bootstrap" "github.com/juju/juju/environs/cloudspec" k8sprovider "github.com/juju/juju/internal/provider/kubernetes" @@ -190,8 +191,14 @@ func (p k8sProvider) CleanupSecrets(cfg *provider.ModelBackendConfig, tag names. if err != nil { return errors.Trace(err) } - _, err = broker.ensureSecretAccessToken(context.TODO(), tag, nil, nil, removed.RevisionIDs()) - return errors.Trace(err) + + ctx := context.TODO() + err = broker.dropSecretAccess(ctx, removed.RevisionIDs()) + if err != nil { + return errors.Trace(err) + } + + return nil } func cloudSpecToBackendConfig(spec cloudspec.CloudSpec) (*provider.BackendConfig, error) { @@ -253,13 +260,52 @@ func IsBuiltInName(backendName string) bool { return strings.HasSuffix(backendName, "-local") } +// IssuesTokens returns true if this secret backend provider needs to issue +// a token to provide a restricted (delegated) config. +func (p k8sProvider) IssuesTokens() bool { + return true +} + +// CleanupIssuedTokens removes all ACLs/tokens related to the given issued +// token UUIDs. It returns, even during error, the list of tokens it revoked +// so far. 
+func (p k8sProvider) CleanupIssuedTokens( + adminCfg *provider.ModelBackendConfig, issuedTokenUUIDs []string, +) ([]string, error) { + broker, err := p.getBroker(adminCfg) + if err != nil { + return nil, errors.Trace(err) + } + + ctx := context.TODO() + + for i, uuid := range issuedTokenUUIDs { + err = broker.revokeSecretAccessToken(ctx, uuid) + if err != nil { + // Return the tokens deleted so far. + return issuedTokenUUIDs[:i], errors.New( + "removing k8s secret backend issued tokens", + ) + } + } + + return issuedTokenUUIDs, nil +} + // RestrictedConfig returns the config needed to create a // secrets backend client restricted to manage the specified // owned secrets and read shared secrets for the given entity tag. func (p k8sProvider) RestrictedConfig( - adminCfg *provider.ModelBackendConfig, sameController, forDrain bool, consumer names.Tag, owned provider.SecretRevisions, read provider.SecretRevisions, + adminCfg *provider.ModelBackendConfig, + sameController, forDrain bool, + issuedTokenUUID string, + consumer names.Tag, + owned []string, + ownedRevs provider.SecretRevisions, + readRevs provider.SecretRevisions, ) (*provider.BackendConfig, error) { - logger.Tracef("getting k8s backend config for %q, owned %v, read %v", consumer, owned, read) + logger.Tracef("getting k8s backend config for %q, owned %v, readRevs %v", + consumer, owned, readRevs) if consumer == nil { return &adminCfg.BackendConfig, nil @@ -274,7 +320,35 @@ func (p k8sProvider) RestrictedConfig( return nil, errors.Trace(err) } ctx := context.TODO() - token, err := broker.ensureSecretAccessToken(ctx, consumer, owned.RevisionIDs(), read.RevisionIDs(), nil) + + // Kubernetes secrets cannot restrict create operations by name. To ensure + // a restricted config cannot create secrets with other names, we must add + // an extra pre-created secret object for the next revision. For secrets + // that have not yet been created, we must make the first revision secret + // object. 
+ maxOwnedRev := make(map[string]int) + for _, rev := range ownedRevs.RevisionIDs() { + id, rev, err := coresecrets.ParseRevisionName(rev) + if err != nil { + return nil, errors.Trace(err) + } + maxOwnedRev[id] = max(maxOwnedRev[id], rev) + } + preCreateRevisions := make([]string, 0, len(owned)) + for _, id := range owned { + nextRev := maxOwnedRev[id] + 1 + preCreateRevisions = append(preCreateRevisions, + coresecrets.RevisionName(id, nextRev)) + } + err = broker.precreateSecretRevs(ctx, preCreateRevisions) + if err != nil { + return nil, errors.Trace(err) + } + + writeRevs := slices.Concat(ownedRevs.RevisionIDs(), preCreateRevisions) + token, err := broker.createSecretAccessToken( + ctx, issuedTokenUUID, consumer, writeRevs, readRevs.RevisionIDs(), + ) if err != nil { return nil, errors.Trace(err) } @@ -348,7 +422,9 @@ func (k *kubernetesClient) isExternalNamespace() (bool, error) { } // TODO: make this configurable. -var expiresInSeconds = int64(60 * 10) +const ( + minExpireSeconds = 600 +) func (k *kubernetesClient) createServiceAccount(ctx context.Context, sa *core.ServiceAccount) (*core.ServiceAccount, error) { if k.namespace == "" { @@ -377,17 +453,6 @@ func (k *kubernetesClient) createServiceAccount(ctx context.Context, sa *core.Se return out, errors.Trace(err) } -func (k *kubernetesClient) updateServiceAccount(ctx context.Context, sa *core.ServiceAccount) (*core.ServiceAccount, error) { - if k.namespace == "" { - return nil, errNoNamespace - } - out, err := k.client.CoreV1().ServiceAccounts(k.namespace).Update(ctx, sa, v1.UpdateOptions{}) - if k8serrors.IsNotFound(err) { - return nil, errors.NotFoundf("service account %q", sa.GetName()) - } - return out, errors.Trace(err) -} - func (k *kubernetesClient) deleteServiceAccount(ctx context.Context, name string, uid types.UID) error { if k.namespace == "" { return errNoNamespace @@ -426,42 +491,6 @@ func (k *kubernetesClient) deleteServiceAccounts(ctx context.Context) error { return nil } -// 
ensureServiceAccount creates or updates a service account, disambiguating the name if necessary. -// If a new service account is created, cleanups contain funcs than can be run to delete any new -// resources on error. -func (k *kubernetesClient) ensureServiceAccount( - ctx context.Context, serviceAccountName string, labels labels.Set, annotations map[string]string, disambiguateName bool, -) (out *core.ServiceAccount, cleanups []func(), err error) { - automountServiceAccountToken := true - sa := &core.ServiceAccount{ - ObjectMeta: v1.ObjectMeta{ - Name: serviceAccountName, - Labels: labels, - Annotations: annotations, - Namespace: k.namespace, - }, - AutomountServiceAccountToken: &automountServiceAccountToken, - } - if disambiguateName { - out, err = k.createDisambiguatedServiceAccount(ctx, sa) - } else { - out, err = k.createServiceAccount(ctx, sa) - if err != nil && !errors.Is(err, errors.AlreadyExists) { - return nil, nil, errors.Trace(err) - } - } - if err == nil { - logger.Debugf("service account %q created", out.GetName()) - cleanups = append(cleanups, func() { _ = k.deleteServiceAccount(ctx, out.GetName(), out.GetUID()) }) - return out, cleanups, nil - } - - // Service account already exists so update it. 
- out, err = k.updateServiceAccount(ctx, sa) - logger.Debugf("updating service account %q", sa.GetName()) - return out, cleanups, errors.Trace(err) -} - func (k *kubernetesClient) deleteSecrets(ctx context.Context) error { if k.namespace == "" { return errNoNamespace @@ -489,68 +518,20 @@ func (k *kubernetesClient) deleteSecrets(ctx context.Context) error { return nil } -func (k *kubernetesClient) createRole(ctx context.Context, role *rbacv1.Role) (*rbacv1.Role, error) { +func (k *kubernetesClient) createRole( + ctx context.Context, role *rbacv1.Role, +) (*rbacv1.Role, error) { if k.namespace == "" { return nil, errNoNamespace } - out, err := k.client.RbacV1().Roles(k.namespace).Create(ctx, role, v1.CreateOptions{FieldManager: resources.JujuFieldManager}) + out, err := k.client.RbacV1().Roles(k.namespace).Create( + ctx, role, v1.CreateOptions{FieldManager: resources.JujuFieldManager}) if k8serrors.IsAlreadyExists(err) { return nil, errors.AlreadyExistsf("role %q", role.GetName()) } return out, errors.Trace(err) } -// updateRole fetches the latest version of the specified role, -// replaces its Rules with those from the provided role, and updates it -// in the cluster. This method retries on conflicts using exponential backoff -// to handle concurrent modifications by other controllers. -// Note that only the Rules field is updated, all other fields from the latest role are preserved. 
-func (k *kubernetesClient) updateRole(ctx context.Context, role *rbacv1.Role) (*rbacv1.Role, error) { - if k.namespace == "" { - return nil, errNoNamespace - } - - api := k.client.RbacV1().Roles(k.namespace) - var out *rbacv1.Role - err := retry.Call(retry.CallArgs{ - Func: func() error { - patch := map[string]interface{}{ - "rules": role.Rules, - } - data, err := json.Marshal(patch) - if err != nil { - return errors.Trace(err) - } - out, err = api.Patch(ctx, role.GetName(), types.StrategicMergePatchType, data, v1.PatchOptions{ - FieldManager: resources.JujuFieldManager, - }) - if k8serrors.IsNotFound(err) { - return errors.NotFoundf("role %q", role.GetName()) - } - return errors.Trace(err) - }, - IsFatalError: func(err error) bool { - return !k8serrors.IsConflict(err) - }, - Clock: jujuclock.WallClock, - Attempts: 5, - Delay: time.Second, - BackoffFunc: retry.ExpBackoff(time.Second, 5*time.Second, 1.5, true), - }) - return out, errors.Trace(err) -} - -func (k *kubernetesClient) getRole(ctx context.Context, name string) (*rbacv1.Role, error) { - if k.namespace == "" { - return nil, errNoNamespace - } - out, err := k.client.RbacV1().Roles(k.namespace).Get(ctx, name, v1.GetOptions{}) - if k8serrors.IsNotFound(err) { - return nil, errors.NotFoundf("role %q", name) - } - return out, errors.Trace(err) -} - func (k *kubernetesClient) deleteRoles(ctx context.Context) error { if k.namespace == "" { return errNoNamespace @@ -605,6 +586,44 @@ func (k *kubernetesClient) deleteRoleBindings(ctx context.Context) error { return nil } +func (k *kubernetesClient) updateRole( + ctx context.Context, role *rbacv1.Role, +) (*rbacv1.Role, error) { + api := k.client.RbacV1().Roles(k.namespace) + + var out *rbacv1.Role + err := retry.Call(retry.CallArgs{ + Func: func() error { + patch := map[string]interface{}{ + "rules": role.Rules, + } + data, err := json.Marshal(patch) + if err != nil { + return errors.Annotatef(err, "marshaling role patch") + } + out, err = api.Patch( + ctx, 
role.GetName(), types.StrategicMergePatchType, data, + v1.PatchOptions{ + FieldManager: resources.JujuFieldManager, + }, + ) + if k8serrors.IsNotFound(err) { + return errors.NotFoundf("role %q", role.GetName()) + } + return errors.Annotatef(err, "patching role %q", role.GetName()) + }, + IsFatalError: func(err error) bool { + return !k8serrors.IsConflict(err) + }, + Clock: jujuclock.WallClock, + Attempts: 5, + Delay: time.Second, + BackoffFunc: retry.ExpBackoff(time.Second, 5*time.Second, 1.5, true), + }) + + return out, errors.Annotatef(err, "updating role %q", role.GetName()) +} + func (k *kubernetesClient) deleteRole(ctx context.Context, name string, uid types.UID) error { if k.namespace == "" { return errNoNamespace @@ -616,132 +635,81 @@ func (k *kubernetesClient) deleteRole(ctx context.Context, name string, uid type return errors.Trace(err) } -func (k *kubernetesClient) ensureRoleBinding( +func (k *kubernetesClient) createRoleBinding( ctx context.Context, rb *rbacv1.RoleBinding, ) (_ *rbacv1.RoleBinding, cleanups []func(), err error) { if k.namespace == "" { - return nil, cleanups, errNoNamespace + return nil, nil, errNoNamespace } api := k.client.RbacV1().RoleBindings(k.namespace) - out, err := api.Create(ctx, rb, v1.CreateOptions{ FieldManager: resources.JujuFieldManager, }) - if k8serrors.IsAlreadyExists(err) { - // we need to ensure that the rb is not empty for callers - // by getting rb from api again eg cases like resource name empty for - // attempting to get rb name in caller - out, err = api.Get(ctx, rb.Name, v1.GetOptions{}) - return out, cleanups, err - } - if err == nil { - cleanups = append(cleanups, func() { _ = k.deleteRoleBinding(ctx, out.GetName(), out.GetUID()) }) + if err != nil { + return nil, nil, errors.Trace(err) } + cleanups = append(cleanups, func() { + _ = k.deleteRoleBinding(ctx, out.GetName(), out.GetUID()) + }) - return out, cleanups, errors.Trace(err) + return out, cleanups, nil } -func (k *kubernetesClient) deleteRoleBinding(ctx 
context.Context, name string, uid types.UID) error { +func (k *kubernetesClient) deleteRoleBinding( + ctx context.Context, name string, uid types.UID, +) error { if k.namespace == "" { return errNoNamespace } - err := k.client.RbacV1().RoleBindings(k.namespace).Delete(ctx, name, utils.NewPreconditionDeleteOptions(uid)) + err := k.client.RbacV1().RoleBindings(k.namespace).Delete( + ctx, name, utils.NewPreconditionDeleteOptions(uid)) if k8serrors.IsNotFound(err) { return nil } return errors.Trace(err) } -func cleanRules(existing []rbacv1.PolicyRule, shouldRemove func(string) bool) []rbacv1.PolicyRule { - if len(existing) == 0 { - return nil - } - - i := 0 - for _, r := range existing { - if len(r.ResourceNames) == 1 && shouldRemove(r.ResourceNames[0]) { - continue - } - existing[i] = r - i++ - } - return existing[:i] -} - -func rulesForSecretAccess( - namespace string, isControllerModel bool, - existing []rbacv1.PolicyRule, owned, read, removed []string, +// policyRulesForSecretAccess returns the full policy rules required for +// secrets. +func policyRulesForSecretAccess( + namespace string, owned, read []string, ) []rbacv1.PolicyRule { - if len(existing) == 0 { - existing = []rbacv1.PolicyRule{ - { - APIGroups: []string{rbacv1.APIGroupAll}, - Resources: []string{"secrets"}, - Verbs: []string{ - "create", - "patch", // TODO: we really should only allow "create" but not patch but currently we uses .Apply() which requres patch!!! - }, + rules := []rbacv1.PolicyRule{{ + APIGroups: []string{rbacv1.APIGroupAll}, + Resources: []string{"namespaces"}, + Verbs: []string{"get", "list"}, + ResourceNames: []string{namespace}, + }} + if len(owned) > 0 { + // owned cannot be empty, otherwise this policy rule grants access to + // all secrets. + rules = append(rules, rbacv1.PolicyRule{ + APIGroups: []string{rbacv1.APIGroupAll}, + Resources: []string{"secrets"}, + Verbs: []string{ + // NOTE: create is not given here as it cannot be enforced due + // to kubernetes rbac limitation. 
+ "get", "patch", "update", "replace", "delete", }, - } - if isControllerModel { - // We need to be able to list/get all namespaces for units in controller model. - existing = append(existing, rbacv1.PolicyRule{ - APIGroups: []string{rbacv1.APIGroupAll}, - Resources: []string{"namespaces"}, - Verbs: []string{"get", "list"}, - }) - } else { - // We just need to be able to list/get our own namespace for units in other models. - existing = append(existing, rbacv1.PolicyRule{ - APIGroups: []string{rbacv1.APIGroupAll}, - Resources: []string{"namespaces"}, - Verbs: []string{"get", "list"}, - ResourceNames: []string{namespace}, - }) - } - } - - ownedIDs := set.NewStrings(owned...) - readIDs := set.NewStrings(read...) - removedIDs := set.NewStrings(removed...) - - existing = cleanRules(existing, - func(s string) bool { - return ownedIDs.Contains(s) || readIDs.Contains(s) || removedIDs.Contains(s) - }, - ) - - for _, rName := range owned { - if removedIDs.Contains(rName) { - continue - } - existing = append(existing, rbacv1.PolicyRule{ - APIGroups: []string{rbacv1.APIGroupAll}, - Resources: []string{"secrets"}, - Verbs: []string{rbacv1.VerbAll}, - ResourceNames: []string{rName}, + ResourceNames: owned, }) } - for _, rName := range read { - if removedIDs.Contains(rName) { - continue - } - existing = append(existing, rbacv1.PolicyRule{ + if len(read) > 0 { + // read cannot be empty, otherwise this policy rule grants access to + // all secrets. + rules = append(rules, rbacv1.PolicyRule{ APIGroups: []string{rbacv1.APIGroupAll}, Resources: []string{"secrets"}, Verbs: []string{"get"}, - ResourceNames: []string{rName}, + ResourceNames: read, }) } - return existing + return rules } -// ensureBindingForSecretAccessToken creates the role and role binding needed to access the supplied secrets. -// If a new role is created, cleanups contain funcs than can be run to delete any new -// resources on error. 
-func (k *kubernetesClient) ensureBindingForSecretAccessToken( - ctx context.Context, sa *core.ServiceAccount, owned, read, removed []string, +func (k *kubernetesClient) createRoleAndBinding( + ctx context.Context, sa *core.ServiceAccount, rules []rbacv1.PolicyRule, ) (cleanups []func(), _ error) { role, err := k.createRole(ctx, &rbacv1.Role{ @@ -751,24 +719,15 @@ func (k *kubernetesClient) ensureBindingForSecretAccessToken( Labels: sa.Labels, Annotations: sa.Annotations, }, - Rules: rulesForSecretAccess(k.namespace, false, nil, owned, read, removed), + Rules: rules, }, ) - if errors.Is(err, errors.AlreadyExists) { - role, err = k.getRole(ctx, sa.Name) - if err != nil { - return cleanups, errors.Annotatef(err, "getting role %q", sa.Name) - } - role.Rules = rulesForSecretAccess(k.namespace, false, role.Rules, owned, read, removed) - _, err = k.updateRole(ctx, role) - if err != nil { - return cleanups, errors.Annotatef(err, "updating role %q", sa.Name) - } - } else if err != nil { + if err != nil { return cleanups, errors.Annotatef(err, "creating role %q", sa.Name) - } else { - cleanups = append(cleanups, func() { _ = k.deleteRole(ctx, role.GetName(), role.GetUID()) }) } + cleanups = append(cleanups, func() { + _ = k.deleteRole(ctx, role.GetName(), role.GetUID()) + }) rb := &rbacv1.RoleBinding{ ObjectMeta: v1.ObjectMeta{ @@ -790,18 +749,20 @@ func (k *kubernetesClient) ensureBindingForSecretAccessToken( }, }, } - out, rbCleanups, err := k.ensureRoleBinding(ctx, rb) + out, rbCleanups, err := k.createRoleBinding(ctx, rb) if err != nil { return cleanups, errors.Trace(err) } cleanups = append(cleanups, rbCleanups...) - // Ensure role binding exists before we return to avoid a race where a client - // attempts to perform an operation before the role is allowed. + // Ensure role binding exists before we return to avoid a race where a + // client attempts to perform an operation before the role is allowed. 
return cleanups, errors.Trace(retry.Call(retry.CallArgs{ Func: func() error { api := k.client.RbacV1().RoleBindings(k.namespace) - _, err := api.Get(ctx, out.Name, v1.GetOptions{ResourceVersion: out.ResourceVersion}) + _, err := api.Get(ctx, out.Name, v1.GetOptions{ + ResourceVersion: out.ResourceVersion, + }) if k8serrors.IsNotFound(err) { return errors.NewNotFound(err, "k8s") } @@ -932,36 +893,69 @@ func (k *kubernetesClient) deleteClusterRoleBindings(ctx context.Context) error return nil } -// ensureClusterBindingForSecretAccessToken creates the cluster role and role binding needed -// to access the supplied secrets. -// If a new cluster role is created, cleanups contain funcs than can be run to delete any new -// resources on error. -func (k *kubernetesClient) ensureClusterBindingForSecretAccessToken( - ctx context.Context, saName, baseName string, labels labels.Set, annotations map[string]string, owned, read, removed []string, +// ensureControllerClusterBindingForSecretAccessToken creates the cluster role +// and role binding needed to access the supplied secrets for the controller. +// If a new cluster role is created, cleanups contain funcs than can be run to +// delete any new resources on error. +func (k *kubernetesClient) createClusterRoleAndBinding( + ctx context.Context, sa *core.ServiceAccount, + rules []rbacv1.PolicyRule, ) (cleanups []func(), _ error) { - createRules := func(existing []rbacv1.PolicyRule) []rbacv1.PolicyRule { - return rulesForSecretAccess(k.namespace, true, existing, owned, read, removed) - } - clusterRole, crCleanups, err := k.ensureDisambiguatedClusterRole(ctx, baseName, labels, annotations, createRules) - if err == nil { - cleanups = append(cleanups, crCleanups...) 
- } else { - return cleanups, errors.Annotatef(err, "disambiguating cluster role name %q", baseName) + cr, err := k.createClusterRole(ctx, + &rbacv1.ClusterRole{ + ObjectMeta: v1.ObjectMeta{ + Name: sa.Name, + Labels: sa.Labels, + Annotations: sa.Annotations, + }, + Rules: rules, + }, + ) + if err != nil { + return cleanups, errors.Annotatef( + err, "creating cluster role %q", sa.Name) } + cleanups = append(cleanups, func() { + _ = k.deleteClusterRole(ctx, cr.GetName(), cr.GetUID()) + }) - clusterRoleBinding, crbCleanups, err := k.ensureDisambiguatedClusterRoleBinding(ctx, saName, baseName, clusterRole.Name, labels, annotations) - if err == nil { - cleanups = append(cleanups, crbCleanups...) - } else { - return cleanups, errors.Annotatef(err, "disambiguating cluster role binding name %q", baseName) + crb, err := k.createClusterRoleBinding(ctx, + &rbacv1.ClusterRoleBinding{ + ObjectMeta: v1.ObjectMeta{ + Name: sa.Name, + Labels: sa.Labels, + Annotations: sa.Annotations, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: sa.Name, + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: sa.Name, + Namespace: sa.Namespace, + }, + }, + }, + ) + if err != nil { + return cleanups, errors.Annotatef( + err, "creating cluster role binding %q", sa.Name) } + cleanups = append(cleanups, func() { + _ = k.deleteClusterRoleBinding(ctx, crb.GetName(), crb.GetUID()) + }) - // Ensure role binding exists before we return to avoid a race where a client - // attempts to perform an operation before the role is allowed. + // Ensure role binding exists before we return to avoid a race where a + // client attempts to perform an operation before the role is allowed. 
return cleanups, errors.Trace(retry.Call(retry.CallArgs{ Func: func() error { api := k.client.RbacV1().ClusterRoleBindings() - _, err := api.Get(ctx, clusterRoleBinding.Name, v1.GetOptions{ResourceVersion: clusterRoleBinding.ResourceVersion}) + _, err := api.Get(ctx, crb.Name, v1.GetOptions{ + ResourceVersion: crb.ResourceVersion, + }) if k8serrors.IsNotFound(err) { return errors.NewNotFound(err, "k8s") } @@ -976,13 +970,53 @@ func (k *kubernetesClient) ensureClusterBindingForSecretAccessToken( })) } -const ( - maxResourceNameLength = 63 - clusterResourcePrefix = "juju-secrets-" -) +// precreateSecretRevs ensures that a secret exists for a secret revision. +func (k *kubernetesClient) precreateSecretRevs( + ctx context.Context, revs []string, +) error { + labels := labelsForSecretRevision(k.modelName, k.modelUUID) + client := k.client.CoreV1().Secrets(k.namespace) + existingSecrets, err := client.List(ctx, v1.ListOptions{ + LabelSelector: labels.AsSelector().String(), + }) + if err != nil { + return errors.Trace(err) + } -func (k *kubernetesClient) ensureSecretAccessToken( - ctx context.Context, consumer names.Tag, owned, read, removed []string, + existing := set.NewStrings() + for _, secret := range existingSecrets.Items { + existing.Add(secret.Name) + } + + tmpl := &core.Secret{ + ObjectMeta: v1.ObjectMeta{ + Namespace: k.namespace, + Labels: labels, + }, + Type: core.SecretTypeOpaque, + } + for _, name := range revs { + if existing.Contains(name) { + continue + } + tmpl.Name = name + _, err := client.Create(ctx, tmpl, v1.CreateOptions{ + FieldManager: resources.JujuFieldManager, + }) + if err != nil && !k8serrors.IsAlreadyExists(err) { + return errors.Trace(err) + } + } + + return nil +} + +func (k *kubernetesClient) createSecretAccessToken( + ctx context.Context, + issuedTokenUUID string, + consumer names.Tag, + ownedRevs []string, + readRevs []string, ) (_ string, err error) { var cleanups []func() defer func() { @@ -995,10 +1029,13 @@ func (k 
*kubernetesClient) ensureSecretAccessToken( } }() - labels := labelsForServiceAccount(k.modelName, k.modelUUID) + expireAt := time.Now().Add(coresecrets.IssuedTokenValidity) + + labels := labelsForServiceAccount(k.modelName, k.modelUUID, consumer) annotations := map[string]string{ - controllerIdKey: k.controllerUUID, - modelIdKey: k.modelUUID, + controllerIdKey: k.controllerUUID, + modelIdKey: k.modelUUID, + annotationJujuSecretExpireAt: strconv.FormatInt(expireAt.Unix(), 10), } appName := consumer.Id() @@ -1010,53 +1047,63 @@ func (k *kubernetesClient) ensureSecretAccessToken( constants.LabelKubernetesAppName: appName, }) - // Compose the name of the service account and role and role binding. - // We'll use the tag string, but for models we'll use the model name, since - // the UUID will be used to disambiguate anyway if needed. - baseResourceName := consumer.String() - if consumer.Kind() == names.ModelTagKind { - baseResourceName = fmt.Sprintf("model-%s", k.modelName) - } - serviceAccountName := baseResourceName - // For the controller model, the resources are cluster scoped so - // given them a meaningful prefix. - if k.isControllerModel { - baseResourceName = clusterResourcePrefix + baseResourceName + // Service Account name and all the ACLs for this SA are derived from the + // issued token UUID. This allows juju to revoke the issued token and + // perform cleanup of tokens. + serviceAccountName := fmt.Sprintf( + "juju-secret-consumer-%s", issuedTokenUUID, + ) + + automountServiceAccountToken := true + sa := &core.ServiceAccount{ + ObjectMeta: v1.ObjectMeta{ + Name: serviceAccountName, + Labels: labels, + Annotations: annotations, + Namespace: k.namespace, + }, + AutomountServiceAccountToken: &automountServiceAccountToken, } - // If the resources are going to a namespace other than that of the host model, - // disambiguate the name. 
- disambiguateName, err := k.isExternalNamespace() + sa, err = k.createServiceAccount(ctx, sa) if err != nil { - return "", errors.Annotate(err, "checking if namespace is external") + return "", errors.Annotatef(err, "cannot ensure service account %q", serviceAccountName) } + cleanups = append(cleanups, func() { + _ = k.deleteServiceAccount(ctx, sa.Name, sa.UID) + }) - sa, saCleanups, err := k.ensureServiceAccount(ctx, serviceAccountName, labels, annotations, disambiguateName) - cleanups = append(cleanups, saCleanups...) + rules := policyRulesForSecretAccess(k.namespace, ownedRevs, readRevs) + rCleanups, err := k.createRoleAndBinding(ctx, sa, rules) + cleanups = append(cleanups, rCleanups...) if err != nil { - return "", errors.Annotatef(err, "cannot ensure service account %q", serviceAccountName) + return "", errors.Annotatef(err, "cannot ensure role binding for secret access token for %q", sa.Name) } if k.isControllerModel { - cbCleanups, err := k.ensureClusterBindingForSecretAccessToken(ctx, sa.Name, baseResourceName, labels, annotations, owned, read, removed) + // We need to be able to list/get all namespaces for units in controller + // model. + clusterRules := append([]rbacv1.PolicyRule{{ + APIGroups: []string{rbacv1.APIGroupAll}, + Resources: []string{"namespaces"}, + Verbs: []string{"get", "list"}, + }}, rules...) + cbCleanups, err := k.createClusterRoleAndBinding(ctx, sa, clusterRules) cleanups = append(cleanups, cbCleanups...) if err != nil { return "", errors.Annotatef(err, "cannot ensure cluster binding for secret access token for %q", sa.Name) } - } else { - // For roles and role bindings created in the namespace set up to hold the secrets, - // we assume that the service account, role, role binding all share the same disambiguated - // name as the service account. This is reasonable since it's not expected that anything - // other than Juju will be messing with such artefacts in that namespace. 
- rCleanups, err := k.ensureBindingForSecretAccessToken(ctx, sa, owned, read, removed) - cleanups = append(cleanups, rCleanups...) - if err != nil { - return "", errors.Annotatef(err, "cannot ensure role binding for secret access token for %q", sa.Name) - } } treq := &authenticationv1.TokenRequest{ + ObjectMeta: v1.ObjectMeta{ + Name: sa.Name, + }, Spec: authenticationv1.TokenRequestSpec{ - ExpirationSeconds: &expiresInSeconds, + ExpirationSeconds: func() *int64 { + until := time.Until(expireAt) + seconds := int64(max(minExpireSeconds, until.Seconds())) + return &seconds + }(), }, } tr, err := k.client.CoreV1().ServiceAccounts(k.namespace).CreateToken( @@ -1067,173 +1114,130 @@ func (k *kubernetesClient) ensureSecretAccessToken( return tr.Status.Token, nil } -// createDisambiguatedServiceAccount creates a service account with a disambiguated name. -func (k *kubernetesClient) createDisambiguatedServiceAccount( - ctx context.Context, sa *core.ServiceAccount, -) (*core.ServiceAccount, error) { +// revokeSecretAccessTokens removes all the roles, role bindings and service +// accounts related to the named issued token UUID. 
+func (k *kubernetesClient) revokeSecretAccessToken( + ctx context.Context, issuedTokenUUID string, +) error { if k.namespace == "" { - return nil, errNoNamespace + return errNoNamespace } - listOps := v1.ListOptions{ - LabelSelector: modelLabelSelector(k.modelName).String(), + serviceAccountName := fmt.Sprintf( + "juju-secret-consumer-%s", issuedTokenUUID, + ) + + err := k.client.RbacV1().ClusterRoleBindings().Delete( + ctx, serviceAccountName, *v1.NewDeleteOptions(0)) + if err != nil && !k8serrors.IsNotFound(err) { + return errors.Trace(err) } - existing, err := k.client.CoreV1().ServiceAccounts(k.namespace).List(ctx, listOps) - if err != nil { - return nil, errors.Trace(err) + + err = k.client.RbacV1().ClusterRoles().Delete( + ctx, serviceAccountName, v1.DeleteOptions{}) + if err != nil && !k8serrors.IsNotFound(err) { + return errors.Trace(err) } - for _, existingServiceAccount := range existing.Items { - if existingServiceAccount.Annotations[modelIdKey] == k.modelUUID { - return &existingServiceAccount, nil - } + err = k.client.RbacV1().RoleBindings(k.namespace).Delete( + ctx, serviceAccountName, *v1.NewDeleteOptions(0)) + if err != nil && !k8serrors.IsNotFound(err) { + return errors.Trace(err) } - suffixLength := model.DefaultSuffixDigits - var proposedName string + err = k.client.RbacV1().Roles(k.namespace).Delete( + ctx, serviceAccountName, v1.DeleteOptions{}) + if err != nil && !k8serrors.IsNotFound(err) { + return errors.Trace(err) + } - for { - if proposedName, err = model.DisambiguateResourceNameWithSuffixLength( - k.modelUUID, sa.Name, maxResourceNameLength, suffixLength); err != nil { - return nil, errors.Annotatef(err, "disambiguating service account name %q", sa.Name) - } - _, err = k.client.CoreV1().ServiceAccounts(k.namespace).Get(ctx, proposedName, v1.GetOptions{}) - if err == nil { - suffixLength = suffixLength + 1 - continue - } else if !k8serrors.IsNotFound(err) { - return nil, errors.Annotatef(err, "getting existing service account %q", 
proposedName) - } - sa.Name = proposedName - return k.createServiceAccount(ctx, sa) + err = k.client.CoreV1().ServiceAccounts(k.namespace).Delete( + ctx, serviceAccountName, v1.DeleteOptions{}) + if err != nil && !k8serrors.IsNotFound(err) { + return errors.Trace(err) } + + return nil } -// ensureDisambiguatedClusterRole creates a cluster role with a disambiguated name. -// cleanups contain funcs than can be run to delete any new resources on error. -func (k *kubernetesClient) ensureDisambiguatedClusterRole( - ctx context.Context, baseName string, labels labels.Set, annotations map[string]string, createRules func(existing []rbacv1.PolicyRule) []rbacv1.PolicyRule, -) (_ *rbacv1.ClusterRole, cleanups []func(), _ error) { - listOps := v1.ListOptions{ - LabelSelector: modelLabelSelector(k.modelName).String(), +// filterRemovedSecretsPolicyRules removes from the given rules access to the +// specified secret revisions. When the second return value is false, the policy +// is already up to date. +func filterRemovedSecretsPolicyRules( + rules []rbacv1.PolicyRule, removed []string, +) ([]rbacv1.PolicyRule, bool) { + toRemove := set.NewStrings(removed...) 
+ needChange := false + for _, rule := range rules { + if slices.Contains(rule.Resources, "secrets") && + slices.ContainsFunc(rule.ResourceNames, toRemove.Contains) { + needChange = true + break + } } - existing, err := k.client.RbacV1().ClusterRoles().List(ctx, listOps) - if err != nil { - return nil, cleanups, errors.Trace(err) + if !needChange { + return nil, false } - for _, clusterRole := range existing.Items { - if clusterRole.Annotations[modelIdKey] != k.modelUUID { - continue - } - clusterRole.Rules = createRules(clusterRole.Rules) - result, err := k.updateClusterRole(ctx, &clusterRole) - if err != nil { - return nil, cleanups, errors.Trace(err) + var out []rbacv1.PolicyRule + for _, rule := range rules { + if slices.Contains(rule.Resources, "secrets") { + rule.ResourceNames = slices.DeleteFunc( + rule.ResourceNames, toRemove.Contains) + if len(rule.ResourceNames) == 0 { + continue + } } - return result, cleanups, nil + out = append(out, rule) } + return out, true +} - suffixLength := model.DefaultSuffixDigits - var proposedName string +func (k *kubernetesClient) dropSecretAccess( + ctx context.Context, removed []string, +) error { + labels := labelsForServiceAccount(k.modelName, k.modelUUID, nil) - for { - if proposedName, err = model.DisambiguateResourceNameWithSuffixLength( - k.modelUUID, baseName, maxResourceNameLength, suffixLength); err != nil { - return nil, cleanups, errors.Annotatef(err, "disambiguating cluster role name %q", baseName) - } - _, err = k.client.RbacV1().ClusterRoles().Get(ctx, proposedName, v1.GetOptions{}) - if err == nil { - suffixLength = suffixLength + 1 + listOps := v1.ListOptions{ + LabelSelector: labels.AsSelector().String(), + } + + clusterRoles, err := k.client.RbacV1().ClusterRoles().List(ctx, listOps) + if err != nil { + return errors.Trace(err) + } + for _, clusterRole := range clusterRoles.Items { + var changed bool + clusterRole.Rules, changed = filterRemovedSecretsPolicyRules( + clusterRole.Rules, removed) + if 
!changed { continue - } else if !k8serrors.IsNotFound(err) { - return nil, cleanups, errors.Annotatef(err, "getting existing cluster role %q", proposedName) } - result, err := k.createClusterRole(ctx, - &rbacv1.ClusterRole{ - ObjectMeta: v1.ObjectMeta{ - Name: proposedName, - Labels: labels, - Annotations: annotations, - }, - Rules: createRules(nil), - }, - ) - if errors.Is(err, errors.AlreadyExists) { - suffixLength++ + _, err := k.updateClusterRole(ctx, &clusterRole) + if errors.Is(err, errors.NotFound) { continue + } else if err != nil { + return errors.Trace(err) } - if err == nil { - cleanups = append(cleanups, func() { _ = k.deleteClusterRole(ctx, result.GetName(), result.GetUID()) }) - } - return result, cleanups, nil } -} -// ensureDisambiguatedClusterRoleBinding ensures a cluster role binding with a -// disambiguated name exists. -// cleanups contain funcs than can be run to delete any new resources on error. -func (k *kubernetesClient) ensureDisambiguatedClusterRoleBinding( - ctx context.Context, saName, baseName, roleName string, labels labels.Set, annotations map[string]string, -) (_ *rbacv1.ClusterRoleBinding, cleanups []func(), _ error) { - listOps := v1.ListOptions{ - LabelSelector: modelLabelSelector(k.modelName).String(), - } - existing, err := k.client.RbacV1().ClusterRoleBindings().List(ctx, listOps) + roles, err := k.client.RbacV1().Roles(k.namespace).List(ctx, listOps) if err != nil { - return nil, cleanups, errors.Trace(err) - } - for _, clusterRoleBinding := range existing.Items { - if clusterRoleBinding.Annotations[modelIdKey] == k.modelUUID { - return &clusterRoleBinding, cleanups, nil - } + return errors.Trace(err) } - - suffixLength := model.DefaultSuffixDigits - var proposedName string - - for { - if proposedName, err = model.DisambiguateResourceNameWithSuffixLength( - k.modelUUID, baseName, maxResourceNameLength, suffixLength); err != nil { - return nil, cleanups, errors.Annotatef(err, "disambiguating cluster role name %q", baseName) - 
} - _, err = k.client.RbacV1().ClusterRoleBindings().Get(ctx, proposedName, v1.GetOptions{}) - if err == nil { - suffixLength++ + for _, role := range roles.Items { + var changed bool + role.Rules, changed = filterRemovedSecretsPolicyRules(role.Rules, removed) + if !changed { continue - } else if !k8serrors.IsNotFound(err) { - return nil, cleanups, errors.Annotatef(err, "getting existing cluster role binding %q", proposedName) } - result, err := k.createClusterRoleBinding(ctx, - &rbacv1.ClusterRoleBinding{ - ObjectMeta: v1.ObjectMeta{ - Name: proposedName, - Labels: labels, - Annotations: annotations, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "ClusterRole", - Name: roleName, - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - Name: saName, - Namespace: k.namespace, - }, - }, - }, - ) - // someone might have created the cluster role binding - if errors.Is(err, errors.AlreadyExists) { - suffixLength++ + _, err := k.updateRole(ctx, &role) + if errors.Is(err, errors.NotFound) { continue + } else if err != nil { + return errors.Trace(err) } - if err == nil { - cleanups = append(cleanups, func() { _ = k.deleteClusterRoleBinding(ctx, result.GetName(), result.GetUID()) }) - } - return result, cleanups, errors.Trace(err) } + return nil } var errNoNamespace = errors.ConstError("no namespace") @@ -1273,6 +1277,9 @@ func (p k8sProvider) RefreshAuth(adminCfg *provider.ModelBackendConfig, validFor validForSeconds := int64(validFor.Truncate(time.Second).Seconds()) treq := &authenticationv1.TokenRequest{ + ObjectMeta: v1.ObjectMeta{ + Name: broker.serviceAccount, + }, Spec: authenticationv1.TokenRequestSpec{ ExpirationSeconds: &validForSeconds, },
secrets/provider/kubernetes/provider_test.go+469 −500 modified@@ -5,350 +5,151 @@ package kubernetes_test import ( "context" + "crypto/rand" "net" "os" + "strconv" "time" "github.com/juju/collections/set" - "github.com/juju/errors" "github.com/juju/names/v5" "github.com/juju/testing" jc "github.com/juju/testing/checkers" - "go.uber.org/mock/gomock" gc "gopkg.in/check.v1" authenticationv1 "k8s.io/api/authentication/v1" - core "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - kubernetes2 "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8sruntime "k8s.io/apimachinery/pkg/runtime" + k8s "k8s.io/client-go/kubernetes" + k8sfake "k8s.io/client-go/kubernetes/fake" + k8srest "k8s.io/client-go/rest" + k8stesting "k8s.io/client-go/testing" "github.com/juju/juju/core/secrets" - k8sconstants "github.com/juju/juju/internal/provider/kubernetes/constants" "github.com/juju/juju/secrets/provider" _ "github.com/juju/juju/secrets/provider/all" "github.com/juju/juju/secrets/provider/kubernetes" - "github.com/juju/juju/secrets/provider/kubernetes/mocks" coretesting "github.com/juju/juju/testing" ) type providerSuite struct { - testing.IsolationSuite - - k8sClient *mocks.MockInterface - mockDiscovery *mocks.MockDiscoveryInterface - mockSecrets *mocks.MockSecretInterface - mockRbacV1 *mocks.MockRbacV1Interface - mockNamespaces *mocks.MockNamespaceInterface - mockServiceAccounts *mocks.MockServiceAccountInterface - mockRoles *mocks.MockRoleInterface - mockClusterRoles *mocks.MockClusterRoleInterface - mockRoleBindings *mocks.MockRoleBindingInterface - mockClusterRoleBindings *mocks.MockClusterRoleBindingInterface + testing.CleanupSuite + + k8sClient *k8sfake.Clientset namespace string + tokens []string } var _ = gc.Suite(&providerSuite{}) 
func (s *providerSuite) SetUpTest(c *gc.C) { - s.namespace = "test" - s.PatchValue(&kubernetes.NewK8sClient, func(config *rest.Config) (kubernetes2.Interface, error) { + s.PatchValue(&kubernetes.NewK8sClient, func(config *k8srest.Config) (k8s.Interface, error) { return s.k8sClient, nil }) } -func (s *providerSuite) setupController(c *gc.C) *gomock.Controller { - ctrl := gomock.NewController(c) - - s.k8sClient = mocks.NewMockInterface(ctrl) - - s.mockDiscovery = mocks.NewMockDiscoveryInterface(ctrl) - s.k8sClient.EXPECT().Discovery().AnyTimes().Return(s.mockDiscovery) - - mockCoreV1 := mocks.NewMockCoreV1Interface(ctrl) - s.k8sClient.EXPECT().CoreV1().AnyTimes().Return(mockCoreV1) - s.mockNamespaces = mocks.NewMockNamespaceInterface(ctrl) - mockCoreV1.EXPECT().Namespaces().AnyTimes().Return(s.mockNamespaces) - - s.mockServiceAccounts = mocks.NewMockServiceAccountInterface(ctrl) - mockCoreV1.EXPECT().ServiceAccounts(s.namespace).AnyTimes().Return(s.mockServiceAccounts) - - s.mockSecrets = mocks.NewMockSecretInterface(ctrl) - mockCoreV1.EXPECT().Secrets(s.namespace).AnyTimes().Return(s.mockSecrets) - - s.mockRbacV1 = mocks.NewMockRbacV1Interface(ctrl) - s.k8sClient.EXPECT().RbacV1().AnyTimes().Return(s.mockRbacV1) - - s.mockRoles = mocks.NewMockRoleInterface(ctrl) - s.mockRbacV1.EXPECT().Roles(s.namespace).AnyTimes().Return(s.mockRoles) - s.mockClusterRoles = mocks.NewMockClusterRoleInterface(ctrl) - s.mockRbacV1.EXPECT().ClusterRoles().AnyTimes().Return(s.mockClusterRoles) - s.mockRoleBindings = mocks.NewMockRoleBindingInterface(ctrl) - s.mockRbacV1.EXPECT().RoleBindings(s.namespace).AnyTimes().Return(s.mockRoleBindings) - s.mockClusterRoleBindings = mocks.NewMockClusterRoleBindingInterface(ctrl) - s.mockRbacV1.EXPECT().ClusterRoleBindings().AnyTimes().Return(s.mockClusterRoleBindings) +func (s *providerSuite) setupK8s(c *gc.C) func() { + ctx := context.Background() + s.k8sClient = k8sfake.NewSimpleClientset() + if s.namespace == "" { + s.namespace = "test" + } + 
s.k8sClient.PrependReactor("create", "serviceaccounts", s.tokenReactor) + _, err := s.k8sClient.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: s.namespace, + }, + }, metav1.CreateOptions{}) + c.Assert(err, jc.ErrorIsNil) + return func() { + s.k8sClient = nil + s.namespace = "" + s.tokens = nil + } +} - return ctrl +// tokenReactor creates service account tokens to test against +func (s *providerSuite) tokenReactor( + action k8stesting.Action, +) (handled bool, ret k8sruntime.Object, err error) { + if action.GetSubresource() != "token" { + return + } + createAction, ok := action.(k8stesting.CreateActionImpl) + if !ok { + return + } + if createAction.Object == nil { + return + } + req, ok := createAction.Object.(*authenticationv1.TokenRequest) + if !ok { + return + } + _, err = s.k8sClient.Tracker().Get( + createAction.Resource, createAction.Namespace, createAction.Name) + if err != nil { + return false, nil, err + } + res := *req + res.Status.Token = rand.Text() + s.tokens = append(s.tokens, res.Status.Token) + return true, &res, nil } func (s *providerSuite) backendConfig() provider.BackendConfig { return provider.BackendConfig{ BackendType: kubernetes.BackendType, - Config: map[string]interface{}{ + Config: map[string]any{ "ca-certs": []string{"cert-data"}, "endpoint": "http://nowhere", "namespace": s.namespace, }, } } -func (s *providerSuite) k8sNotFoundError() *k8serrors.StatusError { - return k8serrors.NewNotFound(schema.GroupResource{}, "test") -} - -func (s *providerSuite) expectEnsureSecretAccessToken(consumer, appNameLabel string, owned, read []string) { - objMeta := v1.ObjectMeta{ - Name: consumer, - Labels: map[string]string{ - "app.kubernetes.io/managed-by": "juju", - "app.kubernetes.io/name": appNameLabel, - "model.juju.is/name": "fred", - "secrets.juju.is/model-name": "fred", - "secrets.juju.is/model-id": coretesting.ModelTag.Id(), - }, - Annotations: map[string]string{ - "model.juju.is/id": 
coretesting.ModelTag.Id(), - "controller.juju.is/id": coretesting.ControllerTag.Id(), - }, - Namespace: s.namespace, - } - - sa := &core.ServiceAccount{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: objMeta, - AutomountServiceAccountToken: ptr(true), - } - role := &rbacv1.Role{ - ObjectMeta: objMeta, - Rules: []rbacv1.PolicyRule{ - { - Verbs: []string{"create", "patch"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - }, - { - Verbs: []string{"get", "list"}, - APIGroups: []string{"*"}, - Resources: []string{"namespaces"}, - ResourceNames: []string{s.namespace}, - }, - }, - } - for _, rName := range owned { - role.Rules = append(role.Rules, rbacv1.PolicyRule{ - APIGroups: []string{rbacv1.APIGroupAll}, - Resources: []string{"secrets"}, - Verbs: []string{rbacv1.VerbAll}, - ResourceNames: []string{rName}, - }) - } - for _, rName := range read { - role.Rules = append(role.Rules, rbacv1.PolicyRule{ - APIGroups: []string{rbacv1.APIGroupAll}, - Resources: []string{"secrets"}, - Verbs: []string{"get"}, - ResourceNames: []string{rName}, - }) - } - - roleBinding := &rbacv1.RoleBinding{ - ObjectMeta: objMeta, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "Role", - Name: role.Name, - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - Name: sa.Name, - Namespace: sa.Namespace, - }, - }, - } - - treq := &authenticationv1.TokenRequest{ - Spec: authenticationv1.TokenRequestSpec{ - ExpirationSeconds: ptr(int64(600)), - }, - } - - gomock.InOrder( - s.mockServiceAccounts.EXPECT().List(gomock.Any(), v1.ListOptions{ - LabelSelector: "model.juju.is/name=fred", - }).Return(&core.ServiceAccountList{}, nil), - s.mockServiceAccounts.EXPECT().Get(gomock.Any(), consumer, v1.GetOptions{}). 
- Return(nil, s.k8sNotFoundError()), - s.mockServiceAccounts.EXPECT().Create(gomock.Any(), sa, v1.CreateOptions{FieldManager: "juju"}).Return(sa, nil), - s.mockRoles.EXPECT().Create(gomock.Any(), role, v1.CreateOptions{FieldManager: "juju"}).Return(role, nil), - s.mockRoleBindings.EXPECT().Create(gomock.Any(), roleBinding, v1.CreateOptions{FieldManager: "juju"}).Return(roleBinding, nil), - s.mockRoleBindings.EXPECT().Get(gomock.Any(), consumer, v1.GetOptions{}).Return(roleBinding, nil), - s.mockServiceAccounts.EXPECT().CreateToken(gomock.Any(), consumer, treq, v1.CreateOptions{FieldManager: "juju"}). - Return(&authenticationv1.TokenRequest{ - Status: authenticationv1.TokenRequestStatus{Token: "token"}, - }, nil).AnyTimes(), - ) +func (s *providerSuite) checkEnsureSecretAccessToken(c *gc.C, consumer, appNameLabel string, owned, read []string) { + ctx := context.Background() + roles, err := s.k8sClient.RbacV1().Roles(s.namespace).List( + ctx, metav1.ListOptions{}) + c.Assert(err, jc.ErrorIsNil) + c.Check(roles.Items, gc.HasLen, 0) + roleBindings, err := s.k8sClient.RbacV1().RoleBindings(s.namespace).List( + ctx, metav1.ListOptions{}) + c.Assert(err, jc.ErrorIsNil) + c.Check(roleBindings.Items, gc.HasLen, 0) } func (s *providerSuite) expectEnsureControllerModelSecretAccessToken(unit string, owned, read []string, roleAlreadyExists bool) { - objMeta := v1.ObjectMeta{ - Name: unit + "-06f00d", - Labels: map[string]string{ - "app.kubernetes.io/managed-by": "juju", - "app.kubernetes.io/name": "gitlab", - "model.juju.is/name": "controller", - "secrets.juju.is/model-name": "controller", - "secrets.juju.is/model-id": coretesting.ModelTag.Id(), - }, - Annotations: map[string]string{ - "model.juju.is/id": coretesting.ModelTag.Id(), - "controller.juju.is/id": coretesting.ControllerTag.Id(), - }, - Namespace: s.namespace, - } - automountServiceAccountToken := true - sa := &core.ServiceAccount{ - ObjectMeta: objMeta, - AutomountServiceAccountToken: &automountServiceAccountToken, - 
} - name := "juju-secrets-" + unit + "-06f00d" - objMeta.Name = name - objMeta.Namespace = "" - clusterRole := &rbacv1.ClusterRole{ - ObjectMeta: objMeta, - Rules: []rbacv1.PolicyRule{ - { - Verbs: []string{"create", "patch"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - }, - { - Verbs: []string{"get", "list"}, - APIGroups: []string{"*"}, - Resources: []string{"namespaces"}, - }, - }, - } - for _, rName := range owned { - clusterRole.Rules = append(clusterRole.Rules, rbacv1.PolicyRule{ - APIGroups: []string{rbacv1.APIGroupAll}, - Resources: []string{"secrets"}, - Verbs: []string{rbacv1.VerbAll}, - ResourceNames: []string{rName}, - }) - } - for _, rName := range read { - clusterRole.Rules = append(clusterRole.Rules, rbacv1.PolicyRule{ - APIGroups: []string{rbacv1.APIGroupAll}, - Resources: []string{"secrets"}, - Verbs: []string{"get"}, - ResourceNames: []string{rName}, - }) - } - - clusterRoleBinding := &rbacv1.ClusterRoleBinding{ - ObjectMeta: objMeta, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "ClusterRole", - Name: clusterRole.Name, - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - Name: sa.Name, - Namespace: sa.Namespace, - }, - }, - } - expiresInSeconds := int64(60 * 10) - treq := &authenticationv1.TokenRequest{ - Spec: authenticationv1.TokenRequestSpec{ - ExpirationSeconds: &expiresInSeconds, - }, - } - - args := []any{ - s.mockNamespaces.EXPECT().Get(gomock.Any(), s.namespace, v1.GetOptions{}).Return(&core.Namespace{ - ObjectMeta: v1.ObjectMeta{Name: s.namespace}, - }, nil), - s.mockServiceAccounts.EXPECT().List(gomock.Any(), v1.ListOptions{ - LabelSelector: "model.juju.is/name=controller", - }).Return(&core.ServiceAccountList{}, nil), - s.mockServiceAccounts.EXPECT().Get(gomock.Any(), sa.Name, v1.GetOptions{}). - Return(nil, s.k8sNotFoundError()), - s.mockServiceAccounts.EXPECT().Create(gomock.Any(), sa, v1.CreateOptions{FieldManager: "juju"}). 
- Return(sa, nil), - } - if roleAlreadyExists { - args = append(args, - s.mockClusterRoles.EXPECT().List(gomock.Any(), v1.ListOptions{ - LabelSelector: "model.juju.is/name=controller", - }).Return(&rbacv1.ClusterRoleList{Items: []rbacv1.ClusterRole{*clusterRole}}, nil), - s.mockClusterRoles.EXPECT().Patch(gomock.Any(), clusterRole.Name, types.StrategicMergePatchType, - gomock.Any(), v1.PatchOptions{FieldManager: "juju"}).Return(clusterRole, nil).AnyTimes(), - ) - } else { - args = append(args, - s.mockClusterRoles.EXPECT().List(gomock.Any(), v1.ListOptions{ - LabelSelector: "model.juju.is/name=controller", - }).Return(&rbacv1.ClusterRoleList{}, nil), - s.mockClusterRoles.EXPECT().Get(gomock.Any(), name, v1.GetOptions{}).Return(nil, s.k8sNotFoundError()), - s.mockClusterRoles.EXPECT().Create(gomock.Any(), clusterRole, v1.CreateOptions{FieldManager: "juju"}).Return(clusterRole, nil), - ) - } - args = append(args, - s.mockClusterRoleBindings.EXPECT().List(gomock.Any(), v1.ListOptions{ - LabelSelector: "model.juju.is/name=controller", - }).Return(&rbacv1.ClusterRoleBindingList{}, nil), - s.mockClusterRoleBindings.EXPECT().Get(gomock.Any(), name, v1.GetOptions{}).Return(nil, s.k8sNotFoundError()), - s.mockClusterRoleBindings.EXPECT().Create(gomock.Any(), clusterRoleBinding, v1.CreateOptions{FieldManager: "juju"}).Return(clusterRoleBinding, nil), - s.mockClusterRoleBindings.EXPECT().Get(gomock.Any(), name, v1.GetOptions{}).Return(clusterRoleBinding, nil), - s.mockServiceAccounts.EXPECT().CreateToken(gomock.Any(), sa.Name, treq, v1.CreateOptions{FieldManager: "juju"}).Return( - &authenticationv1.TokenRequest{Status: authenticationv1.TokenRequestStatus{Token: "token"}}, nil, - ), - ) - gomock.InOrder(args...) 
} func (s *providerSuite) assertRestrictedConfigWithTag(c *gc.C, tag names.Tag, isControllerCloud, sameController bool) { - ctrl := s.setupController(c) - defer ctrl.Finish() + defer s.setupK8s(c)() + ctx := context.Background() appNameLabel := "gitlab" - consumer := tag.String() + "-06f00d" + consumer := tag.String() if tag.Kind() == names.ModelTagKind { - consumer = "model-fred-06f00d" + consumer = coretesting.ModelTag.String() appNameLabel = coretesting.ModelTag.Id() } - s.expectEnsureSecretAccessToken(consumer, appNameLabel, []string{"owned-rev-1"}, []string{"read-rev-1", "read-rev-2"}) + ownedURI := secrets.NewURI() + readURI := secrets.NewURI() - s.PatchValue(&kubernetes.InClusterConfig, func() (*rest.Config, error) { + s.PatchValue(&kubernetes.InClusterConfig, func() (*k8srest.Config, error) { host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT") if len(host) == 0 || len(port) == 0 { - return nil, rest.ErrNotInCluster + return nil, k8srest.ErrNotInCluster } - tlsClientConfig := rest.TLSClientConfig{ + tlsClientConfig := k8srest.TLSClientConfig{ CAFile: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt", } - return &rest.Config{ + return &k8srest.Config{ Host: "https://" + net.JoinHostPort(host, port), TLSClientConfig: tlsClientConfig, BearerToken: "token", @@ -370,25 +171,110 @@ func (s *providerSuite) assertRestrictedConfigWithTag(c *gc.C, tag names.Tag, is ModelName: "fred", BackendConfig: cfg, } - - backendCfg, err := p.RestrictedConfig(adminCfg, sameController, false, tag, - provider.SecretRevisions{"owned-a": set.NewStrings("owned-rev-1")}, - provider.SecretRevisions{"read-b": set.NewStrings("read-rev-1", "read-rev-2")}, + issuedTokenUUID := "some-uuid" + backendCfg, err := p.RestrictedConfig( + adminCfg, sameController, false, + issuedTokenUUID, tag, + []string{ownedURI.ID}, + provider.SecretRevisions{ownedURI.ID: set.NewStrings(ownedURI.Name(1))}, + provider.SecretRevisions{readURI.ID: 
set.NewStrings(readURI.Name(1), readURI.Name(2))}, ) c.Assert(err, jc.ErrorIsNil) + c.Assert(s.tokens, gc.HasLen, 1) expected := &provider.BackendConfig{ BackendType: kubernetes.BackendType, - Config: map[string]interface{}{ + Config: map[string]any{ "ca-certs": []string{"cert-data"}, "endpoint": "http://nowhere", "namespace": s.namespace, - "token": "token", + "token": s.tokens[0], }, } if isControllerCloud && sameController { expected.Config["endpoint"] = "https://8.6.8.6:8888" } c.Assert(backendCfg, jc.DeepEquals, expected) + + roles, err := s.k8sClient.RbacV1().Roles(s.namespace).List( + ctx, metav1.ListOptions{}) + c.Assert(err, jc.ErrorIsNil) + + mc := jc.NewMultiChecker() + mc.AddExpr(`_[_].ObjectMeta.Annotations["secrets.juju.is/expire-at"]`, jc.Satisfies, func(s string) bool { + i, err := strconv.Atoi(s) + if !c.Check(err, jc.ErrorIsNil) { + return false + } + return i > int(time.Now().Unix()) + }) + c.Check(roles.Items, mc, []rbacv1.Role{{ + ObjectMeta: metav1.ObjectMeta{ + Name: "juju-secret-consumer-" + issuedTokenUUID, + Namespace: s.namespace, + Labels: map[string]string{ + "app.kubernetes.io/managed-by": "juju", + "app.kubernetes.io/name": appNameLabel, + "model.juju.is/name": "fred", + "secrets.juju.is/consumer": consumer, + "secrets.juju.is/model-id": coretesting.ModelTag.Id(), + "secrets.juju.is/model-name": "fred", + }, + Annotations: map[string]string{ + "controller.juju.is/id": coretesting.ControllerTag.Id(), + "model.juju.is/id": coretesting.ModelTag.Id(), + "secrets.juju.is/expire-at": "", + }, + }, + Rules: []rbacv1.PolicyRule{{ + Verbs: []string{"get", "list"}, + APIGroups: []string{"*"}, + Resources: []string{"namespaces"}, + ResourceNames: []string{"test"}, + }, { + Verbs: []string{"get", "patch", "update", "replace", "delete"}, + APIGroups: []string{"*"}, + Resources: []string{"secrets"}, + ResourceNames: []string{ownedURI.Name(1), ownedURI.Name(2)}, + }, { + Verbs: []string{"get"}, + APIGroups: []string{"*"}, + Resources: 
[]string{"secrets"}, + ResourceNames: []string{readURI.Name(1), readURI.Name(2)}, + }}, + }}) + + roleBindings, err := s.k8sClient.RbacV1().RoleBindings(s.namespace).List( + ctx, metav1.ListOptions{}) + c.Assert(err, jc.ErrorIsNil) + c.Check(roleBindings.Items, mc, []rbacv1.RoleBinding{{ + ObjectMeta: metav1.ObjectMeta{ + Name: "juju-secret-consumer-" + issuedTokenUUID, + Namespace: s.namespace, + Labels: map[string]string{ + "app.kubernetes.io/managed-by": "juju", + "app.kubernetes.io/name": appNameLabel, + "model.juju.is/name": "fred", + "secrets.juju.is/consumer": consumer, + "secrets.juju.is/model-id": coretesting.ModelTag.Id(), + "secrets.juju.is/model-name": "fred", + }, + Annotations: map[string]string{ + "controller.juju.is/id": coretesting.ControllerTag.Id(), + "model.juju.is/id": coretesting.ModelTag.Id(), + "secrets.juju.is/expire-at": "", + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Name: "juju-secret-consumer-" + issuedTokenUUID, + Kind: "Role", + }, + Subjects: []rbacv1.Subject{{ + Kind: "ServiceAccount", + Name: "juju-secret-consumer-" + issuedTokenUUID, + Namespace: s.namespace, + }}, + }}) } func (s *providerSuite) TestRestrictedConfigWithUnitTag(c *gc.C) { @@ -407,58 +293,8 @@ func (s *providerSuite) TestRestrictedConfigWithTagWithControllerCloudDifferentC s.assertRestrictedConfigWithTag(c, names.NewUnitTag("gitlab/0"), true, false) } -func ptr[T any](v T) *T { - return &v -} - func (s *providerSuite) TestCleanupModel(c *gc.C) { - ctrl := s.setupController(c) - defer ctrl.Finish() - - selector := "model.juju.is/name=fred" - s.mockServiceAccounts.EXPECT().List(gomock.Any(), v1.ListOptions{ - LabelSelector: selector, - }).Return(&core.ServiceAccountList{}, nil) - s.mockRoles.EXPECT().List(gomock.Any(), v1.ListOptions{ - LabelSelector: selector, - }).Return(&rbacv1.RoleList{}, nil) - s.mockRoleBindings.EXPECT().List(gomock.Any(), v1.ListOptions{ - LabelSelector: selector, - }).Return(&rbacv1.RoleBindingList{}, nil) 
- s.mockClusterRoles.EXPECT().List(gomock.Any(), v1.ListOptions{ - LabelSelector: selector, - }).Return(&rbacv1.ClusterRoleList{Items: []rbacv1.ClusterRole{{ - ObjectMeta: v1.ObjectMeta{Name: "juju-secrets-role", Annotations: map[string]string{ - "model.juju.is/id": coretesting.ModelTag.Id(), - }}, - }, { - ObjectMeta: v1.ObjectMeta{Name: "other-role"}, - }}}, nil) - s.mockClusterRoles.EXPECT().Delete(gomock.Any(), "juju-secrets-role", v1.DeleteOptions{ - PropagationPolicy: k8sconstants.DefaultPropagationPolicy(), - }) - s.mockClusterRoleBindings.EXPECT().List(gomock.Any(), v1.ListOptions{ - LabelSelector: selector, - }).Return(&rbacv1.ClusterRoleBindingList{Items: []rbacv1.ClusterRoleBinding{{ - ObjectMeta: v1.ObjectMeta{Name: "juju-secrets-rolebinding", Annotations: map[string]string{ - "model.juju.is/id": coretesting.ModelTag.Id(), - }}, - }, { - ObjectMeta: v1.ObjectMeta{Name: "other-rolebinding"}, - }}}, nil) - s.mockClusterRoleBindings.EXPECT().Delete(gomock.Any(), "juju-secrets-rolebinding", v1.DeleteOptions{ - PropagationPolicy: k8sconstants.DefaultPropagationPolicy(), - }) - s.mockSecrets.EXPECT().List(gomock.Any(), v1.ListOptions{ - LabelSelector: selector, - }).Return(&core.SecretList{Items: []core.Secret{{ - ObjectMeta: v1.ObjectMeta{Name: "some-secret", Annotations: map[string]string{ - "model.juju.is/id": coretesting.ModelTag.Id(), - }}, - }}}, nil) - s.mockSecrets.EXPECT().Delete(gomock.Any(), "some-secret", v1.DeleteOptions{ - PropagationPolicy: k8sconstants.DefaultPropagationPolicy(), - }) + defer s.setupK8s(c)() p, err := provider.Provider(kubernetes.BackendType) c.Assert(err, jc.ErrorIsNil) @@ -474,12 +310,10 @@ func (s *providerSuite) TestCleanupModel(c *gc.C) { } func (s *providerSuite) TestCleanupSecrets(c *gc.C) { - ctrl := s.setupController(c) - defer ctrl.Finish() + defer s.setupK8s(c)() tag := names.NewUnitTag("gitlab/0") consumer := tag.String() + "-06f00d" - s.expectEnsureSecretAccessToken(consumer, "gitlab", nil, nil) p, err := 
provider.Provider(kubernetes.BackendType) c.Assert(err, jc.ErrorIsNil) @@ -492,34 +326,223 @@ func (s *providerSuite) TestCleanupSecrets(c *gc.C) { err = p.CleanupSecrets(adminCfg, tag, provider.SecretRevisions{"removed": set.NewStrings("rev-1", "rev-2")}) c.Assert(err, jc.ErrorIsNil) + + s.checkEnsureSecretAccessToken(c, consumer, "gitlab", nil, nil) +} + +func (s *providerSuite) TestCleanupSecretsOnlyUpdatesAffectedRoles(c *gc.C) { + defer s.setupK8s(c)() + ctx := context.Background() + + matchingLabels := map[string]string{ + "app.kubernetes.io/managed-by": "juju", + "model.juju.is/name": "fred", + "secrets.juju.is/model-name": "fred", + "secrets.juju.is/model-id": coretesting.ModelTag.Id(), + } + + // Create a role that references revisions to be removed (and one to keep). + _, err := s.k8sClient.RbacV1().Roles(s.namespace).Create(ctx, &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: "affected-role", + Namespace: s.namespace, + Labels: matchingLabels, + }, + Rules: []rbacv1.PolicyRule{{ + Verbs: []string{"get"}, + APIGroups: []string{"*"}, + Resources: []string{"secrets"}, + ResourceNames: []string{"rev-1", "rev-2", "rev-keep"}, + }}, + }, metav1.CreateOptions{}) + c.Assert(err, jc.ErrorIsNil) + + // Create a role that does NOT reference any revisions to be removed. + _, err = s.k8sClient.RbacV1().Roles(s.namespace).Create(ctx, &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unaffected-role", + Namespace: s.namespace, + Labels: matchingLabels, + }, + Rules: []rbacv1.PolicyRule{{ + Verbs: []string{"get"}, + APIGroups: []string{"*"}, + Resources: []string{"secrets"}, + ResourceNames: []string{"rev-3", "rev-4"}, + }}, + }, metav1.CreateOptions{}) + c.Assert(err, jc.ErrorIsNil) + + // Clear recorded actions from setup, this is required to make sure that no + // call was made to patch the unaffected roles. 
+ s.k8sClient.ClearActions() + + tag := names.NewUnitTag("gitlab/0") + p, err := provider.Provider(kubernetes.BackendType) + c.Assert(err, jc.ErrorIsNil) + adminCfg := &provider.ModelBackendConfig{ + ControllerUUID: coretesting.ControllerTag.Id(), + ModelUUID: coretesting.ModelTag.Id(), + ModelName: "fred", + BackendConfig: s.backendConfig(), + } + + err = p.CleanupSecrets(adminCfg, tag, provider.SecretRevisions{ + "secret-1": set.NewStrings("rev-1", "rev-2"), + }) + c.Assert(err, jc.ErrorIsNil) + + // Check that only one role had a call to patch it. + for _, action := range s.k8sClient.Actions() { + if !action.Matches("patch", "roles") { + continue + } + patched := action.(k8stesting.PatchAction) + c.Check(patched.GetName(), gc.Equals, "affected-role") + } + + // Check that the role now has the right resource names. + res, err := s.k8sClient.RbacV1().Roles(s.namespace).Get( + ctx, "affected-role", metav1.GetOptions{}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(res.Rules, gc.HasLen, 1) + c.Check(res.Rules[0].ResourceNames, jc.DeepEquals, []string{"rev-keep"}) + + // Verify unaffected role is unchanged. + unaffectedRole, err := s.k8sClient.RbacV1().Roles(s.namespace).Get( + ctx, "unaffected-role", metav1.GetOptions{}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(unaffectedRole.Rules, gc.HasLen, 1) + c.Check(unaffectedRole.Rules[0].ResourceNames, jc.DeepEquals, []string{ + "rev-3", "rev-4", + }) +} + +func (s *providerSuite) TestCleanupSecretsOnlyUpdatesAffectedClusterRoles(c *gc.C) { + defer s.setupK8s(c)() + ctx := context.Background() + + matchingLabels := map[string]string{ + "app.kubernetes.io/managed-by": "juju", + "model.juju.is/name": "fred", + "secrets.juju.is/model-name": "fred", + "secrets.juju.is/model-id": coretesting.ModelTag.Id(), + } + + // Create a cluster role that references the revisions to be removed. 
+ _, err := s.k8sClient.RbacV1().ClusterRoles().Create(ctx, &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "affected-cluster-role", + Labels: matchingLabels, + }, + Rules: []rbacv1.PolicyRule{{ + Verbs: []string{"get"}, + APIGroups: []string{"*"}, + Resources: []string{"secrets"}, + ResourceNames: []string{"rev-1", "rev-2", "rev-keep"}, + }}, + }, metav1.CreateOptions{}) + c.Assert(err, jc.ErrorIsNil) + + // Create a cluster role that does NOT reference any revisions to be removed. + _, err = s.k8sClient.RbacV1().ClusterRoles().Create(ctx, &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unaffected-cluster-role", + Labels: matchingLabels, + }, + Rules: []rbacv1.PolicyRule{{ + Verbs: []string{"get"}, + APIGroups: []string{"*"}, + Resources: []string{"secrets"}, + ResourceNames: []string{"rev-3", "rev-4"}, + }}, + }, metav1.CreateOptions{}) + c.Assert(err, jc.ErrorIsNil) + + // Clear recorded actions from setup, this is required to make sure that no + // call was made to patch the unaffected roles. + s.k8sClient.ClearActions() + + tag := names.NewUnitTag("gitlab/0") + p, err := provider.Provider(kubernetes.BackendType) + c.Assert(err, jc.ErrorIsNil) + adminCfg := &provider.ModelBackendConfig{ + ControllerUUID: coretesting.ControllerTag.Id(), + ModelUUID: coretesting.ModelTag.Id(), + ModelName: "fred", + BackendConfig: s.backendConfig(), + } + + err = p.CleanupSecrets(adminCfg, tag, provider.SecretRevisions{ + "secret-1": set.NewStrings("rev-1", "rev-2"), + }) + c.Assert(err, jc.ErrorIsNil) + + // Check that only one role had a call to patch it. + for _, action := range s.k8sClient.Actions() { + if !action.Matches("patch", "clusterroles") { + continue + } + patched := action.(k8stesting.PatchAction) + c.Check(patched.GetName(), gc.Equals, "affected-cluster-role") + } + + // Check that the role now has the right resource names. 
+ res, err := s.k8sClient.RbacV1().ClusterRoles().Get( + ctx, "affected-cluster-role", metav1.GetOptions{}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(res.Rules, gc.HasLen, 1) + c.Check(res.Rules[0].ResourceNames, jc.DeepEquals, []string{"rev-keep"}) + + // Check the other role is unchanged. + other, err := s.k8sClient.RbacV1().ClusterRoles().Get( + ctx, "unaffected-cluster-role", metav1.GetOptions{}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(other.Rules, gc.HasLen, 1) + c.Check(other.Rules[0].ResourceNames, jc.DeepEquals, []string{ + "rev-3", + "rev-4", + }) } func (s *providerSuite) TestNewBackend(c *gc.C) { - ctrl := s.setupController(c) - defer ctrl.Finish() + defer s.setupK8s(c)() - s.mockDiscovery.EXPECT().ServerVersion().Return(nil, errors.New("boom")) + cfg := provider.BackendConfig{ + BackendType: kubernetes.BackendType, + Config: map[string]any{ + "ca-certs": []string{"cert-data"}, + "endpoint": "http://nowhere", + "namespace": "missing-namespace", + }, + } p, err := provider.Provider(kubernetes.BackendType) c.Assert(err, jc.ErrorIsNil) b, err := p.NewBackend(&provider.ModelBackendConfig{ ControllerUUID: coretesting.ControllerTag.Id(), ModelUUID: coretesting.ModelTag.Id(), ModelName: "fred", - BackendConfig: s.backendConfig(), + BackendConfig: cfg, }) c.Assert(err, jc.ErrorIsNil) err = b.Ping() - c.Assert(err, gc.ErrorMatches, "backend not reachable: boom") + c.Assert(err, gc.ErrorMatches, + `backend not reachable: checking secrets namespace: `+ + `namespaces "missing-namespace" not found`, + ) } func (s *providerSuite) TestEnsureSecretAccessTokenControllerModelCreate(c *gc.C) { s.namespace = "juju-secrets" - ctrl := s.setupController(c) - defer ctrl.Finish() + defer s.setupK8s(c)() + + ownedURI := secrets.NewURI() + readURI := secrets.NewURI() s.expectEnsureControllerModelSecretAccessToken( - "unit-gitlab-0", []string{"owned-rev-1"}, []string{"read-rev-1", "read-rev-2"}, false) + "unit-gitlab-0", []string{ownedURI.Name(1)}, + []string{readURI.Name(1), 
readURI.Name(2)}, false) p, err := provider.Provider(kubernetes.BackendType) c.Assert(err, jc.ErrorIsNil) @@ -531,104 +554,35 @@ func (s *providerSuite) TestEnsureSecretAccessTokenControllerModelCreate(c *gc.C } tag := names.NewUnitTag("gitlab/0") - backendCfg, err := p.RestrictedConfig(adminCfg, false, false, tag, - provider.SecretRevisions{"owned-a": set.NewStrings("owned-rev-1")}, - provider.SecretRevisions{"read-b": set.NewStrings("read-rev-1", "read-rev-2")}, + issuedTokenUUID := "some-uuid" + backendCfg, err := p.RestrictedConfig( + adminCfg, false, false, + issuedTokenUUID, tag, + []string{ownedURI.ID}, + provider.SecretRevisions{ownedURI.ID: set.NewStrings(ownedURI.Name(1))}, + provider.SecretRevisions{readURI.ID: set.NewStrings(readURI.Name(1), readURI.Name(2))}, ) c.Assert(err, jc.ErrorIsNil) + c.Assert(s.tokens, gc.HasLen, 1) expected := &provider.BackendConfig{ BackendType: kubernetes.BackendType, - Config: map[string]interface{}{ + Config: map[string]any{ "ca-certs": []string{"cert-data"}, "endpoint": "http://nowhere", "namespace": s.namespace, - "token": "token", + "token": s.tokens[0], }, } c.Assert(backendCfg, jc.DeepEquals, expected) c.Assert(err, jc.ErrorIsNil) } func (s *providerSuite) TestEnsureSecretAccessTokenUpdate(c *gc.C) { - ctrl := s.setupController(c) - defer ctrl.Finish() + defer s.setupK8s(c)() tag := names.NewUnitTag("gitlab/0") - name := tag.String() + "-06f00d" - objMeta := v1.ObjectMeta{ - Name: name, - Labels: map[string]string{ - "app.kubernetes.io/managed-by": "juju", - "app.kubernetes.io/name": "gitlab", - "model.juju.is/name": "fred", - "secrets.juju.is/model-name": "fred", - "secrets.juju.is/model-id": coretesting.ModelTag.Id(), - }, - Annotations: map[string]string{ - "model.juju.is/id": coretesting.ModelTag.Id(), - "controller.juju.is/id": coretesting.ControllerTag.Id(), - }, - Namespace: s.namespace, - } - automountServiceAccountToken := true - sa := &core.ServiceAccount{ - ObjectMeta: objMeta, - 
AutomountServiceAccountToken: &automountServiceAccountToken, - } - role := &rbacv1.Role{ - ObjectMeta: objMeta, - Rules: []rbacv1.PolicyRule{ - { - Verbs: []string{"create", "patch"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - }, - { - Verbs: []string{"get", "list"}, - APIGroups: []string{"*"}, - Resources: []string{"namespaces"}, - ResourceNames: []string{"test"}, - }, - }, - } - roleBinding := &rbacv1.RoleBinding{ - ObjectMeta: objMeta, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "Role", - Name: role.Name, - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - Name: sa.Name, - Namespace: sa.Namespace, - }, - }, - } - expiresInSeconds := int64(60 * 10) - treq := &authenticationv1.TokenRequest{ - Spec: authenticationv1.TokenRequestSpec{ - ExpirationSeconds: &expiresInSeconds, - }, - } - gomock.InOrder( - s.mockServiceAccounts.EXPECT().List(gomock.Any(), v1.ListOptions{ - LabelSelector: "model.juju.is/name=fred", - }).Return(&core.ServiceAccountList{}, nil), - s.mockServiceAccounts.EXPECT().Get(gomock.Any(), name, v1.GetOptions{}). - Return(nil, s.k8sNotFoundError()), - s.mockServiceAccounts.EXPECT().Create(gomock.Any(), sa, v1.CreateOptions{FieldManager: "juju"}). 
- Return(sa, nil), - s.mockRoles.EXPECT().Create(gomock.Any(), gomock.Any(), v1.CreateOptions{FieldManager: "juju"}).Return(nil, errors.AlreadyExists), - s.mockRoles.EXPECT().Get(gomock.Any(), name, v1.GetOptions{}).Return(role, nil), - s.mockRoles.EXPECT().Patch(gomock.Any(), role.Name, types.StrategicMergePatchType, gomock.Any(), v1.PatchOptions{FieldManager: "juju"}).Return(role, nil), - s.mockRoleBindings.EXPECT().Create(gomock.Any(), roleBinding, v1.CreateOptions{FieldManager: "juju"}).Return(roleBinding, nil), - s.mockRoleBindings.EXPECT().Get(gomock.Any(), roleBinding.Name, v1.GetOptions{}).Return(roleBinding, nil), - s.mockServiceAccounts.EXPECT().CreateToken(gomock.Any(), name, treq, v1.CreateOptions{FieldManager: "juju"}).Return( - &authenticationv1.TokenRequest{Status: authenticationv1.TokenRequestStatus{Token: "token"}}, nil, - ), - ) + ownedURI := secrets.NewURI() + readURI := secrets.NewURI() p, err := provider.Provider(kubernetes.BackendType) c.Assert(err, jc.ErrorIsNil) @@ -639,30 +593,39 @@ func (s *providerSuite) TestEnsureSecretAccessTokenUpdate(c *gc.C) { BackendConfig: s.backendConfig(), } - backendCfg, err := p.RestrictedConfig(adminCfg, false, false, tag, - provider.SecretRevisions{"owned-a": set.NewStrings("owned-rev-1")}, - provider.SecretRevisions{"read-b": set.NewStrings("read-rev-1", "read-rev-2")}, + issuedTokenUUID := "some-uuid" + backendCfg, err := p.RestrictedConfig( + adminCfg, false, false, + issuedTokenUUID, tag, + []string{ownedURI.ID}, + provider.SecretRevisions{ownedURI.ID: set.NewStrings(ownedURI.Name(1))}, + provider.SecretRevisions{readURI.ID: set.NewStrings(readURI.Name(1), readURI.Name(2))}, ) c.Assert(err, jc.ErrorIsNil) + c.Assert(s.tokens, gc.HasLen, 1) expected := &provider.BackendConfig{ BackendType: kubernetes.BackendType, - Config: map[string]interface{}{ + Config: map[string]any{ "ca-certs": []string{"cert-data"}, "endpoint": "http://nowhere", "namespace": s.namespace, - "token": "token", + "token": s.tokens[0], 
}, } c.Assert(backendCfg, jc.DeepEquals, expected) c.Assert(err, jc.ErrorIsNil) } func (s *providerSuite) TestEnsureSecretAccessTokeControllerModelUpdate(c *gc.C) { - ctrl := s.setupController(c) - defer ctrl.Finish() + defer s.setupK8s(c)() + + ownedURI := secrets.NewURI() + readURI := secrets.NewURI() s.expectEnsureControllerModelSecretAccessToken( - "unit-gitlab-0", []string{"owned-rev-1"}, []string{"read-rev-1", "read-rev-2"}, true) + "unit-gitlab-0", []string{ownedURI.Name(1)}, + []string{readURI.Name(1), readURI.Name(2)}, true, + ) p, err := provider.Provider(kubernetes.BackendType) c.Assert(err, jc.ErrorIsNil) @@ -674,40 +637,44 @@ func (s *providerSuite) TestEnsureSecretAccessTokeControllerModelUpdate(c *gc.C) } tag := names.NewUnitTag("gitlab/0") - backendCfg, err := p.RestrictedConfig(adminCfg, false, false, tag, - provider.SecretRevisions{"owned-a": set.NewStrings("owned-rev-1")}, - provider.SecretRevisions{"read-b": set.NewStrings("read-rev-1", "read-rev-2")}, + issuedTokenUUID := "some-uuid" + backendCfg, err := p.RestrictedConfig( + adminCfg, false, false, + issuedTokenUUID, tag, + []string{ownedURI.ID}, + provider.SecretRevisions{ownedURI.ID: set.NewStrings(ownedURI.Name(1))}, + provider.SecretRevisions{readURI.ID: set.NewStrings(readURI.Name(1), readURI.Name(2))}, ) c.Assert(err, jc.ErrorIsNil) + c.Assert(s.tokens, gc.HasLen, 1) expected := &provider.BackendConfig{ BackendType: kubernetes.BackendType, - Config: map[string]interface{}{ + Config: map[string]any{ "ca-certs": []string{"cert-data"}, "endpoint": "http://nowhere", "namespace": s.namespace, - "token": "token", + "token": s.tokens[0], }, } c.Assert(backendCfg, jc.DeepEquals, expected) c.Assert(err, jc.ErrorIsNil) } func (s *providerSuite) TestGetContent(c *gc.C) { - ctrl := s.setupController(c) - defer ctrl.Finish() + defer s.setupK8s(c)() + ctx := context.Background() uri := secrets.NewURI() - secret := &core.Secret{ - ObjectMeta: v1.ObjectMeta{ - Name: uri.ID + "-1", - Namespace: 
s.namespace, + + _, err := s.k8sClient.CoreV1().Secrets(s.namespace).Create(ctx, &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: uri.Name(1), }, - Type: core.SecretTypeOpaque, Data: map[string][]byte{ "foo": []byte("bar"), }, - } - s.mockSecrets.EXPECT().Get(gomock.Any(), uri.ID+"-1", v1.GetOptions{}).Return(secret, nil) + }, metav1.CreateOptions{}) + c.Assert(err, jc.ErrorIsNil) p, err := provider.Provider(kubernetes.BackendType) c.Assert(err, jc.ErrorIsNil) @@ -719,36 +686,16 @@ func (s *providerSuite) TestGetContent(c *gc.C) { }) c.Assert(err, jc.ErrorIsNil) - content, err := b.GetContent(context.Background(), uri.ID+"-1") + content, err := b.GetContent(context.Background(), uri.Name(1)) c.Assert(err, jc.ErrorIsNil) c.Assert(content.EncodedValues(), jc.DeepEquals, map[string]string{"foo": "YmFy"}) } func (s *providerSuite) TestSaveContent(c *gc.C) { - ctrl := s.setupController(c) - defer ctrl.Finish() + ctx := context.Background() + defer s.setupK8s(c)() uri := secrets.NewURI() - secret := &core.Secret{ - ObjectMeta: v1.ObjectMeta{ - Name: uri.ID + "-1", - Labels: map[string]string{ - "app.kubernetes.io/managed-by": "juju", - "model.juju.is/name": "fred", - "secrets.juju.is/model-name": "fred", - "secrets.juju.is/model-id": coretesting.ModelTag.Id(), - }, - Namespace: s.namespace, - }, - Type: core.SecretTypeOpaque, - StringData: map[string]string{ - "foo": "bar", - }, - } - s.mockSecrets.EXPECT().Create(gomock.Any(), secret, v1.CreateOptions{FieldManager: "juju"}).Return(secret, nil) - s.mockSecrets.EXPECT().Patch( - gomock.Any(), uri.ID+"-1", types.StrategicMergePatchType, gomock.Any(), v1.PatchOptions{FieldManager: "juju"}). 
- Return(nil, s.k8sNotFoundError()) p, err := provider.Provider(kubernetes.BackendType) c.Assert(err, jc.ErrorIsNil) @@ -760,25 +707,43 @@ func (s *providerSuite) TestSaveContent(c *gc.C) { }) c.Assert(err, jc.ErrorIsNil) - name, err := b.SaveContent(context.Background(), uri, 1, secrets.NewSecretValue(map[string]string{"foo": "YmFy"})) + sv := secrets.NewSecretValue(map[string]string{"foo": "YmFy"}) + name, err := b.SaveContent(ctx, uri, 1, sv) c.Assert(err, jc.ErrorIsNil) - c.Assert(name, gc.Equals, uri.ID+"-1") + c.Assert(name, gc.Equals, uri.Name(1)) + + res, err := s.k8sClient.CoreV1().Secrets(s.namespace).List( + ctx, metav1.ListOptions{}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(res.Items, gc.HasLen, 1) + secret := res.Items[0] + c.Check(secret.Name, gc.Equals, uri.Name(1)) + c.Check(secret.Labels, gc.DeepEquals, map[string]string{ + "app.kubernetes.io/managed-by": "juju", + "secrets.juju.is/model-id": coretesting.ModelTag.Id(), + "model.juju.is/name": "fred", + "secrets.juju.is/model-name": "fred", + }) + c.Check(secret.StringData, gc.DeepEquals, map[string]string{ + "foo": "bar", + }) } func (s *providerSuite) TestDeleteContent(c *gc.C) { - ctrl := s.setupController(c) - defer ctrl.Finish() + ctx := context.Background() + defer s.setupK8s(c)() uri := secrets.NewURI() - secret := &core.Secret{ - ObjectMeta: v1.ObjectMeta{ - Name: uri.ID + "-1", - Namespace: s.namespace, + + _, err := s.k8sClient.CoreV1().Secrets(s.namespace).Create(ctx, + &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: uri.Name(1), + }, }, - } - s.mockSecrets.EXPECT().Get(gomock.Any(), uri.ID+"-1", v1.GetOptions{}).Return(secret, nil) - s.mockSecrets.EXPECT().Delete(gomock.Any(), uri.ID+"-1", v1.DeleteOptions{ - PropagationPolicy: k8sconstants.DefaultPropagationPolicy()}) + metav1.CreateOptions{}, + ) + c.Assert(err, jc.ErrorIsNil) p, err := provider.Provider(kubernetes.BackendType) c.Assert(err, jc.ErrorIsNil) @@ -790,23 +755,26 @@ func (s *providerSuite) TestDeleteContent(c *gc.C) { 
}) c.Assert(err, jc.ErrorIsNil) - err = b.DeleteContent(context.Background(), uri.ID+"-1") + err = b.DeleteContent(context.Background(), uri.Name(1)) + c.Assert(err, jc.ErrorIsNil) + + res, err := s.k8sClient.CoreV1().Secrets(s.namespace).List( + ctx, metav1.ListOptions{}) c.Assert(err, jc.ErrorIsNil) + c.Assert(res.Items, gc.HasLen, 0) } func (s *providerSuite) TestRefreshAuth(c *gc.C) { - ctrl := s.setupController(c) - defer ctrl.Finish() + defer s.setupK8s(c)() + ctx := context.Background() - treq := &authenticationv1.TokenRequest{ - Spec: authenticationv1.TokenRequestSpec{ - ExpirationSeconds: ptr(int64(3600)), - }, - } - s.mockServiceAccounts.EXPECT().CreateToken(gomock.Any(), "default", treq, v1.CreateOptions{FieldManager: "juju"}). - Return(&authenticationv1.TokenRequest{ - Status: authenticationv1.TokenRequestStatus{Token: "token2"}, - }, nil) + _, err := s.k8sClient.CoreV1().ServiceAccounts(s.namespace).Create(ctx, + &v1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + }, + }, metav1.CreateOptions{}) + c.Assert(err, jc.ErrorIsNil) p, err := provider.Provider(kubernetes.BackendType) c.Assert(err, jc.ErrorIsNil) @@ -824,5 +792,6 @@ func (s *providerSuite) TestRefreshAuth(c *gc.C) { BackendConfig: cfg, }, validFor) c.Assert(err, jc.ErrorIsNil) - c.Assert(newCfg.Config["token"], gc.Equals, "token2") + c.Assert(s.tokens, gc.HasLen, 1) + c.Assert(newCfg.Config["token"], gc.Equals, s.tokens[0]) }
secrets/provider/kubernetes/rbac_test.go+0 −215 modified@@ -11,9 +11,7 @@ import ( jc "github.com/juju/testing/checkers" "github.com/juju/utils/v3" gc "gopkg.in/check.v1" - core "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" @@ -52,56 +50,6 @@ func (s *rbacSuite) SetUpTest(c *gc.C) { s.k8sclient = s.getFakeClient(c) } -func (s *rbacSuite) TestEnsureRoleBinding(c *gc.C) { - ctx := context.Background() - rbName := "rb-name" - // Check that role binding does not exist initially. - _, err := s.k8sclient.client.RbacV1().RoleBindings(s.k8sclient.namespace).Get(ctx, rbName, v1.GetOptions{}) - c.Assert(k8serrors.IsNotFound(err), gc.Equals, true) - - // Ensure role binding should create the role binding. - rb, cleanupsForCreate, err := s.k8sclient.ensureRoleBinding(ctx, &rbacv1.RoleBinding{ - ObjectMeta: v1.ObjectMeta{ - Name: rbName, - Namespace: s.k8sclient.namespace, - Labels: map[string]string{"app.kubernetes.io/managed-by": "juju", "app.kubernetes.io/name": "app-name"}, - Annotations: map[string]string{ - "hello": "world", - "fred": "mary", - }, - }, - }) - c.Assert(err, jc.ErrorIsNil) - c.Assert(cleanupsForCreate, gc.HasLen, 1) - c.Assert(rb.Name, gc.Equals, rbName) - - // Get the role binding to check it was created correctly. - res, err := s.k8sclient.client.RbacV1().RoleBindings(s.k8sclient.namespace).Get(ctx, rbName, v1.GetOptions{}) - c.Assert(err, jc.ErrorIsNil) - c.Assert(res.Name, gc.Equals, rbName) - c.Assert(res.Labels, gc.DeepEquals, map[string]string{"app.kubernetes.io/managed-by": "juju", "app.kubernetes.io/name": "app-name"}) - c.Assert(res.Annotations, gc.DeepEquals, map[string]string{"hello": "world", "fred": "mary"}) - - // Ensure role binding should get the current role binding with no cleanups if it already exists. 
- rb, cleanupsForUpdate, err := s.k8sclient.ensureRoleBinding(ctx, &rbacv1.RoleBinding{ - ObjectMeta: v1.ObjectMeta{ - Name: rbName, - }, - }) - c.Assert(err, jc.ErrorIsNil) - c.Assert(rb.Name, gc.Equals, rbName) - c.Assert(cleanupsForUpdate, gc.HasLen, 0) - c.Assert(rb.Labels, gc.DeepEquals, map[string]string{"app.kubernetes.io/managed-by": "juju", "app.kubernetes.io/name": "app-name"}) - c.Assert(rb.Annotations, gc.DeepEquals, map[string]string{"hello": "world", "fred": "mary"}) - - // Run cleanups and verify resources are removed. - for _, fn := range cleanupsForCreate { - fn() - } - _, err = s.k8sclient.client.RbacV1().RoleBindings(s.k8sclient.namespace).Get(ctx, rbName, v1.GetOptions{}) - c.Assert(k8serrors.IsNotFound(err), gc.Equals, true) -} - func (s *rbacSuite) TestUpdateClusterRole(c *gc.C) { ctx := context.Background() @@ -148,166 +96,3 @@ func (s *rbacSuite) TestUpdateClusterRole(c *gc.C) { c.Assert(cr.Name, gc.Equals, "cluster-role-name") c.Assert(cr.Rules, gc.DeepEquals, clusterRoleUpdate.Rules) } - -func (s *rbacSuite) makeSA(ns, name string, lbls, ann map[string]string) *core.ServiceAccount { - return &core.ServiceAccount{ - ObjectMeta: v1.ObjectMeta{ - Name: name, - Namespace: ns, - Labels: lbls, - Annotations: ann, - }, - } -} - -func (s *rbacSuite) TestEnsureBinding_CreateRoleAndCreateBinding(c *gc.C) { - ctx := context.Background() - ns := s.k8sclient.namespace - sa := s.makeSA(ns, "sa", - map[string]string{"app.kubernetes.io/managed-by": "juju"}, - map[string]string{"foo": "bar"}, - ) - - owned := []string{"sec-owned-a"} - read := []string{"sec-read-b"} - removed := []string{"sec-removed-c"} - - cleanups, err := s.k8sclient.ensureBindingForSecretAccessToken(ctx, sa, owned, read, removed) - c.Assert(err, jc.ErrorIsNil) - c.Assert(cleanups, gc.HasLen, 2) - - // Check if role was created with correct metadata and rules. 
- role, err := s.k8sclient.client.RbacV1().Roles(ns).Get(ctx, sa.Name, v1.GetOptions{}) - c.Assert(err, jc.ErrorIsNil) - c.Assert(role.Labels, gc.DeepEquals, sa.Labels) - c.Assert(role.Annotations, gc.DeepEquals, sa.Annotations) - c.Assert(role.Rules, gc.DeepEquals, rulesForSecretAccess(ns, false, nil, owned, read, removed)) - - // Check if rolebinding was created. - rb, err := s.k8sclient.client.RbacV1().RoleBindings(ns).Get(ctx, sa.Name, v1.GetOptions{}) - c.Assert(err, jc.ErrorIsNil) - c.Assert(rb.RoleRef.Kind, gc.Equals, "Role") - c.Assert(rb.RoleRef.Name, gc.Equals, sa.Name) - c.Assert(rb.Subjects, gc.HasLen, 1) - c.Assert(rb.Subjects[0].Kind, gc.Equals, "ServiceAccount") - c.Assert(rb.Subjects[0].Name, gc.Equals, sa.Name) - c.Assert(rb.Subjects[0].Namespace, gc.Equals, ns) - - // Run cleanups and verify resources are removed correctly. - for _, fn := range cleanups { - fn() - } - _, err = s.k8sclient.client.RbacV1().Roles(ns).Get(ctx, sa.Name, v1.GetOptions{}) - c.Assert(k8serrors.IsNotFound(err), gc.Equals, true) - _, err = s.k8sclient.client.RbacV1().RoleBindings(ns).Get(ctx, sa.Name, v1.GetOptions{}) - c.Assert(k8serrors.IsNotFound(err), gc.Equals, true) -} - -func (s *rbacSuite) TestEnsureBinding_UpdateRoleAndCreateBinding(c *gc.C) { - ctx := context.Background() - ns := s.k8sclient.namespace - sa := s.makeSA(ns, "sa", map[string]string{"x": "y"}, nil) - - // Pre-create Role with some existing rules to be updated. 
- existing := &rbacv1.Role{ - ObjectMeta: v1.ObjectMeta{Name: sa.Name, Namespace: ns, Labels: sa.Labels}, - Rules: []rbacv1.PolicyRule{{ - APIGroups: []string{""}, - Resources: []string{"secrets"}, - Verbs: []string{"list"}, - }}, - } - _, err := s.k8sclient.client.RbacV1().Roles(ns).Create(ctx, existing, v1.CreateOptions{}) - c.Assert(err, jc.ErrorIsNil) - - owned := []string{"o1"} - read := []string{"r1", "r2"} - removed := []string{"z1"} - - cleanups, err := s.k8sclient.ensureBindingForSecretAccessToken(ctx, sa, owned, read, removed) - c.Assert(err, jc.ErrorIsNil) - - // Role existed -> no role cleanup - // Rolebinding newly created -> 1 rolebinding cleanup - c.Assert(cleanups, gc.HasLen, 1) - - // Role rules updated. - updatedRules, err := s.k8sclient.client.RbacV1().Roles(ns).Get(ctx, sa.Name, v1.GetOptions{}) - c.Assert(err, jc.ErrorIsNil) - c.Assert(updatedRules.Rules, gc.DeepEquals, rulesForSecretAccess(ns, false, existing.Rules, owned, read, removed)) - - // Rolebinding created. - _, err = s.k8sclient.client.RbacV1().RoleBindings(ns).Get(ctx, sa.Name, v1.GetOptions{}) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *rbacSuite) TestEnsureBinding_UpdateRoleAndNotCreateBinding(c *gc.C) { - ctx := context.Background() - ns := s.k8sclient.namespace - sa := s.makeSA(ns, "sa", - map[string]string{"app.kubernetes.io/managed-by": "juju", "app.kubernetes.io/name": "test"}, - map[string]string{"hello": "world"}, - ) - // Pre-create Role. - existingRole, err := s.k8sclient.client.RbacV1().Roles(ns).Create(ctx, &rbacv1.Role{ - ObjectMeta: v1.ObjectMeta{Name: sa.Name, Namespace: ns}, - }, v1.CreateOptions{}) - c.Assert(err, jc.ErrorIsNil) - - // Pre-create RoleBinding. 
- _, err = s.k8sclient.client.RbacV1().RoleBindings(ns).Create(ctx, &rbacv1.RoleBinding{ - ObjectMeta: v1.ObjectMeta{Name: sa.Name, Namespace: ns}, - RoleRef: rbacv1.RoleRef{APIGroup: "rbac.authorization.k8s.io", Kind: "Role", Name: sa.Name}, - Subjects: []rbacv1.Subject{{Kind: "ServiceAccount", Name: sa.Name, Namespace: ns}}, - }, v1.CreateOptions{}) - c.Assert(err, jc.ErrorIsNil) - - owned := []string{"o1"} - read := []string{"r1", "r2"} - removed := []string{"z1"} - - cleanups, err := s.k8sclient.ensureBindingForSecretAccessToken(ctx, sa, owned, read, removed) - c.Assert(err, jc.ErrorIsNil) - c.Assert(cleanups, gc.HasLen, 0) - - // Check role was indeed updated. - updatedRole, err := s.k8sclient.client.RbacV1().Roles(ns).Get(ctx, sa.Name, v1.GetOptions{}) - c.Assert(err, jc.ErrorIsNil) - c.Assert(updatedRole.Rules, gc.DeepEquals, rulesForSecretAccess(ns, false, existingRole.Rules, owned, read, removed)) - - // Check rolebinding was indeed created. - _, err = s.k8sclient.client.RbacV1().RoleBindings(ns).Get(ctx, sa.Name, v1.GetOptions{}) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *rbacSuite) TestEnsureBinding_CreateRoleAndNotCreateBinding(c *gc.C) { - ctx := context.Background() - ns := s.k8sclient.namespace - sa := s.makeSA(ns, "sa", - map[string]string{"app.kubernetes.io/managed-by": "juju", "app.kubernetes.io/name": "test"}, - map[string]string{"hello": "world"}, - ) - - // Pre-create RoleBinding. 
- _, err := s.k8sclient.client.RbacV1().RoleBindings(ns).Create(ctx, &rbacv1.RoleBinding{ - ObjectMeta: v1.ObjectMeta{Name: sa.Name, Namespace: ns}, - RoleRef: rbacv1.RoleRef{APIGroup: "rbac.authorization.k8s.io", Kind: "Role", Name: sa.Name}, - Subjects: []rbacv1.Subject{{Kind: "ServiceAccount", Name: sa.Name, Namespace: ns}}, - }, v1.CreateOptions{}) - c.Assert(err, jc.ErrorIsNil) - - owned := []string{"o1"} - read := []string{"r1", "r2"} - removed := []string{"z1"} - - cleanups, err := s.k8sclient.ensureBindingForSecretAccessToken(ctx, sa, owned, read, removed) - c.Assert(err, jc.ErrorIsNil) - c.Assert(cleanups, gc.HasLen, 1) - - // Check that role was created with correct metadata and rules. - role, err := s.k8sclient.client.RbacV1().Roles(ns).Get(ctx, sa.Name, v1.GetOptions{}) - c.Assert(err, jc.ErrorIsNil) - c.Assert(role.Labels, gc.DeepEquals, sa.Labels) - c.Assert(role.Annotations, gc.DeepEquals, sa.Annotations) - c.Assert(role.Rules, gc.DeepEquals, rulesForSecretAccess(ns, false, nil, owned, read, removed)) -}
secrets/provider/kubernetes/rules_test.go+0 −226 modified@@ -6,236 +6,10 @@ package kubernetes import ( "github.com/juju/testing" gc "gopkg.in/check.v1" - rbacv1 "k8s.io/api/rbac/v1" ) type rulesSuite struct { testing.IsolationSuite } var _ = gc.Suite(&rulesSuite{}) - -func (s *rulesSuite) TestRulesForSecretAccessNew(c *gc.C) { - owned := []string{"owned-secret-1"} - read := []string{"read-secret-1"} - newPolicies := rulesForSecretAccess("test", false, nil, owned, read, nil) - c.Assert(newPolicies, gc.DeepEquals, []rbacv1.PolicyRule{ - { - Verbs: []string{"create", "patch"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - }, - { - Verbs: []string{"get", "list"}, - APIGroups: []string{"*"}, - Resources: []string{"namespaces"}, - ResourceNames: []string{"test"}, - }, - { - Verbs: []string{"*"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - ResourceNames: []string{"owned-secret-1"}, - }, - { - Verbs: []string{"get"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - ResourceNames: []string{"read-secret-1"}, - }, - }) -} - -func (s *rulesSuite) TestRulesForSecretAccessControllerModelNew(c *gc.C) { - owned := []string{"owned-secret-1"} - read := []string{"read-secret-1"} - newPolicies := rulesForSecretAccess("test", true, nil, owned, read, nil) - c.Assert(newPolicies, gc.DeepEquals, []rbacv1.PolicyRule{ - { - Verbs: []string{"create", "patch"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - }, - { - Verbs: []string{"get", "list"}, - APIGroups: []string{"*"}, - Resources: []string{"namespaces"}, - }, - { - Verbs: []string{"*"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - ResourceNames: []string{"owned-secret-1"}, - }, - { - Verbs: []string{"get"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - ResourceNames: []string{"read-secret-1"}, - }, - }) -} - -func (s *rulesSuite) TestRulesForSecretAccessUpdate(c *gc.C) { - existing := []rbacv1.PolicyRule{ - { - Verbs: 
[]string{"create", "patch"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - }, - { - Verbs: []string{"get", "list"}, - APIGroups: []string{"*"}, - Resources: []string{"namespaces"}, - ResourceNames: []string{"test"}, - }, - { - Verbs: []string{"*"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - ResourceNames: []string{"owned-secret-1"}, - }, - { - Verbs: []string{"*"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - ResourceNames: []string{"removed-owned-secret"}, - }, - { - Verbs: []string{"get"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - ResourceNames: []string{"read-secret-1"}, - }, - { - Verbs: []string{"get"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - ResourceNames: []string{"removed-read-secret"}, - }, - } - - owned := []string{"owned-secret-1", "owned-secret-2"} - read := []string{"read-secret-1", "read-secret-2"} - removed := []string{"removed-owned-secret", "removed-read-secret"} - - newPolicies := rulesForSecretAccess("test", false, existing, owned, read, removed) - c.Assert(newPolicies, gc.DeepEquals, []rbacv1.PolicyRule{ - { - Verbs: []string{"create", "patch"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - }, - { - Verbs: []string{"get", "list"}, - APIGroups: []string{"*"}, - Resources: []string{"namespaces"}, - ResourceNames: []string{"test"}, - }, - { - Verbs: []string{"*"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - ResourceNames: []string{"owned-secret-1"}, - }, - { - Verbs: []string{"*"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - ResourceNames: []string{"owned-secret-2"}, - }, - { - Verbs: []string{"get"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - ResourceNames: []string{"read-secret-1"}, - }, - { - Verbs: []string{"get"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - ResourceNames: []string{"read-secret-2"}, - }, - }) -} - -func (s *rulesSuite) 
TestRulesForSecretAccessControllerModelUpdate(c *gc.C) { - existing := []rbacv1.PolicyRule{ - { - Verbs: []string{"create", "patch"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - }, - { - Verbs: []string{"get", "list"}, - APIGroups: []string{"*"}, - Resources: []string{"namespaces"}, - }, - { - Verbs: []string{"*"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - ResourceNames: []string{"owned-secret-1"}, - }, - { - Verbs: []string{"*"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - ResourceNames: []string{"removed-owned-secret"}, - }, - { - Verbs: []string{"get"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - ResourceNames: []string{"read-secret-1"}, - }, - { - Verbs: []string{"get"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - ResourceNames: []string{"removed-read-secret"}, - }, - } - - owned := []string{"owned-secret-1", "owned-secret-2"} - read := []string{"read-secret-1", "read-secret-2"} - removed := []string{"removed-owned-secret", "removed-read-secret"} - - newPolicies := rulesForSecretAccess("test", true, existing, owned, read, removed) - c.Assert(newPolicies, gc.DeepEquals, []rbacv1.PolicyRule{ - { - Verbs: []string{"create", "patch"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - }, - { - Verbs: []string{"get", "list"}, - APIGroups: []string{"*"}, - Resources: []string{"namespaces"}, - }, - { - Verbs: []string{"*"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - ResourceNames: []string{"owned-secret-1"}, - }, - { - Verbs: []string{"*"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - ResourceNames: []string{"owned-secret-2"}, - }, - { - Verbs: []string{"get"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - ResourceNames: []string{"read-secret-1"}, - }, - { - Verbs: []string{"get"}, - APIGroups: []string{"*"}, - Resources: []string{"secrets"}, - ResourceNames: []string{"read-secret-2"}, - }, - }) -}
secrets/provider/provider.go+31 −1 modified@@ -29,6 +29,17 @@ func (nm SecretRevisions) Add(uri *secrets.URI, revisionIDs ...string) { } } +// Insert all the secret revisions from one set into this one. +func (nm SecretRevisions) Insert(other SecretRevisions) { + for id, revs := range other { + if v, ok := nm[id]; ok { + nm[id] = v.Union(revs) + } else { + nm[id] = set.NewStrings(revs.Values()...) + } + } +} + // RevisionIDs returns all the secret revisions. func (nm SecretRevisions) RevisionIDs() (result []string) { for _, revisions := range nm { @@ -80,10 +91,29 @@ type SecretBackendProvider interface { // associated with the model config. CleanupModel(cfg *ModelBackendConfig) error + // IssuesTokens returns true if this secret backend provider needs to issue + // a token to provide a restricted (delegated) config. + IssuesTokens() bool + + // CleanupIssuedTokens removes all ACLs/tokens related to the given issued + // token UUIDs. It returns, even during error, the list of tokens it revoked + // so far. + CleanupIssuedTokens( + cfg *ModelBackendConfig, issuedTokenUUIDs []string, + ) ([]string, error) + // RestrictedConfig returns the config needed to create a // secrets backend client restricted to manage the specified // owned secrets and read shared secrets for the given entity tag. - RestrictedConfig(adminCfg *ModelBackendConfig, sameController, forDrain bool, tag names.Tag, owned SecretRevisions, read SecretRevisions) (*BackendConfig, error) + RestrictedConfig( + adminCfg *ModelBackendConfig, + sameController, forDrain bool, + issuedTokenUUID string, + consumer names.Tag, + owned []string, + ownedRevs SecretRevisions, + readRevs SecretRevisions, + ) (*BackendConfig, error) // NewBackend creates a secrets backend client using the // specified model config.
secrets/provider/vault/provider.go+80 −47 modified@@ -6,6 +6,7 @@ package vault import ( "context" "fmt" + "math" "os" "strings" "time" @@ -16,6 +17,7 @@ import ( "github.com/juju/names/v5" vault "github.com/mittwald/vaultgo" + coresecrets "github.com/juju/juju/core/secrets" "github.com/juju/juju/secrets/provider" ) @@ -87,10 +89,13 @@ func (p vaultProvider) Initialise(cfg *provider.ModelBackendConfig) error { // CleanupModel deletes all secrets and policies associated with the model. func (p vaultProvider) CleanupModel(cfg *provider.ModelBackendConfig) (err error) { defer func() { - if err != nil && strings.HasSuffix(err.Error(), "no route to host") { + if err == nil { + return + } else if strings.HasSuffix(err.Error(), "no route to host") || + strings.HasSuffix(err.Error(), "connection refused") { // There is nothing we can do now, so just log the error and continue. - err = nil logger.Warningf("failed to cleanup secrets for model %q: %v", cfg.ModelUUID, err) + err = nil } }() @@ -180,13 +185,54 @@ func (p vaultProvider) CleanupSecrets(cfg *provider.ModelBackendConfig, tag name return nil } +// IssuesTokens returns true if this secret backend provider needs to issue +// a token to provide a restricted (delegated) config. +func (p vaultProvider) IssuesTokens() bool { + return true +} + +// CleanupIssuedTokens removes all ACLs/tokens related to the given issued +// token UUIDs. It returns, even during error, the list of tokens it revoked +// so far. +func (p vaultProvider) CleanupIssuedTokens( + adminCfg *provider.ModelBackendConfig, issuedTokenUUIDs []string, +) ([]string, error) { + // Get an admin backend client so we can set up the policies. 
+ mountPath := modelPathPrefix(adminCfg.ModelName, adminCfg.ModelUUID) + backend, err := p.newBackend(mountPath, &adminCfg.BackendConfig) + if err != nil { + return nil, errors.Trace(err) + } + sys := backend.client.Sys() + ctx := context.TODO() + + for i, issuedTokenUUID := range issuedTokenUUIDs { + policyName := fmt.Sprintf("%s-%s", mountPath, issuedTokenUUID) + err := sys.DeletePolicyWithContext(ctx, policyName) + if err != nil && !isNotFound(err) { + // Return the tokens deleted so far. + return issuedTokenUUIDs[:i], errors.New( + "removing vault secret backend issued tokens", + ) + } + } + + return issuedTokenUUIDs, nil +} + // RestrictedConfig returns the config needed to create a // secrets backend client restricted to manage the specified // owned secrets and read shared secrets for the given entity tag. func (p vaultProvider) RestrictedConfig( - adminCfg *provider.ModelBackendConfig, sameController, forDrain bool, tag names.Tag, owned provider.SecretRevisions, read provider.SecretRevisions, + adminCfg *provider.ModelBackendConfig, + sameController, forDrain bool, + issuedTokenUUID string, + consumer names.Tag, + owned []string, + ownedRevs provider.SecretRevisions, + readRevs provider.SecretRevisions, ) (*provider.BackendConfig, error) { - adminUser := tag == nil + adminUser := consumer == nil // Get an admin backend client so we can set up the policies. modelPath := modelPathPrefix(adminCfg.ModelName, adminCfg.ModelUUID) backend, err := p.newBackend(modelPath, &adminCfg.BackendConfig) @@ -195,69 +241,56 @@ func (p vaultProvider) RestrictedConfig( } mountPath := backend.mountPath sys := backend.client.Sys() + ctx := context.TODO() - ctx := context.Background() - var policies []string - if forDrain { - // For drain worker, we need to be able to update a secret. + var rules []string + if forDrain && (adminUser || consumer.Kind() == names.ModelTagKind) { + // For controller drain worker, we need to be able to update a secret. 
// Because we may run into a situation that the worker creates a secret in the vault but gets killed/restarted // before it can update the secret to the new backend, we need to allow the worker to update the content // after it's coming up again. rule := fmt.Sprintf(`path "%s/*" {capabilities = ["update"]}`, mountPath) - policyName := mountPath + "-update" - err = sys.PutPolicyWithContext(ctx, policyName, rule) - if err != nil { - return nil, errors.Annotatef(err, "creating update policy for model %q for the drain worker", mountPath) - } - policies = append(policies, policyName) + rules = append(rules, rule) } if adminUser { // For admin users, all secrets for the model can be read. rule := fmt.Sprintf(`path "%s/*" {capabilities = ["read"]}`, mountPath) - policyName := mountPath + "-read" - err = sys.PutPolicyWithContext(ctx, policyName, rule) - if err != nil { - return nil, errors.Annotatef(err, "creating read policy for model %q", mountPath) - } - policies = append(policies, policyName) - } else { - // Agents can create new secrets in the model. - rule := fmt.Sprintf(`path "%s/*" {capabilities = ["create"]}`, mountPath) - policyName := mountPath + "-create" - err = sys.PutPolicyWithContext(ctx, policyName, rule) - if err != nil { - return nil, errors.Annotatef(err, "creating create policy for model %q", mountPath) - } - policies = append(policies, policyName) + rules = append(rules, rule) } + // Any secrets owned by the agent can be updated/deleted etc. 
logger.Debugf("owned secrets: %#v", owned) - for id := range owned { + for _, id := range owned { rule := fmt.Sprintf(`path "%s/%s-*" {capabilities = ["create", "read", "update", "delete", "list"]}`, mountPath, id) - policyName := fmt.Sprintf("%s-%s-owner", mountPath, id) - err = sys.PutPolicyWithContext(ctx, policyName, rule) - if err != nil { - return nil, errors.Annotatef(err, "creating owner policy for %q", id) - } - policies = append(policies, policyName) + rules = append(rules, rule) } // Any secrets consumed by the agent can be read etc. - logger.Debugf("consumed secrets: %#v", read) - for id := range read { - rule := fmt.Sprintf(`path "%s/%s-*" {capabilities = ["read"]}`, mountPath, id) - policyName := fmt.Sprintf("%s-%s-read", mountPath, id) - err = sys.PutPolicyWithContext(ctx, policyName, rule) - if err != nil { - return nil, errors.Annotatef(err, "creating read policy for %q", id) + logger.Debugf("consumed secret revisions: %#v", readRevs) + for _, revs := range readRevs { + for _, revId := range revs.Values() { + rule := fmt.Sprintf(`path "%s/%s" {capabilities = ["read"]}`, mountPath, revId) + rules = append(rules, rule) } - policies = append(policies, policyName) } - logger.Tracef("policies: %#v", policies) + + policyName := fmt.Sprintf("%s-%s", mountPath, issuedTokenUUID) + err = sys.PutPolicyWithContext(ctx, policyName, strings.Join(rules, "\n")) + if err != nil { + return nil, errors.Annotatef(err, "creating policy %q", policyName) + } + logger.Tracef("policy rules for %q: %#v", policyName, rules) + + ttl := fmt.Sprintf( + "%dm", int(math.Ceil(coresecrets.IssuedTokenValidity.Minutes())), + ) s, err := backend.client.Auth().Token().Create(&api.TokenCreateRequest{ - TTL: "10m", // 10 minutes for now, can configure later. 
+ TTL: ttl, NoDefaultPolicy: true, - Policies: policies, + Policies: []string{policyName}, + Metadata: map[string]string{ + "juju-issued-token-uuid": issuedTokenUUID, + }, }) if err != nil { return nil, errors.Annotate(err, "creating secret access token")
secrets/provider/vault/provider_test.go+105 −82 modified@@ -4,7 +4,9 @@ package vault_test import ( + "encoding/json" "io" + "io/ioutil" "net/http" "strings" @@ -92,7 +94,7 @@ func (s *providerSuite) TestBackendConfigBadClient(c *gc.C) { ModelName: "fred", BackendConfig: provider.BackendConfig{ BackendType: "vault", - Config: map[string]interface{}{ + Config: map[string]any{ "endpoint": "http://vault-ip:8200/", "namespace": "ns", "token": "vault-token", @@ -101,35 +103,54 @@ func (s *providerSuite) TestBackendConfigBadClient(c *gc.C) { }, }, } - _, err = p.RestrictedConfig(adminCfg, true, false, nil, nil, nil) + issuedTokenUUID := "some-uuid" + _, err = p.RestrictedConfig( + adminCfg, true, false, issuedTokenUUID, nil, nil, nil, nil) c.Assert(err, gc.ErrorMatches, "boom") } func (s *providerSuite) TestBackendConfigAdmin(c *gc.C) { ctrl, newVaultClient := s.newVaultClient(c, nil) defer ctrl.Finish() - gomock.InOrder( - s.mockRoundTripper.EXPECT().RoundTrip(gomock.Any()).DoAndReturn( - func(req *http.Request) (*http.Response, error) { - c.Assert(req.URL.String(), gc.Equals, `http://vault-ip:8200/v1/sys/policies/acl/fred-06f00d-read`) - return &http.Response{ - Request: req, - StatusCode: http.StatusOK, - Body: io.NopCloser(nil), - }, nil - }, - ), - s.mockRoundTripper.EXPECT().RoundTrip(gomock.Any()).DoAndReturn( - func(req *http.Request) (*http.Response, error) { - c.Assert(req.URL.String(), gc.Equals, `http://vault-ip:8200/v1/auth/token/create`) - return &http.Response{ - Request: req, - StatusCode: http.StatusOK, - Body: io.NopCloser(strings.NewReader(`{"auth": {"client_token": "foo"}}`)), - }, nil - }, - ), + s.mockRoundTripper.EXPECT().RoundTrip(gomock.Any()).DoAndReturn( + func(req *http.Request) (*http.Response, error) { + c.Assert(req.URL.String(), gc.Equals, `http://vault-ip:8200/v1/sys/policies/acl/fred-06f00d-some-uuid`) + b, _ := ioutil.ReadAll(req.Body) + defer req.Body.Close() + policyReq := struct { + Policy string + }{} + _ = json.Unmarshal(b, 
&policyReq) + c.Assert(policyReq.Policy, gc.Equals, strings.Join([]string{ + `path "fred-06f00d/*" {capabilities = ["read"]}`, + }, "\n")) + return &http.Response{ + Request: req, + StatusCode: http.StatusOK, + Body: io.NopCloser(nil), + }, nil + }, + ) + s.mockRoundTripper.EXPECT().RoundTrip(gomock.Any()).DoAndReturn( + func(req *http.Request) (*http.Response, error) { + c.Assert(req.URL.String(), gc.Equals, `http://vault-ip:8200/v1/auth/token/create`) + b, _ := ioutil.ReadAll(req.Body) + defer req.Body.Close() + tokenReq := api.TokenCreateRequest{} + _ = json.Unmarshal(b, &tokenReq) + c.Assert(tokenReq, jc.DeepEquals, api.TokenCreateRequest{ + Policies: []string{"fred-06f00d-some-uuid"}, + Metadata: map[string]string{"juju-issued-token-uuid": "some-uuid"}, + TTL: "10m", + NoDefaultPolicy: true, + }) + return &http.Response{ + Request: req, + StatusCode: http.StatusOK, + Body: io.NopCloser(strings.NewReader(`{"auth": {"client_token": "foo"}}`)), + }, nil + }, ) s.PatchValue(&jujuvault.NewVaultClient, newVaultClient) @@ -142,7 +163,7 @@ func (s *providerSuite) TestBackendConfigAdmin(c *gc.C) { ModelName: "fred", BackendConfig: provider.BackendConfig{ BackendType: "vault", - Config: map[string]interface{}{ + Config: map[string]any{ "endpoint": "http://vault-ip:8200/", "namespace": "ns", "token": "vault-token", @@ -151,7 +172,9 @@ func (s *providerSuite) TestBackendConfigAdmin(c *gc.C) { }, }, } - cfg, err := p.RestrictedConfig(adminCfg, true, false, nil, nil, nil) + issuedTokenUUID := "some-uuid" + cfg, err := p.RestrictedConfig( + adminCfg, true, false, issuedTokenUUID, nil, nil, nil, nil) c.Assert(err, jc.ErrorIsNil) c.Assert(cfg.Config["token"], gc.Equals, "foo") } @@ -162,27 +185,17 @@ func (s *providerSuite) TestBackendConfigNonAdmin(c *gc.C) { s.mockRoundTripper.EXPECT().RoundTrip(gomock.Any()).DoAndReturn( func(req *http.Request) (*http.Response, error) { - c.Assert(req.URL.String(), gc.Equals, `http://vault-ip:8200/v1/sys/policies/acl/fred-06f00d-create`) - 
return &http.Response{ - Request: req, - StatusCode: http.StatusOK, - Body: io.NopCloser(nil), - }, nil - }, - ) - s.mockRoundTripper.EXPECT().RoundTrip(gomock.Any()).DoAndReturn( - func(req *http.Request) (*http.Response, error) { - c.Assert(req.URL.String(), gc.Equals, `http://vault-ip:8200/v1/sys/policies/acl/fred-06f00d-owned-1-owner`) - return &http.Response{ - Request: req, - StatusCode: http.StatusOK, - Body: io.NopCloser(nil), - }, nil - }, - ) - s.mockRoundTripper.EXPECT().RoundTrip(gomock.Any()).DoAndReturn( - func(req *http.Request) (*http.Response, error) { - c.Assert(req.URL.String(), gc.Equals, `http://vault-ip:8200/v1/sys/policies/acl/fred-06f00d-read-1-read`) + c.Assert(req.URL.String(), gc.Equals, `http://vault-ip:8200/v1/sys/policies/acl/fred-06f00d-some-uuid`) + b, _ := ioutil.ReadAll(req.Body) + defer req.Body.Close() + policyReq := struct { + Policy string + }{} + _ = json.Unmarshal(b, &policyReq) + c.Assert(policyReq.Policy, gc.Equals, strings.Join([]string{ + `path "fred-06f00d/owned-1-*" {capabilities = ["create", "read", "update", "delete", "list"]}`, + `path "fred-06f00d/read-rev-1" {capabilities = ["read"]}`, + }, "\n")) return &http.Response{ Request: req, StatusCode: http.StatusOK, @@ -193,6 +206,16 @@ func (s *providerSuite) TestBackendConfigNonAdmin(c *gc.C) { s.mockRoundTripper.EXPECT().RoundTrip(gomock.Any()).DoAndReturn( func(req *http.Request) (*http.Response, error) { c.Assert(req.URL.String(), gc.Equals, `http://vault-ip:8200/v1/auth/token/create`) + b, _ := ioutil.ReadAll(req.Body) + defer req.Body.Close() + tokenReq := api.TokenCreateRequest{} + _ = json.Unmarshal(b, &tokenReq) + c.Assert(tokenReq, jc.DeepEquals, api.TokenCreateRequest{ + Policies: []string{"fred-06f00d-some-uuid"}, + Metadata: map[string]string{"juju-issued-token-uuid": "some-uuid"}, + TTL: "10m", + NoDefaultPolicy: true, + }) return &http.Response{ Request: req, StatusCode: http.StatusOK, @@ -211,7 +234,7 @@ func (s *providerSuite) 
TestBackendConfigNonAdmin(c *gc.C) { ModelName: "fred", BackendConfig: provider.BackendConfig{ BackendType: "vault", - Config: map[string]interface{}{ + Config: map[string]any{ "endpoint": "http://vault-ip:8200/", "namespace": "ns", "token": "vault-token", @@ -220,13 +243,18 @@ func (s *providerSuite) TestBackendConfigNonAdmin(c *gc.C) { }, }, } + ownedNames := []string{"owned-1"} ownedRevs := map[string]set.Strings{ "owned-1": set.NewStrings("owned-rev-1", "owned-rev-2"), } readRevs := map[string]set.Strings{ "read-1": set.NewStrings("read-rev-1"), } - cfg, err := p.RestrictedConfig(adminCfg, true, false, names.NewUnitTag("ubuntu/0"), ownedRevs, readRevs) + issuedTokenUUID := "some-uuid" + cfg, err := p.RestrictedConfig( + adminCfg, true, false, issuedTokenUUID, names.NewUnitTag("ubuntu/0"), + ownedNames, ownedRevs, readRevs, + ) c.Assert(err, jc.ErrorIsNil) c.Assert(cfg.Config["token"], gc.Equals, "foo") } @@ -237,37 +265,17 @@ func (s *providerSuite) TestBackendConfigForDrain(c *gc.C) { s.mockRoundTripper.EXPECT().RoundTrip(gomock.Any()).DoAndReturn( func(req *http.Request) (*http.Response, error) { - c.Assert(req.URL.String(), gc.Equals, `http://vault-ip:8200/v1/sys/policies/acl/fred-06f00d-update`) - return &http.Response{ - Request: req, - StatusCode: http.StatusOK, - Body: io.NopCloser(nil), - }, nil - }, - ) - s.mockRoundTripper.EXPECT().RoundTrip(gomock.Any()).DoAndReturn( - func(req *http.Request) (*http.Response, error) { - c.Assert(req.URL.String(), gc.Equals, `http://vault-ip:8200/v1/sys/policies/acl/fred-06f00d-create`) - return &http.Response{ - Request: req, - StatusCode: http.StatusOK, - Body: io.NopCloser(nil), - }, nil - }, - ) - s.mockRoundTripper.EXPECT().RoundTrip(gomock.Any()).DoAndReturn( - func(req *http.Request) (*http.Response, error) { - c.Assert(req.URL.String(), gc.Equals, `http://vault-ip:8200/v1/sys/policies/acl/fred-06f00d-owned-1-owner`) - return &http.Response{ - Request: req, - StatusCode: http.StatusOK, - Body: 
io.NopCloser(nil), - }, nil - }, - ) - s.mockRoundTripper.EXPECT().RoundTrip(gomock.Any()).DoAndReturn( - func(req *http.Request) (*http.Response, error) { - c.Assert(req.URL.String(), gc.Equals, `http://vault-ip:8200/v1/sys/policies/acl/fred-06f00d-read-1-read`) + c.Assert(req.URL.String(), gc.Equals, `http://vault-ip:8200/v1/sys/policies/acl/fred-06f00d-some-uuid`) + b, _ := ioutil.ReadAll(req.Body) + defer req.Body.Close() + policyReq := struct { + Policy string + }{} + _ = json.Unmarshal(b, &policyReq) + c.Assert(policyReq.Policy, gc.Equals, strings.Join([]string{ + `path "fred-06f00d/owned-1-*" {capabilities = ["create", "read", "update", "delete", "list"]}`, + `path "fred-06f00d/read-rev-1" {capabilities = ["read"]}`, + }, "\n")) return &http.Response{ Request: req, StatusCode: http.StatusOK, @@ -278,6 +286,16 @@ func (s *providerSuite) TestBackendConfigForDrain(c *gc.C) { s.mockRoundTripper.EXPECT().RoundTrip(gomock.Any()).DoAndReturn( func(req *http.Request) (*http.Response, error) { c.Assert(req.URL.String(), gc.Equals, `http://vault-ip:8200/v1/auth/token/create`) + b, _ := ioutil.ReadAll(req.Body) + defer req.Body.Close() + tokenReq := api.TokenCreateRequest{} + _ = json.Unmarshal(b, &tokenReq) + c.Assert(tokenReq, jc.DeepEquals, api.TokenCreateRequest{ + Policies: []string{"fred-06f00d-some-uuid"}, + Metadata: map[string]string{"juju-issued-token-uuid": "some-uuid"}, + TTL: "10m", + NoDefaultPolicy: true, + }) return &http.Response{ Request: req, StatusCode: http.StatusOK, @@ -296,7 +314,7 @@ func (s *providerSuite) TestBackendConfigForDrain(c *gc.C) { ModelName: "fred", BackendConfig: provider.BackendConfig{ BackendType: "vault", - Config: map[string]interface{}{ + Config: map[string]any{ "endpoint": "http://vault-ip:8200/", "namespace": "ns", "token": "vault-token", @@ -305,13 +323,18 @@ func (s *providerSuite) TestBackendConfigForDrain(c *gc.C) { }, }, } + ownedNames := []string{"owned-1"} ownedRevs := map[string]set.Strings{ "owned-1": 
set.NewStrings("owned-rev-1", "owned-rev-2"), } readRevs := map[string]set.Strings{ "read-1": set.NewStrings("read-rev-1"), } - cfg, err := p.RestrictedConfig(adminCfg, true, true, names.NewUnitTag("ubuntu/0"), ownedRevs, readRevs) + issuedTokenUUID := "some-uuid" + cfg, err := p.RestrictedConfig( + adminCfg, true, true, issuedTokenUUID, + names.NewUnitTag("ubuntu/0"), ownedNames, ownedRevs, readRevs, + ) c.Assert(err, jc.ErrorIsNil) c.Assert(cfg.Config["token"], gc.Equals, "foo") } @@ -328,7 +351,7 @@ func (s *providerSuite) TestNewBackend(c *gc.C) { ModelUUID: coretesting.ModelTag.Id(), BackendConfig: provider.BackendConfig{ BackendType: jujuvault.BackendType, - Config: map[string]interface{}{ + Config: map[string]any{ "endpoint": "http://vault-ip:8200/", "namespace": "ns", "token": "vault-token",
service/snap/app.go+38 −3 modified@@ -59,42 +59,58 @@ type App struct { // Validate will validate a given application for any potential issues. func (a *App) Validate() error { + logger.Debugf("validating snap app %q (path=%q, assertsPath=%q, confinement=%q, channel=%q)", + a.name, a.path, a.assertsPath, a.confinementPolicy, a.channel) if !snapNameRe.MatchString(a.name) { + logger.Warningf("snap app name %q does not match naming convention", a.name) return errors.NotValidf("application %v name", a.name) } if a.path != "" { + logger.Debugf("snap app %q is a local snap, checking path %q", a.name, a.path) if _, err := os.Stat(a.path); err != nil { + logger.Warningf("snap app %q local path %q not found: %v", a.name, a.path, err) return errors.NotFoundf("application %v path %v", a.name, a.path) } if a.assertsPath == "" { + logger.Warningf("snap app %q is local but has no assert file specified", a.name) return errors.NotValidf("local snap %v requires an assert file", a.name) } if _, err := os.Stat(a.assertsPath); err != nil { + logger.Warningf("snap app %q asserts path %q not found: %v", a.name, a.assertsPath, err) return errors.NotFoundf("application %v asserts path %v", a.name, a.assertsPath) } + logger.Debugf("snap app %q local paths validated successfully", a.name) } if a.confinementPolicy != "" { + logger.Debugf("validating confinement policy %q for snap app %q", a.confinementPolicy, a.name) if err := a.confinementPolicy.Validate(); err != nil { + logger.Warningf("snap app %q confinement policy %q is invalid: %v", a.name, a.confinementPolicy, err) return errors.Trace(err) } } for _, backgroundService := range a.backgroundServices { + logger.Debugf("validating background service %q for snap app %q", backgroundService.Name, a.name) err := backgroundService.Validate() if err != nil { + logger.Warningf("background service %q validation failed for snap app %q: %v", backgroundService.Name, a.name, err) return errors.Trace(err) } } for _, prerequisite := range 
a.prerequisites { + logger.Debugf("validating prerequisite %q for snap app %q", prerequisite.Name(), a.name) err := prerequisite.Validate() if err != nil { + logger.Warningf("prerequisite %q validation failed for snap app %q: %v", prerequisite.Name(), a.name, err) return errors.Trace(err) } } + logger.Debugf("snap app %q validation successful (%d background services, %d prerequisites)", + a.name, len(a.backgroundServices), len(a.prerequisites)) return nil } @@ -103,60 +119,79 @@ func (a *App) Validate() error { // executable. If the app has prerequisite applications defined, then take care to call // StartCommands on those apps also. func (a *App) StartCommands(executable string) []string { + logger.Debugf("generating start commands for snap app %q (executable=%q, background services=%d)", + a.name, executable, len(a.backgroundServices)) if len(a.backgroundServices) == 0 { - return []string{fmt.Sprintf("%s start %s", executable, a.name)} + cmd := fmt.Sprintf("%s start %s", executable, a.name) + logger.Debugf("snap app %q has no background services, using single start command: %q", a.name, cmd) + return []string{cmd} } commands := make([]string, 0, len(a.backgroundServices)) for _, backgroundService := range a.backgroundServices { enableFlag := "" if backgroundService.EnableAtStartup { enableFlag = " --enable " + logger.Debugf("background service %q.%q will be enabled at startup", a.name, backgroundService.Name) } command := fmt.Sprintf("%s start %s %s.%s", executable, enableFlag, a.name, backgroundService.Name) + logger.Debugf("generated start command for %q.%q: %q", a.name, backgroundService.Name, command) commands = append(commands, command) } + logger.Debugf("generated %d start commands for snap app %q", len(commands), a.name) return commands } // InstallArgs returns a way to install one application with all it's settings. 
func (a *App) InstallArgs() []string { + logger.Debugf("building install args for snap app %q (confinement=%q, path=%q, channel=%q)", + a.name, a.confinementPolicy, a.path, a.channel) args := []string{ "install", } if a.confinementPolicy != "" { + logger.Debugf("snap app %q: adding confinement policy %q to install args", a.name, a.confinementPolicy) args = append(args, fmt.Sprintf("--%s", a.confinementPolicy)) } if a.path != "" { // return early if this is a local snap, skipping over not-applicable // args such as channel - return append(args, a.path) + args = append(args, a.path) + logger.Debugf("snap app %q: local install args: %v", a.name, args) + return args } if a.channel != "" { + logger.Debugf("snap app %q: adding channel %q to install args", a.name, a.channel) args = append(args, fmt.Sprintf("--channel=%s", a.channel)) } - return append(args, a.name) + args = append(args, a.name) + logger.Debugf("snap app %q: store install args: %v", a.name, args) + return args } // AcknowledgeAssertsArgs returns args to acknowledge the asserts for the snap // required to install this application. Returns nil if none are required. func (a *App) AcknowledgeAssertsArgs() []string { if a.assertsPath == "" { + logger.Debugf("snap app %q has no asserts path, no ack args needed", a.name) return nil } + logger.Debugf("snap app %q: asserts acknowledgement args: ack %q", a.name, a.assertsPath) return []string{"ack", a.assertsPath} } // Prerequisites defines a list of all the Prerequisites required before the // application also needs to be installed. func (a *App) Prerequisites() []Installable { + logger.Debugf("snap app %q has %d prerequisites", a.name, len(a.prerequisites)) return a.prerequisites } // BackgroundServices returns a list of background services that are // required to be installed for the main application to run. 
func (a *App) BackgroundServices() []BackgroundService { + logger.Debugf("snap app %q has %d background services", a.name, len(a.backgroundServices)) return a.backgroundServices }
service/snap/snap.go+148 −12 modified@@ -73,16 +73,20 @@ func (backgroundService *BackgroundService) Validate() error { // SetSnapConfig sets a snap's key to value. func SetSnapConfig(snap string, key string, value string) error { + logger.Infof("setting snap %q config key %q to value %q", snap, key, value) if key == "" { + logger.Warningf("set snap config called with empty key for snap %q", snap) return errors.NotValidf("key must not be empty") } cmd := exec.Command(Command, "set", snap, fmt.Sprintf("%s=%s", key, value)) _, err := cmd.Output() if err != nil { + logger.Errorf("failed to set snap %q config %q=%q: %v", snap, key, value, err) return errors.Annotate(err, fmt.Sprintf("setting snap %s config %s to %s", snap, key, value)) } + logger.Infof("successfully set snap %q config %q", snap, key) return nil } @@ -179,7 +183,10 @@ type ServiceConfig struct { // If no BackgroundServices are provided, Service will wrap all of the snap's // background services. func NewService(config ServiceConfig) (Service, error) { + logger.Infof("creating new snap service %q (path=%q, channel=%q, confinement=%q)", + config.ServiceName, config.SnapPath, config.Channel, config.ConfinementPolicy) if config.ServiceName == "" { + logger.Warningf("NewService called with empty ServiceName") return Service{}, errors.New("ServiceName must be provided") } app := &App{ @@ -193,10 +200,13 @@ func NewService(config ServiceConfig) (Service, error) { } err := app.Validate() if err != nil { + logger.Warningf("snap app validation failed for %q: %v", config.ServiceName, err) return Service{}, errors.Trace(err) } isLocal := config.SnapPath != "" + logger.Debugf("snap service %q: isLocal=%v, configDir=%q, executable=%q", + config.ServiceName, isLocal, config.ConfigDir, config.SnapExecutable) return Service{ runnable: defaultRunner{}, @@ -214,16 +224,23 @@ func NewService(config ServiceConfig) (Service, error) { // Validate validates that snap.Service has been correctly configured. 
// Validate returns nil when successful and an error when successful. func (s Service) Validate() error { + logger.Debugf("validating snap service %q", s.name) if err := s.app.Validate(); err != nil { + logger.Warningf("snap service %q app validation failed: %v", s.name, err) return errors.Trace(err) } for _, prerequisite := range s.app.Prerequisites() { if err := prerequisite.Validate(); err != nil { + logger.Warningf( + "snap service %q prerequisite %q validation failed: %v", + s.name, prerequisite.Name(), err, + ) return errors.Trace(err) } } + logger.Debugf("snap service %q validation successful", s.name) return nil } @@ -245,10 +262,13 @@ func (s Service) IsLocal() bool { // Running returns (true, nil) when snap indicates that service is currently active. func (s Service) Running() (bool, error) { + logger.Debugf("checking if snap service %q is running", s.name) _, _, running, err := s.status() if err != nil { + logger.Warningf("failed to check running status for snap service %q: %v", s.name, err) return false, errors.Trace(err) } + logger.Debugf("snap service %q running=%v", s.name, running) return running, nil } @@ -259,19 +279,30 @@ func (s Service) Exists() (bool, error) { // Install installs the snap and its background services. 
func (s Service) Install() error { - for _, app := range s.app.Prerequisites() { - logger.Infof("command: %v", app) + logger.Infof("installing snap service %q (isLocal=%v)", s.name, s.isLocal) + prerequisites := s.app.Prerequisites() + logger.Infof("snap service %q has %d prereq(s) to install", s.name, len(prerequisites)) + for i, app := range prerequisites { + logger.Infof("installing prerequisite %d/%d: %q", i+1, len(prerequisites), app.Name()) out, err := s.installAppWithRetry(app) if err != nil { + logger.Errorf( + "failed to install prereq %q for snap service %q: %v (output: %v)", + app.Name(), s.name, err, out, + ) return errors.Annotatef(err, "output: %v", out) } + logger.Infof("successfully installed prerequisite %q", app.Name()) } + logger.Infof("installing snap app %q with args: %v", s.app.Name(), s.app.InstallArgs()) out, err := s.installAppWithRetry(s.app) if err != nil { + logger.Errorf("failed to install snap service %q: %v (output: %v)", s.name, err, out) return errors.Annotatef(err, "output: %v", out) } + logger.Infof("successfully installed snap service %q", s.name) return nil } @@ -280,58 +311,97 @@ func (s Service) Install() error { func (s Service) installAppWithRetry(app Installable) (string, error) { ackAsserts := app.AcknowledgeAssertsArgs() if ackAsserts != nil { + logger.Infof("acknowledging asserts for snap %q: %v", app.Name(), ackAsserts) _, err := s.runCommandWithRetry(ackAsserts...) if err != nil { + logger.Errorf("failed to acknowledge asserts for snap %q: %v", app.Name(), err) return "", errors.Trace(err) } + logger.Infof("successfully acknowledged asserts for snap %q", app.Name()) + } else { + logger.Debugf("no asserts to acknowledge for snap %q", app.Name()) } + logger.Infof("running install command for snap %q with args: %v", app.Name(), app.InstallArgs()) return s.runCommandWithRetry(app.InstallArgs()...) } // Installed returns true if the service has been successfully installed. 
func (s Service) Installed() (bool, error) { + logger.Debugf("checking if snap service %q is installed", s.name) installed, _, _, err := s.status() if err != nil { + logger.Warningf("failed to check installed status for snap service %q: %v", s.name, err) return false, errors.Trace(err) } + logger.Debugf("snap service %q installed=%v", s.name, installed) return installed, nil } // ConfigOverride writes a systemd override to enable the // specified limits to be used by the snap. func (s Service) ConfigOverride() error { + logger.Debugf( + "applying config overrides for snap service %q (limits count: %d)", + s.name, len(s.conf.Limit), + ) if len(s.conf.Limit) == 0 { + logger.Debugf("no config limits defined for snap service %q, skipping override", s.name) return nil } unitOptions := systemd.ServiceLimits(s.conf) data, err := io.ReadAll(systemd.UnitSerialize(unitOptions)) if err != nil { + logger.Errorf("failed to serialise systemd unit options for snap service %q: %v", s.name, err) return errors.Trace(err) } - for _, backgroundService := range s.app.BackgroundServices() { + backgroundServices := s.app.BackgroundServices() + logger.Infof( + "writing config overrides for %d background services of snap %q", + len(backgroundServices), s.name, + ) + for _, backgroundService := range backgroundServices { overridesDir := fmt.Sprintf("%s/snap.%s.%s.service.d", s.configDir, s.name, backgroundService.Name) + logger.Debugf( + "creating overrides directory %q for background service %q", + overridesDir, backgroundService.Name, + ) if err := os.MkdirAll(overridesDir, 0755); err != nil { + logger.Errorf("failed to create overrides directory %q: %v", overridesDir, err) return errors.Trace(err) } - if err := os.WriteFile(filepath.Join(overridesDir, "overrides.conf"), data, 0644); err != nil { + overridePath := filepath.Join(overridesDir, "overrides.conf") + logger.Debugf("writing overrides config to %q", overridePath) + if err := os.WriteFile(overridePath, data, 0644); err != nil { + 
logger.Errorf("failed to write overrides config to %q: %v", overridePath, err) return errors.Trace(err) } } + logger.Infof("successfully applied config overrides for snap service %q", s.name) return nil } // StartCommands returns a slice of strings that are // shell commands to be executed by a shell which start the service. func (s Service) StartCommands() ([]string, error) { deps := s.app.Prerequisites() + logger.Debugf( + "generating start commands for snap service %q (%d prerequisites)", + s.name, len(deps), + ) commands := make([]string, 0, 1+len(deps)) for _, prerequisite := range deps { - commands = append(commands, prerequisite.StartCommands(s.executable)...) + cmds := prerequisite.StartCommands(s.executable) + logger.Debugf("prerequisite %q start commands: %v", prerequisite.Name(), cmds) + commands = append(commands, cmds...) } - return append(commands, s.app.StartCommands(s.executable)...), nil + appCmds := s.app.StartCommands(s.executable) + logger.Debugf("snap service %q start commands: %v", s.name, appCmds) + commands = append(commands, appCmds...) + logger.Debugf("total start commands for snap service %q: %v", s.name, commands) + return commands, nil } // status returns an interpreted output from the `snap services` command. 
@@ -344,86 +414,135 @@ func (s Service) StartCommands() ([]string, error) { // // (true, true, false, nil) func (s *Service) status() (isInstalled, enabledAtStartup, isCurrentlyActive bool, err error) { + logger.Debugf("querying status for snap service %q", s.Name()) out, err := s.runCommand("services", s.Name()) if err != nil { + logger.Warningf("failed to query snap services for %q: %v", s.Name(), err) return false, false, false, errors.Trace(err) } + logger.Debugf("snap services output for %q: %q", s.Name(), out) for _, line := range strings.Split(out, "\n") { if !strings.HasPrefix(line, s.Name()) { continue } fields := strings.Fields(line) - return true, fields[1] == "enabled", fields[2] == "active", nil + installed := true + enabled := fields[1] == "enabled" + active := fields[2] == "active" + logger.Debugf( + "snap service %q status: installed=%v, enabledAtStartup=%v, active=%v", + s.Name(), installed, enabled, active, + ) + return installed, enabled, active, nil } + logger.Debugf("snap service %q not found in services output", s.Name()) return false, false, false, nil } // Start starts the service, returning nil when successful. // If the service is already running, Start does not restart it. 
func (s Service) Start() error { + logger.Infof("starting snap service %q", s.name) running, err := s.Running() if err != nil { return errors.Trace(err) } if running { + logger.Debugf("snap service %q is already running, skipping start", s.name) return nil } commands, err := s.StartCommands() if err != nil { + logger.Errorf("failed to get start commands for snap service %q: %v", s.name, err) return errors.Trace(err) } - for _, command := range commands { + logger.Infof("executing %d start commands for snap service %q", len(commands), s.name) + for i, command := range commands { + logger.Infof( + "executing start command %d/%d for snap service %q: %q", + i, len(commands), s.name, command, + ) commandParts := strings.Fields(command) out, err := utils.RunCommand(commandParts[0], commandParts[1:]...) if err != nil { if strings.Contains(out, "has no services") { + logger.Debugf("snap %q has no services, skipping command %q", s.name, command) continue } + logger.Errorf( + "start command failed for snap service %q: %q -> %v (output: %v)", + s.name, command, err, out, + ) return errors.Annotatef(err, "%v -> %v", command, out) } + logger.Debugf( + "start command %d/%d completed successfully for snap service %q (output: %q)", + i, len(commands), s.name, out, + ) } + logger.Infof("successfully started snap service %q", s.name) return nil } // Stop stops a running service. Returns nil when the underlying // call to `snap stop <service-name>` exits with error code 0. 
func (s Service) Stop() error { + logger.Infof("stopping snap service %q", s.name) running, err := s.Running() if err != nil { return errors.Trace(err) } if !running { + logger.Debugf("snap service %q is not running, skipping stop", s.name) return nil } args := []string{"stop", s.Name()} - return s.execThenExpect(args, "Stopped.") + if err := s.execThenExpect(args, "Stopped."); err != nil { + logger.Errorf("failed to stop snap service %q: %v", s.name, err) + return err + } + logger.Infof("successfully stopped snap service %q", s.name) + return nil } // Restart restarts the service, or starts if it's not currently // running. // // Restart is part of the service.RestartableService interface func (s Service) Restart() error { + logger.Infof("restarting snap service %q", s.name) args := []string{"restart", s.Name()} - return s.execThenExpect(args, "Restarted.") + if err := s.execThenExpect(args, "Restarted."); err != nil { + logger.Errorf("failed to restart snap service %q: %v", s.name, err) + return err + } + logger.Infof("successfully restarted snap service %q", s.name) + return nil } // execThenExpect calls `snap <commandArgs>...` and then checks // stdout against expectation and snap's exit code. When there's a // mismatch or non-0 exit code, execThenExpect returns an error. func (s Service) execThenExpect(commandArgs []string, expectation string) error { + logger.Debugf("executing snap command %v, expecting %q", commandArgs, expectation) out, err := s.runCommand(commandArgs...) 
if err != nil { + logger.Errorf("snap command %v failed: %v", commandArgs, err) return errors.Trace(err) } if !strings.Contains(out, expectation) { + logger.Errorf( + "snap command %v: expected %q in output, got %q", + commandArgs, expectation, out, + ) return errors.Annotatef(err, `expected "%s", got "%s"`, expectation, out) } + logger.Debugf("snap command %v output matched expectation %q", commandArgs, expectation) return nil } @@ -433,18 +552,35 @@ func (s Service) runCommand(args ...string) (string, error) { } func (s Service) runCommandWithRetry(args ...string) (res string, err error) { + const delay = 5 * time.Second + const attempts = 2 + logger.Debugf( + "running snap command with retry: %v (delay=%v, attempts=%v)", + args, delay, attempts, + ) + attempt := 0 if resErr := retry.Call(retry.CallArgs{ Clock: s.clock, Func: func() error { + attempt++ + logger.Debugf("snap command attempt %d: %v", attempt, args) res, err = s.runCommand(args...) + if err != nil { + logger.Warningf( + "snap command attempt %d failed: %v (output: %q)", + attempt, err, res, + ) + } return errors.Trace(err) }, - Delay: 5 * time.Second, - Attempts: 2, + Delay: delay, + Attempts: attempts, }); resErr != nil { + logger.Errorf("snap command %v failed after %d attempts: %v", args, attempt, resErr) return "", errors.Trace(resErr) } + logger.Debugf("snap command %v succeeded on attempt %d (output: %q)", args, attempt, res) // Named args are set via the retry. return }
snap/snapcraft.yaml+1 −1 modified@@ -1,5 +1,5 @@ name: juju -version: 3.6.16 +version: 3.6.20 summary: Juju - a model-driven operator lifecycle manager for K8s and machines license: AGPL-3.0 description: |
state/allcollections.go+21 −1 modified@@ -592,6 +592,12 @@ func allCollections() CollectionSchema { }}, }, + secretReservationsC: { + indexes: []mgo.Index{{ + Key: []string{"model-uuid", "owner-tag"}, + }}, + }, + secretRevisionsC: { indexes: []mgo.Index{ {Key: []string{"model-uuid", "_id", "revision"}}, @@ -635,7 +641,13 @@ func allCollections() CollectionSchema { secretBackendsRotateC: { global: true, indexes: []mgo.Index{{ - Key: []string{"model-uuid"}, + Key: []string{"name"}, + }}, + }, + + secretBackendIssuedTokensC: { + indexes: []mgo.Index{{ + Key: []string{"model-uuid", "expire-time", "consumer-tag"}, }}, }, @@ -773,6 +785,14 @@ const ( secretRotateC = "secretRotate" secretBackendsC = "secretBackends" secretBackendsRotateC = "secretBackendsRotate" + // secretBackendIssuedTokensC define external token names, their expiry and + // which backend they originate from. Once these expire, they must be + // cleaned up, if they have not already been cleaned up. + secretBackendIssuedTokensC = "secretBackendIssuedTokens" + // secretReservationsC define pre-allocated secret IDs that a unit can use + // to create a secret. These must be recorded to ensure they are included in + // the issued token ACLs for external backends. + secretReservationsC = "secretReservations" ) // watcherIgnoreList contains all the collections in mongo that should not be watched by the
state/application.go+15 −0 modified@@ -3020,6 +3020,10 @@ func (a *Application) removeUnitOps(u *Unit, asserts bson.D, op *ForcedOperation if op.FatalError(err) { return nil, errors.Trace(err) } + secretReservationOps, err := a.st.removeSecretReservationOps(u.Tag()) + if op.FatalError(err) { + return nil, errors.Trace(err) + } observedFieldsMatch := bson.D{ {"charmurl", u.doc.CharmURL}, @@ -3049,6 +3053,7 @@ func (a *Application) removeUnitOps(u *Unit, asserts bson.D, op *ForcedOperation ops = append(ops, secretConsumerPermissionsOps...) ops = append(ops, secretOwnerLabelOps...) ops = append(ops, secretConsumerLabelOps...) + ops = append(ops, secretReservationOps...) m, err := a.st.Model() if err != nil { @@ -3711,6 +3716,16 @@ func (a *Application) Status() (status.StatusInfo, error) { return info, nil } +// OperatorStatus returns the status of the application's operator, which is +// only used on CAAS models. +func (a *Application) OperatorStatus() (status.StatusInfo, error) { + info, err := getStatus(a.st.db(), a.globalKey(), "operator") + if err != nil { + return status.StatusInfo{}, errors.Trace(err) + } + return info, nil +} + // CheckApplicationExpectsWorkload checks if the application expects workload or not. func CheckApplicationExpectsWorkload(m *Model, appName string) (bool, error) { cm, err := m.CAASModel()
state/application_test.go+7 −3 modified@@ -4175,19 +4175,23 @@ func (s *ApplicationSuite) TestWatchStorageConstraints(c *gc.C) { Charm: newCh, CharmOrigin: defaultCharmOrigin(newCh.URL()), } - err = app.SetCharm(cfg) + // Use a fresh Application for updates so the watcher goroutine and + // this test do not concurrently read/write the same application doc. + appForUpdates, err := s.State.Application(app.Name()) + c.Assert(err, jc.ErrorIsNil) + err = appForUpdates.SetCharm(cfg) c.Assert(err, jc.ErrorIsNil) // Make another change, check one event. constraints = map[string]state.StorageConstraints{ "data0": {Count: 6, Size: 2048, Pool: "loop"}, } - err = app.UpdateStorageConstraints(constraints) + err = appForUpdates.UpdateStorageConstraints(constraints) c.Assert(err, jc.ErrorIsNil) appWc.AssertOneChange() // Check the watcher does not react when the content remains the same. - err = app.UpdateStorageConstraints(constraints) + err = appForUpdates.UpdateStorageConstraints(constraints) c.Assert(err, jc.ErrorIsNil) appWc.AssertNoChange()
state/controller_test.go+0 −3 modified@@ -9,7 +9,6 @@ import ( "github.com/juju/clock" "github.com/juju/collections/set" "github.com/juju/errors" - mgotesting "github.com/juju/mgo/v3/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -104,8 +103,6 @@ func (s *ControllerSuite) TestControllerConfig(c *gc.C) { func (s *ControllerSuite) TestPing(c *gc.C) { c.Assert(s.Controller.Ping(), gc.IsNil) - mgotesting.MgoServer.Restart() - c.Assert(s.Controller.Ping(), gc.NotNil) } func (s *ControllerSuite) TestUpdateControllerConfig(c *gc.C) {
state/migration_export_test.go+0 −1 modified@@ -878,7 +878,6 @@ func (s *MigrationExportSuite) TestCharmDataMigrated(c *gc.C) { Name: "all-charm-data", Series: "jammy", }) - fmt.Printf("%#v", ch.Meta()) f.MakeApplication(c, &factory.ApplicationParams{ Charm: ch,
state/migration_internal_test.go+9 −0 modified@@ -213,6 +213,15 @@ func (s *MigrationSuite) TestKnownCollections(c *gc.C) { // sshConnRequestsC is a new collection and doesn't need to be // migrated. sshConnRequestsC, + + // secretBackendIssuedTokensC does not need to be migrated as the units + // using the tokens will only be migrated after they have been quiesced. + secretBackendIssuedTokensC, + + // secretReservationsC does not need to be migrated as the reservations + // are only valid for the duration of a hook call, units are not running + // a hook when they are quiesced for migration. + secretReservationsC, ) // THIS SET WILL BE REMOVED WHEN MIGRATIONS ARE COMPLETE
state/secrets.go+687 −18 modified@@ -6,6 +6,7 @@ package state import ( "fmt" "regexp" + "slices" "strconv" "strings" "time" @@ -22,6 +23,7 @@ import ( "github.com/juju/juju/core/leadership" "github.com/juju/juju/core/secrets" corewatcher "github.com/juju/juju/core/watcher" + "github.com/juju/juju/mongo" "github.com/juju/juju/mongo/utils" "github.com/juju/juju/state/watcher" ) @@ -75,8 +77,8 @@ type ChangeSecretBackendParams struct { // SecretsFilter holds attributes to match when listing secrets. type SecretsFilter struct { - URI *secrets.URI - Label *string + URIs []*secrets.URI + Labels []string OwnerTags []names.Tag ConsumerTags []names.Tag } @@ -88,6 +90,7 @@ type SecretsStore interface { DeleteSecret(*secrets.URI, ...int) ([]secrets.ValueRef, error) GetSecret(*secrets.URI) (*secrets.SecretMetadata, error) GetSecretValue(*secrets.URI, int) (secrets.SecretValue, *secrets.ValueRef, error) + ListReservedSecrets([]names.Tag) ([]*secrets.URI, error) ListSecrets(SecretsFilter) ([]*secrets.SecretMetadata, error) ListModelSecrets(bool) (map[string]set.Strings, error) ListSecretRevisions(uri *secrets.URI) ([]*secrets.SecretRevisionMetadata, error) @@ -120,13 +123,70 @@ type SecretsStore interface { GetOwnedSecretRevisionsByIDAsLeaderUnit( unit names.UnitTag, uri *secrets.URI, ) ([]int, error) + + // ReserveSecret sets aside the provided secret id for the given owner. This + // secret ID can then only be used by the given owner to create a secret. Future + // versions of Juju should replace this with a token based approach to cease the + // need for a database entry. + ReserveSecret(uri *secrets.URI, owner names.Tag) error + + // CreateSecretBackendIssuedToken inserts the given secret backend issued token + // into state. An error is returned if the consumer's life is Dead or if the + // secret backend issued token already exists. 
+ CreateSecretBackendIssuedToken( + token SecretBackendIssuedToken, + ) error + + // NextSecretBackendIssuedTokenExpiry returns the time of the next secret + // backend issued token expiry. + NextSecretBackendIssuedTokenExpiry() (time.Time, error) + + // ListSecretBackendIssuedTokenUntil returns all the issued secret backend + // tokens that are valid until the given time. + ListSecretBackendIssuedTokenUntil( + until time.Time, + ) ([]SecretBackendIssuedToken, error) + + // ListSecretBackendIssuedTokenUntilForConsumer returns the issued secret + // backend tokens for the given secret backend token consumer tag that are valid + // until the given time. + ListSecretBackendIssuedTokenUntilForConsumer( + until time.Time, consumer names.Tag, + ) ([]SecretBackendIssuedToken, error) + + // RemoveSecretBackendIssuedTokens removes the secret backend tokens for the + // given secret backend token UUIDs. + RemoveSecretBackendIssuedTokens(uuids []string) error + + // ExpireSecretBackendIssuedTokensForConsumer returns a ModelOperation that + // sets the expire time of all currently non-expired secret backend issued + // tokens for the given consumer to now. + ExpireSecretBackendIssuedTokensForConsumer(consumer names.Tag) ModelOperation + + // RemoveSecretReservations removes all secret reservations that are held by + // the provided owner. + RemoveSecretReservations(owner names.Tag) ModelOperation + + // WatchSecretBackendIssuedTokenExpiry returns a state strings watcher that + // is fired when there is new secret backend tokens that must expire at the + // RFC3339 encoded timestamp in the string. + WatchSecretBackendIssuedTokenExpiry() StringsWatcher } // NewSecrets creates a new mongo backed secrets store. func NewSecrets(st *State) *secretsStore { return &secretsStore{st: st} } +// secretReservationDoc is the bson representation for a secret reservation. 
+type secretReservationDoc struct { + DocID string `bson:"_id"` + + OwnerTag string `bson:"owner-tag"` + + CreateTime time.Time `bson:"create-time"` +} + type secretMetadataDoc struct { DocID string `bson:"_id"` @@ -300,6 +360,187 @@ func (s *secretsStore) secretRevisionDoc(uri *secrets.URI, owner string, revisio return doc } +// ReserveSecret sets aside the provided secret id for the given owner. This +// secret ID can then only be used by the given owner to create a secret. Future +// versions of Juju should replace this with a token based approach to cease the +// need for a database entry. +func (s *secretsStore) ReserveSecret(uri *secrets.URI, owner names.Tag) error { + if uri == nil || owner == nil { + return errors.New("cannot reserve secret") + } + entity, scopeCollName, scopeDocID, err := s.st.findSecretEntity(owner) + if err != nil { + return errors.Annotate(err, "invalid owner reference") + } + if entity.Life() != Alive { + return errors.Errorf( + "cannot reserve secret for owner %q which is not alive", owner) + } + + secretMetadataCollection, closer := s.st.db().GetCollection(secretMetadataC) + defer closer() + metadata := struct { + DocID string `bson:"_id"` + }{} + err = secretMetadataCollection.FindId(uri.ID).One(&metadata) + if err != nil && !errors.Is(err, mgo.ErrNotFound) { + return errors.Annotatef( + err, "checking existence of secret %q before reserving", uri.ID, + ) + } else if err == nil { + return errors.AlreadyExistsf("secret %q", uri.ID) + } + + doc := secretReservationDoc{ + DocID: uri.ID, + OwnerTag: owner.String(), + CreateTime: s.st.nowToTheSecond(), + } + isOwnerAliveOp := txn.Op{ + C: scopeCollName, + Id: scopeDocID, + Assert: isAliveDoc, + } + buildTxn := func(attempt int) ([]txn.Op, error) { + current, err := s.getSecretReservation(uri) + if err != nil && !errors.Is(err, errors.NotFound) { + return nil, err + } else if err == nil { + if current.OwnerTag == owner.String() { + return nil, jujutxn.ErrNoOperations + } + return nil, 
errors.BadRequestf( + "secret %q is already reserved", uri.ID, + ) + } + + err = secretMetadataCollection.FindId(uri.ID).One(&metadata) + if err != nil && !errors.Is(err, mgo.ErrNotFound) { + return nil, errors.Annotatef( + err, "checking existence of secret %q before reserving", + uri.ID, + ) + } else if err == nil { + return nil, errors.AlreadyExistsf("secret %q", uri.ID) + } + + ops := []txn.Op{ + isOwnerAliveOp, + } + ops = append(ops, txn.Op{ + C: secretMetadataC, + Id: uri.ID, + Assert: txn.DocMissing, + }, txn.Op{ + C: secretReservationsC, + Id: uri.ID, + Assert: txn.DocMissing, + Insert: doc, + }) + return ops, nil + } + err = s.st.db().Run(buildTxn) + if err != nil { + return errors.Trace(err) + } + return nil +} + +// ListReservedSecrets finds all reserved secret URIs for the provided owners. +func (s *secretsStore) ListReservedSecrets( + ownerTags []names.Tag, +) ([]*secrets.URI, error) { + if len(ownerTags) == 0 { + return nil, errors.NotValidf("no owner tags provided") + } + + var wantTags []string + for _, v := range ownerTags { + wantTags = append(wantTags, v.String()) + } + + secretReservationColl, closer := s.st.db().GetCollection(secretReservationsC) + defer closer() + + res := secretReservationColl.Find(bson.M{ + "owner-tag": bson.M{"$in": wantTags}, + }).Iter() + + var secretURIs []*secrets.URI + + var doc secretReservationDoc + for res.Next(&doc) { + uri, err := secrets.ParseURI(s.st.localID(doc.DocID)) + if err != nil { + return nil, errors.Trace(err) + } + secretURIs = append(secretURIs, uri) + } + + return secretURIs, errors.Trace(res.Close()) +} + +// getSecretReservation returns the secretReservationDoc for the given secret ID +// if it is found, otherwise it returns an [errors.NotFound]. 
+func (s *secretsStore) getSecretReservation(uri *secrets.URI) (secretReservationDoc, error) { + var doc secretReservationDoc + secretReservationColl, closer := s.st.db().GetCollection(secretReservationsC) + defer closer() + err := secretReservationColl.FindId(uri.ID).One(&doc) + if errors.Is(err, mgo.ErrNotFound) { + return secretReservationDoc{}, errors.NotFound + } else if err != nil { + return secretReservationDoc{}, errors.Annotatef(err, "cannot get secret reservation %q", uri.ID) + } + return doc, nil +} + +// removeSecretReservationsModelOp is a model operation for removing a secret +// reservation. +type removeSecretReservationsModelOp struct { + s *secretsStore + owner names.Tag +} + +// RemoveSecretReservations removes all secret reservations that are held by +// the provided owner. +func (s *secretsStore) RemoveSecretReservations(owner names.Tag) ModelOperation { + return &removeSecretReservationsModelOp{ + s: s, + owner: owner, + } +} + +func (op *removeSecretReservationsModelOp) Build(attempt int) ([]txn.Op, error) { + return op.s.st.removeSecretReservationOps(op.owner) +} + +func (op *removeSecretReservationsModelOp) Done(err error) error { + return err +} + +// removeSecretReservationOps returns the mongo operations to remove all secret +// reservations for the given owner. +func (st *State) removeSecretReservationOps(owner names.Tag) ([]txn.Op, error) { + secretReservationColl, closer := st.db().GetCollection(secretReservationsC) + defer closer() + + var ops []txn.Op + res := secretReservationColl.Find(bson.M{ + "owner-tag": owner.String(), + }).Iter() + defer closer() + var doc secretReservationDoc + for res.Next(&doc) { + ops = append(ops, txn.Op{ + C: secretReservationsC, + Id: doc.DocID, + Remove: true, + }) + } + return ops, errors.Trace(res.Close()) +} + // CreateSecret creates a new secret. 
func (s *secretsStore) CreateSecret(uri *secrets.URI, p CreateSecretParams) (*secrets.SecretMetadata, error) { if len(p.Data) == 0 && p.ValueRef == nil { @@ -326,6 +567,43 @@ func (s *secretsStore) CreateSecret(uri *secrets.URI, p CreateSecretParams) (*se Assert: isAliveDoc, } + // checkReservation ensures that if a reservation exists, the secret is + // being created by the reservation owner. + checkReservation := func() (bool, any, error) { + reservation, err := s.getSecretReservation(uri) + if errors.Is(err, errors.NotFound) { + return false, txn.DocMissing, nil + } else if err != nil { + return false, nil, errors.Trace(err) + } + reservationOwner, err := names.ParseTag(reservation.OwnerTag) + if err != nil { + return false, nil, errors.Annotate(err, "parsing secret reservation owner") + } + if owner.Kind() == names.ApplicationTagKind && + reservationOwner.Kind() == names.UnitTagKind { + // If a unit reserved a secret, we cannot determine it was expected + // to be used by an application secret or a unit secret. 
+ app, _ := names.UnitApplication(reservationOwner.Id()) + if app != owner.Id() { + return false, nil, errors.NotValidf( + "cannot create secret %q because %q has reserved it instead of %q", + uri.ID, reservation.OwnerTag, metadataDoc.OwnerTag, + ) + } + } else if reservationOwner != owner { + return false, nil, errors.NotValidf( + "cannot create secret %q because %q has reserved it instead of %q", + uri.ID, reservation.OwnerTag, metadataDoc.OwnerTag, + ) + } + return true, bson.D{{"owner-tag", reservation.OwnerTag}}, nil + } + hasReservation, reservationAssertion, err := checkReservation() + if err != nil { + return nil, errors.Trace(err) + } + buildTxn := func(attempt int) ([]txn.Op, error) { var ops []txn.Op if p.Label != nil { @@ -339,9 +617,18 @@ func (s *secretsStore) CreateSecret(uri *secrets.URI, p CreateSecretParams) (*se if _, _, err := s.getSecretValue(uri, revision, false); err == nil { return nil, errors.AlreadyExistsf("secret value for %q", uri.String()) } + hasReservation, reservationAssertion, err = checkReservation() + if err != nil { + return nil, errors.Trace(err) + } } ops = append(ops, []txn.Op{ { + C: secretReservationsC, + Id: metadataDoc.DocID, + Assert: reservationAssertion, + Remove: hasReservation, + }, { C: secretMetadataC, Id: metadataDoc.DocID, Assert: txn.DocMissing, @@ -959,11 +1246,15 @@ func (s *secretsStore) ListSecrets(filter SecretsFilter) ([]*secrets.SecretMetad var docs []secretMetadataDoc q := bson.D{} - if filter.URI != nil { - q = append(q, bson.DocElem{"_id", filter.URI.ID}) + if len(filter.URIs) != 0 { + ids := make([]string, 0, len(filter.URIs)) + for _, uri := range filter.URIs { + ids = append(ids, uri.ID) + } + q = append(q, bson.DocElem{"_id", bson.D{{"$in", ids}}}) } - if filter.Label != nil { - q = append(q, bson.DocElem{"label", *filter.Label}) + if len(filter.Labels) != 0 { + q = append(q, bson.DocElem{"label", bson.D{{"$in", filter.Labels}}}) } if len(filter.OwnerTags) > 0 { owners := make([]string, 
len(filter.OwnerTags)) @@ -3006,8 +3297,19 @@ func (st *State) findSecretEntity(tag names.Tag) (entity Lifer, collName, docID docID = id case names.UnitTag: entity, err = st.Unit(id) - collName = unitsC - docID = id + if err == nil { + docID = id + collName = unitsC + } else if errors.IsNotFound(err) { + // If this unit is from a remote application, find that instead. + id, err = names.UnitApplication(id) + if err != nil { + return nil, "", "", err + } + entity, err = st.RemoteApplication(id) + docID = id + collName = remoteApplicationsC + } case names.ApplicationTag: entity, err = st.Application(id) docID = id @@ -3077,19 +3379,12 @@ func (st *State) GrantSecretAccess(uri *secrets.URI, p SecretAccessParams) (err return errors.Errorf("cannot grant access to secret in scope of %q which is not alive", p.Scope) } subjectEntity, subjectCollName, subjectDocID, err := st.findSecretEntity(p.Subject) - if p.Subject.Kind() == names.UnitTagKind && errors.Is(err, errors.NotFound) { - unitApp, _ := names.UnitApplication(p.Subject.Id()) - _, err2 := st.RemoteApplication(unitApp) - if err2 != nil && !errors.Is(err2, errors.NotFound) { - return errors.Trace(err2) - } - if err2 == nil { - return errors.NotSupportedf("sharing secrets with a unit across a cross model relation") - } - } if err != nil { return errors.Annotate(err, "invalid subject reference") } + if p.Subject.Kind() == names.UnitTagKind && subjectCollName == remoteApplicationsC { + return errors.NotSupportedf("sharing secrets with a unit across a cross model relation") + } if subjectEntity.Life() != Alive { return errors.Errorf("cannot grant dying %q access to secret", p.Subject) } @@ -3717,3 +4012,377 @@ func (w *secretsExpiryWatcher) loop() (err error) { } } } + +type secretIssuedTokenDoc struct { + DocID string `bson:"_id"` + + ConsumerTag string `bson:"consumer-tag"` + ExpireTime time.Time `bson:"expire-time"` + BackendID string `bson:"backend-id"` +} + +// CreateSecretBackendIssuedToken inserts the given secret 
backend issued token +// into state. An error is returned if the consumer's life is Dead or if the +// secret backend issued token already exists. +func (s *secretsStore) CreateSecretBackendIssuedToken( + token SecretBackendIssuedToken, +) error { + if token.UUID == "" || + token.ExpireTime.IsZero() || + token.BackendID == "" || + token.Consumer == nil { + return errors.New( + "creating secret backend issued token failed due to missing values", + ) + } + tokenColl, closer := s.st.db().GetCollection(secretBackendIssuedTokensC) + defer closer() + + check := func() (string, string, error) { + entity, entityC, entityDocID, err := s.st.findSecretEntity(token.Consumer) + if err != nil { + return "", "", errors.Annotate(err, "invalid secret token receiver") + } + if entity.Life() == Dead { + return "", "", errors.New( + "creating secret backend issued token: consumer is dead", + ) + } + existing := secretIssuedTokenDoc{} + err = tokenColl.FindId(token.UUID).One(&existing) + if errors.Is(err, mgo.ErrNotFound) { + return entityC, entityDocID, nil + } else if err != nil { + return "", "", errors.Annotate( + err, "checking existing secret issued backend token", + ) + } + if existing.ConsumerTag != token.Consumer.String() { + return "", "", errors.Unauthorizedf( + "%s secret issued backend token", token.UUID, + ) + } + return entityC, entityDocID, errors.AlreadyExists + } + entityC, entityDocID, err := check() + if errors.Is(err, errors.AlreadyExists) { + return nil + } else if err != nil { + return errors.Trace(err) + } + + doc := secretIssuedTokenDoc{ + DocID: token.UUID, + BackendID: token.BackendID, + ExpireTime: token.ExpireTime.UTC().Truncate(time.Second), + ConsumerTag: token.Consumer.String(), + } + buildTxn := func(attempt int) ([]txn.Op, error) { + var err error + if attempt > 0 { + entityC, entityDocID, err = check() + if errors.Is(err, errors.AlreadyExists) { + return nil, jujutxn.ErrNoOperations + } else if err != nil { + return nil, errors.Trace(err) + } + } + 
ops := []txn.Op{{ + C: entityC, + Id: entityDocID, + Assert: notDeadDoc, + }, { + C: secretBackendIssuedTokensC, + Id: doc.DocID, + Assert: txn.DocMissing, + Insert: doc, + }} + return ops, nil + } + err = s.st.db().Run(buildTxn) + if err != nil { + return errors.Trace(err) + } + return nil +} + +// SecretBackendIssuedToken holds metadata about a secret backend authentication +// token that was issued for a specific consumer. The token is not contained in +// this metadata, but does have a unique identifier that was used when the token +// was issued. This allows a secret backend provider to clean up issued tokens. +type SecretBackendIssuedToken struct { + // UUID must be a random UUID that was used when the secret backend issued + // token was created, allowing the provider to find this token when it will + // expire. + UUID string + + // ExpireTime is the point in the future where the token that was created by + // the secret backend will expire and needs to be cleaned up. + ExpireTime time.Time + + // BackendID is the ID of the secret backend that this token was created by. + BackendID string + + // Consumer is a tag that represents the entity which the secret backend + // issued token was created for. + Consumer names.Tag +} + +// NextSecretBackendIssuedTokenExpiry returns the time of the next secret +// backend issued token expiry. +func (s *secretsStore) NextSecretBackendIssuedTokenExpiry() (time.Time, error) { + collection, closer := s.st.db().GetCollection(secretBackendIssuedTokensC) + defer closer() + + var doc secretIssuedTokenDoc + err := collection.Find(nil).Sort("expire-time").One(&doc) + if errors.Is(err, mgo.ErrNotFound) { + return time.Time{}, nil + } else if err != nil { + return time.Time{}, errors.Annotate( + err, "getting next secret backend issued token expiry time", + ) + } + return doc.ExpireTime, nil +} + +// ListSecretBackendIssuedTokenUntil returns all the issued secret backend +// tokens that are valid until the given time. 
+func (s *secretsStore) ListSecretBackendIssuedTokenUntil( + until time.Time, +) ([]SecretBackendIssuedToken, error) { + return s.listSecretBackendIssuedTokenUntil(bson.M{ + "expire-time": bson.M{"$lte": until.UTC().Truncate(time.Second)}, + }) +} + +// ListSecretBackendIssuedTokenUntilForConsumer returns the issued secret +// backend tokens for the given secret backend token consumer tag that are valid +// until the given time. +func (s *secretsStore) ListSecretBackendIssuedTokenUntilForConsumer( + until time.Time, consumer names.Tag, +) ([]SecretBackendIssuedToken, error) { + return s.listSecretBackendIssuedTokenUntil(bson.M{ + "expire-time": bson.M{"$lte": until.UTC().Truncate(time.Second)}, + "consumer-tag": consumer.String(), + }) +} + +// listSecretBackendIssuedTokenUntil lists all the secret backend tokens up to +// the given query, sorted by expire-time (oldest to newest). +func (s *secretsStore) listSecretBackendIssuedTokenUntil( + query any, +) ([]SecretBackendIssuedToken, error) { + collection, closer := s.st.db().GetCollection(secretBackendIssuedTokensC) + defer closer() + + iter := collection.Find(query).Sort("expire-time").Iter() + + var res []SecretBackendIssuedToken + + var doc secretIssuedTokenDoc + for iter.Next(&doc) { + uuid := s.st.localID(doc.DocID) + consumerTag, err := names.ParseTag(doc.ConsumerTag) + if err != nil { + return nil, errors.Annotatef( + err, "invalid consumer tag for secret backend issued token %q", + uuid, + ) + } + res = append(res, SecretBackendIssuedToken{ + UUID: uuid, + ExpireTime: doc.ExpireTime, + BackendID: doc.BackendID, + Consumer: consumerTag, + }) + } + + return res, nil +} + +// RemoveSecretBackendIssuedTokens removes the secret backend tokens for the +// given secret backend token UUIDs. 
func (s *secretsStore) RemoveSecretBackendIssuedTokens(uuids []string) error {
	// Nothing to do; avoid running an empty transaction.
	if len(uuids) == 0 {
		return nil
	}

	ops := make([]txn.Op, 0, len(uuids))
	for _, uuid := range uuids {
		ops = append(ops, txn.Op{
			C:      secretBackendIssuedTokensC,
			Id:     uuid,
			Remove: true,
		})
	}

	err := s.st.db().RunTransaction(ops)
	if err != nil {
		return errors.Trace(err)
	}

	return nil
}

// WatchSecretBackendIssuedTokenExpiry returns a state strings watcher that
// is fired when there are new secret backend tokens that must expire at the
// RFC3339 encoded timestamp in the string.
func (s *secretsStore) WatchSecretBackendIssuedTokenExpiry() StringsWatcher {
	return newSecretBackendIssuedTokenExpiryWatcher(s.st)
}

// secretBackendIssuedTokenExpiryWatcher reports expiry times of new issued
// tokens as an RFC3339 timestamp string.
type secretBackendIssuedTokenExpiryWatcher struct {
	commonWatcher
	// coll yields the secretBackendIssuedTokens collection plus a closer.
	coll func() (mongo.Collection, func())
	// out delivers batches of RFC3339 timestamps to Changes() consumers.
	out chan []string
}

// Compile-time check that the watcher satisfies the Watcher interface.
var _ Watcher = (*secretBackendIssuedTokenExpiryWatcher)(nil)

// newSecretBackendIssuedTokenExpiryWatcher returns a state strings watcher that
// is fired when there are new secret backend tokens that must expire at the
// RFC3339 encoded timestamp in the string.
func newSecretBackendIssuedTokenExpiryWatcher(st *State) StringsWatcher {
	w := &secretBackendIssuedTokenExpiryWatcher{
		commonWatcher: newCommonWatcher(st),
		coll:          collFactory(st.db(), secretBackendIssuedTokensC),
		out:           make(chan []string),
	}
	// Run the event loop under the tomb so Stop/Kill tears it down; the
	// output channel is closed when the loop exits.
	w.tomb.Go(func() error {
		defer close(w.out)
		return w.loop()
	})
	return w
}

// Changes returns the strings channel for the watcher.
func (w *secretBackendIssuedTokenExpiryWatcher) Changes() <-chan []string {
	return w.out
}

// initial returns the initial changes for the expiry watcher. Each string is an
// RFC3339 encoded timestamp.
func (w *secretBackendIssuedTokenExpiryWatcher) initial() ([]string, error) {
	coll, closer := w.coll()
	defer closer()
	// Distinct collapses duplicate expiry times into one entry each.
	var expireTimes []time.Time
	err := coll.Find(nil).Distinct("expire-time", &expireTimes)
	if err != nil {
		return nil, errors.Trace(err)
	}
	var changes []string
	for _, ts := range expireTimes {
		changes = append(changes, ts.UTC().Format(time.RFC3339))
	}
	return changes, nil
}

// loop watches the secretBackendIssuedTokens collection for new expire-time
// values and sends them on the watcher as RFC3339 encoded timestamps.
func (w *secretBackendIssuedTokenExpiryWatcher) loop() error {
	in := make(chan watcher.Change)

	w.watcher.WatchCollection(secretBackendIssuedTokensC, in)
	defer w.watcher.UnwatchCollection(secretBackendIssuedTokensC, in)

	changes, err := w.initial()
	if err != nil {
		return errors.Trace(err)
	}

	coll, closer := w.coll()
	defer closer()

	// out is not-nil when a notification should be sent, it is set to the
	// output initially to ensure an initial notification is sent. When nil,
	// the send case below can never be selected (send on a nil channel
	// blocks forever), which pauses output until new changes accumulate.
	out := w.out
	for {
		select {
		case <-w.watcher.Dead():
			return stateWatcherDeadError(w.watcher.Err())
		case <-w.tomb.Dying():
			return tomb.ErrDying
		case change := <-in:
			// A negative Revno indicates the document was removed —
			// presumably (NOTE(review): confirm against the watcher
			// package convention); removals produce no expiry event.
			if change.Revno < 0 {
				continue
			}
			var doc struct {
				ExpireTime time.Time `bson:"expire-time"`
			}
			err := coll.FindId(change.Id).One(&doc)
			if errors.Is(err, mgo.ErrNotFound) {
				// Document vanished between notification and read.
				continue
			} else if err != nil {
				return errors.Trace(err)
			}
			// Arm the send case and accumulate the new timestamp,
			// keeping the pending batch sorted and de-duplicated.
			out = w.out
			changes = append(changes, doc.ExpireTime.UTC().Format(time.RFC3339))
			slices.Sort(changes)
			changes = slices.Compact(changes)
		case out <- changes:
			// Batch delivered: disarm the send case until the next change.
			out = nil
			changes = nil
		}
	}
}

// expireSecretBackendIssuedTokensModelOp is a model operation for expiring all
// secret backend issued tokens for the given consumer.
+type expireSecretBackendIssuedTokensModelOp struct { + s *secretsStore + consumer names.Tag +} + +// ExpireSecretBackendIssuedTokensForConsumer returns a ModelOperation that +// sets the expire time of all currently non-expired secret backend issued +// tokens for the given consumer to now. +func (s *secretsStore) ExpireSecretBackendIssuedTokensForConsumer( + consumer names.Tag, +) ModelOperation { + return &expireSecretBackendIssuedTokensModelOp{ + s: s, + consumer: consumer, + } +} + +func (op *expireSecretBackendIssuedTokensModelOp) Build( + attempt int, +) ([]txn.Op, error) { + return op.s.expireSecretBackendIssuedTokensOps(op.consumer) +} + +func (op *expireSecretBackendIssuedTokensModelOp) Done(err error) error { + return err +} + +// expireSecretBackendIssuedTokensOps returns the operations to remove all the +// currently known secret backend issued tokens. +func (s *secretsStore) expireSecretBackendIssuedTokensOps( + consumer names.Tag, +) ([]txn.Op, error) { + coll, closer := s.st.db().GetCollection(secretBackendIssuedTokensC) + defer closer() + + now := s.st.clock().Now().UTC().Truncate(time.Second) + + var ops []txn.Op + res := coll.Find(bson.M{ + "consumer-tag": consumer.String(), + }).Iter() + defer closer() + var doc secretIssuedTokenDoc + for res.Next(&doc) { + if doc.ExpireTime.Before(now) { + continue + } + ops = append(ops, txn.Op{ + C: secretBackendIssuedTokensC, + Id: doc.DocID, + Update: bson.D{{"$set", bson.D{{"expire-time", now}}}}, + }) + } + return ops, errors.Trace(res.Close()) +}
state/secrets_test.go+756 −2 modified@@ -5,9 +5,12 @@ package state_test import ( "fmt" + "slices" "sort" + "strings" "time" + "github.com/google/uuid" "github.com/juju/charm/v12" "github.com/juju/collections/set" "github.com/juju/errors" @@ -60,6 +63,94 @@ func ptr[T any](v T) *T { return &v } +func (s *SecretsSuite) TestReserveSecret(c *gc.C) { + uri := secrets.NewURI() + + err := s.store.ReserveSecret(uri, s.owner.Tag()) + c.Assert(err, jc.ErrorIsNil) + + // Idempotent + err = s.store.ReserveSecret(uri, s.owner.Tag()) + c.Assert(err, jc.ErrorIsNil) + + // Already reserved + verboten := s.Factory.MakeApplication(c, &factory.ApplicationParams{ + Name: "someone-else", + }) + err = s.store.ReserveSecret(uri, verboten.Tag()) + c.Assert(err, jc.ErrorIs, errors.BadRequest) +} + +func (s *SecretsSuite) TestListReservedSecret(c *gc.C) { + uris := []*secrets.URI{ + secrets.NewURI(), + secrets.NewURI(), + secrets.NewURI(), + } + + otherApp := s.Factory.MakeApplication(c, &factory.ApplicationParams{ + Name: "someone-else", + }) + + err := s.store.ReserveSecret(uris[0], s.owner.Tag()) + c.Assert(err, jc.ErrorIsNil) + err = s.store.ReserveSecret(uris[1], s.ownerUnit.Tag()) + c.Assert(err, jc.ErrorIsNil) + err = s.store.ReserveSecret(uris[2], otherApp.Tag()) + c.Assert(err, jc.ErrorIsNil) + + reserved, err := s.store.ListReservedSecrets([]names.Tag{s.owner.Tag()}) + c.Assert(err, jc.ErrorIsNil) + c.Check(reserved, jc.SameContents, uris[0:1]) + + reserved, err = s.store.ListReservedSecrets([]names.Tag{s.ownerUnit.Tag()}) + c.Assert(err, jc.ErrorIsNil) + c.Check(reserved, jc.SameContents, uris[1:2]) + + reserved, err = s.store.ListReservedSecrets([]names.Tag{otherApp.Tag()}) + c.Assert(err, jc.ErrorIsNil) + c.Check(reserved, jc.SameContents, uris[2:3]) + + reserved, err = s.store.ListReservedSecrets([]names.Tag{ + s.owner.Tag(), s.ownerUnit.Tag(), otherApp.Tag(), + }) + c.Assert(err, jc.ErrorIsNil) + c.Check(reserved, jc.SameContents, uris) +} + +func (s *SecretsSuite) 
TestExchangeReservedSecret(c *gc.C) { + uri := secrets.NewURI() + + err := s.store.ReserveSecret(uri, s.owner.Tag()) + c.Assert(err, jc.ErrorIsNil) + + reserved, err := s.store.ListReservedSecrets([]names.Tag{s.owner.Tag()}) + c.Assert(err, jc.ErrorIsNil) + c.Check(reserved, jc.SameContents, []*secrets.URI{uri}) + + _, err = s.store.CreateSecret(uri, state.CreateSecretParams{ + Version: 1, + Owner: s.owner.Tag(), + UpdateSecretParams: state.UpdateSecretParams{ + LeaderToken: &fakeToken{}, + RotatePolicy: ptr(secrets.RotateNever), + Description: ptr("my secret"), + Label: ptr("foobar"), + Params: nil, + Data: map[string]string{"foo": "bar"}, + Checksum: "7a38bf81f383f69433ad6e900d35b3e2385593f76a7b7ab5d4355b8ba41ee24b", + }, + }) + c.Assert(err, jc.ErrorIsNil) + + err = s.store.ReserveSecret(uri, s.owner.Tag()) + c.Assert(err, jc.ErrorIs, errors.AlreadyExists) + + reserved, err = s.store.ListReservedSecrets([]names.Tag{s.owner.Tag()}) + c.Assert(err, jc.ErrorIsNil) + c.Check(reserved, jc.SameContents, []*secrets.URI{}) +} + func (s *SecretsSuite) TestCreate(c *gc.C) { uri := secrets.NewURI() now := s.Clock.Now().Round(time.Second).UTC() @@ -496,7 +587,7 @@ func (s *SecretsSuite) TestListByURI(c *gc.C) { c.Assert(err, jc.ErrorIsNil) list, err := s.store.ListSecrets(state.SecretsFilter{ - URI: uri, + URIs: []*secrets.URI{uri}, }) c.Assert(err, jc.ErrorIsNil) mc := jc.NewMultiChecker() @@ -518,6 +609,90 @@ func (s *SecretsSuite) TestListByURI(c *gc.C) { }}) } +func (s *SecretsSuite) TestListByURIs(c *gc.C) { + uris := []*secrets.URI{secrets.NewURI(), secrets.NewURI(), secrets.NewURI()} + now := s.Clock.Now().Round(time.Second).UTC() + next := now.Add(time.Minute).Round(time.Second).UTC() + expire := now.Add(time.Hour).Round(time.Second).UTC() + p := state.CreateSecretParams{ + Version: 1, + Owner: s.owner.Tag(), + UpdateSecretParams: state.UpdateSecretParams{ + LeaderToken: &fakeToken{}, + RotatePolicy: ptr(secrets.RotateDaily), + NextRotateTime: ptr(next), + 
Description: ptr("my secret"), + Label: ptr("foobar"), + ExpireTime: ptr(expire), + Params: nil, + Data: map[string]string{"foo": "bar"}, + Checksum: "7a38bf81f383f69433ad6e900d35b3e2385593f76a7b7ab5d4355b8ba41ee24b", + }, + } + for i, uri := range uris { + p.UpdateSecretParams.Label = ptr(fmt.Sprintf("foobar%d", i)) + _, err := s.store.CreateSecret(uri, p) + c.Assert(err, jc.ErrorIsNil) + } + + // Create another secret to ensure it is excluded. + uri2 := secrets.NewURI() + p.Owner = names.NewApplicationTag("wordpress") + _, err := s.store.CreateSecret(uri2, p) + c.Assert(err, jc.ErrorIsNil) + + list, err := s.store.ListSecrets(state.SecretsFilter{ + URIs: uris, + }) + c.Assert(err, jc.ErrorIsNil) + mc := jc.NewMultiChecker() + mc.AddExpr(`_.CreateTime`, jc.Almost, jc.ExpectedValue) + mc.AddExpr(`_.UpdateTime`, jc.Almost, jc.ExpectedValue) + slices.SortFunc(list, func(a *secrets.SecretMetadata, b *secrets.SecretMetadata) int { + return strings.Compare(a.Label, b.Label) + }) + c.Assert(list, mc, []*secrets.SecretMetadata{{ + URI: uris[0], + RotatePolicy: secrets.RotateDaily, + NextRotateTime: ptr(next), + LatestRevision: 1, + LatestRevisionChecksum: "7a38bf81f383f69433ad6e900d35b3e2385593f76a7b7ab5d4355b8ba41ee24b", + LatestExpireTime: ptr(expire), + Version: 1, + OwnerTag: s.owner.Tag().String(), + Description: "my secret", + Label: "foobar0", + CreateTime: now, + UpdateTime: now, + }, { + URI: uris[1], + RotatePolicy: secrets.RotateDaily, + NextRotateTime: ptr(next), + LatestRevision: 1, + LatestRevisionChecksum: "7a38bf81f383f69433ad6e900d35b3e2385593f76a7b7ab5d4355b8ba41ee24b", + LatestExpireTime: ptr(expire), + Version: 1, + OwnerTag: s.owner.Tag().String(), + Description: "my secret", + Label: "foobar1", + CreateTime: now, + UpdateTime: now, + }, { + URI: uris[2], + RotatePolicy: secrets.RotateDaily, + NextRotateTime: ptr(next), + LatestRevision: 1, + LatestRevisionChecksum: "7a38bf81f383f69433ad6e900d35b3e2385593f76a7b7ab5d4355b8ba41ee24b", + 
LatestExpireTime: ptr(expire), + Version: 1, + OwnerTag: s.owner.Tag().String(), + Description: "my secret", + Label: "foobar2", + CreateTime: now, + UpdateTime: now, + }}) +} + func (s *SecretsSuite) TestListByLabel(c *gc.C) { uri := secrets.NewURI() now := s.Clock.Now().Round(time.Second).UTC() @@ -548,7 +723,7 @@ func (s *SecretsSuite) TestListByLabel(c *gc.C) { c.Assert(err, jc.ErrorIsNil) list, err := s.store.ListSecrets(state.SecretsFilter{ - Label: ptr("foobar"), + Labels: []string{"foobar"}, }) c.Assert(err, jc.ErrorIsNil) mc := jc.NewMultiChecker() @@ -570,6 +745,91 @@ func (s *SecretsSuite) TestListByLabel(c *gc.C) { }}) } +func (s *SecretsSuite) TestListByLabels(c *gc.C) { + uris := []*secrets.URI{secrets.NewURI(), secrets.NewURI(), secrets.NewURI()} + labels := []string{"foobar0", "foobar1", "foobar2"} + now := s.Clock.Now().Round(time.Second).UTC() + next := now.Add(time.Minute).Round(time.Second).UTC() + expire := now.Add(time.Hour).Round(time.Second).UTC() + p := state.CreateSecretParams{ + Version: 1, + Owner: s.owner.Tag(), + UpdateSecretParams: state.UpdateSecretParams{ + LeaderToken: &fakeToken{}, + RotatePolicy: ptr(secrets.RotateDaily), + NextRotateTime: ptr(next), + Description: ptr("my secret"), + Label: ptr("foobar"), + ExpireTime: ptr(expire), + Params: nil, + Data: map[string]string{"foo": "bar"}, + Checksum: "7a38bf81f383f69433ad6e900d35b3e2385593f76a7b7ab5d4355b8ba41ee24b", + }, + } + for i, uri := range uris { + p.UpdateSecretParams.Label = ptr(labels[i]) + _, err := s.store.CreateSecret(uri, p) + c.Assert(err, jc.ErrorIsNil) + } + + // Create another secret to ensure it is excluded. 
+ uri2 := secrets.NewURI() + p.Label = ptr("another") + _, err := s.store.CreateSecret(uri2, p) + c.Assert(err, jc.ErrorIsNil) + + list, err := s.store.ListSecrets(state.SecretsFilter{ + Labels: labels, + }) + c.Assert(err, jc.ErrorIsNil) + mc := jc.NewMultiChecker() + mc.AddExpr(`_.CreateTime`, jc.Almost, jc.ExpectedValue) + mc.AddExpr(`_.UpdateTime`, jc.Almost, jc.ExpectedValue) + slices.SortFunc(list, func(a *secrets.SecretMetadata, b *secrets.SecretMetadata) int { + return strings.Compare(a.Label, b.Label) + }) + c.Assert(list, mc, []*secrets.SecretMetadata{{ + URI: uris[0], + RotatePolicy: secrets.RotateDaily, + NextRotateTime: ptr(next), + LatestRevision: 1, + LatestRevisionChecksum: "7a38bf81f383f69433ad6e900d35b3e2385593f76a7b7ab5d4355b8ba41ee24b", + LatestExpireTime: ptr(expire), + Version: 1, + OwnerTag: s.owner.Tag().String(), + Description: "my secret", + Label: "foobar0", + CreateTime: now, + UpdateTime: now, + }, { + URI: uris[1], + RotatePolicy: secrets.RotateDaily, + NextRotateTime: ptr(next), + LatestRevision: 1, + LatestRevisionChecksum: "7a38bf81f383f69433ad6e900d35b3e2385593f76a7b7ab5d4355b8ba41ee24b", + LatestExpireTime: ptr(expire), + Version: 1, + OwnerTag: s.owner.Tag().String(), + Description: "my secret", + Label: "foobar1", + CreateTime: now, + UpdateTime: now, + }, { + URI: uris[2], + RotatePolicy: secrets.RotateDaily, + NextRotateTime: ptr(next), + LatestRevision: 1, + LatestRevisionChecksum: "7a38bf81f383f69433ad6e900d35b3e2385593f76a7b7ab5d4355b8ba41ee24b", + LatestExpireTime: ptr(expire), + Version: 1, + OwnerTag: s.owner.Tag().String(), + Description: "my secret", + Label: "foobar2", + CreateTime: now, + UpdateTime: now, + }}) +} + func (s *SecretsSuite) TestListByConsumer(c *gc.C) { uri := secrets.NewURI() now := s.Clock.Now().Round(time.Second).UTC() @@ -621,6 +881,77 @@ func (s *SecretsSuite) TestListByConsumer(c *gc.C) { }}) } +func (s *SecretsSuite) TestListByRemoteConsumerApplication(c *gc.C) { + rwordpress, err := 
s.State.AddRemoteApplication(state.AddRemoteApplicationParams{ + Name: "remote-wordpress", + SourceModel: names.NewModelTag("source-model"), + IsConsumerProxy: true, + OfferUUID: "offer-uuid", + Endpoints: []charm.Relation{{ + Interface: "mysql", + Limit: 1, + Name: "db", + Role: charm.RoleRequirer, + Scope: charm.ScopeGlobal, + }}, + }) + c.Assert(err, jc.ErrorIsNil) + wordpressEP, err := rwordpress.Endpoint("db") + c.Assert(err, jc.ErrorIsNil) + mysqlEP, err := s.owner.Endpoint("server") + c.Assert(err, jc.ErrorIsNil) + relation, err := s.State.AddRelation(wordpressEP, mysqlEP) + c.Assert(err, jc.ErrorIsNil) + + uri := secrets.NewURI() + now := s.Clock.Now().Round(time.Second).UTC() + cp := state.CreateSecretParams{ + Version: 1, + Owner: s.owner.Tag(), + UpdateSecretParams: state.UpdateSecretParams{ + LeaderToken: &fakeToken{}, + Description: ptr("my secret"), + Data: map[string]string{"foo": "bar"}, + Checksum: "7a38bf81f383f69433ad6e900d35b3e2385593f76a7b7ab5d4355b8ba41ee24b", + }, + } + _, err = s.store.CreateSecret(uri, cp) + c.Assert(err, jc.ErrorIsNil) + + subject := rwordpress.Tag() + err = s.State.GrantSecretAccess(uri, state.SecretAccessParams{ + LeaderToken: &fakeToken{}, + Scope: relation.Tag(), + Subject: subject, + Role: secrets.RoleView, + }) + c.Assert(err, jc.ErrorIsNil) + + // Create another secret to ensure it is excluded. 
+ uri2 := secrets.NewURI() + cp.Owner = names.NewApplicationTag("wordpress") + _, err = s.store.CreateSecret(uri2, cp) + c.Assert(err, jc.ErrorIsNil) + + list, err := s.store.ListSecrets(state.SecretsFilter{ + ConsumerTags: []names.Tag{subject}, + }) + c.Assert(err, jc.ErrorIsNil) + mc := jc.NewMultiChecker() + mc.AddExpr(`_.CreateTime`, jc.Almost, jc.ExpectedValue) + mc.AddExpr(`_.UpdateTime`, jc.Almost, jc.ExpectedValue) + c.Assert(list, mc, []*secrets.SecretMetadata{{ + URI: uri, + LatestRevision: 1, + LatestRevisionChecksum: "7a38bf81f383f69433ad6e900d35b3e2385593f76a7b7ab5d4355b8ba41ee24b", + Version: 1, + OwnerTag: s.owner.Tag().String(), + Description: "my secret", + CreateTime: now, + UpdateTime: now, + }}) +} + func (s *SecretsSuite) TestListModelSecrets(c *gc.C) { backendStore := state.NewSecretBackends(s.State) _, err := backendStore.CreateSecretBackend(state.CreateSecretBackendParams{ @@ -4070,3 +4401,426 @@ func (s *SecretsSuite) TestDeleteRevisionsMultiple(c *gc.C) { _, _, err = s.store.GetSecretValue(uri, 18) c.Check(err, jc.ErrorIsNil) } + +func (s *SecretsSuite) TestCreateSecretBackendIssuedToken(c *gc.C) { + token := state.SecretBackendIssuedToken{ + UUID: uuid.NewString(), + ExpireTime: s.Clock.Now().Add(time.Minute), + BackendID: "backend-id", + Consumer: s.owner.Tag(), + } + err := s.store.CreateSecretBackendIssuedToken(token) + c.Assert(err, jc.ErrorIsNil) + + col, close := state.GetCollection(s.State, "secretBackendIssuedTokens") + defer close() + n, err := col.FindId(token.UUID).Count() + c.Assert(err, jc.ErrorIsNil) + c.Assert(n, gc.Equals, 1) +} + +func (s *SecretsSuite) TestCreateSecretBackendIssuedTokenForRemoteConsumerApplication(c *gc.C) { + rwordpress, err := s.State.AddRemoteApplication(state.AddRemoteApplicationParams{ + Name: "remote-wordpress", + SourceModel: names.NewModelTag("source-model"), + IsConsumerProxy: true, + OfferUUID: "offer-uuid", + Endpoints: []charm.Relation{{ + Interface: "mysql", + Limit: 1, + Name: "db", + Role: 
charm.RoleRequirer, + Scope: charm.ScopeGlobal, + }}, + }) + c.Assert(err, jc.ErrorIsNil) + wordpressEP, err := rwordpress.Endpoint("db") + c.Assert(err, jc.ErrorIsNil) + mysqlEP, err := s.owner.Endpoint("server") + c.Assert(err, jc.ErrorIsNil) + relation, err := s.State.AddRelation(wordpressEP, mysqlEP) + c.Assert(err, jc.ErrorIsNil) + + uri := secrets.NewURI() + cp := state.CreateSecretParams{ + Version: 1, + Owner: s.owner.Tag(), + UpdateSecretParams: state.UpdateSecretParams{ + LeaderToken: &fakeToken{}, + Description: ptr("my secret"), + Data: map[string]string{"foo": "bar"}, + Checksum: "7a38bf81f383f69433ad6e900d35b3e2385593f76a7b7ab5d4355b8ba41ee24b", + }, + } + _, err = s.store.CreateSecret(uri, cp) + c.Assert(err, jc.ErrorIsNil) + + subject := rwordpress.Tag() + err = s.State.GrantSecretAccess(uri, state.SecretAccessParams{ + LeaderToken: &fakeToken{}, + Scope: relation.Tag(), + Subject: subject, + Role: secrets.RoleView, + }) + c.Assert(err, jc.ErrorIsNil) + + token := state.SecretBackendIssuedToken{ + UUID: uuid.NewString(), + ExpireTime: s.Clock.Now().Add(time.Minute), + BackendID: "backend-id", + Consumer: subject, + } + err = s.store.CreateSecretBackendIssuedToken(token) + c.Assert(err, jc.ErrorIsNil) + + col, close := state.GetCollection(s.State, "secretBackendIssuedTokens") + defer close() + n, err := col.FindId(token.UUID).Count() + c.Assert(err, jc.ErrorIsNil) + c.Assert(n, gc.Equals, 1) +} + +func (s *SecretsSuite) TestCreateSecretBackendIssuedTokenForRemoteConsumerApplicationUnit(c *gc.C) { + rwordpress, err := s.State.AddRemoteApplication(state.AddRemoteApplicationParams{ + Name: "remote-wordpress", + SourceModel: names.NewModelTag("source-model"), + IsConsumerProxy: true, + OfferUUID: "offer-uuid", + Endpoints: []charm.Relation{{ + Interface: "mysql", + Limit: 1, + Name: "db", + Role: charm.RoleRequirer, + Scope: charm.ScopeGlobal, + }}, + }) + c.Assert(err, jc.ErrorIsNil) + wordpressEP, err := rwordpress.Endpoint("db") + c.Assert(err, 
jc.ErrorIsNil) + mysqlEP, err := s.owner.Endpoint("server") + c.Assert(err, jc.ErrorIsNil) + relation, err := s.State.AddRelation(wordpressEP, mysqlEP) + c.Assert(err, jc.ErrorIsNil) + + uri := secrets.NewURI() + cp := state.CreateSecretParams{ + Version: 1, + Owner: s.owner.Tag(), + UpdateSecretParams: state.UpdateSecretParams{ + LeaderToken: &fakeToken{}, + Description: ptr("my secret"), + Data: map[string]string{"foo": "bar"}, + Checksum: "7a38bf81f383f69433ad6e900d35b3e2385593f76a7b7ab5d4355b8ba41ee24b", + }, + } + _, err = s.store.CreateSecret(uri, cp) + c.Assert(err, jc.ErrorIsNil) + + err = s.State.GrantSecretAccess(uri, state.SecretAccessParams{ + LeaderToken: &fakeToken{}, + Scope: relation.Tag(), + Subject: rwordpress.Tag(), + Role: secrets.RoleView, + }) + c.Assert(err, jc.ErrorIsNil) + + subject := names.NewUnitTag(rwordpress.Name() + "/0") + token := state.SecretBackendIssuedToken{ + UUID: uuid.NewString(), + ExpireTime: s.Clock.Now().Add(time.Minute), + BackendID: "backend-id", + Consumer: subject, + } + err = s.store.CreateSecretBackendIssuedToken(token) + c.Assert(err, jc.ErrorIsNil) + + col, close := state.GetCollection(s.State, "secretBackendIssuedTokens") + defer close() + n, err := col.FindId(token.UUID).Count() + c.Assert(err, jc.ErrorIsNil) + c.Assert(n, gc.Equals, 1) +} + +func (s *SecretsSuite) TestNextSecretBackendIssuedTokenExpiry(c *gc.C) { + now := s.Clock.Now() + + tokenBefore := state.SecretBackendIssuedToken{ + UUID: uuid.NewString(), + ExpireTime: now.Add(-time.Minute), + BackendID: "backend-id", + Consumer: s.owner.Tag(), + } + err := s.store.CreateSecretBackendIssuedToken(tokenBefore) + c.Assert(err, jc.ErrorIsNil) + + tokenAfter := state.SecretBackendIssuedToken{ + UUID: uuid.NewString(), + ExpireTime: now.Add(time.Minute), + BackendID: "backend-id", + Consumer: s.owner.Tag(), + } + err = s.store.CreateSecretBackendIssuedToken(tokenAfter) + c.Assert(err, jc.ErrorIsNil) + + first, err := s.store.NextSecretBackendIssuedTokenExpiry() 
+ c.Assert(err, jc.ErrorIsNil) + c.Assert(first, jc.DeepEquals, tokenBefore.ExpireTime.Truncate(time.Second)) +} + +func (s *SecretsSuite) TestListSecretBackendIssuedTokenUntil(c *gc.C) { + now := s.Clock.Now() + + tokenBefore := state.SecretBackendIssuedToken{ + UUID: uuid.NewString(), + ExpireTime: now.Add(-time.Minute), + BackendID: "backend-id", + Consumer: s.owner.Tag(), + } + err := s.store.CreateSecretBackendIssuedToken(tokenBefore) + c.Assert(err, jc.ErrorIsNil) + + tokenAfter := state.SecretBackendIssuedToken{ + UUID: uuid.NewString(), + ExpireTime: now.Add(time.Minute), + BackendID: "backend-id", + Consumer: s.owner.Tag(), + } + err = s.store.CreateSecretBackendIssuedToken(tokenAfter) + c.Assert(err, jc.ErrorIsNil) + + tokens, err := s.store.ListSecretBackendIssuedTokenUntil(now) + c.Assert(err, jc.ErrorIsNil) + c.Assert(tokens, gc.HasLen, 1) + tokenBefore.ExpireTime = tokenBefore.ExpireTime.Truncate(time.Second) + c.Assert(tokens[0], jc.DeepEquals, tokenBefore) +} + +func (s *SecretsSuite) TestListSecretBackendIssuedTokenUntilForConsumer(c *gc.C) { + now := s.Clock.Now() + + tokenBefore := state.SecretBackendIssuedToken{ + UUID: uuid.NewString(), + ExpireTime: now.Add(-time.Minute), + BackendID: "backend-id", + Consumer: s.ownerUnit.Tag(), + } + err := s.store.CreateSecretBackendIssuedToken(tokenBefore) + c.Assert(err, jc.ErrorIsNil) + + tokenBeforeOther := state.SecretBackendIssuedToken{ + UUID: uuid.NewString(), + ExpireTime: now.Add(-time.Minute), + BackendID: "backend-id", + Consumer: s.owner.Tag(), + } + err = s.store.CreateSecretBackendIssuedToken(tokenBeforeOther) + c.Assert(err, jc.ErrorIsNil) + + tokenAfter := state.SecretBackendIssuedToken{ + UUID: uuid.NewString(), + ExpireTime: now.Add(time.Minute), + BackendID: "backend-id", + Consumer: s.ownerUnit.Tag(), + } + err = s.store.CreateSecretBackendIssuedToken(tokenAfter) + c.Assert(err, jc.ErrorIsNil) + + tokens, err := s.store.ListSecretBackendIssuedTokenUntilForConsumer(now, s.ownerUnit.Tag()) 
+ c.Assert(err, jc.ErrorIsNil) + c.Assert(tokens, gc.HasLen, 1) + tokenBefore.ExpireTime = tokenBefore.ExpireTime.Truncate(time.Second) + c.Assert(tokens[0], jc.DeepEquals, tokenBefore) +} + +func (s *SecretsSuite) TestRemoveSecretBackendIssuedTokens(c *gc.C) { + token := state.SecretBackendIssuedToken{ + UUID: uuid.NewString(), + ExpireTime: s.Clock.Now().Add(time.Minute), + BackendID: "backend-id", + Consumer: s.owner.Tag(), + } + err := s.store.CreateSecretBackendIssuedToken(token) + c.Assert(err, jc.ErrorIsNil) + + col, close := state.GetCollection(s.State, "secretBackendIssuedTokens") + defer close() + n, err := col.FindId(token.UUID).Count() + c.Assert(err, jc.ErrorIsNil) + c.Assert(n, gc.Equals, 1) + + err = s.store.RemoveSecretBackendIssuedTokens([]string{token.UUID}) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *SecretsSuite) TestExpireSecretBackendIssuedTokensForConsumer(c *gc.C) { + now := s.Clock.Now() + + // Create a token that expires in the future. + tokenFuture := state.SecretBackendIssuedToken{ + UUID: uuid.NewString(), + ExpireTime: now.Add(time.Hour), + BackendID: "backend-id", + Consumer: s.ownerUnit.Tag(), + } + err := s.store.CreateSecretBackendIssuedToken(tokenFuture) + c.Assert(err, jc.ErrorIsNil) + + // Create a token that is already expired. + tokenPast := state.SecretBackendIssuedToken{ + UUID: uuid.NewString(), + ExpireTime: now, + BackendID: "backend-id", + Consumer: s.ownerUnit.Tag(), + } + err = s.store.CreateSecretBackendIssuedToken(tokenPast) + c.Assert(err, jc.ErrorIsNil) + + // Check there is only one expired token. + tokens, err := s.store.ListSecretBackendIssuedTokenUntilForConsumer( + now, s.ownerUnit.Tag()) + c.Assert(err, jc.ErrorIsNil) + c.Check(tokens, gc.HasLen, 1) + + op := s.store.ExpireSecretBackendIssuedTokensForConsumer(s.ownerUnit.Tag()) + err = s.State.ApplyOperation(op) + c.Assert(err, jc.ErrorIsNil) + + // Check there is now two expired tokens. 
+ tokens, err = s.store.ListSecretBackendIssuedTokenUntilForConsumer( + s.Clock.Now(), s.ownerUnit.Tag()) + c.Assert(err, jc.ErrorIsNil) + c.Check(tokens, gc.HasLen, 2) +} + +func (s *SecretsSuite) TestExpireSecretBackendIssuedTokensForConsumerOnlyTargetConsumer(c *gc.C) { + now := s.Clock.Now() + + // Create a token. + tokenUnit := state.SecretBackendIssuedToken{ + UUID: uuid.NewString(), + ExpireTime: now.Add(time.Hour), + BackendID: "backend-id", + Consumer: s.ownerUnit.Tag(), + } + err := s.store.CreateSecretBackendIssuedToken(tokenUnit) + c.Assert(err, jc.ErrorIsNil) + + // Create a token for a different consumer. + tokenApp := state.SecretBackendIssuedToken{ + UUID: uuid.NewString(), + ExpireTime: now.Add(time.Hour), + BackendID: "backend-id", + Consumer: s.owner.Tag(), + } + err = s.store.CreateSecretBackendIssuedToken(tokenApp) + c.Assert(err, jc.ErrorIsNil) + + op := s.store.ExpireSecretBackendIssuedTokensForConsumer(s.ownerUnit.Tag()) + err = s.State.ApplyOperation(op) + c.Assert(err, jc.ErrorIsNil) + + tokens, err := s.store.ListSecretBackendIssuedTokenUntilForConsumer( + s.Clock.Now(), s.ownerUnit.Tag()) + c.Assert(err, jc.ErrorIsNil) + c.Assert(tokens, gc.HasLen, 1) + + // The token for the different consumer should not be expired. 
+ tokens, err = s.store.ListSecretBackendIssuedTokenUntilForConsumer( + s.Clock.Now(), s.owner.Tag()) + c.Assert(err, jc.ErrorIsNil) + c.Check(tokens, gc.HasLen, 0) +} + +type SecretBackendIssuedTokenExpiryWatcherSuite struct { + testing.StateSuite + store state.SecretsStore +} + +var _ = gc.Suite(&SecretBackendIssuedTokenExpiryWatcherSuite{}) + +func (s *SecretBackendIssuedTokenExpiryWatcherSuite) SetUpTest(c *gc.C) { + s.StateSuite.SetUpTest(c) + s.store = state.NewSecrets(s.State) +} + +func (s *SecretBackendIssuedTokenExpiryWatcherSuite) TestWatchInitialEvent(c *gc.C) { + ownerApp := s.Factory.MakeApplication(c, nil) + now := s.Clock.Now().Round(time.Second).UTC() + next := now.Add(time.Minute).Round(time.Second).UTC() + + err := s.store.CreateSecretBackendIssuedToken(state.SecretBackendIssuedToken{ + UUID: uuid.NewString(), + ExpireTime: next, + BackendID: "abc", + Consumer: ownerApp.Tag(), + }) + c.Assert(err, jc.ErrorIsNil) + + w := s.store.WatchSecretBackendIssuedTokenExpiry() + defer testing.AssertStop(c, w) + + wc := testing.NewStringsWatcherC(c, w) + wc.AssertChange(next.Format(time.RFC3339)) +} + +func (s *SecretBackendIssuedTokenExpiryWatcherSuite) TestWatchUpdates(c *gc.C) { + ownerApp := s.Factory.MakeApplication(c, nil) + now := s.Clock.Now().Round(time.Second).UTC() + first := now.Add(time.Minute).Round(time.Second).UTC() + + err := s.store.CreateSecretBackendIssuedToken(state.SecretBackendIssuedToken{ + UUID: uuid.NewString(), + ExpireTime: first, + BackendID: "abc", + Consumer: ownerApp.Tag(), + }) + c.Assert(err, jc.ErrorIsNil) + + s.WaitForModelWatchersIdle(c, s.Model.UUID()) + + w := s.store.WatchSecretBackendIssuedTokenExpiry() + defer testing.AssertStop(c, w) + + wc := testing.NewStringsWatcherC(c, w) + wc.AssertChange(first.Format(time.RFC3339)) + wc.AssertNoChange() + + next := now.Add(10 * time.Minute).Round(time.Second).UTC() + err = s.store.CreateSecretBackendIssuedToken(state.SecretBackendIssuedToken{ + UUID: uuid.NewString(), + 
ExpireTime: next, + BackendID: "abc", + Consumer: ownerApp.Tag(), + }) + c.Assert(err, jc.ErrorIsNil) + + wc.AssertChange(next.Format(time.RFC3339)) +} + +func (s *SecretBackendIssuedTokenExpiryWatcherSuite) TestWatchNoDeletion(c *gc.C) { + ownerApp := s.Factory.MakeApplication(c, nil) + now := s.Clock.Now().Round(time.Second).UTC() + first := now.Add(time.Minute).Round(time.Second).UTC() + firstUUID := uuid.NewString() + + err := s.store.CreateSecretBackendIssuedToken(state.SecretBackendIssuedToken{ + UUID: firstUUID, + ExpireTime: first, + BackendID: "abc", + Consumer: ownerApp.Tag(), + }) + c.Assert(err, jc.ErrorIsNil) + + s.WaitForModelWatchersIdle(c, s.Model.UUID()) + + w := s.store.WatchSecretBackendIssuedTokenExpiry() + defer testing.AssertStop(c, w) + + wc := testing.NewStringsWatcherC(c, w) + wc.AssertChange(first.Format(time.RFC3339)) + wc.AssertNoChange() + + err = s.store.RemoveSecretBackendIssuedTokens([]string{firstUUID}) + c.Assert(err, jc.ErrorIsNil) + wc.AssertNoChange() +}
state/state.go+3 −0 modified@@ -276,6 +276,9 @@ func cleanupSecretBackendRefCountAfterModelMigrationDone(st *State) error { ops = append(ops, refOps...) } } + if len(ops) == 0 { + return nil + } return st.db().RunTransaction(ops) }
state/testing/watcher.go+8 −0 modified@@ -4,6 +4,8 @@ package testing import ( + "slices" + "strings" "time" "github.com/juju/collections/set" @@ -396,6 +398,12 @@ func (c SecretsTriggerWatcherC) AssertChange(expect ...watcher.SecretTriggerChan c.Assert(ok, jc.IsTrue) received = append(received, actual...) if len(received) >= len(expect) { + slices.SortFunc(received, func(a, b watcher.SecretTriggerChange) int { + return strings.Compare(a.URI.ID, b.URI.ID) + }) + slices.SortFunc(expect, func(a, b watcher.SecretTriggerChange) int { + return strings.Compare(a.URI.ID, b.URI.ID) + }) mc := jc.NewMultiChecker() mc.AddExpr(`_[_].NextTriggerTime`, jc.Almost, jc.ExpectedValue) c.Assert(received, mc, expect)
state/upgrades.go+60 −0 modified@@ -10,6 +10,7 @@ import ( "github.com/juju/mgo/v3/bson" "github.com/juju/mgo/v3/txn" + "github.com/juju/juju/core/network" "github.com/juju/juju/pki/ssh" ) @@ -189,6 +190,7 @@ func AddVirtualHostKeys(pool *StatePool) error { } return st.runRawTransaction(ops) } + func SplitMigrationStatusMessages(pool *StatePool) error { type legacyModelMigStatusDoc struct { // These are the same as the ids as migrationsC. @@ -261,6 +263,64 @@ func SplitMigrationStatusMessages(pool *StatePool) error { return st.runRawTransaction(ops) } +// OpenControllerAPIPort runs an upgrade to open the controller api port +// on the controller units. +func OpenControllerAPIPort(pool *StatePool) error { + st, err := pool.SystemState() + if err != nil { + return errors.Trace(err) + } + + controllerCfg, err := st.ControllerConfig() + if err != nil { + return errors.Trace(err) + } + apiPort := controllerCfg.APIPort() + + unitsColl, closer := st.db().GetRawCollection(unitsC) + defer closer() + + var controllerUnits []unitDoc + err = unitsColl.Find(bson.M{"application": controllerAppName}).Select(bson.M{"name": 1}).All(&controllerUnits) + if err != nil { + return errors.Annotatef(err, "cannot get controller units") + } +nextUnit: + for _, unitDoc := range controllerUnits { + // Ideally we'd want to do this work using bson maps to avoid + // using state objects but the logic is complicated enough that + // it's viable at the moment to use the existing state code to + // manipulate the port ranges. 
+ controllerUnit, err := st.Unit(unitDoc.Name) + if err != nil { + return errors.Trace(err) + } + + pcp, err := controllerUnit.OpenedPortRanges() + if err != nil { + return errors.Trace(err) + } + for _, pr := range pcp.UniquePortRanges() { + if pr.Protocol != "tcp" { + continue + } + if apiPort >= pr.FromPort && apiPort <= pr.ToPort { + continue nextUnit + } + } + pcp.Open("", network.PortRange{ + FromPort: apiPort, + ToPort: apiPort, + Protocol: "tcp", + }) + + if err = st.ApplyOperation(pcp.Changes()); err != nil { + return errors.Trace(err) + } + } + return nil +} + // PopulateApplicationStorageUniqueID has the responsibility of populating the // `storage-unique-id` field in the application document. func PopulateApplicationStorageUniqueID(
state/upgrades_test.go+69 −0 modified@@ -18,6 +18,8 @@ import ( "github.com/kr/pretty" gc "gopkg.in/check.v1" + "github.com/juju/juju/core/constraints" + "github.com/juju/juju/core/network" "github.com/juju/juju/environs/config" "github.com/juju/juju/storage/provider" "github.com/juju/juju/testing" @@ -216,6 +218,73 @@ func (s *upgradesSuite) TestSplitMigrationStatusMessages(c *gc.C) { ) } +func (s *upgradesSuite) TestOpenControllerAPIPort(c *gc.C) { + m0, err := s.state.AddMachine(UbuntuBase("12.10"), JobManageModel, JobHostUnits) + c.Assert(err, jc.ErrorIsNil) + _, err = s.state.EnableHA(3, constraints.Value{}, UbuntuBase("12.10"), nil) + c.Assert(err, jc.ErrorIsNil) + m1, err := s.state.Machine("1") + c.Assert(err, jc.ErrorIsNil) + + controllerApp := AddTestingApplication(c, s.state, "controller", AddTestingCharm(c, s.state, "wordpress")) + + // Unit 0 has no existing ports. + u0, err := controllerApp.AddUnit(AddUnitParams{}) + c.Assert(err, jc.ErrorIsNil) + err = u0.AssignToMachine(m0) + c.Assert(err, jc.ErrorIsNil) + + // Unit 1 has existing ports. 
+ u1, err := controllerApp.AddUnit(AddUnitParams{}) + c.Assert(err, jc.ErrorIsNil) + err = u1.AssignToMachine(m1) + c.Assert(err, jc.ErrorIsNil) + pcp, err := u1.OpenedPortRanges() + c.Assert(err, jc.ErrorIsNil) + pcp.Open("", network.PortRange{ + FromPort: 666, + ToPort: 666, + Protocol: "tcp", + }) + err = s.state.ApplyOperation(pcp.Changes()) + c.Assert(err, jc.ErrorIsNil) + + openPorts, closer := s.state.db().GetRawCollection(openedPortsC) + defer closer() + + s.assertUpgradedData(c, OpenControllerAPIPort, nil, + upgradedData(openPorts, []bson.M{{ + "_id": s.state.ModelUUID() + ":" + m0.Id(), + "model-uuid": s.state.ModelUUID(), + "machine-id": m0.Id(), + "unit-port-ranges": bson.M{ + "controller/0": bson.M{"": []any{bson.M{ + "protocol": "tcp", + "fromport": 17777, + "toport": 17777, + }}}, + }, + }, { + "_id": s.state.ModelUUID() + ":" + m1.Id(), + "model-uuid": s.state.ModelUUID(), + "machine-id": m1.Id(), + "unit-port-ranges": bson.M{ + "controller/1": bson.M{"": []any{ + bson.M{ + "protocol": "tcp", + "fromport": 666, + "toport": 666, + }, bson.M{ + "protocol": "tcp", + "fromport": 17777, + "toport": 17777, + }}, + }, + }, + }}), + ) + +} func (s *upgradesSuite) TestPopulateApplicationStorageUniqueID(c *gc.C) { state1 := s.makeModel(c, "m1", coretesting.Attrs{}, ModelArgs{Type: ModelTypeCAAS}) state2 := s.makeModel(c, "m2", coretesting.Attrs{}, ModelArgs{Type: ModelTypeCAAS})
tests/includes/aws_ami_creation.sh+2 −2 modified@@ -63,7 +63,7 @@ create_ami_and_wait_available() { image_id="${OUT}" # Retrieve the subnet id - sub1=$(aws ec2 describe-subnets | jq -r '.Subnets[] | select(.DefaultForAz==true and .CidrBlock=="172.31.0.0/20") | .SubnetId') + sub1=$(aws ec2 describe-subnets | yq -r '.Subnets[] | select(.DefaultForAz==true and .CidrBlock=="172.31.0.0/20") | .SubnetId') # Launch an ec2 instance using the retrieved jammy image id local instance_id_ami_builder @@ -95,7 +95,7 @@ run_cleanup_ami() { if [[ -f "${TEST_DIR}/ec2-amis" ]]; then echo "====> Cleaning up EC2 AMIs" while read -r ec2_ami; do - snapshot_ids=$(aws ec2 describe-images --image-ids="${ec2_ami}" | jq -r ".Images[0].BlockDeviceMappings | .[] .Ebs.SnapshotId | select(. != null)") + snapshot_ids=$(aws ec2 describe-images --image-ids="${ec2_ami}" | yq -r ".Images[0].BlockDeviceMappings | .[] .Ebs.SnapshotId | select(. != null)") aws ec2 deregister-image --image-id="${ec2_ami}" >>"${TEST_DIR}/aws_cleanup" echo ${snapshot_ids} | xargs -L 1 aws ec2 delete-snapshot --snapshot-id >>"${TEST_DIR}/aws_cleanup" done <"${TEST_DIR}/ec2-amis"
tests/includes/gcloudcli.sh+7 −8 modified@@ -5,30 +5,29 @@ setup_gcloudcli_credential() { fi fi - # Check if a service account is already active - if gcloud auth list --filter="status:ACTIVE" \ - --format="value(account)" | grep -q "gserviceaccount\.com$"; then + # Check if an account is already active + if [[ "$(gcloud config get account 2>&1)" != "(unset)" ]]; then return fi local key_json_file_path - google_entry=$(cat "$HOME/.local/share/juju/credentials.yaml" | yq e '.credentials.google | to_entries | .[0].value' -) + google_entry=$(cat "$HOME/.local/share/juju/credentials.yaml" | yq '.credentials.google | to_entries | .[0].value') # The `file` field points to a JSON file, which contains the private key. - key_json_file_path=$(echo "$google_entry" | yq e '.file' -) + key_json_file_path=$(echo "$google_entry" | yq '.file // ""') # If credentials.yaml doesn't have a `file` field # we assume that this yaml file has the contents expanded so we read from it. - if [[ $key_json_file_path == "null" || -z $key_json_file_path ]]; then + if [[ -z $key_json_file_path ]]; then tmp_key_file=$(mktemp /tmp/google-key.XXXXXX.json) echo "$google_entry" | - yq e '.. | select(tag == "!!map") | with_entries(.key |= sub("-"; "_"))' -o=json - \ + yq -o=json '.. | select(tag == "!!map") | with_entries(.key |= sub("-"; "_"))' \ >"$tmp_key_file" key_json_file_path="$tmp_key_file" fi gcloud auth activate-service-account --key-file "$key_json_file_path" - project_id=$(jq -r .project_id "$key_json_file_path") + project_id=$(yq -r '.project_id' "$key_json_file_path") gcloud config set project "$project_id" }
tests/includes/ha.sh+2 −2 modified@@ -3,7 +3,7 @@ wait_for_controller_machines() { attempt=0 # shellcheck disable=SC2143 - until [[ "$(juju machines -m controller --format=json | jq -r '.machines | .[] | .["juju-status"] | select(.current == "started") | .current' | wc -l | grep "${amount}")" ]]; do + until [[ "$(juju machines -m controller --format=json | yq -r '.machines | .[] | .["juju-status"] | select(.current == "started") | .current' | wc -l | grep "${amount}")" ]]; do echo "[+] (attempt ${attempt}) polling machines" juju machines -m controller 2>&1 | sed 's/^/ | /g' || true sleep "${SHORT_TIMEOUT}" @@ -30,7 +30,7 @@ wait_for_ha() { attempt=0 # shellcheck disable=SC2143 - until [[ "$(juju show-controller --format=json | jq -r '.[] | .["controller-machines"] | .[] | select(.["ha-status"] == "ha-enabled") | .["instance-id"]' | wc -l | grep "${amount}")" ]]; do + until [[ "$(juju show-controller --format=json | yq -r '.[] | .["controller-machines"] | .[] | select(.["ha-status"] == "ha-enabled") | .["instance-id"]' | wc -l | grep "${amount}")" ]]; do echo "[+] (attempt ${attempt}) polling ha" juju show-controller 2>&1 | sed 's/^/ | /g' sleep "${SHORT_TIMEOUT}"
tests/includes/jq.sh+4 −0 added@@ -0,0 +1,4 @@ +jq() { + echo "Do not use jq. Instead use yq, it works on JSON too." + exit 1 +}
tests/includes/juju.sh+49 −18 modified@@ -90,7 +90,7 @@ bootstrap() { fi ;; "k8s") - cloud="${BOOTSTRAP_CLOUD:-microk8s}" + cloud="${BOOTSTRAP_CLOUD:-$(default_k8s)}" ;; "manual") manual_name=${1} @@ -130,7 +130,7 @@ bootstrap() { fi if [[ ${BOOTSTRAP_REUSE} == "true" && ${BOOTSTRAP_PROVIDER} != "k8s" ]]; then # juju show-machine not supported with k8s controllers - OUT=$(juju show-machine -m "${bootstrapped_name}":controller --format=json | jq -r ".machines | .[] | .series") + OUT=$(juju show-machine -m "${bootstrapped_name}":controller --format=json | yq -r ".machines | .[] | .series") if [[ -n ${OUT} ]]; then OUT=$(echo "${OUT}" | grep -oh "${BOOTSTRAP_SERIES}" || true) if [[ ${OUT} != "${BOOTSTRAP_SERIES}" ]]; then @@ -146,7 +146,7 @@ bootstrap() { if [[ ${BOOTSTRAP_REUSE} == "true" ]]; then echo "====> Reusing bootstrapped juju ($(green "${version}:${cloud}"))" - OUT=$(juju models -c "${bootstrapped_name}" --format=json 2>/dev/null | jq -r ".models[] | .[\"short-name\"] | select(. == \"${model}\")" || true) + OUT=$(juju models -c "${bootstrapped_name}" --format=json 2>/dev/null | yq -r ".models[] | .[\"short-name\"] | select(. == \"${model}\")" || true) if [[ -n ${OUT} ]]; then echo "${model} already exists. 
Use the following to clean up the environment:" echo " juju switch ${bootstrapped_name}" @@ -156,9 +156,9 @@ bootstrap() { juju_add_model "${model}" "${cloud}" "${bootstrapped_name}" "${output}" name="${bootstrapped_name}" - BOOTSTRAPPED_CLOUD=$(juju show-model controller --format json | jq -r '.[] | .cloud') + BOOTSTRAPPED_CLOUD=$(juju show-model controller --format json | yq -r '.[] | .cloud') export BOOTSTRAPPED_CLOUD - BOOTSTRAPPED_CLOUD_REGION=$(juju show-model controller --format json | jq -r '.[] | (.cloud + "/" + .region)') + BOOTSTRAPPED_CLOUD_REGION=$(juju show-model controller --format json | yq -r '.[] | "\(.cloud)/\(.region)"') export BOOTSTRAPPED_CLOUD_REGION else local cloud_region @@ -190,7 +190,7 @@ juju_add_model() { controller=${3} output=${4} - OUT=$(juju controllers --format=json | jq '.controllers | .["${bootstrapped_name}"] | .cloud' | grep "${cloud}" || true) + OUT=$(juju controllers --format=json | yq '.controllers | .["${bootstrapped_name}"] | .cloud' | grep "${cloud}" || true) if [[ -n ${OUT} ]]; then juju add-model --show-log -c "${controller}" "${model}" 2>&1 | OUTPUT "${output}" else @@ -231,7 +231,7 @@ setup_vsphere_simplestreams() { mkdir "${dir}" || true fi - cloud_endpoint=$(juju clouds --client --format=json | jq -r ".[\"$BOOTSTRAP_CLOUD\"] | .endpoint") + cloud_endpoint=$(juju clouds --client --format=json | yq -r ".[\"$BOOTSTRAP_CLOUD\"] | .endpoint") # pipe output to test dir, otherwise becomes part of the return value. 
juju metadata generate-image -i juju-ci-root/templates/"${series}"-test-template -r "${BOOTSTRAP_REGION}" -d "${dir}" -u "${cloud_endpoint}" -s "${series}" >>"${TEST_DIR}"/simplestreams 2>&1 } @@ -303,7 +303,13 @@ pre_bootstrap() { else version=$(juju_version) fi - export BOOTSTRAP_ADDITIONAL_ARGS="${BOOTSTRAP_ADDITIONAL_ARGS:-} --agent-version=${version}" + + local extra_opts + if [[ ${version} == "3.6.14" ]]; then + extra_opts="--config juju-db-snap-channel=4.4/stable" + fi + + export BOOTSTRAP_ADDITIONAL_ARGS="${BOOTSTRAP_ADDITIONAL_ARGS:-} --agent-version=${version} ${extra_opts:-}" fi if [[ -n ${SHORT_GIT_COMMIT:-} ]]; then @@ -362,7 +368,7 @@ post_bootstrap() { # shellcheck disable=SC2069 juju debug-log -m "${controller}:controller" --replay --tail 2>&1 >"${TEST_DIR}/${controller}-controller-debug.log" & CMD_PID=$! - echo "${CMD_PID}" >>"${TEST_DIR}/pids" + track_daemon_pid "${CMD_PID}" case "${BOOTSTRAP_PROVIDER:-}" in "vsphere") @@ -391,7 +397,7 @@ post_add_model() { # shellcheck disable=SC2069 juju debug-log -m "${ctrl_arg}" --replay --tail 2>&1 >"${TEST_DIR}/${log_file}" & CMD_PID=$! 
- echo "${CMD_PID}" >>"${TEST_DIR}/pids" + track_daemon_pid "${CMD_PID}" case "${BOOTSTRAP_PROVIDER:-}" in "vsphere") @@ -418,7 +424,7 @@ destroy_model() { shift # shellcheck disable=SC2034 - OUT=$(juju models --format=json | jq '.models | .[] | .["short-name"]' | grep "${name}" || true) + OUT=$(juju models --format=json | yq '.models | .[] | .["short-name"]' | grep "${name}" || true) # shellcheck disable=SC2181 if [[ -z ${OUT} ]]; then return @@ -428,7 +434,7 @@ destroy_model() { echo "====> Destroying juju model ${name}" echo "${name}" | xargs -I % timeout "$timeout" juju destroy-model --no-prompt --destroy-storage % >"${output}" 2>&1 || true - CHK=$(cat "${output}" | grep -i "ERROR\|Unable to get the model status from the API" || true) + CHK=$(cat "${output}" | grep -e "^ERROR " || true) if [[ -n ${CHK} ]]; then printf '\nFound some issues destroying model\n' cat "${output}" @@ -450,10 +456,10 @@ destroy_controller() { shift # shellcheck disable=SC2034 - OUT=$(juju controllers --format=json | jq '.controllers | keys[]' | grep "${name}" || true) + OUT=$(juju controllers --format=json | yq 'select(.controllers) | .controllers | keys | .[]' | grep "${name}" || true) # shellcheck disable=SC2181 if [[ -z ${OUT} ]]; then - OUT=$(juju models --format=json | jq -r '.models | .[] | .["short-name"]' | grep "^${name}$" || true) + OUT=$(juju models --format=json | yq -r '.models | .[] | .["short-name"]' | grep "^${name}$" || true) if [[ -z ${OUT} ]]; then echo "====> ERROR Destroy controller/model. Unable to locate $(red "${name}")" exit 1 @@ -486,6 +492,31 @@ destroy_controller() { echo "====> Destroying juju ($(green "${name}"))" if [[ ${KILL_CONTROLLER:-} != "true" ]]; then + if [[ ${CLEANUP:-} == "true" ]]; then + # Run `juju resolve --no-retry --all` in the background for every + # model on this controller, retrying continuously to unblock any + # hook errors that may prevent teardown. 
This must be done as the + # tests have already finished, if teardown of the charms was a part + # of the test, then destroy_controller should have been called + # earlier. + # A sentinel file is used to signal the background loops to stop + # when this function returns. + local resolve_sentinel + resolve_sentinel=$(mktemp -p ${TEST_DIR}) + while IFS= read -r model_uuid; do + ( + while [[ -f ${resolve_sentinel} ]]; do + timeout 30s juju resolve --no-retry --all -m "${model_uuid}" >/dev/null 2>&1 || true + sleep 5 + done + ) & + done < <(juju models -c "${name}" --format=json 2>/dev/null | yq -r '.models // [] | .[] | select(.["is-controller"] != "true") | .["model-uuid"]' || true) + # Ensure the sentinel file is removed (stopping background loops) + # whenever this function exits, whether normally or via error. + # shellcheck disable=SC2064 + trap "rm -f ${resolve_sentinel}" RETURN + fi + echo "${name}" | xargs -I % juju destroy-controller --destroy-all-models --destroy-storage --no-prompt % 2>&1 | OUTPUT "${output}" else echo "${name}" | xargs -I % juju kill-controller -t 0 --no-prompt % 2>&1 | OUTPUT "${output}" @@ -512,7 +543,7 @@ cleanup_jujus() { echo "====> Cleaning up jujus" while read -r juju_name; do - destroy_controller "${juju_name}" + CLEANUP=true destroy_controller "${juju_name}" done <"${TEST_DIR}/jujus" rm -f "${TEST_DIR}/jujus" || true fi @@ -529,7 +560,7 @@ introspect_controller() { return fi - idents=$(juju machines -m "${name}:controller" --format=json | jq ".machines | keys | .[]") + idents=$(juju machines -m "${name}:controller" --format=json | yq "select(.machines) | .machines | keys | .[]") if [[ -z ${idents} ]]; then return fi @@ -543,10 +574,10 @@ remove_controller_offers() { name=${1} - OUT=$(juju models -c "${name}" --format=json | jq -r '.["models"] | .[] | select(.["is-controller"] == false) | .name' || true) + OUT=$(juju models -c "${name}" --format=json | yq -r '.["models"] | .[] | select(.["is-controller"] == false) | .name' || true) 
if [[ -n ${OUT} ]]; then echo "${OUT}" | while read -r model; do - OUT=$(juju offers -m "${name}:${model}" --format=json | jq -r '.[] | .["offer-url"]' || true) + OUT=$(juju offers -m "${name}:${model}" --format=json | yq -r '.[] | .["offer-url"]' || true) echo "${OUT}" | while read -r offer; do if [[ -n ${offer} ]]; then juju remove-offer --force -y -c "${name}" "${offer}"
tests/includes/k8s.sh+33 −0 added@@ -0,0 +1,33 @@ +kubectl() { + local k8s="${BOOTSTRAP_CLOUD}" + case "${BOOTSTRAP_PROVIDER}" in + "k8s") ;; + *) + # Use a local k8s that is available for IAAS testing needs. + k8s="$(default_k8s)" + ;; + esac + case "${k8s}" in + "microk8s") + if [ "$1" = "config" ] && [ "$2" = "view" ]; then + microk8s.config + else + microk8s kubectl "$@" + fi + ;; + "minikube") + minikube kubectl -- "$@" + ;; + *) + $(which kubectl) "$@" + ;; + esac +} + +default_k8s() { + if which "minikube" >/dev/null 2>&1; then + printf "minikube" + elif which "microk8s" >/dev/null 2>&1; then + printf "microk8s" + fi +}
tests/includes/network.sh+2 −2 modified@@ -9,7 +9,7 @@ assert_machine_ip_is_in_cidrs() { fi for cidr in $cidrs; do - machine_ip_in_cidr=$(juju machines --format json | jq -r ".machines[\"${machine_index}\"][\"ip-addresses\"][]" | grepcidr "${cidr}" || echo "") + machine_ip_in_cidr=$(juju machines --format json | yq -r ".machines[\"${machine_index}\"][\"ip-addresses\"][]" | grepcidr "${cidr}" || echo "") if [ -n "${machine_ip_in_cidr}" ]; then echo "${machine_ip_in_cidr}" return @@ -53,7 +53,7 @@ assert_endpoint_binding_matches() { exp_space_name=${3} # shellcheck disable=SC2086,SC2016 - got=$(juju show-application ${app_name} --format json | jq -r ".[\"${app_name}\"] | .[\"endpoint-bindings\"] | .[\"${endpoint_name}\"]" || echo "") + got=$(juju show-application ${app_name} --format json | yq -r ".[\"${app_name}\"] | .[\"endpoint-bindings\"] | .[\"${endpoint_name}\"]" || echo "") if [ "$got" != "$exp_space_name" ]; then # shellcheck disable=SC2086,SC2016,SC2046 echo $(red "Expected endpoint ${endpoint_name} in juju show-application ${app_name} to be ${exp_space_name}; got ${got}")
tests/includes/pids.sh+87 −9 modified@@ -1,14 +1,92 @@ -cleanup_pids() { - if [[ -f "${TEST_DIR}/pids" ]]; then - echo "====> Cleaning up pids" +DAEMON_SCOPE_DEPTH=0 + +_daemon_scope_label() { + local depth=${1:-${DAEMON_SCOPE_DEPTH}} + local scope_label + scope_label="scope_index" + + local i + for ((i = 0; i < depth; i++)); do + scope_label="${scope_label}_${i}" + done + echo "$scope_label" +} + +# track_daemon_pid appends the provided PID to the scoped pid list file +track_daemon_pid() { + local pid + pid=$1 + echo "${pid} $(_daemon_scope_label)" >>"${TEST_DIR}/pids" +} + +# track_daemon_exec_trampoline returns a new command, that when invoked will +# perform an exec on the supplied arguments, tracking the life of the program. +track_daemon_exec_trampoline() { + mkdir -p "${TEST_DIR}/exec_trampoline/" + local trampoline + trampoline="$(mktemp -p "${TEST_DIR}/exec_trampoline/")" + sed -e "s|__DAEMON_SCOPE_LABEL__|$(_daemon_scope_label)|;s|__PIDS__|${TEST_DIR}/pids|" >"${trampoline}" <<'EOM' +#!/usr/bin/env sh +echo "$$ __DAEMON_SCOPE_LABEL__" >> "__PIDS__" +exec "$@" +EOM + chmod +x "${trampoline}" + echo "${trampoline}" +} + +# push_daemon_scope creates a new daemon tracking scope for nested run calls +push_daemon_scope() { + DAEMON_SCOPE_DEPTH=$((DAEMON_SCOPE_DEPTH + 1)) +} - while read -r pid; do - if ps -p "${pid}" >/dev/null; then - kill -9 "${pid}" || true +# pop_daemon_scope removes and cleans up the current daemon scope +pop_daemon_scope() { + local expected_depth + expected_depth=$1 + + # Kill all daemons whose scope matches current_scope exactly or is a child + # of it. 
+ if [[ -f "${TEST_DIR}/pids" ]]; then + local pid + local current_scope + current_scope="$(_daemon_scope_label $expected_depth)" + while IFS= read -r pid; do + if kill -0 "${pid}" 2>/dev/null; then + kill -9 "${pid}" >/dev/null 2>&1 || true + echo "==> Killed daemon (PID is $(green "${pid}"))" fi - done <"${TEST_DIR}/pids" - rm -f "${TEST_DIR}/pids" || true + done < <(awk -v scope="${current_scope}" ' + $2 == scope || index($2, scope "_") == 1 { print $1 } + ' "${TEST_DIR}/pids") + fi + + if [[ ${DAEMON_SCOPE_DEPTH} -eq 0 ]]; then + return + fi + if [[ ${DAEMON_SCOPE_DEPTH} -ne ${expected_depth} ]]; then + return + fi + DAEMON_SCOPE_DEPTH=$((DAEMON_SCOPE_DEPTH - 1)) +} +# daemon runs a command in the background and tracks its PID for cleanup +daemon() { + if [[ ${DAEMON_SCOPE_DEPTH} -eq 0 ]]; then + echo "ERROR: daemon() called outside of run() scope" >&2 + return 1 fi - echo "====> Completed cleaning up pids" + + local pid + local program_name + program_name=$(basename "$1") + ( + exec >"${TEST_DIR}/${TEST_CURRENT}-${program_name}-${BASHPID}.log" 2>&1 + exec "$@" + ) & + pid=$! + + # Append PID and current scope label to the cleanup file + track_daemon_pid "$pid" + + echo "==> Started daemon (PID is $(green "${pid}"))" }
tests/includes/run.sh+31 −18 modified@@ -16,22 +16,25 @@ run() { START_TIME=$(date +%s) + push_daemon_scope + local expected_scope_depth + expected_scope_depth=${DAEMON_SCOPE_DEPTH} + # shellcheck disable=SC2064 + trap "pop_daemon_scope ${expected_scope_depth}" RETURN + set_verbosity + local pid if [[ ${VERBOSE} -gt 1 ]]; then touch "${TEST_DIR}/${TEST_CURRENT}.log" tail -f "${TEST_DIR}/${TEST_CURRENT}.log" 2>/dev/null & pid=$! - # SIGKILL it with fire, as we don't know what state we're in. - trap 'kill -9 "${pid}" >/dev/null 2>&1 || true' EXIT + track_daemon_pid "$pid" fi "${CMD}" "$@" >"${TEST_DIR}/${TEST_CURRENT}.log" 2>&1 - if [[ ${VERBOSE} -gt 1 ]]; then - # SIGKILL because it should be safe to do so. - kill -9 "${pid}" >/dev/null 2>&1 || true - fi + pop_daemon_scope ${expected_scope_depth} END_TIME=$(date +%s) @@ -86,18 +89,28 @@ run_linter() { } skip() { - CMD="${1}" - - if [[ -n ${RUN_LIST} ]]; then - # shellcheck disable=SC2143,SC2046 - if [[ ! $(echo "${RUN_LIST}" | grep -w "${CMD}") ]]; then - echo "SKIP" - exit 1 - fi - fi - - # shellcheck disable=SC2143,SC2046 - if [[ $(echo "${SKIP_LIST:-}" | grep -w "${CMD}") ]]; then + # For each command, check if it would be skipped (absent from RUN_LIST when + # provided or present in SKIP_LIST). Only output "SKIP" if every command + # would be skipped. + if echo "$@" | tr ' ' '\n' | awk -v run_list="${RUN_LIST:-}" -v skip_list="${SKIP_LIST:-}" ' + function strip_quotes(s) { + gsub(/^"+|"+$/, "", s) + return s + } + function is_skipped(cmd, i, n, parts) { + if (run_list != "") { + n = split(run_list, parts, /,/) + for (i = 1; i <= n; i++) if (strip_quotes(parts[i]) == cmd) { break } + if (i > n) return 1 + } + n = split(skip_list, parts, /,/) + for (i = 1; i <= n; i++) if (strip_quotes(parts[i]) == cmd) return 1 + return 0 + } + BEGIN { all_skip = 1 } + { if (!is_skipped($0)) { all_skip = 0 } } + END { exit (NR == 0 || !all_skip) } + '; then echo "SKIP" exit 1 fi
tests/includes/server.sh+1 −19 modified@@ -5,28 +5,10 @@ start_server() { ( cd "${path}" || exit 1 - python3 -m http.server 8666 >"${TEST_DIR}/server.log" 2>&1 & - SERVER_PID=$! - - echo "${SERVER_PID}" >"${TEST_DIR}/server.pid" + daemon python3 -m http.server 8666 # Sleep to ensure the python server is up and running correctly, as it's # a daemon service (&) we can't actually see if it's up easily. sleep 5 ) } - -kill_server() { - if [[ ! -f "${TEST_DIR}/server.pid" ]]; then - return - fi - - pid=$(cat "${TEST_DIR}/server.pid" | head -n 1 || echo "NOT FOUND") - if [[ ${pid} == "NOT FOUND" ]]; then - return - fi - - echo "==> Killing server" - kill -9 "${pid}" >/dev/null 2>&1 || true - echo "==> Killed server (PID is $(green "${pid}"))" -}
tests/includes/storage.sh+2 −2 modified@@ -4,7 +4,7 @@ assert_storage() { name=${1:?"name is missing"} query=${2:?"query is missing"} - juju storage --format json | jq "${query}" | check "${name}" + juju storage --format json | yq "${query}" | check "${name}" } # life_status checks for the life status for a given application storage. Uses a combination of the storage name and its unit index to query. @@ -59,7 +59,7 @@ unit_state() { unit_exist() { local name name=${1} - juju storage --format json | jq "any(paths; .[-1] == \"${name}\")" + juju storage --format json | yq "[.. | select(kind == \"map\") | has(\"${name}\")] | any" } # filesystem_status used to check for the current status of the given volume for a filesystem matched by the volume number and volume index combination e.g 0/0, 2/1, 3/1
tests/includes/wait-for.sh+45 −15 modified@@ -4,7 +4,7 @@ SHORT_TIMEOUT=5 # wait_for defines the ability to wait for a given condition to happen in a # juju status output. The output is JSON, so everything that the API server # knows about should be valid. -# The query argument is a jq query. +# The query argument is a yq query. # The default timeout is 10 minutes. You can change this by providing the # timeout argument (an integer number of seconds). # @@ -21,7 +21,7 @@ wait_for() { attempt=0 start_time="$(date -u +%s)" # shellcheck disable=SC2046,SC2143 - until [[ "$(juju status --format=json 2>/dev/null | jq -S "${query}" | grep "${name}")" ]]; do + until [[ "$(juju status --format=json 2>/dev/null | yq "${query}" | grep "${name}")" ]]; do echo "[+] (attempt ${attempt}) polling status for" "${query} => ${name}" juju status --relations 2>&1 | sed 's/^/ | /g' sleep "${SHORT_TIMEOUT}" @@ -158,7 +158,7 @@ wait_for_machine_agent_status() { attempt=0 # shellcheck disable=SC2046,SC2143 - until [ $(juju show-machine --format json | jq -r ".[\"machines\"] | .[\"${inst_id}\"] | .[\"juju-status\"] | .[\"current\"]" | grep "${status}") ]; do + until [ $(juju show-machine --format json | yq -r ".[\"machines\"] | .[\"${inst_id}\"] | .[\"juju-status\"] | .[\"current\"]" | grep "${status}") ]; do echo "[+] (attempt ${attempt}) polling machines" juju machines | grep "$inst_id" 2>&1 | sed 's/^/ | /g' sleep "${SHORT_TIMEOUT}" @@ -191,7 +191,7 @@ wait_for_container_agent_status() { attempt=0 # shellcheck disable=SC2046,SC2143 - until [ $(juju show-machine --format json | jq -r ".[\"machines\"] | .[\"${parent_id}\"] | .[\"containers\"] | .[\"${inst_id}\"] | .[\"juju-status\"] | .[\"current\"]" | grep "${status}") ]; do + until [ $(juju show-machine --format json | yq -r ".[\"machines\"] | .[\"${parent_id}\"] | .[\"containers\"] | .[\"${inst_id}\"] | .[\"juju-status\"] | .[\"current\"]" | grep "${status}") ]; do echo "[+] (attempt ${attempt}) polling machines" juju machines | grep 
"$inst_id" 2>&1 | sed 's/^/ | /g' sleep "${SHORT_TIMEOUT}" @@ -223,13 +223,12 @@ wait_for_machine_netif_count() { attempt=0 # shellcheck disable=SC2046,SC2143 - until [ $(juju show-machine --format json | jq -r ".[\"machines\"] | .[\"${inst_id}\"] | .[\"network-interfaces\"] | length" | grep "${count}") ]; do + until [ $(juju show-machine --format json | yq -r ".[\"machines\"] | .[\"${inst_id}\"] | .[\"network-interfaces\"] | length" | grep "${count}") ]; do # shellcheck disable=SC2046,SC2143 - echo "[+] (attempt ${attempt}) network interface count for instance ${inst_id} = "$(juju show-machine --format json | jq -r ".[\"machines\"] | .[\"${inst_id}\"] | .[\"network-interfaces\"] | length") + echo "[+] (attempt ${attempt}) network interface count for instance ${inst_id} = "$(juju show-machine --format json | yq -r ".[\"machines\"] | .[\"${inst_id}\"] | .[\"network-interfaces\"] | length") sleep "${SHORT_TIMEOUT}" attempt=$((attempt + 1)) done - } # wait_for_subordinate_count blocks until the number of subordinates @@ -250,9 +249,40 @@ wait_for_subordinate_count() { attempt=0 # shellcheck disable=SC2046,SC2143 - until [ $(juju status --format json | jq -r ".applications | .[\"${name}\"] | .units | .[\"${name}/${unit_index}\"] | .subordinates | length" | grep "${count}") ]; do + until [ $(juju status --format json | yq -r ".applications | .[\"${name}\"] | .units | .[\"${name}/${unit_index}\"] | .subordinates | length" | grep "${count}") ]; do + # shellcheck disable=SC2046,SC2143 + echo "[+] (attempt ${attempt}) subordinate count for unit ${name}/${unit_index} = "$(juju status --format json | yq -r ".applications | .[\"${name}\"] | .units | .[\"${name}/${unit_index}\"] | .subordinates | length") + sleep "${SHORT_TIMEOUT}" + attempt=$((attempt + 1)) + done + + if [[ ${attempt} -gt 0 ]]; then + echo "[+] $(green 'Completed polling status')" + juju status 2>&1 | sed 's/^/ | /g' + sleep "${SHORT_TIMEOUT}" + fi +} + +# wait_for_unit_count blocks until the number of units 
for the application +# becomes equal to the desired value. +# +# ``` +# wait_for_unit_count <application name> <count> +# +# example: +# wait_for_unit_count mysql 3 +# ``` +wait_for_unit_count() { + local name count + + name=${1} + count=${2:-0} + + attempt=0 + # shellcheck disable=SC2046,SC2143 + until [ $(juju status --format json | yq -r ".applications | .[\"${name}\"] | .units | length" | grep "${count}") ]; do # shellcheck disable=SC2046,SC2143 - echo "[+] (attempt ${attempt}) subordinate count for unit ${name}/${unit_index} = "$(juju status --format json | jq -r ".applications | .[\"${name}\"] | .units | .[\"${name}/${unit_index}\"] | .subordinates | length") + echo "[+] (attempt ${attempt}) unit count ${name} = "$(juju status --format json | yq -r ".applications | .[\"${name}\"] | .units | length") sleep "${SHORT_TIMEOUT}" attempt=$((attempt + 1)) done @@ -281,15 +311,15 @@ wait_for_model() { attempt=0 # shellcheck disable=SC2046,SC2143 - until [ $(juju models --format=json | jq -r ".models | .[] | select(.[\"short-name\"] == \"${name}\") | .[\"short-name\"]" | grep "${name}") ]; do - echo "[+] (attempt ${attempt}) polling models" + until [ $(juju models --format=json | yq -r ".models | .[] | select(.[\"short-name\"] == \"${name}\") | .[\"short-name\"]" | grep "${name}") ]; do + echo "[+] (attempt ${attempt}) polling for model ${name}" juju models | sed 's/^/ | /g' sleep "${SHORT_TIMEOUT}" attempt=$((attempt + 1)) done if [[ ${attempt} -gt 0 ]]; then - echo "[+] $(green 'Completed polling models')" + echo "[+] $(green "Completed polling for model ${name}")" juju models | sed 's/^/ | /g' sleep "${SHORT_TIMEOUT}" fi @@ -342,7 +372,7 @@ wait_for_storage() { attempt=0 start_time="$(date -u +%s)" # shellcheck disable=SC2046,SC2143 - until [[ "$(juju storage --format=json 2>/dev/null | jq "${query}" | grep "${name}")" ]]; do + until [[ "$(juju storage --format=json 2>/dev/null | yq "${query}" | grep "${name}")" ]]; do echo "[+] (attempt ${attempt}) polling status 
for" "${query} => ${name}" juju storage 2>&1 | sed 's/^/ | /g' sleep "${SHORT_TIMEOUT}" @@ -384,7 +414,7 @@ wait_for_aws_ingress_cidrs_for_port_range() { secgrp_list=$(aws ec2 describe-security-groups --filters Name=ip-permission.from-port,Values=${from_port} Name=ip-permission.to-port,Values=${to_port}) # print the security group rules # shellcheck disable=SC2086 - got_cidrs=$(echo ${secgrp_list} | jq -r ".SecurityGroups[0].IpPermissions // [] | .[] | select(.FromPort == ${from_port} and .ToPort == ${to_port}) | .Ip${ipV6Suffix}Ranges // [] | .[] | .CidrIp${ipV6Suffix}" | sort | paste -sd, -) + got_cidrs=$(echo ${secgrp_list} | yq -r ".SecurityGroups[0].IpPermissions // [] | .[] | select(.FromPort == ${from_port} and .ToPort == ${to_port}) | .Ip${ipV6Suffix}Ranges // [] | .[] | .CidrIp${ipV6Suffix}" | sort | paste -sd, -) attempt=0 # shellcheck disable=SC2046,SC2143 @@ -393,7 +423,7 @@ wait_for_aws_ingress_cidrs_for_port_range() { # shellcheck disable=SC2086 secgrp_list=$(aws ec2 describe-security-groups --filters Name=ip-permission.from-port,Values=${from_port} Name=ip-permission.to-port,Values=${to_port}) # shellcheck disable=SC2086 - got_cidrs=$(echo ${secgrp_list} | jq -r ".SecurityGroups[0].IpPermissions // [] | .[] | select(.FromPort == ${from_port} and .ToPort == ${to_port}) | .Ip${ipV6Suffix}Ranges // [] | .[] | .CidrIp${ipV6Suffix}" | sort | paste -sd, -) + got_cidrs=$(echo ${secgrp_list} | yq -r ".SecurityGroups[0].IpPermissions // [] | .[] | select(.FromPort == ${from_port} and .ToPort == ${to_port}) | .Ip${ipV6Suffix}Ranges // [] | .[] | .CidrIp${ipV6Suffix}" | sort | paste -sd, -) sleep "${SHORT_TIMEOUT}" if [ "$got_cidrs" == "$exp_cidrs" ]; then
tests/main.sh+19 −7 modified@@ -111,7 +111,7 @@ show_help() { echo "¯¯¯¯¯¯" echo "Flags should appear $(red 'before') arguments." echo "" - echo "cmd [-h] [-v] [-A] [-s test] [-a file] [-x file] [-r] [-l controller] [-p provider type <lxd|aws|google|azure|manual|microk8s|vsphere|maas>]" + echo "cmd [-h] [-v] [-A] [-s test] [-a file] [-x file] [-r] [-l controller] [-p provider type <lxd|aws|google|azure|manual|k8s|vsphere|maas>]" echo "" echo " $(green './main.sh -h') Display this help message" echo " $(green './main.sh -v') Verbose and debug messages" @@ -199,16 +199,16 @@ while getopts "hH?vAs:a:x:rl:p:c:R:S:V" opt; do export BOOTSTRAP_REUSE_LOCAL="${OPTARG}" export BOOTSTRAP_REUSE="true" - CLOUD=$(juju show-controller "${OPTARG}" --format=json 2>/dev/null | jq -r ".[\"${OPTARG}\"] | .details | .cloud") - PROVIDER=$(juju clouds --client --all --format=json 2>/dev/null | jq -r ".[\"${CLOUD}\"] | .type") + CLOUD=$(juju show-controller "${OPTARG}" --format=json 2>/dev/null | yq -r ".[\"${OPTARG}\"] | .details | .cloud") + PROVIDER=$(juju clouds --client --all --format=json 2>/dev/null | yq -r ".[\"${CLOUD}\"] | .type") export BOOTSTRAP_PROVIDER="${PROVIDER}" export BOOTSTRAP_CLOUD="${CLOUD}" ;; p) export BOOTSTRAP_PROVIDER="${OPTARG}" ;; c) - PROVIDER=$(juju clouds --client --all --format=json 2>/dev/null | jq -r ".[\"${OPTARG}\"] | .type") + PROVIDER=$(juju clouds --client --all --format=json 2>/dev/null | yq -r ".[\"${OPTARG}\"] | .type") export BOOTSTRAP_PROVIDER="${PROVIDER}" CLOUD="${OPTARG}" export BOOTSTRAP_CLOUD="${CLOUD}" @@ -245,7 +245,7 @@ fi echo "" echo "==> Checking for dependencies" -check_dependencies curl jq yq shellcheck expect +check_dependencies curl yq shellcheck expect if [[ ${USER:-'root'} == "root" ]]; then echo "The testsuite must not be run as root." 
>&2 @@ -281,7 +281,8 @@ cleanup() { archive_logs "partial" - cleanup_pids + pop_daemon_scope 0 + cleanup_jujus cleanup_funcs @@ -332,6 +333,7 @@ archive_logs() { TEST_CURRENT=setup TEST_RESULT=failure +push_daemon_scope trap cleanup EXIT HUP INT TERM # Setup test directory @@ -351,7 +353,17 @@ run_test() { # shellcheck disable=SC2046,SC2086 echo "==> TEST BEGIN: ${TEST_CURRENT_DESCRIPTION} ($(green $(basename ${TEST_DIR})))" START_TIME=$(date +%s) - ${TEST_CURRENT} + ( + push_daemon_scope + local expected_scope_depth + expected_scope_depth=${DAEMON_SCOPE_DEPTH} + # shellcheck disable=SC2064 + trap "pop_daemon_scope ${expected_scope_depth}" EXIT + + set_verbosity + + ${TEST_CURRENT} + ) END_TIME=$(date +%s) echo "==> TEST DONE: ${TEST_CURRENT_DESCRIPTION} ($((END_TIME - START_TIME))s)"
tests/README.md+1 −2 modified@@ -44,10 +44,9 @@ echo "failed" | grep -q "passes" # fails ## Getting started -Before running tests, you'll need to install `jq`, `yq`, `shellcheck` and `expect`: +Before running tests, you'll need to install `yq`, `shellcheck` and `expect`: ```sh -sudo snap install jq sudo snap install yq sudo snap install shellcheck sudo snap install expect
tests/suites/backup/backup.sh+7 −7 modified@@ -33,8 +33,8 @@ run_basic_backup_restore() { echo "Deploy a workload (1 machine)" juju deploy jameinel-ubuntu-lite wait_for "ubuntu-lite" "$(idle_condition "ubuntu-lite")" - juju status --format json | jq '.machines | length' | check 1 - id0=$(juju status --format json | jq -r '.machines["0"]["instance-id"]') + juju status --format json | yq '.machines | length' | check 1 + id0=$(juju status --format json | yq -r '.machines["0"]["instance-id"]') echo "Create a backup" juju switch controller # create-backup only works from controller model @@ -44,8 +44,8 @@ run_basic_backup_restore() { juju switch test-basic-backup-restore juju add-unit ubuntu-lite wait_for_machine_agent_status "1" "started" - juju status --format json | jq '.machines | length' | check 2 - id1=$(juju status --format json | jq -r '.machines["1"]["instance-id"]') + juju status --format json | yq '.machines | length' | check 2 + id1=$(juju status --format json | yq -r '.machines["1"]["instance-id"]') echo "Restore the backup" juju switch controller @@ -56,13 +56,13 @@ run_basic_backup_restore() { echo "Ensure there's only one machine (state before the backup)" juju switch test-basic-backup-restore wait_for "ubuntu-lite" "$(idle_condition "ubuntu-lite")" - juju status --format json | jq '.machines | length' | check 1 + juju status --format json | yq '.machines | length' | check 1 # Only do this check if provider is LXD (too hard to do for all providers) if [ "${BOOTSTRAP_PROVIDER}" == "lxd" ]; then echo "Ensure that both instances are running (restore shouldn't terminate machines)" - lxc list --format json | jq --arg name "${id0}" -r '.[] | select(.name==$name) | .state.status' | check Running - lxc list --format json | jq --arg name "${id1}" -r '.[] | select(.name==$name) | .state.status' | check Running + lxc list --format json | name="${id0}" yq -r '.[] | select(.name==env(name)) | .state.status' | check Running + lxc list --format json | name="${id1}" yq 
-r '.[] | select(.name==env(name)) | .state.status' | check Running fi destroy_model "test-basic-backup-restore"
tests/suites/backup/task.sh+1 −1 modified@@ -7,7 +7,7 @@ test_backup() { set_verbosity echo "==> Checking for dependencies" - check_dependencies juju jq + check_dependencies juju yq file="${TEST_DIR}/test-backup-restore.log"
tests/suites/bootstrap/streams.sh+6 −2 modified@@ -12,7 +12,6 @@ run_simplestream_metadata() { --prevent-fallback \ -d "./tests/suites/bootstrap/streams/" - add_clean_func "kill_server" start_server "./tests/suites/bootstrap/streams/tools" # Find a routable address to the server that isn't the loopback address. @@ -27,14 +26,19 @@ run_simplestream_metadata() { name="test-bootstrap-stream" + local extra_opts + if [[ ${JUJUD_VERSION} == "3.6.14" ]]; then + extra_opts="--config juju-db-snap-channel=4.4/stable" + fi + file="${TEST_DIR}/test-bootstrap-stream.log" juju bootstrap "lxd" "${name}" \ --show-log \ --config agent-metadata-url="http://${server_address}:8666/" \ --config test-mode=true \ --add-model=default \ --bootstrap-series="${BOOTSTRAP_SERIES}" \ - --agent-version="${JUJUD_VERSION}" 2>&1 | OUTPUT "${file}" + --agent-version="${JUJUD_VERSION}" ${extra_opts:-} 2>&1 | OUTPUT "${file}" echo "${name}" >>"${TEST_DIR}/jujus" juju deploy jameinel-ubuntu-lite
tests/suites/branches/active_branch.sh+4 −4 modified@@ -12,7 +12,7 @@ run_indicate_active_branch_no_active() { check_not_contains "$(juju status)" "Branch" - if [ "$(juju status --format=json | jq '.branches')" != null ]; then + if [ "$(juju status --format=json | yq '.branches')" != null ]; then echo "The status shows branches even though we do not use them yet" exit 1 fi @@ -35,7 +35,7 @@ run_indicate_active_branch_active() { check_contains "$(juju status)" "bla\*" - if [ "$(juju status --format=json | jq '.branches.bla.active')" != true ]; then + if [ "$(juju status --format=json | yq '.branches.bla.active')" != true ]; then echo "The status does not show active branch" exit 1 fi @@ -49,8 +49,8 @@ run_indicate_active_branch_active() { check_contains "$(juju status)" "testtest\*" check_not_contains "$(juju status)" "bla\*" - STATUS_UNACTIVE=$(juju status --format=json | jq '.branches.bla.active') - STATUS_ACTIVE=$(juju status --format=json | jq '.branches.testtest.active') + STATUS_UNACTIVE=$(juju status --format=json | yq '.branches.bla.active') + STATUS_ACTIVE=$(juju status --format=json | yq '.branches.testtest.active') if [ "${STATUS_UNACTIVE}" != null ]; then echo "The status shows active branch"
tests/suites/caasadmission/admission.sh+2 −2 modified@@ -47,7 +47,7 @@ EOF bearer_token=$(kubectl --kubeconfig "${KUBE_CONFIG}" create token "${name}" -n "$namespace") - kubectl --kubeconfig "${KUBE_CONFIG}" config view --raw -o json | jq "del(.users[0]) | .contexts[0].context.user = \"test\" | .users[0] = {\"name\": \"test\", \"user\": {\"token\": \"$bearer_token\"}}" >"${TEST_DIR}"/kube-sa.json + kubectl --kubeconfig "${KUBE_CONFIG}" config view --raw -o json | yq "del(.users[0]) | .contexts[0].context.user = \"test\" | .users[0] = {\"name\": \"test\", \"user\": {\"token\": \"$bearer_token\"}}" >"${TEST_DIR}"/kube-sa.json # Wait for the model operator to be ready echo "waiting for modeloperator to become available" @@ -129,7 +129,7 @@ EOF bearer_token=$(kubectl --kubeconfig "${KUBE_CONFIG}" create token "${name}" -n "$namespace") - kubectl --kubeconfig "${TEST_DIR}"/kube.conf config view --raw -o json | jq "del(.users[0]) | .contexts[0].context.user = \"test\" | .users[0] = {\"name\": \"test\", \"user\": {\"token\": \"$bearer_token\"}}" >"${TEST_DIR}"/kube-sa.json + kubectl --kubeconfig "${TEST_DIR}"/kube.conf config view --raw -o json | yq "del(.users[0]) | .contexts[0].context.user = \"test\" | .users[0] = {\"name\": \"test\", \"user\": {\"token\": \"$bearer_token\"}}" >"${TEST_DIR}"/kube-sa.json # Wait for the model operator to be ready echo "waiting for modeloperator to become available"
tests/suites/caasadmission/task.sh+1 −1 modified@@ -11,7 +11,7 @@ test_caasadmission() { echo "==> Checking for dependencies" check_dependencies petname - microk8s config >"${TEST_DIR}"/kube.conf + kubectl config view --raw --flatten >"${TEST_DIR}"/kube.conf export KUBE_CONFIG="${TEST_DIR}"/kube.conf test_controller_model_admission
tests/suites/charmhub/find.sh+1 −1 modified@@ -16,7 +16,7 @@ run_charmhub_find_json() { echo # There should always be 1 charm with ubuntu in the name, # charms should always have at least 1 supported base. - output=$(juju find ubuntu --format json | jq '.[0].supports | length') + output=$(juju find ubuntu --format json | yq '.[0].supports | length') check_gt "${output}" "0" }
tests/suites/charmhub/info.sh+1 −1 modified@@ -36,7 +36,7 @@ run_charmhub_info_config() { run_charmhub_info_json() { echo - output=$(juju info ubuntu --format json | jq .charm.config.Options.hostname.Type) + output=$(juju info ubuntu --format json | yq .charm.config.Options.hostname.Type) check_contains "${output}" "string" }
tests/suites/ck/ck.sh+2 −2 modified@@ -71,7 +71,7 @@ run_deploy_ck() { # And on AWS, the maximum number of tags per resource is 50. # Then we will get `Error while granting requests (TagLimitExceeded); check credentials and debug-log` error in next test run. # So we purge the subnet tags here in advance as a workaround. - integrator_app_name=$(cat "$overlay_path" | yq '.applications | keys | .[] | select(.== "*integrator")') + integrator_app_name=$(cat "$overlay_path" | yq 'select(.applications) | .applications | keys | .[] | select(.== "*integrator")') juju --show-log run "$integrator_app_name/leader" --wait=10m purge-subnet-tags } @@ -104,7 +104,7 @@ run_deploy_caas_workload() { model_name="test-${name}" file="${TEST_DIR}/${model_name}.log" - controller_name=$(juju controllers --format json | jq -r '.controllers | keys[0]') + controller_name=$(juju controllers --format json | yq -r 'select(.controllers) | .controllers | keys[0]') juju add-k8s "${k8s_cloud_name}" --storage "${storage}" --controller "${controller_name}" 2>&1 | OUTPUT "${file}" juju_add_model "${model_name}" "${k8s_cloud_name}" "${controller_name}" "${file}"
tests/suites/cli/block.sh+2 −2 modified@@ -63,7 +63,7 @@ run_block_all() { # juju status and offers should still work when 'all' commands # are disabled. - juju status --format json | jq '.applications | .["ubuntu"] | .exposed' | check true + juju status --format json | yq '.applications | .["ubuntu"] | .exposed' | check true juju offers | grep -q 'Offer' || true juju enable-ha | grep -q 'the operation has been blocked' || true @@ -79,7 +79,7 @@ run_block_all() { wait_for "ntp" "$(idle_subordinate_condition "ntp" "ubuntu" 0)" juju unexpose ubuntu - juju status --format json | jq '.applications | .["ubuntu"] | .exposed' | check false + juju status --format json | yq '.applications | .["ubuntu"] | .exposed' | check false destroy_model "${model_name}" }
tests/suites/cli/display_clouds.sh+21 −22 modified@@ -2,24 +2,24 @@ run_show_clouds() { echo mkdir -p "${TEST_DIR}/juju" - echo "" >>"${TEST_DIR}/juju/public-clouds.yaml" - echo "" >>"${TEST_DIR}/juju/credentials.yaml" + touch "${TEST_DIR}/juju/public-clouds.yaml" + touch "${TEST_DIR}/juju/credentials.yaml" - OUT=$(JUJU_DATA="${TEST_DIR}/juju" juju clouds --client --format=json | jq '.[] | select(.defined != "built-in")') + OUT=$(JUJU_DATA="${TEST_DIR}/juju" juju clouds --client --format=json | yq '.[] | select(.defined != "built-in")') if [ -n "${OUT}" ]; then echo "expected empty, got ${OUT}" exit 1 fi cp ./tests/suites/cli/clouds/public-clouds.yaml "${TEST_DIR}"/juju/public-clouds.yaml - OUT=$(JUJU_DATA="${TEST_DIR}/juju" juju clouds --client --format=json | jq '.[] | select(.defined != "built-in")') + OUT=$(JUJU_DATA="${TEST_DIR}/juju" juju clouds --client --format=json | yq '.[] | select(.defined != "built-in")') if [ -n "${OUT}" ]; then echo "expected empty, got ${OUT}" exit 1 fi EXPECTED=$( - cat <<'EOF' + yq -o=json <<'EOF' { "defined": "public", "type": "ec2", @@ -38,7 +38,7 @@ run_show_clouds() { EOF ) - OUT=$(JUJU_DATA="${TEST_DIR}/juju" juju clouds --all --format=json | jq '.[] | select(.defined != "built-in")') + OUT=$(JUJU_DATA="${TEST_DIR}/juju" juju clouds --all --format=json | yq -o=json '.[] | select(.defined != "built-in")') if [ "${OUT}" != "${EXPECTED}" ]; then echo "expected ${EXPECTED}, got ${OUT}" exit 1 @@ -49,18 +49,18 @@ run_assess_clouds() { echo mkdir -p "${TEST_DIR}/juju" - echo "" >>"${TEST_DIR}/juju/public-clouds.yaml" - echo "" >>"${TEST_DIR}/juju/credentials.yaml" + touch "${TEST_DIR}/juju/public-clouds.yaml" + touch "${TEST_DIR}/juju/credentials.yaml" - CLOUD_LIST=$(JUJU_DATA="${TEST_DIR}/juju" juju clouds --client --format=json | jq 'with_entries(select(.value.defined != "built-in"))') + CLOUD_LIST=$(JUJU_DATA="${TEST_DIR}/juju" juju clouds --client --format=json | yq -o=json 'with_entries(select(.value.defined != "built-in"))') 
EXPECTED={} if [ "${CLOUD_LIST}" != "${EXPECTED}" ]; then echo "expected ${EXPECTED}, got ${CLOUD_LIST}" exit 1 fi CLOUDS=$( - cat <<'EOF' + yq <<'EOF' clouds: finfolk-vmaas: auth-types: @@ -78,22 +78,21 @@ run_assess_clouds() { EOF ) - echo "${CLOUDS}" >>"${TEST_DIR}/juju/clouds.yaml" - CLOUD_LIST=$(JUJU_DATA="${TEST_DIR}/juju" juju clouds --client --format=json | jq -S 'with_entries(select( + echo "${CLOUDS}" >"${TEST_DIR}/juju/clouds.yaml" + CLOUD_LIST=$(JUJU_DATA="${TEST_DIR}/juju" juju clouds --client --format=json | yq -o=json 'with_entries(select( .value.defined != "built-in")) | with_entries((select(.value.defined == "local") - | del(.value.defined) | del(.value.description)))') - EXPECTED=$(echo "${CLOUDS}" | yq -o=json | jq -S '.[] | del(.clouds) | .[] |= ({endpoint} as $endpoint | .[] |= walk( - (objects | select(contains($endpoint))) |= del(.endpoint) - ))') + | del(.value.defined) | del(.value.description))) | sort_keys(..)') + EXPECTED=$(echo "${CLOUDS}" | yq -o=json '.[] | del(.clouds) | .[] |= (.endpoint as $ep + | del(.regions[].endpoint | select(. 
== $ep))) | sort_keys(..)') if [ "${CLOUD_LIST}" != "${EXPECTED}" ]; then echo "expected ${EXPECTED}, got ${CLOUD_LIST}" exit 1 fi - CLOUD_LIST=$(JUJU_DATA="${TEST_DIR}/juju" juju show-cloud finfolk-vmaas --format json --client | jq -S '.[] | with_entries((select(.value!= null)))') + CLOUD_LIST=$(JUJU_DATA="${TEST_DIR}/juju" juju show-cloud finfolk-vmaas --format json --client | yq -o=json '.[] | with_entries((select(.value!= null))) | sort_keys(..)') EXPECTED=$( - cat <<'EOF' | jq -S - { + yq -o=json 'sort_keys(..)' <<'EOF' +{ "auth-types": [ "oauth1" ], @@ -103,7 +102,7 @@ EOF "name": "finfolk-vmaas", "summary": "Client cloud \"finfolk-vmaas\"", "type": "maas" - } +} EOF ) @@ -117,10 +116,10 @@ run_controller_clouds() { echo juju add-cloud my-ec2 -f "./tests/suites/cli/clouds/myclouds.yaml" --force --controller ${BOOTSTRAPPED_JUJU_CTRL_NAME} - OUT=$(juju clouds --controller ${BOOTSTRAPPED_JUJU_CTRL_NAME} --format=json | jq '.[]') + OUT=$(juju clouds --controller ${BOOTSTRAPPED_JUJU_CTRL_NAME} --format=json | yq -o=json '.[]') EXPECTED=$( - cat <<'EOF' + yq -o=json <<'EOF' { "defined": "public", "type": "ec2",
tests/suites/cli/local_charms.sh+14 −16 modified@@ -15,13 +15,13 @@ run_deploy_local_charm_revision() { # Initialise a git repo to check the commit SHA is used as the charm version. create_local_git_and_commit_all - SHA_OF_UBUNTU_PLUS=\"$(git describe --dirty --always)\" + SHA_OF_UBUNTU_PLUS=$(git describe --dirty --always) # Deploy from directory. juju deploy . - wait_for "ubuntu-plus" ".applications | keys[0]" - CURRENT_CHARM_SHA=$(juju status --format=json | jq '.applications."ubuntu-plus"."charm-version"') + wait_for "ubuntu-plus" "select(.applications) | .applications | keys[0]" + CURRENT_CHARM_SHA=$(juju status --format=json | yq -r '.applications."ubuntu-plus"."charm-version"') if [ "${SHA_OF_UBUNTU_PLUS}" != "${CURRENT_CHARM_SHA}" ]; then echo "The expected sha does not equal the ntp SHA" @@ -65,14 +65,14 @@ run_deploy_local_charm_revision_no_vcs_but_version_file() { cp -r "$CURRENT_DIR/../testcharms/charms/ubuntu-plus" "${TMP}" cd "${TMP}/ubuntu-plus" || exit 1 - VERSION_OUTPUT=\""$(cat version | sed 's/.* //')"\" + VERSION_OUTPUT="$(cat version | sed 's/.* //')" CURRENT_DIRECTORY=$(pwd) # this is done relative because we expect that the output will be absolute in the end. OUTPUT=$(juju deploy --debug . 2>&1) - wait_for "ubuntu-plus" ".applications | keys[0]" - CURRENT_CHARM_SHA=$(juju status --format=json | jq '.applications."ubuntu-plus"."charm-version"') + wait_for "ubuntu-plus" "select(.applications) | .applications | keys[0]" + CURRENT_CHARM_SHA=$(juju status --format=json | yq '.applications."ubuntu-plus"."charm-version"') if [ "${VERSION_OUTPUT}" != "${CURRENT_CHARM_SHA}" ]; then echo "The expected sha does not equal the ubuntu-plus SHA. Current sha: ${CURRENT_CHARM_SHA} expected sha: ${VERSION_OUTPUT}" @@ -100,24 +100,24 @@ run_deploy_local_charm_revision_relative_path() { # Initialise a git repo and commit everything so that commit SHA is used as the charm version. 
create_local_git_and_commit_all - SHA_OF_UBUNTU_PLUS=\"$(git describe --dirty --always)\" + SHA_OF_UBUNTU_PLUS=$(git describe --dirty --always) # Create git directory outside the charm directory cd .. create_local_git_folder - SHA_OF_TMP=\"$(git describe --dirty --always)\" + SHA_OF_TMP=$(git describe --dirty --always) # state: there is a git repo in the current directory, $TMP, but the correct # git repo is in $TMP/ubuntu-plus. juju deploy ./ubuntu-plus 2>&1 cd "${TMP}/ubuntu-plus" || exit 1 - SHA_OF_UBUNTU_PLUS=\"$(git describe --dirty --always)\" + SHA_OF_UBUNTU_PLUS=$(git describe --dirty --always) - wait_for "ubuntu-plus" ".applications | keys[0]" + wait_for "ubuntu-plus" "select(.applications) | .applications | keys[0]" # We still expect the SHA to be the one from the place we deploy and not the CWD, which in this case has no SHA - CURRENT_CHARM_SHA=$(juju status --format=json | jq '.applications."ubuntu-plus"."charm-version"') + CURRENT_CHARM_SHA=$(juju status --format=json | yq -r '.applications."ubuntu-plus"."charm-version"') if [ "${SHA_OF_TMP}" = "${CURRENT_CHARM_SHA}" ]; then echo "The expected sha should not equal the tmp SHA. Current sha: ${CURRENT_CHARM_SHA}" @@ -148,19 +148,17 @@ run_deploy_local_charm_revision_invalid_git() { # Initialise a git repo and commit everything so that commit SHA is used as the charm version. create_local_git_and_commit_all - SHA_OF_UBUNTU_PLUS=\"$(git describe --dirty --always)\" - - WANTED_CHARM_SHA=\"$(git describe --dirty --always)\" + WANTED_CHARM_SHA=$(git describe --dirty --always) # We cd into a folder without git, add an unrelated repo there. cd "${TMP}" || exit 1 create_local_git_folder # Deploy from the correct repo juju deploy "${TMP_CHARM_GIT}"/ubuntu-plus - wait_for "ubuntu-plus" ".applications | keys[0]" + wait_for "ubuntu-plus" "select(.applications) | .applications | keys[0]" # We still expect the SHA to be the one from the place we deploy and not the CWD, which in this case has no SHA. 
- CURRENT_CHARM_SHA=$(juju status --format=json | jq '.applications."ubuntu-plus"."charm-version"') + CURRENT_CHARM_SHA=$(juju status --format=json | yq -r '.applications."ubuntu-plus"."charm-version"') if [ "${WANTED_CHARM_SHA}" != "${CURRENT_CHARM_SHA}" ]; then echo "The expected sha does not equal the ubuntu-plus SHA. Current sha: ${CURRENT_CHARM_SHA} expected sha: ${WANTED_CHARM_SHA}" exit 1
tests/suites/cli/model_defaults.sh+5 −5 modified@@ -25,9 +25,9 @@ EOF run_model_defaults_boolean() { echo - juju model-defaults --cloud "${BOOTSTRAPPED_CLOUD}" automatically-retry-hooks --format=json | jq '."automatically-retry-hooks"."default"' | grep '^true$' + juju model-defaults --cloud "${BOOTSTRAPPED_CLOUD}" automatically-retry-hooks --format=json | yq '."automatically-retry-hooks"."default"' | grep '^true$' juju model-defaults --cloud "${BOOTSTRAPPED_CLOUD}" automatically-retry-hooks=false - juju model-defaults --cloud "${BOOTSTRAPPED_CLOUD}" automatically-retry-hooks --format=json | jq '."automatically-retry-hooks"."controller"' | grep '^false$' + juju model-defaults --cloud "${BOOTSTRAPPED_CLOUD}" automatically-retry-hooks --format=json | yq '."automatically-retry-hooks"."controller"' | grep '^false$' juju model-defaults --cloud "${BOOTSTRAPPED_CLOUD}" | grep -E 'automatically-retry-hooks +true +false' juju model-defaults --cloud "${BOOTSTRAPPED_CLOUD}" automatically-retry-hooks --format=yaml | grep 'default: true' juju model-defaults --cloud "${BOOTSTRAPPED_CLOUD}" automatically-retry-hooks --format=yaml | grep 'controller: false' @@ -36,10 +36,10 @@ run_model_defaults_boolean() { run_model_defaults_region_aws() { echo - juju model-defaults --cloud "${BOOTSTRAPPED_CLOUD}" --format=json test-mode | jq '."test-mode"."default"' + juju model-defaults --cloud "${BOOTSTRAPPED_CLOUD}" --format=json test-mode | yq '."test-mode"."default"' juju model-defaults --cloud "${BOOTSTRAPPED_CLOUD}" --format=yaml aws/ca-central-1 test-mode=true - juju model-defaults --cloud "${BOOTSTRAPPED_CLOUD}" --format=json aws/ca-central-1 test-mode | jq '."test-mode".regions[0].value' | grep '^true$' - juju model-defaults --cloud "${BOOTSTRAPPED_CLOUD}" --format=json test-mode | jq '."test-mode".regions[]|select(.name=="ca-central-1").value' | grep '^true$' + juju model-defaults --cloud "${BOOTSTRAPPED_CLOUD}" --format=json aws/ca-central-1 test-mode | yq 
'."test-mode".regions[0].value' | grep '^true$' + juju model-defaults --cloud "${BOOTSTRAPPED_CLOUD}" --format=json test-mode | yq '."test-mode".regions[]|select(.name=="ca-central-1").value' | grep '^true$' } test_model_defaults() {
tests/suites/cloud_gce/create_storage_pool.sh+3 −3 modified@@ -9,10 +9,10 @@ run_create_storage_pool() { juju deploy postgresql --channel 14/stable --storage pgdata=20G,mygpd wait_for_machine_agent_status "0" "started" - disk_name="$(juju status --format=json | jq -r '.storage.volumes["0"]."provider-id"')" + disk_name="$(juju status --format=json | yq -r '.storage.volumes["0"]."provider-id"')" disk_info="$(gcloud compute disks list --filter="name=$disk_name" --format=json)" - test 20 -eq "$(jq -r '.[0].sizeGb' <<<"$disk_info")" - jq -r '.[0].type' <<<"$disk_info" | grep "/pd-ssd$" + test 20 -eq "$(yq -r '.[0].sizeGb' <<<"$disk_info")" + yq -r '.[0].type' <<<"$disk_info" | grep "/pd-ssd$" # TODO: use `--force` for `destroy-model` as a temporary workaround. # This avoids hangs caused by problems detaching storage during model teardown.
tests/suites/cloud_gce/pro_images.sh+1 −1 modified@@ -8,7 +8,7 @@ run_pro_images() { juju metadata add-image --base ubuntu@24.04 ubuntu-pro-2404-noble-amd64-v20250805 --stream pro juju deploy ubuntu wait_for "ubuntu" "$(idle_condition "ubuntu")" - pro_status=$(juju ssh 0 'sudo pro status --format json | jq -r .attached') + pro_status=$(juju ssh 0 'sudo pro status --format json' | yq -r .attached) check_contains "$pro_status" "true" destroy_model "test-pro-images"
tests/suites/cloud_gce/service_account.sh+11 −16 modified@@ -6,23 +6,21 @@ run_serviceaccount_credential() { export BOOTSTRAP_ADDITIONAL_ARGS="${BOOTSTRAP_ADDITIONAL_ARGS:-} --bootstrap-constraints=instance-role=auto" bootstrap "test-serviceaccount-gce" "${file}" - projectServiceAccount=$(gcloud compute project-info describe --format json | jq -r .defaultServiceAccount) + projectServiceAccount=$(gcloud compute project-info describe --format json | yq -r .defaultServiceAccount) if [[ $projectServiceAccount == null ]]; then projectInfo=$(gcloud compute project-info describe) printf "Could not find project default service account:\n%s" "${projectInfo}" >&2 exit 1 fi - credServiceAccount=$(juju show-credential --controller "$BOOTSTRAPPED_JUJU_CTRL_NAME" | yq '.controller-credentials .google .default .content .service-account') - chk=$(echo "${credServiceAccount}" | grep "${projectServiceAccount}" || true) + credAuthType=$(juju show-credential --controller "$BOOTSTRAPPED_JUJU_CTRL_NAME" | yq '.controller-credentials .google .credentials .content .auth-type') + chk=$(echo "${credAuthType}" | grep "service-account" || true) if [[ -z ${chk} ]]; then - printf "Expected project service account \"%s\" not found in controller credential\n" "${projectServiceAccount}" >&2 - accountInfo=$(gcloud compute project-info describe --format yaml) - printf "Google account info:\n%s\n" "${accountInfo}" >&2 + printf "Expected project service account auth type not found in controller credential" credentialInfo=$(juju show-credential --controller "$BOOTSTRAPPED_JUJU_CTRL_NAME") printf "Controller credential info:\n%s\n" "${credentialInfo}" >&2 return 1 else - echo "Success: \"${projectServiceAccount}\" found" >&2 + echo 'Success: auth type "service-account" found' fi juju switch "test-serviceaccount-gce" @@ -38,7 +36,7 @@ run_serviceaccount_credential() { instId=$(juju show-machine $m | yq '.machines .'"$m"' .instance-id') echo "Checking service account for machine ${m} with inst id 
${instId}" az=$(juju show-machine $m | yq '.machines .'"$m"' .hardware' | awk '{ delete vars; for(i = 1; i <= NF; ++i) { n = index($i, "="); if(n) { vars[substr($i, 1, n - 1)] = substr($i, n + 1) } } az = vars["availability-zone"] } { print az }') - instServiceAccount=$(gcloud compute instances describe --zone "${az}" "${instId}" --format json | jq -r '.serviceAccounts[0].email') + instServiceAccount=$(gcloud compute instances describe --zone "${az}" "${instId}" --format json | yq -r '.serviceAccounts[0].email') if [[ $instServiceAccount == null ]]; then instInfo=$(gcloud compute instances describe --zone "${az}" "${instId}") printf "Could not find instance %s service account:\n%s" "${instId}" "${instInfo}" >&2 @@ -56,14 +54,11 @@ test_serviceaccount_credential() { return fi - setup_gcloudcli_credential + ( + set_verbosity - set_verbosity + cd .. || exit - echo "==> Checking for dependencies" - check_dependencies juju gcloud - - cd .. || exit - - run "run_serviceaccount_credential" "$@" + run "run_serviceaccount_credential" "$@" + ) }
tests/suites/cloud_gce/task.sh+7 −7 modified@@ -18,16 +18,16 @@ test_cloud_gce() { file="${TEST_DIR}/test-cloud-gce.log" - bootstrap "test-cloud-gce" "${file}" + if [ -z "$(skip 'test_pro_images' 'test_deploy_gpu_instance' 'test_create_storage_pool')" ]; then + bootstrap "test-cloud-gce" "${file}" - test_pro_images - test_deploy_gpu_instance + test_pro_images + test_deploy_gpu_instance + test_create_storage_pool - test_create_storage_pool - - destroy_controller "test-cloud-gce" + destroy_controller "test-cloud-gce" + fi # This test bootstraps a custom controller. test_serviceaccount_credential - }
tests/suites/cmr/offer_consume.sh+16 −24 modified@@ -28,7 +28,7 @@ run_offer_consume() { wait_for "dummy-source" "$(idle_condition "dummy-source")" echo "Check list-offer output" - juju list-offers --format=json | jq -r 'has("dummy-offer")' | check true + juju list-offers --format=json | yq -r 'has("dummy-offer")' | check true echo "Deploy workload in consume model" juju add-model "model-consume" @@ -38,7 +38,7 @@ run_offer_consume() { wait_for "dummy-sink" "$(idle_condition "dummy-sink")" echo "Check find-offer output" - juju find-offers --format=json | jq -r "has(\"${BOOTSTRAPPED_JUJU_CTRL_NAME}:admin/model-offer.dummy-offer\")" | check true + juju find-offers --format=json | yq -r "has(\"${BOOTSTRAPPED_JUJU_CTRL_NAME}:admin/model-offer.dummy-offer\")" | check true echo "Relate workload in consume model with offer" juju consume "${BOOTSTRAPPED_JUJU_CTRL_NAME}:admin/model-offer.dummy-offer" @@ -83,7 +83,7 @@ run_offer_consume_cross_controller() { file="${TEST_DIR}/test-offer-consume-cross-controller.log" ensure "model-offer" "${file}" - offer_controller="$(juju controllers --format=json | jq -r '."current-controller"')" + offer_controller="$(juju controllers --format=json | yq -r '."current-controller"')" # Ensure we have another controller available. echo "Bootstrap consume offer controller" @@ -172,8 +172,8 @@ run_offer_find_non_admin() { # (the non-admin user has no models of their own). JUJU_MODEL="test-offer-find:admin/model-offer-find" JUJU_DATA=/tmp/offeruser \ - juju find-offers --format=json | - jq -r 'has("test-offer-find:admin/model-offer-find.dummy-offer")' | + juju find-offers --format=yaml | + yq -r 'keys | .[] | select(. == "test-offer-find:admin/model-offer-find.dummy-offer")' | check true echo "Clean up" @@ -191,32 +191,26 @@ run_offer_find_non_admin() { run_offer_find_external_user() { echo - # Build the test identity provider binary (CWD is repo root via "cd .."). 
- TEST_IDP_TMPDIR=$(mktemp -d) - TEST_IDP_BIN="${TEST_IDP_TMPDIR}/test-identity-provider" - MAIN_SH_DIR="$(dirname "$(readlink -f "$0")")" - go build -o "${TEST_IDP_BIN}" "${MAIN_SH_DIR}/tests/tools/test-identity-provider/" - # Start the discharger in the background; wait for it to write its two # output lines (URL then public key). IDP_OUTPUT="${TEST_DIR}/idp-output.txt" - "${TEST_IDP_BIN}" --username testextuser >"${IDP_OUTPUT}" 2>&1 & - IDP_PID=$! + go run -exec "$(track_daemon_exec_trampoline)" \ + github.com/juju/juju/tests/tools/test-identity-provider \ + --username testextuser >"${IDP_OUTPUT}" 2>&1 & IDP_URL="" IDP_PUBKEY="" - for _ in $(seq 1 20); do + for i in $(seq 1 20); do if [[ $(wc -l <"${IDP_OUTPUT}" 2>/dev/null) -ge 2 ]]; then IDP_URL=$(sed -n '1p' "${IDP_OUTPUT}") IDP_PUBKEY=$(sed -n '2p' "${IDP_OUTPUT}") break fi - sleep 0.5 + sleep $i done if [[ -z ${IDP_URL} || -z ${IDP_PUBKEY} ]]; then echo "ERROR: test identity provider failed to start" - kill "${IDP_PID}" 2>/dev/null || true exit 1 fi echo "Identity provider running at ${IDP_URL}" @@ -243,27 +237,25 @@ run_offer_find_external_user() { # Retrieve one of the API endpoints for the external-user login step. CTRL_ENDPOINT=$(juju show-controller ctrl-extuser-idp --format=json | - jq -r '."ctrl-extuser-idp".details."api-endpoints"[0]') + yq -r '."ctrl-extuser-idp".details."api-endpoints"[0]') echo "Login as testextuser@external (auto-approved by test identity provider)" - rm -rf /tmp/extuser - mkdir -p /tmp/extuser + TEST_EXTUSER_DIR="$(mktemp -d)" # --no-prompt suppresses all interactive input including the CA cert trust # prompt; --trust auto-approves the self-signed controller certificate. # The bakery discharger auto-approves the login, so no browser is needed. 
- JUJU_DATA=/tmp/extuser juju login "${CTRL_ENDPOINT}" \ + JUJU_DATA="${TEST_EXTUSER_DIR}" juju login "${CTRL_ENDPOINT}" \ -c ctrl-extuser-idp --no-prompt --trust echo "Check find-offers output as external user" - JUJU_MODEL="ctrl-extuser-idp:admin/model-offer-ext" JUJU_DATA=/tmp/extuser \ + JUJU_MODEL="ctrl-extuser-idp:admin/model-offer-ext" JUJU_DATA="${TEST_EXTUSER_DIR}" \ juju find-offers --format=json | - jq -r 'has("ctrl-extuser-idp:admin/model-offer-ext.dummy-offer")' | + yq -r 'has("ctrl-extuser-idp:admin/model-offer-ext.dummy-offer")' | check true echo "Clean up" - kill "${IDP_PID}" 2>/dev/null || true - rm -rf /tmp/extuser "${TEST_IDP_TMPDIR}" + rm -rf "${TEST_EXTUSER_DIR}" destroy_controller "ctrl-extuser-idp" }
tests/suites/cmr/task.sh+1 −1 modified@@ -12,7 +12,7 @@ test_cmr() { # Only bootstrap the shared controller when at least one test that uses it # will run. This avoids an unnecessary bootstrap/destroy cycle when only # test_offer_find_external_user is selected (it manages its own controller). - if [ -z "$(skip 'test_offer_consume')" ] || [ -z "$(skip 'test_offer_find_non_admin')" ]; then + if [ -z "$(skip 'test_offer_consume' 'test_offer_find_non_admin')" ]; then file="${TEST_DIR}/test-cmr.log" bootstrap "test-cmr" "${file}"
tests/suites/constraints/constraints_aws.sh+2 −2 modified@@ -24,11 +24,11 @@ run_constraints_aws() { wait_for_machine_agent_status "1" "started" echo "Ensure machine 0 has 2 cores" - machine0_hardware=$(juju machines --format json | jq -r '.["machines"]["0"]["hardware"]') + machine0_hardware=$(juju machines --format json | yq -r '.["machines"]["0"]["hardware"]') check_contains "${machine0_hardware}" "cores=2" echo "Ensure machine 1 uses the correct AMI ID from image-id constraint" - machine_instance_id=$(juju show-machine --format json | jq -r '.["machines"]["1"]["instance-id"]') + machine_instance_id=$(juju show-machine --format json | yq -r '.["machines"]["1"]["instance-id"]') aws ec2 describe-instances --instance-ids "${machine_instance_id}" --query 'Reservations[0].Instances[0].ImageId' --output text | check "${ami_id}" destroy_model "${name}"
tests/suites/constraints/constraints_gce.sh+5 −5 modified@@ -20,7 +20,7 @@ test_gce_pro_image() { --filter="${name_filter}" \ --sort-by=~creationTimestamp \ --limit=1 \ - --format=json | jq -r '.[0].selfLink | split("/") | .[-1]')" + --format=json | yq -r '.[0].selfLink | split("/") | .[-1]')" # Switch to the pro image stream juju model-config image-stream=pro @@ -32,17 +32,17 @@ test_gce_pro_image() { juju add-machine --base "ubuntu@${release_version}" --constraints "image-id=${image_id}" machine_info="$(juju list-machines --format=json)" - machine_id=$(jq -r --arg ch "$release_version" \ - '.machines | to_entries[] | select(.value.base.channel==$ch) | .key' <<<"$machine_info") + machine_id=$(ch="$release_version" yq -r \ + '.machines | to_entries[] | select(.value.base.channel==env(ch)) | .key' <<<"$machine_info") wait_for_machine_agent_status "$machine_id" "started" # Refresh machine info and verify the actual instance uses the expected image machine_info="$(juju list-machines --format=json)" - instance_id="$(jq -r --arg id "$machine_id" '.machines[$id]."instance-id"' <<<"$machine_info")" + instance_id="$(id="$machine_id" yq -r '.machines[env(id)]."instance-id"' <<<"$machine_info")" source_image_id=$(gcloud compute disks list \ --filter="name=$instance_id" \ - --format="json" | jq -r '.[0].sourceImage | split("/")[-1]') + --format="json" | yq -r '.[0].sourceImage | split("/") | .[-1]') test "$image_id" = "$source_image_id" }
tests/suites/constraints/constraints_lxd.sh+2 −2 modified@@ -14,11 +14,11 @@ run_constraints_lxd() { wait_for_machine_agent_status "1" "started" echo "Ensure machine 0 has 2 cores" - machine0_hardware=$(juju machines --format json | jq -r '.["machines"]["0"]["hardware"]') + machine0_hardware=$(juju machines --format json | yq -r '.["machines"]["0"]["hardware"]') check_contains "${machine0_hardware}" "cores=2" echo "Ensure machine 1 has 2 cores and 2G memory" - machine1_hardware=$(juju machines --format json | jq -r '.["machines"]["1"]["hardware"]') + machine1_hardware=$(juju machines --format json | yq -r '.["machines"]["1"]["hardware"]') check_contains "${machine1_hardware}" "cores=2" check_contains "${machine1_hardware}" "mem=2048M"
tests/suites/constraints/constraints_model.sh+1 −1 modified@@ -15,7 +15,7 @@ run_constraints_model_bootstrap() { check_contains "$(juju model-constraints)" "mem=1024M" check_contains "$(juju constraints controller)" "mem=1024M" - case "${BOOTSTRAP_PROVIDER:-}" in + case "${BOOTSTRAP_CLOUD:-}" in "microk8s") ;; *) check_contains "$(juju show-machine 0)" "mem.*1024M"
tests/suites/constraints/constraints_openstack.sh+3 −3 modified@@ -12,7 +12,7 @@ run_constraints_openstack() { # The openstack cluster must contain an image named 'jammy', otherwise # the test cannot run. echo "Ensure there is an image with name 'jammy'" - jammy_id=$(openstack image list -f json --name jammy | jq -r '.[] | .ID') + jammy_id=$(openstack image list -f json --name jammy | yq -r '.[] | .ID') if [[ -z ${jammy_id} ]]; then echo "No image available with name 'jammy' on openstack" exit 1 @@ -23,8 +23,8 @@ run_constraints_openstack() { wait_for_machine_agent_status "0" "started" echo "Ensure machine 0 uses the correct image ID from image-id constraint" - juju_machine_name=$(juju show-machine --format json | jq -r '.["machines"]["0"]["hostname"]') - openstack server list -f json --name ${juju_machine_name} | jq -r '.[] | .Image' | check "jammy" + juju_machine_name=$(juju show-machine --format json | yq -r '.["machines"]["0"]["hostname"]') + openstack server list -f json --name ${juju_machine_name} | yq -r '.[] | .Image' | check "jammy" destroy_model "${name}" }
tests/suites/constraints/constraints.sh+2 −2 modified@@ -25,8 +25,8 @@ test_constraints_common() { "gce") run "run_constraints_gce" ;; - "microk8s") - echo "==> TEST SKIPPED: constraints - there are no test for k8s cloud" + "k8s") + echo "==> TEST SKIPPED: constraints - there are no tests for k8s provider" ;; *) run "run_constraints_vm"
tests/suites/constraints/constraints_vm.sh+2 −2 modified@@ -14,12 +14,12 @@ run_constraints_vm() { wait_for_machine_agent_status "1" "started" echo "Ensure machine 0 has 16G root disk" - machine0_hardware=$(juju machines --format json | jq -r '.["machines"]["0"]["hardware"]') + machine0_hardware=$(juju machines --format json | yq -r '.["machines"]["0"]["hardware"]') machine0_rootdisk=$(echo "$machine0_hardware" | awk '{for(i=1;i<=NF;i++){if($i ~ /root-disk/){print $i}}}') check_ge "${machine0_rootdisk}" "root-disk=16384M" echo "Ensure machine 1 has 4 cores and 16G root disk" - machine1_hardware=$(juju machines --format json | jq -r '.["machines"]["1"]["hardware"]') + machine1_hardware=$(juju machines --format json | yq -r '.["machines"]["1"]["hardware"]') machine1_cores=$(echo "$machine1_hardware" | awk '{for(i=1;i<=NF;i++){if($i ~ /cores/){print $i}}}') machine1_rootdisk=$(echo "$machine1_hardware" | awk '{for(i=1;i<=NF;i++){if($i ~ /root-disk/){print $i}}}') check_ge "${machine1_cores}" "cores=4"
tests/suites/controllercharm/prometheus.sh+14 −14 modified@@ -16,8 +16,8 @@ run_prometheus() { # Check Juju controller is removed from Prometheus targets retry 'check_prometheus_no_target prometheus-k8s 0' 30 # Check no errors in controller charm or Prometheus - juju status -m controller --format json | jq -r "$(active_condition "controller")" | check "controller" - juju status --format json | jq -r "$(active_condition "prometheus-k8s")" | check "prometheus-k8s" + juju status -m controller --format json | yq -r "$(active_condition "controller")" | check "controller" + juju status --format json | yq -r "$(active_condition "prometheus-k8s")" | check "prometheus-k8s" juju remove-application prometheus-k8s --destroy-storage \ --force --no-wait # TODO: remove these flags once storage bug is fixed @@ -53,8 +53,8 @@ run_prometheus_multiple_units() { wait_for "p1" "$(active_condition "p1" 0)" # Check all applications are still healthy - juju status -m controller --format json | jq -r "$(active_condition "controller")" | check "controller" - juju status --format json | jq -r "$(active_condition "p1" 0)" | check "p1" + juju status -m controller --format json | yq -r "$(active_condition "controller")" | check "controller" + juju status --format json | yq -r "$(active_condition "p1" 0)" | check "p1" juju remove-relation p2 controller # Wait until the application p2 settles before health checks @@ -63,15 +63,15 @@ run_prometheus_multiple_units() { # Check Juju controller is removed from Prometheus targets retry 'check_prometheus_no_target p2 0' 30 # Check no errors in controller charm or Prometheus - juju status -m controller --format json | jq -r "$(active_condition "controller")" | check "controller" - juju status --format json | jq -r "$(active_condition "p2" 1)" | check "p2" + juju status -m controller --format json | yq -r "$(active_condition "controller")" | check "controller" + juju status --format json | yq -r "$(active_condition "p2" 1)" | check "p2" juju 
remove-relation p1 controller # Check Juju controller is removed from Prometheus targets retry 'check_prometheus_no_target p1 0' 30 # Check no errors in controller charm or Prometheus - juju status -m controller --format json | jq -r "$(active_condition "controller")" | check "controller" + juju status -m controller --format json | yq -r "$(active_condition "controller")" | check "controller" # Ensure p1 is still healthy wait_for "p1" "$(active_condition "p1" 0)" @@ -88,7 +88,7 @@ run_prometheus_cross_controller() { CONTROLLER_MODEL_NAME="test-prometheus-cmr-ctrlr" file="${TEST_DIR}/${CONTROLLER_MODEL_NAME}.log" bootstrap "${CONTROLLER_MODEL_NAME}" "${file}" - CONTROLLER_NAME=$(juju controllers --format json | jq -r '."current-controller"') + CONTROLLER_NAME=$(juju controllers --format json | yq -r '."current-controller"') # Prometheus must be deployed on k8s. By default, we choose microk8s, but you # can set the K8S_CLOUD environment variable to select a different cluster. @@ -108,8 +108,8 @@ # Check Juju controller is removed from Prometheus targets retry 'check_prometheus_no_target prometheus-k8s 0' 30 # Check no errors in controller charm or Prometheus - juju status -m controller --format json | jq -r "$(active_condition "controller")" | check "controller" - juju status --format json | jq -r "$(active_condition "prometheus-k8s")" | check "prometheus-k8s" + juju status -m controller --format json | yq -r "$(active_condition "controller")" | check "controller" + juju status --format json | yq -r "$(active_condition "prometheus-k8s")" | check "prometheus-k8s" juju remove-application prometheus-k8s --destroy-storage \ --force --no-wait --no-prompt # TODO: remove these flags once storage bug is fixed @@ -129,9 +129,9 @@ check_prometheus_targets() { return 1 fi - TARGET_STATUS=$(echo $TARGET | jq -r '.health') + TARGET_STATUS=$(echo $TARGET | yq -r '.health') if [[ $TARGET_STATUS != "up" ]]; then - echo "Controller metrics endpoint 
status: $TARGET_STATUS: $(echo $TARGET | jq -r '.lastError')" + echo "Controller metrics endpoint status: $TARGET_STATUS: $(echo $TARGET | yq -r '.lastError')" return 1 fi @@ -162,9 +162,9 @@ get_juju_target() { local unit_number=$2 PROM_IP=$(juju status --format json | - jq -r ".applications.\"$app_name\".units.\"$app_name/$unit_number\".address") + yq -r ".applications.\"$app_name\".units.\"$app_name/$unit_number\".address") TARGET=$(curl -sSm 2 "http://${PROM_IP}:9090/api/v1/targets" | - jq '.data.activeTargets[] | select(.labels.juju_application == "controller")') + yq '.data.activeTargets[] | select(.labels.juju_application == "controller")') echo "$TARGET" }
tests/suites/controller/enable_ha.sh+5 −5 modified@@ -3,7 +3,7 @@ wait_for_controller_machines_tear_down() { attempt=0 # shellcheck disable=SC2143 - until [[ "$(juju machines -m controller --format=json | jq -r '.machines | .[] | .["juju-status"] | select(.current == "started") | .current' | wc -l | grep "${amount}")" ]]; do + until [[ "$(juju machines -m controller --format=json | yq -r '.machines | .[] | .["juju-status"] | select(.current == "started") | .current' | wc -l | grep "${amount}")" ]]; do echo "[+] (attempt ${attempt}) polling started machines during ha tear down" juju machines -m controller 2>&1 | sed 's/^/ | /g' || true sleep "${SHORT_TIMEOUT}" @@ -17,7 +17,7 @@ wait_for_controller_machines_tear_down() { attempt=0 # shellcheck disable=SC2143 - until [[ "$(juju machines -m controller --format=json | jq -r '.machines | .[] | .["juju-status"] | select(.current == "stopped") | .current' | wc -l | grep 0)" ]]; do + until [[ "$(juju machines -m controller --format=json | yq -r '.machines | .[] | .["juju-status"] | select(.current == "stopped") | .current' | wc -l | grep 0)" ]]; do echo "[+] (attempt ${attempt}) polling stopped machines during ha tear down" juju machines -m controller 2>&1 | sed 's/^/ | /g' || true sleep "${SHORT_TIMEOUT}" @@ -29,7 +29,7 @@ wait_for_controller_machines_tear_down() { fi done - if [[ "$(juju machines -m controller --format=json | jq -r '.machines | .[] | .["juju-status"] | select(.current == "error") | .current' | wc -l)" -gt 0 ]]; then + if [[ "$(juju machines -m controller --format=json | yq -r '.machines | .[] | .["juju-status"] | select(.current == "error") | .current' | wc -l)" -gt 0 ]]; then echo "machine in controller model with error during ha tear down" juju machines -m controller 2>&1 | sed 's/^/ | /g' || true exit 1 @@ -61,7 +61,7 @@ run_controller_limit_access_in_ha() { case "${BOOTSTRAP_PROVIDER:-}" in "ec2" | "gce") machine_info="$(juju list-machines -m controller --format=json)" - instance_id="$(jq -r 
'.machines["0"]."instance-id"' <<<"$machine_info")" + instance_id="$(yq -r '.machines["0"]."instance-id"' <<<"$machine_info")" region_or_az=$(region_or_availability_zone) network_tag_or_group=$(instance_network_tag_or_group) @@ -121,7 +121,7 @@ run_enable_ha() { wait_for_controller_machines_tear_down 1 # Ensure that we have no ha enabled machines. - juju show-controller --format=json | jq -r '.[] | .["controller-machines"] | reduce(.[] | select(.["instance-id"] == null)) as $i (0;.+=1)' | grep 0 + juju show-controller --format=json | yq -r '.[] | .["controller-machines"] | (.[] | select(.["instance-id"] == null)) as $i ireduce (0; . + 1)' | grep 0 wait_for_controller_leader
tests/suites/controller/limit_access.sh+12 −12 modified@@ -4,16 +4,16 @@ verify_model_network_tag() { case "${BOOTSTRAP_PROVIDER:-}" in "ec2") sg="$(aws ec2 describe-security-groups --filters Name=group-name,Values="$network_tag")" - jq -r ".SecurityGroups[] |.IpPermissions[] | select(.FromPort == 22) | .IpRanges[0].CidrIp" <<<"${sg}" | check "${sourceRange}" + yq -r ".SecurityGroups[] |.IpPermissions[] | select(.FromPort == 22) | .IpRanges[0].CidrIp" <<<"${sg}" | check "${sourceRange}" ;; "gce") # Ensure only one allowed item, which is ssh port default_rule=$(gcloud compute firewall-rules list \ --filter="targetTags.list():${network_tag}" \ --format=json) - echo "${default_rule}" | jq -r '.[0].allowed[0].ports | length' | check "1" - echo "${default_rule}" | jq -r '.[0].allowed[0].ports[0]' | check "22" - echo "${default_rule}" | jq -r '.[0].sourceRanges[0]' | check "${sourceRange}" + echo "${default_rule}" | yq -r '.[0].allowed[0].ports | length' | check "1" + echo "${default_rule}" | yq -r '.[0].allowed[0].ports[0]' | check "22" + echo "${default_rule}" | yq -r '.[0].sourceRanges[0]' | check "${sourceRange}" ;; *) echo "Aborting, we shouldn't be here" @@ -29,17 +29,17 @@ verify_instance_network_tag() { case "${BOOTSTRAP_PROVIDER:-}" in "ec2") sg="$(aws ec2 describe-security-groups --filters Name=group-name,Values="$network_tag")" - jq -r ".SecurityGroups[] |.IpPermissions[] | select(.FromPort == 17070) | .IpRanges[0].CidrIp" <<<"${sg}" | check "${sourceRange}" - jq -r ".SecurityGroups[] |.IpPermissions[] | select(.FromPort == 17022) | .IpRanges[0].CidrIp" <<<"${sg}" | check "${sourceRange}" + yq -r ".SecurityGroups[] |.IpPermissions[] | select(.FromPort == 17070) | .IpRanges[0].CidrIp" <<<"${sg}" | check "${sourceRange}" + yq -r ".SecurityGroups[] |.IpPermissions[] | select(.FromPort == 17022) | .IpRanges[0].CidrIp" <<<"${sg}" | check "${sourceRange}" ;; "gce") default_rule=$(gcloud compute firewall-rules list \ --filter="targetTags.list():${network_tag}" \ 
--format=json) - echo "${default_rule}" | jq -r '.[0].allowed[0].ports | length' | check "2" - echo "${default_rule}" | jq -r '.[0].allowed[0].ports[0]' | check "17022" - echo "${default_rule}" | jq -r '.[0].allowed[0].ports[1]' | check "17070" - echo "${default_rule}" | jq -r '.[0].sourceRanges[0]' | check "${sourceRange}" + echo "${default_rule}" | yq -r '.[0].allowed[0].ports | length' | check "2" + echo "${default_rule}" | yq -r '.[0].allowed[0].ports[0]' | check "17022" + echo "${default_rule}" | yq -r '.[0].allowed[0].ports[1]' | check "17070" + echo "${default_rule}" | yq -r '.[0].sourceRanges[0]' | check "${sourceRange}" ;; *) echo "Unexpected bootstrap provider (${BOOTSTRAP_PROVIDER})." @@ -59,9 +59,9 @@ run_limit_access() { wait_for_machine_agent_status "0" "started" machine_info="$(juju list-machines -m controller --format=json)" - instance_id="$(jq -r '.machines["0"]."instance-id"' <<<"$machine_info")" + instance_id="$(yq -r '.machines["0"]."instance-id"' <<<"$machine_info")" region_or_az=$(region_or_availability_zone) - model_uuid=$(juju show-model controller --format json | jq -r '.["controller"]["model-uuid"]') + model_uuid=$(juju show-model controller --format json | yq -r '.["controller"]["model-uuid"]') model_network_tag="juju-${model_uuid}" verify_model_network_tag "${model_network_tag}" "0.0.0.0/0"
tests/suites/controller/mongo_memory_profile.sh+1 −1 modified@@ -18,7 +18,7 @@ run_mongo_memory_profile() { attempt=0 # shellcheck disable=SC2046,SC2143,SC2091 - until $(check_contains "$(cat_mongo_service)" wiredTigerCacheSizeGB >/dev/null 2>&1); do + until check_contains "$(cat_mongo_service)" wiredTigerCacheSizeGB >/dev/null 2>&1; do echo "[+] (attempt ${attempt}) polling mongo service" cat_mongo_service | sed 's/^/ | /g' # This will attempt to wait for 2 minutes before failing out.
tests/suites/controller/query_tracing.sh+2 −2 modified@@ -18,7 +18,7 @@ run_query_tracing_enabled() { attempt=0 # shellcheck disable=SC2046,SC2143,SC2091 - until $(check_contains "$(cat_query_tracing_enabled_agent_conf)" "true" >/dev/null 2>&1); do + until check_contains "$(cat_query_tracing_enabled_agent_conf)" "true" >/dev/null 2>&1; do echo "[+] (attempt ${attempt}) polling agent conf" cat_query_tracing_enabled_agent_conf | sed 's/^/ | /g' # This will attempt to wait for 2 minutes before failing out. @@ -58,7 +58,7 @@ run_query_tracing_threshold() { attempt=0 # shellcheck disable=SC2046,SC2143,SC2091 - until $(check_contains "$(cat_query_tracing_threshold_agent_conf)" "42ms" >/dev/null 2>&1); do + until check_contains "$(cat_query_tracing_threshold_agent_conf)" "42ms" >/dev/null 2>&1; do echo "[+] (attempt ${attempt}) polling agent conf" cat_query_tracing_threshold_agent_conf | sed 's/^/ | /g' # This will attempt to wait for 2 minutes before failing out.
tests/suites/controller/task.sh+9 −0 modified@@ -9,6 +9,15 @@ test_controller() { echo "==> Checking for dependencies" check_dependencies juju + case "${BOOTSTRAP_PROVIDER:-}" in + "ec2") + setup_awscli_credential + ;; + "gce") + setup_gcloudcli_credential + ;; + esac + file="${TEST_DIR}/test-controller.log" bootstrap "test-controller" "${file}"
tests/suites/controller/util.sh+4 −4 modified@@ -71,10 +71,10 @@ remove_access_to_api_port() { region_or_availability_zone() { case "${BOOTSTRAP_PROVIDER:-}" in "ec2") - juju show-model controller --format json | jq -r '.["controller"]["region"]' + juju show-model controller --format json | yq -r '.["controller"]["region"]' ;; "gce") - juju show-machine -m controller 0 --format=json | jq -r '.["machines"]["0"]["hardware"]' | grep -oP 'availability-zone=\K\S+' + juju show-machine -m controller 0 --format=json | yq -r '.["machines"]["0"]["hardware"]' | grep -oP 'availability-zone=\K\S+' ;; *) echo "Unexpected bootstrap provider (${BOOTSTRAP_PROVIDER})." @@ -89,12 +89,12 @@ region_or_availability_zone() { instance_network_tag_or_group() { case "${BOOTSTRAP_PROVIDER:-}" in "ec2") - model_uuid=$(juju show-model controller --format json | jq -r '.["controller"]["model-uuid"]') + model_uuid=$(juju show-model controller --format json | yq -r '.["controller"]["model-uuid"]') echo "juju-${model_uuid}-0" ;; "gce") machine_info="$(juju list-machines -m controller --format=json)" - echo "$(jq -r '.machines["0"]."instance-id"' <<<"$machine_info")" + echo "$(yq -r '.machines["0"]."instance-id"' <<<"$machine_info")" ;; *) echo "Unexpected bootstrap provider (${BOOTSTRAP_PROVIDER})."
tests/suites/coslite/cl.sh+4 −4 modified@@ -15,22 +15,22 @@ run_deploy_coslite() { wait_for 0 "$(not_idle_list) | length" 1800 # run-action will change in 3.0 - admin_passwd=$(juju run grafana/0 get-admin-password --wait=2m --format json | jq '.["unit-grafana-0"]["results"]["admin-password"]') + admin_passwd=$(juju run grafana/0 get-admin-password --wait=2m --format json | yq '.["unit-grafana-0"]["results"]["admin-password"]') if [ -z "$admin_passwd" ]; then echo "expected to get admin password for grafana/0" exit 1 fi echo "check if alertmanager is ready" - alertmanager_ip=$(juju status --format=json | jq -r '.applications.alertmanager.units."alertmanager/0".address') + alertmanager_ip=$(juju status --format=json | yq -r '.applications.alertmanager.units."alertmanager/0".address') check_ready "http://$alertmanager_ip:9093/-/ready" 200 echo "check if grafana is ready" - grafana_ip=$(juju status --format=json | jq -r '.applications.grafana.units."grafana/0".address') + grafana_ip=$(juju status --format=json | yq -r '.applications.grafana.units."grafana/0".address') check_ready "http://$grafana_ip:3000/api/health" 200 echo "check if prometheus is ready" - prometheus_ip=$(juju status --format=json | jq -r '.applications.prometheus.units."prometheus/0".address') + prometheus_ip=$(juju status --format=json | yq -r '.applications.prometheus.units."prometheus/0".address') check_ready "http://$prometheus_ip:9090/-/ready" 200 echo "cos lite tests passed"
tests/suites/credential/add_remove_credential.sh+2 −2 modified@@ -7,13 +7,13 @@ run_add_remove_credential() { JUJU_DATA="${TEST_DIR}/juju" juju add-credential aws -f ./tests/suites/credential/credentials-data/fake-credentials.yaml --client echo "Check fake credential" - JUJU_DATA="${TEST_DIR}/juju" juju credentials aws --format=json | jq -r '."client-credentials"."aws"."cloud-credentials"."fake-credential-name"."details"."access-key"' | check "fake-access-key" + JUJU_DATA="${TEST_DIR}/juju" juju credentials aws --format=json | yq -r '."client-credentials"."aws"."cloud-credentials"."fake-credential-name"."details"."access-key"' | check "fake-access-key" echo "Remove fake credential" JUJU_DATA="${TEST_DIR}/juju" juju remove-credential aws fake-credential-name --client echo "Check fake credential is deleted" - JUJU_DATA="${TEST_DIR}/juju" juju credentials aws --format=json | jq -r '."client-credentials"."aws"."cloud-credentials"."fake-credential-name"."details"."access-key"' | check null + JUJU_DATA="${TEST_DIR}/juju" juju credentials aws --format=json | yq -r '."client-credentials"."aws"."cloud-credentials"."fake-credential-name"."details"."access-key"' | check null }
tests/suites/credential/controller_credentials.sh+2 −2 modified@@ -3,7 +3,7 @@ run_controller_credentials() { juju show-cloud --controller "${BOOTSTRAPPED_JUJU_CTRL_NAME}" aws 2>/dev/null || juju add-cloud --controller "${BOOTSTRAPPED_JUJU_CTRL_NAME}" aws --force juju add-credential aws -f "./tests/suites/credential/credentials-data/fake-credentials.yaml" --controller "${BOOTSTRAPPED_JUJU_CTRL_NAME}" - OUT=$(juju credentials --controller "${BOOTSTRAPPED_JUJU_CTRL_NAME}" --format=json 2>/dev/null | jq '.[].aws."cloud-credentials"') + OUT=$(juju credentials --controller "${BOOTSTRAPPED_JUJU_CTRL_NAME}" --format=json 2>/dev/null | yq '.[].aws."cloud-credentials"') EXPECTED=$( cat <<'EOF' @@ -21,7 +21,7 @@ EOF exit 1 fi - OUT=$(juju credentials --controller ${BOOTSTRAPPED_JUJU_CTRL_NAME} --show-secrets --format=json 2>/dev/null | jq '.[].aws."cloud-credentials"') + OUT=$(juju credentials --controller ${BOOTSTRAPPED_JUJU_CTRL_NAME} --show-secrets --format=json 2>/dev/null | yq '.[].aws."cloud-credentials"') EXPECTED=$( cat <<'EOF' "fake-credential-name": {
tests/suites/dashboard/dashboard.sh+7 −5 modified@@ -36,12 +36,14 @@ test_dashboard_deploy() { } open_dashboard() { - juju dashboard & - PID=$! + push_daemon_scope + local expected_scope_depth + expected_scope_depth=${DAEMON_SCOPE_DEPTH} + # shellcheck disable=SC2064 + trap "pop_daemon_scope ${expected_scope_depth}" RETURN + + daemon juju dashboard sleep 10 # TODO: capture url from dashboard output curl -L http://localhost:31666 | grep "Juju Dashboard" - kill -SIGINT "$PID" - # TODO: why isn't this killing the child ssh process? - # lsof -n -i | grep 31666 }
tests/suites/deploy/deploy_bundles.sh+11 −10 modified@@ -192,13 +192,13 @@ run_deploy_exported_charmhub_bundle_with_float_revisions() { echo "Create telegraf_bundle_without_revisions.yaml with known latest revisions from charmhub" if [[ -n ${MODEL_ARCH} ]]; then - influxdb_rev=$(juju info influxdb --arch="${MODEL_ARCH}" --format json | jq -r '."channels"."latest"."stable"[0].revision') - telegraf_rev=$(juju info telegraf --arch="${MODEL_ARCH}" --format json | jq -r '."channels"."latest"."stable"[0].revision') - juju_qa_test_rev=$(juju info juju-qa-test --arch="${MODEL_ARCH}" --format json | jq -r '."channels"."latest"."candidate"[0].revision') + influxdb_rev=$(juju info influxdb --arch="${MODEL_ARCH}" --format json | yq -r '."channels"."latest"."stable"[0].revision') + telegraf_rev=$(juju info telegraf --arch="${MODEL_ARCH}" --format json | yq -r '."channels"."latest"."stable"[0].revision') + juju_qa_test_rev=$(juju info juju-qa-test --arch="${MODEL_ARCH}" --format json | yq -r '."channels"."latest"."candidate"[0].revision') else - influxdb_rev=$(juju info influxdb --format json | jq -r '."channels"."latest"."stable"[0].revision') - telegraf_rev=$(juju info telegraf --format json | jq -r '."channels"."latest"."stable"[0].revision') - juju_qa_test_rev=$(juju info juju-qa-test --format json | jq -r '."channels"."latest"."candidate"[0].revision') + influxdb_rev=$(juju info influxdb --format json | yq -r '."channels"."latest"."stable"[0].revision') + telegraf_rev=$(juju info telegraf --format json | yq -r '."channels"."latest"."stable"[0].revision') + juju_qa_test_rev=$(juju info juju-qa-test --format json | yq -r '."channels"."latest"."candidate"[0].revision') fi echo "Make a copy of reference yaml and insert revisions in it" @@ -295,15 +295,16 @@ run_deploy_lxd_profile_bundle() { wait_for "ubuntu" "$(idle_condition "ubuntu" 1 "${i}")" done - short_uuid=$(juju models --format json | - jq -r --arg name "${model_name}" '.models[] | select(.["short-name"]==$name) | 
."model-uuid"[0:6]') + full_uuid=$(juju models --format json | + name="${model_name}" yq -r '.models[] | select(.["short-name"]==env(name)) | ."model-uuid"') + short_uuid="${full_uuid:0:6}" lxd_profile_name="juju-${model_name}-${short_uuid}-lxd-profile" for i in 0 1 2 3; do machine_n_lxd0="$(machine_container_path "${i}" "${i}"/lxd/0)" - juju status --format=json | jq "${machine_n_lxd0}" | check "${lxd_profile_name}" + juju status --format=json | yq "${machine_n_lxd0}" | check "${lxd_profile_name}" machine_n_lxd1="$(machine_container_path "${i}" "${i}"/lxd/1)" - juju status --format=json | jq "${machine_n_lxd1}" | check "${lxd_profile_name}" + juju status --format=json | yq "${machine_n_lxd1}" | check "${lxd_profile_name}" done destroy_model "${model_name}"
tests/suites/deploy/deploy_charms.sh+33 −28 modified@@ -41,8 +41,8 @@ run_deploy_charm_placement_directive() { # Verify based used to create the machines was used during # deploy. - base_name=$(juju status --format=json | jq -r '.applications."ubuntu-lite".base.name') - base_chan=$(juju status --format=json | jq -r '.applications."ubuntu-lite".base.channel') + base_name=$(juju status --format=json | yq -r '.applications."ubuntu-lite".base.name') + base_chan=$(juju status --format=json | yq -r '.applications."ubuntu-lite".base.channel') echo "$base_name@$base_chan" | check "$expected_base" destroy_model "test-deploy-charm-placement-directive" @@ -80,10 +80,10 @@ run_deploy_specific_series() { juju deploy "$charm_name" app1 juju deploy "$charm_name" app2 --base "$expected_base" - base_name1=$(juju status --format=json | jq -r ".applications.app1.base.name") - base_chan1=$(juju status --format=json | jq -r ".applications.app1.base.channel") - base_name2=$(juju status --format=json | jq -r ".applications.app2.base.name") - base_chan2=$(juju status --format=json | jq -r ".applications.app2.base.channel") + base_name1=$(juju status --format=json | yq -r ".applications.app1.base.name") + base_chan1=$(juju status --format=json | yq -r ".applications.app1.base.channel") + base_name2=$(juju status --format=json | yq -r ".applications.app2.base.name") + base_chan2=$(juju status --format=json | yq -r ".applications.app2.base.channel") destroy_model "test-deploy-specific-series" @@ -104,11 +104,12 @@ run_deploy_lxd_profile_charm() { juju deploy juju-qa-lxd-profile-without-devices --series jammy wait_for "lxd-profile-without-devices" "$(idle_condition "lxd-profile-without-devices")" - short_uuid=$(juju models --format json | - jq -r --arg name "${model_name}" '.models[] | select(.["short-name"]==$name) | ."model-uuid"[0:6]') + full_uuid=$(juju models --format json | + name="${model_name}" yq -r '.models[] | select(.["short-name"]==env(name)) | ."model-uuid"') + 
short_uuid="${full_uuid:0:6}" lxd_profile="juju-test-deploy-lxd-profile-${short_uuid}-lxd-profile" - juju status --format=json | jq '.machines | .["0"] | .["lxd-profiles"] | keys[0]' | check "${lxd_profile}" + juju status --format=json | yq '.machines | .["0"] | select(.["lxd-profiles"]) | .["lxd-profiles"] | keys[0]' | check "${lxd_profile}" destroy_model "test-deploy-lxd-profile" } @@ -127,11 +128,12 @@ run_deploy_lxd_profile_charm_container() { juju deploy juju-qa-lxd-profile-without-devices --to lxd --series jammy wait_for "lxd-profile-without-devices" "$(idle_condition "lxd-profile-without-devices")" - short_uuid=$(juju models --format json | - jq -r --arg name "${model_name}" '.models[] | select(.["short-name"]==$name) | ."model-uuid"[0:6]') + full_uuid=$(juju models --format json | + name="${model_name}" yq -r '.models[] | select(.["short-name"]==env(name)) | ."model-uuid"') + short_uuid="${full_uuid:0:6}" lxd_profile="juju-test-deploy-lxd-profile-container-${short_uuid}-lxd-profile" - juju status --format=json | jq '.machines | .["0"] | .containers | .["0/lxd/0"] | .["lxd-profiles"] | keys[0]' | + juju status --format=json | yq '.machines | .["0"] | .containers | .["0/lxd/0"] | select(.["lxd-profiles"]) | .["lxd-profiles"] | keys[0]' | check "${lxd_profile}" destroy_model "test-deploy-lxd-profile-container" @@ -168,10 +170,11 @@ run_deploy_local_lxd_profile_charm() { juju integrate lxd-profile-subordinate lxd-profile wait_for "lxd-profile" "$(idle_condition "lxd-profile")" - wait_for "lxd-profile-subordinate" ".applications | keys[1]" + wait_for "lxd-profile-subordinate" "select(.applications) | .applications | keys[1]" - short_uuid=$(juju models --format json | - jq -r --arg name "${model_name}" '.models[] | select(.["short-name"]==$name) | ."model-uuid"[0:6]') + full_uuid=$(juju models --format json | + name="${model_name}" yq -r '.models[] | select(.["short-name"]==env(name)) | ."model-uuid"') + short_uuid="${full_uuid:0:6}" 
lxd_profile_name="juju-test-deploy-local-lxd-profile-${short_uuid}-lxd-profile" lxd_profile_sub_name="juju-test-deploy-local-lxd-profile-${short_uuid}-lxd-profile-subordinate" @@ -180,16 +183,16 @@ run_deploy_local_lxd_profile_charm() { machine_0="$(machine_path 0)" wait_for "${lxd_profile_sub_name}" "${machine_0}" - juju status --format=json | jq "${machine_0}" | check "${lxd_profile_name}" - juju status --format=json | jq "${machine_0}" | check "${lxd_profile_sub_name}" + juju status --format=json | yq "${machine_0}" | check "${lxd_profile_name}" + juju status --format=json | yq "${machine_0}" | check "${lxd_profile_sub_name}" juju add-unit "lxd-profile" machine_1="$(machine_path 1)" wait_for "${lxd_profile_sub_name}" "${machine_1}" - juju status --format=json | jq "${machine_1}" | check "${lxd_profile_name}" - juju status --format=json | jq "${machine_1}" | check "${lxd_profile_sub_name}" + juju status --format=json | yq "${machine_1}" | check "${lxd_profile_name}" + juju status --format=json | yq "${machine_1}" | check "${lxd_profile_sub_name}" destroy_model "test-deploy-local-lxd-profile" } @@ -214,8 +217,9 @@ run_deploy_lxd_to_machine() { wait_for "lxd-profile-alt" "$(idle_condition "lxd-profile-alt")" - short_uuid=$(juju models --format json | - jq -r --arg name "${model_name}" '.models[] | select(.["short-name"]==$name) | ."model-uuid"[0:6]') + full_uuid=$(juju models --format json | + name="${model_name}" yq -r '.models[] | select(.["short-name"]==env(name)) | ."model-uuid"') + short_uuid="${full_uuid:0:6}" lxd_profile_0="juju-test-deploy-lxd-machine-${short_uuid}-lxd-profile-alt-0" lxd_profile_1="juju-test-deploy-lxd-machine-${short_uuid}-lxd-profile-alt-1" @@ -284,19 +288,20 @@ run_deploy_lxd_to_container() { juju integrate lxd-profile-subordinate lxd-profile-alt wait_for "lxd-profile-alt" "$(idle_condition "lxd-profile-alt")" - wait_for "lxd-profile-subordinate" ".applications | keys[1]" + wait_for "lxd-profile-subordinate" "select(.applications) | 
.applications | keys[1]" machine_0="$(machine_container_path 0 0/lxd/0)" wait_for "lxd-profile-subordinate" "${machine_0}" - short_uuid=$(juju models --format json | - jq -r --arg name "${model_name}" '.models[] | select(.["short-name"]==$name) | ."model-uuid"[0:6]') + full_uuid=$(juju models --format json | + name="${model_name}" yq -r '.models[] | select(.["short-name"]==env(name)) | ."model-uuid"') + short_uuid="${full_uuid:0:6}" lxd_profile_name="juju-test-deploy-lxd-container-${short_uuid}-lxd-profile-alt" lxd_profile_sub_name="juju-test-deploy-lxd-container-${short_uuid}-lxd-profile-subordinate" - juju status --format=json | jq "${machine_0}" | check "${lxd_profile_name}" - juju status --format=json | jq "${machine_0}" | check "${lxd_profile_sub_name}" + juju status --format=json | yq "${machine_0}" | check "${lxd_profile_name}" + juju status --format=json | yq "${machine_0}" | check "${lxd_profile_sub_name}" lxd_profile_0="juju-test-deploy-lxd-container-${short_uuid}-lxd-profile-alt-0" lxd_profile_1="juju-test-deploy-lxd-container-${short_uuid}-lxd-profile-alt-1" @@ -414,7 +419,7 @@ machine_path() { machine=${1} - echo ".machines | .[\"${machine}\"] | .[\"lxd-profiles\"] | keys" + echo ".machines | .[\"${machine}\"] | select(.[\"lxd-profiles\"]) | .[\"lxd-profiles\"] | keys" } machine_container_path() { @@ -423,5 +428,5 @@ machine_container_path() { machine=${1} container=${2} - echo ".machines | .[\"${machine}\"] | .containers | .[\"${container}\"] | .[\"lxd-profiles\"] | keys" + echo ".machines | .[\"${machine}\"] | .containers | .[\"${container}\"] | select(.[\"lxd-profiles\"]) | .[\"lxd-profiles\"] | keys" }
tests/suites/deploy/deploy_default_series.sh+4 −4 modified@@ -10,8 +10,8 @@ run_deploy_default_series() { juju deploy ubuntu --storage "files=tmpfs" wait_for "ubuntu" "$(idle_condition "ubuntu")" - ubuntu_base_name=$(juju status --format=json | jq ".applications.ubuntu.base.name") - ubuntu_base_ch=$(juju status --format=json | jq ".applications.ubuntu.base.channel") + ubuntu_base_name=$(juju status --format=json | yq ".applications.ubuntu.base.name") + ubuntu_base_ch=$(juju status --format=json | yq ".applications.ubuntu.base.channel") echo "$ubuntu_base_name" | check "ubuntu" echo "$ubuntu_base_ch" | check "22.04" @@ -30,8 +30,8 @@ run_deploy_not_default_series() { juju deploy ubuntu --storage "files=tmpfs" --base ubuntu@24.04 wait_for "ubuntu" "$(idle_condition "ubuntu")" - ubuntu_base_name=$(juju status --format=json | jq ".applications.ubuntu.base.name") - ubuntu_base_ch=$(juju status --format=json | jq ".applications.ubuntu.base.channel") + ubuntu_base_name=$(juju status --format=json | yq ".applications.ubuntu.base.name") + ubuntu_base_ch=$(juju status --format=json | yq ".applications.ubuntu.base.channel") echo "$ubuntu_base_name" | check "ubuntu" echo "$ubuntu_base_ch" | check "24.04"
tests/suites/deploy/deploy_revision.sh+4 −4 modified@@ -11,7 +11,7 @@ run_deploy_revision() { wait_for "juju-qa-test" "$(charm_rev "juju-qa-test" 23)" # check resource revision per channel specified. - got=$(juju resources juju-qa-test --format json | jq -S '.resources[0] | .["revision"] == "1"') + got=$(juju resources juju-qa-test --format json | yq '.resources[0] | .["revision"] == "1"') check_contains "${got}" "true" wait_for "juju-qa-test" "$(idle_condition "juju-qa-test")" @@ -20,7 +20,7 @@ run_deploy_revision() { wait_for "resource line one: testing one." "$(workload_status juju-qa-test 0).message" # check resource revision again per channel specified. - juju resources juju-qa-test --format json | jq -S '.resources[0] | .[ "revision"] == "1"' + juju resources juju-qa-test --format json | yq '.resources[0] | .[ "revision"] == "1"' destroy_model "${model_name}" } @@ -38,7 +38,7 @@ run_deploy_revision_resource() { wait_for "juju-qa-test" "$(charm_rev "juju-qa-test" 23)" # check resource revision as specified in command. - got=$(juju resources juju-qa-test --format json | jq -S '.resources[0] | .["revision"] == "4"') + got=$(juju resources juju-qa-test --format json | yq '.resources[0] | .["revision"] == "4"') check_contains "${got}" "true" wait_for "juju-qa-test" "$(idle_condition "juju-qa-test")" @@ -47,7 +47,7 @@ run_deploy_revision_resource() { wait_for "resource line one: testing four." "$(workload_status juju-qa-test 0).message" # check resource revision again per channel specified. - juju resources juju-qa-test --format json | jq -S '.resources[0] | .[ "revision"] == "4"' + juju resources juju-qa-test --format json | yq '.resources[0] | .[ "revision"] == "4"' destroy_model "${model_name}" }
tests/suites/deploy/os.sh+4 −4 modified@@ -51,8 +51,8 @@ run_deploy_centos7() { # juju deploy ./tests/suites/deploy/charms/centos-dummy-sink --base centos@7 --constraints instance-type=t3.medium - juju status --format=json | jq '.applications."dummy-sink".base.name' | check "centos" - juju status --format=json | jq '.applications."dummy-sink".base.channel' | check "7" + juju status --format=json | yq '.applications."dummy-sink".base.name' | check "centos" + juju status --format=json | yq '.applications."dummy-sink".base.channel' | check "7" wait_for "dummy-sink" "$(idle_condition "dummy-sink")" @@ -83,8 +83,8 @@ run_deploy_centos9() { # juju deploy ./tests/suites/deploy/charms/centos-dummy-sink --base centos@9 --constraints root-disk=10G - juju status --format=json | jq '.applications."dummy-sink".base.name' | check "centos" - juju status --format=json | jq '.applications."dummy-sink".base.channel' | check "9" + juju status --format=json | yq '.applications."dummy-sink".base.name' | check "centos" + juju status --format=json | yq '.applications."dummy-sink".base.channel' | check "9" wait_for "dummy-sink" "$(idle_condition "dummy-sink")"
tests/suites/firewall/ssh_allow.sh+12 −12 modified@@ -10,18 +10,18 @@ run_firewall_ssh_ec2() { echo "==> Verifying default setting" juju model-config ssh-allow | check "0.0.0.0/0,::/0" - model_uuid=$(juju show-model --format json | jq -r '.["firewall-ssh"]["model-uuid"]') - secgroup=$(aws ec2 describe-security-groups | jq -r ".SecurityGroups[] | select(.GroupName == \"juju-${model_uuid}\")") - echo $secgroup | jq -r ".IpPermissions[] | select(.FromPort == 22) | .IpRanges[0].CidrIp" | check "0.0.0.0/0" - echo $secgroup | jq -r ".IpPermissions[] | select(.FromPort == 22) | .Ipv6Ranges[0].CidrIpv6" | check "::/0" + model_uuid=$(juju show-model --format json | yq -r '.["firewall-ssh"]["model-uuid"]') + secgroup=$(aws ec2 describe-security-groups | yq -r ".SecurityGroups[] | select(.GroupName == \"juju-${model_uuid}\")") + echo $secgroup | yq -r ".IpPermissions[] | select(.FromPort == 22) | .IpRanges[0].CidrIp" | check "0.0.0.0/0" + echo $secgroup | yq -r ".IpPermissions[] | select(.FromPort == 22) | .Ipv6Ranges[0].CidrIpv6" | check "::/0" echo "==> Verifying changed setting" juju model-config ssh-allow="192.168.0.0/24" attempt=0 while true; do - secgroup=$(aws ec2 describe-security-groups | jq -r ".SecurityGroups[] | select(.GroupName == \"juju-${model_uuid}\")") - ingress=$(echo $secgroup | jq -r ".IpPermissions[] | select(.FromPort == 22) | .IpRanges[0].CidrIp") - ingressv6=$(echo $secgroup | jq -r ".IpPermissions[] | select(.FromPort == 22) | .IpRanges[0].CidrIpv6") + secgroup=$(aws ec2 describe-security-groups | yq -r ".SecurityGroups[] | select(.GroupName == \"juju-${model_uuid}\")") + ingress=$(echo $secgroup | yq -r ".IpPermissions[] | select(.FromPort == 22) | .IpRanges[0].CidrIp") + ingressv6=$(echo $secgroup | yq -r ".IpPermissions[] | select(.FromPort == 22) | .IpRanges[0].CidrIpv6") if [ "${ingress}" == "192.168.0.0/24" ] && [ "${ingressv6}" == "null" ]; then break fi @@ -42,15 +42,15 @@ run_firewall_ssh_gce() { juju add-machine 
wait_for_machine_agent_status "0" "started" - model_uuid=$(juju show-model --format json | jq -r '.["firewall-ssh"]["model-uuid"]') + model_uuid=$(juju show-model --format json | yq -r '.["firewall-ssh"]["model-uuid"]') network_tag="juju-${model_uuid}" echo "==> Verifying default setting" default_rule=$(gcloud compute firewall-rules list \ --filter="targetTags.list():${network_tag}" \ --format=json) - echo "$default_rule" | jq -r '.[0].sourceRanges[0]' | check "0.0.0.0/0" - echo "$default_rule" | jq -r '.[0].allowed[0].ports[0]' | check "22" + echo "$default_rule" | yq -r '.[0].sourceRanges[0]' | check "0.0.0.0/0" + echo "$default_rule" | yq -r '.[0].allowed[0].ports[0]' | check "22" echo "==> Verifying changed setting" juju model-config ssh-allow="192.168.0.0/24" @@ -60,8 +60,8 @@ run_firewall_ssh_gce() { updated_rule=$(gcloud compute firewall-rules list \ --filter="targetTags.list():${network_tag}" \ --format=json) - echo "$updated_rule" | jq -r '.[0].allowed[0].ports[0]' | check "22" - ingress=$(echo "$updated_rule" | jq -r '.[0].sourceRanges[0]') + echo "$updated_rule" | yq -r '.[0].allowed[0].ports[0]' | check "22" + ingress=$(echo "$updated_rule" | yq -r '.[0].sourceRanges[0]') if [ "${ingress}" == "192.168.0.0/24" ]; then break fi
tests/suites/kubeflow/deploy_kubeflow.sh+4 −4 modified@@ -11,18 +11,18 @@ run_deploy_kubeflow() { juju deploy kubeflow --trust --channel 1.9 echo "==> Checking kubeflow deployment" - num_apps=$(juju status --format json | jq '.applications | length') + num_apps=$(juju status --format json | yq '.applications | length') wait_for "training-operator" "$(active_idle_condition "training-operator" $((num_apps - 1)))" 1800 - jupyter_ip=$(microk8s kubectl -n kubeflow get svc istio-ingressgateway-workload -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + jupyter_ip=$(kubectl -n kubeflow get svc istio-ingressgateway-workload -o jsonpath='{.status.loadBalancer.ingress[0].ip}') attempt=0 # shellcheck disable=SC2046,SC2143,SC2091,SC2086 - until $(check_contains "$(curl ${jupyter_ip})" "Found" >/dev/null 2>&1); do + until check_contains "$(curl ${jupyter_ip})" "Found" >/dev/null 2>&1; do echo "[+] (attempt ${attempt}) jupyter ui" sleep "${SHORT_TIMEOUT}" attempt=$((attempt + 1)) - if [[ ${attempt} -gt 10 ]]; then + if [[ ${attempt} -gt 30 ]]; then echo "failed waiting for jupyter ui" exit 1 fi
tests/suites/manual/deploy_manual_aws.sh+8 −8 modified@@ -75,39 +75,39 @@ run_deploy_manual_aws() { local vpc_id igw_id sg_id subnet_id - OUT=$(aws ec2 describe-vpcs | jq '.Vpcs[] | select(.Tags[]? | select((.Key=="Name") and (.Value=="manual-deploy")))' || true) + OUT=$(aws ec2 describe-vpcs | yq '.Vpcs[] | select(.Tags[]? | select((.Key=="Name") and (.Value=="manual-deploy")))' || true) if [[ -z ${OUT} ]]; then vpc_id=$(create_vpc) echo "===> Created vpc $vpc_id" else - vpc_id=$(echo "${OUT}" | jq -r '.VpcId' || true) + vpc_id=$(echo "${OUT}" | yq -r '.VpcId' || true) echo "===> Re-using vpc $vpc_id" fi - OUT=$(aws ec2 describe-internet-gateways | jq ".InternetGateways[] | select(.Attachments[0].VpcId == \"${vpc_id}\")") + OUT=$(aws ec2 describe-internet-gateways | vpc_id=$vpc_id yq '.InternetGateways[] | select(.Attachments[0].VpcId == env(vpc_id))') if [[ -z ${OUT} ]]; then igw_id=$(create_igw) echo "===> Created igw $igw_id" else - igw_id=$(echo "${OUT}" | jq -r '.InternetGatewayId') + igw_id=$(echo "${OUT}" | yq -r '.InternetGatewayId') echo "===> Re-using igw $igw_id" fi - OUT=$(aws ec2 describe-subnets | jq ".Subnets[] | select(.VpcId == \"${vpc_id}\")" || true) + OUT=$(aws ec2 describe-subnets | vpc_id=$vpc_id yq '.Subnets[] | select(.VpcId == env(vpc_id))' || true) if [[ -z ${OUT} ]]; then subnet_id=$(create_subnet) echo "===> Created subnet $subnet_id" else - subnet_id=$(echo "${OUT}" | jq -r '.SubnetId') + subnet_id=$(echo "${OUT}" | yq -r '.SubnetId') echo "===> Re-using subnet $subnet_id" fi - OUT=$(aws ec2 describe-security-groups | jq ".SecurityGroups[] | select(.VpcId==\"${vpc_id}\" and .GroupName==\"ci-manual-deploy\")" || true) + OUT=$(aws ec2 describe-security-groups | vpc_id=$vpc_id yq '.SecurityGroups[] | select(.VpcId==env(vpc_id) and .GroupName=="ci-manual-deploy")' || true) if [[ -z ${OUT} ]]; then sg_id=$(create_secgroup) echo "===> Created secgroup $sg_id" else - sg_id=$(echo "${OUT}" | jq -r '.GroupId') + sg_id=$(echo "${OUT}" | yq -r 
'.GroupId') echo "===> Re-using secgroup $sg_id" fi
tests/suites/manual/deploy_manual_lxd.sh+1 −1 modified@@ -99,7 +99,7 @@ run_deploy_manual_lxd() { attempt=0 while [[ ${attempt} -lt 30 ]]; do address=$(lxc list --format json | - jq --raw-output ".[] | select(.name == \"${container_name}\") | .state.network.eth0.addresses | map(select( .family == \"inet\")) | .[0].address") + container_name=$container_name yq -r '.[] | select(.name == env(container_name)) | .state.network.eth0.addresses | map(select( .family == "inet")) | .[0].address') if echo "${address}" | grep -q '^[0-9]\+\.[0-9]\+\.[0-9]\+\.[0-9]\+$'; then echo "Using container address ${address}"
tests/suites/manual/deploy_manual.sh+1 −1 modified@@ -46,7 +46,7 @@ manual_deploy() { juju enable-ha --to "1,2" 2>&1 | tee "${TEST_DIR}/enable-ha.log" wait_for "controller" "$(active_condition "controller" 0)" - machine_base=$(juju machines --format=json | jq -r '.machines | .["0"] | (.base.name+"@"+.base.channel)') + machine_base=$(juju machines --format=json | yq -r '.machines | .["0"] | .base | "\(.name)@\(.channel)"') if [[ -z ${machine_base} ]]; then echo "machine 0 has invalid base"
tests/suites/manual/spaces.sh+6 −6 modified@@ -51,17 +51,17 @@ run_spaces_manual_aws() { image_id="${OUT}" echo "===> Ensure at least 3 default subnets exists" - check_ge "$(aws ec2 describe-availability-zones | jq -r ".AvailabilityZones | length")" 3 + check_ge "$(aws ec2 describe-availability-zones | yq -r ".AvailabilityZones | length")" 3 aws ec2 create-default-subnet --availability-zone "${BOOTSTRAP_REGION}a" 2>/dev/null || true aws ec2 create-default-subnet --availability-zone "${BOOTSTRAP_REGION}b" 2>/dev/null || true aws ec2 create-default-subnet --availability-zone "${BOOTSTRAP_REGION}c" 2>/dev/null || true - sub1=$(aws ec2 describe-subnets | jq -r '.Subnets[] | select(.DefaultForAz==true and .CidrBlock=="172.31.0.0/20") | .SubnetId') - sub2=$(aws ec2 describe-subnets | jq -r '.Subnets[] | select(.DefaultForAz==true and .CidrBlock=="172.31.16.0/20") | .SubnetId') - sub3=$(aws ec2 describe-subnets | jq -r '.Subnets[] | select(.DefaultForAz==true and .CidrBlock=="172.31.32.0/20") | .SubnetId') + sub1=$(aws ec2 describe-subnets | yq -r '.Subnets[] | select(.DefaultForAz==true and .CidrBlock=="172.31.0.0/20") | .SubnetId') + sub2=$(aws ec2 describe-subnets | yq -r '.Subnets[] | select(.DefaultForAz==true and .CidrBlock=="172.31.16.0/20") | .SubnetId') + sub3=$(aws ec2 describe-subnets | yq -r '.Subnets[] | select(.DefaultForAz==true and .CidrBlock=="172.31.32.0/20") | .SubnetId') # Ensure we have a security group allowing SSH and controller access. 
- OUT=$(aws ec2 describe-security-groups | jq '.SecurityGroups[] | select(.GroupName=="ci-spaces-manual-ssh")' || true) + OUT=$(aws ec2 describe-security-groups | yq '.SecurityGroups[] | select(.GroupName=="ci-spaces-manual-ssh")' || true) if [[ -z ${OUT} ]]; then sg_id=$(aws ec2 create-security-group --group-name "ci-spaces-manual-ssh" --description "SSH access for manual spaces test" --query 'GroupId' --output text) aws ec2 authorize-security-group-ingress --group-id "${sg_id}" --protocol tcp --port 22 --cidr 0.0.0.0/0 @@ -76,7 +76,7 @@ run_spaces_manual_aws() { aws ec2 authorize-security-group-ingress --group-id "${sg_id}" --protocol tcp --port 0-65535 --source-group "${sg_id}" aws ec2 authorize-security-group-ingress --group-id "${sg_id}" --protocol udp --port 0-65535 --source-group "${sg_id}" else - sg_id=$(echo "${OUT}" | jq -r '.GroupId') + sg_id=$(echo "${OUT}" | yq -r '.GroupId') fi # Create a key-pair so that we can provision machines via SSH.
tests/suites/model/destroy.sh+4 −4 modified@@ -12,23 +12,23 @@ run_model_destroy() { ensure "model-destroy" "${file}" echo "Ensure current model is 'model-destroy'" - juju models --format json | jq -r '."current-model"' | check model-destroy + juju models --format json | yq -r '."current-model"' | check model-destroy echo "Add new model 'model-new'" juju add-model model-new echo "Ensure current model is 'model-new'" - juju models --format json | jq -r '."current-model"' | check model-new + juju models --format json | yq -r '."current-model"' | check model-new destroy_model "model-new" - is_destroyed=$(juju models --format json | jq -r '.models[] | select(."short-name" == "model-new")') + is_destroyed=$(juju models --format json | yq -r '.models[] | select(."short-name" == "model-new")') if [[ -z ${is_destroyed} ]]; then is_destroyed=true; fi check_contains "${is_destroyed}" true juju switch model-destroy echo "Ensure current model is 'model-destroy'" - juju models --format json | jq -r '."current-model"' | check model-destroy + juju models --format json | yq -r '."current-model"' | check model-destroy destroy_model "model-destroy" }
tests/suites/model/multi.sh+1 −1 modified@@ -48,7 +48,7 @@ check_services() { juju config dummy-source token="${token}" attempt=0 - until [[ $(juju status --format json | jq -er ".applications | .[\"dummy-source\"] | .units | .[\"dummy-source/0\"] | .[\"workload-status\"] | select(.[\"message\"] == \"Token is ${token}\") | .message") ]]; do + until [[ $(juju status --format json | yq -er ".applications | .[\"dummy-source\"] | .units | .[\"dummy-source/0\"] | .[\"workload-status\"] | select(.[\"message\"] == \"Token is ${token}\") | .message") ]]; do echo "[+] (attempt ${attempt}) polling status" sleep "${SHORT_TIMEOUT}" attempt=$((attempt + 1))
tests/suites/network/network_health.sh+4 −4 modified@@ -35,7 +35,7 @@ run_network_health() { check_default_routes() { echo "[+] checking default routes" - for machine in $(juju machines --format=json | jq -r ".machines | keys | .[]"); do + for machine in $(juju machines --format=json | yq -r "select(.machines) | .machines | keys | .[]"); do default=$(juju exec --machine "$machine" -- ip route show | grep default) if [ -z "$default" ]; then echo "No default route detected for machine ${machine}" @@ -49,7 +49,7 @@ check_accessibility() { for net_health_unit in "network-health-focal/0" "network-health-jammy/0"; do - ip="$(juju show-unit $net_health_unit --format json | jq -r ".[\"$net_health_unit\"] | .[\"public-address\"]")" + ip="$(juju show-unit $net_health_unit --format json | yq -r ".[\"$net_health_unit\"] | .[\"public-address\"]")" curl_cmd="curl -s http://${ip}:8039" @@ -76,8 +76,8 @@ run_ip_address_change() { wait_for "juju-qa-test" "$(active_condition "juju-qa-test" 0)" - instance_0="$(juju show-machine 0 --format json | jq '.machines["0"] | .["instance-id"]' -r)" - instance_1="$(juju show-machine 1 --format json | jq '.machines["1"] | .["instance-id"]' -r)" + instance_0="$(juju show-machine 0 --format json | yq '.machines["0"] | .["instance-id"]' -r)" + instance_1="$(juju show-machine 1 --format json | yq '.machines["1"] | .["instance-id"]' -r)" old_ip_instance_0="$(lxc exec "${instance_0}" -- hostname -i)" old_ip_instance_1="$(lxc exec "${instance_1}" -- hostname -i)"
tests/suites/refresh/refresh.sh+1 −1 modified@@ -100,7 +100,7 @@ run_refresh_channel_no_new_revision() { juju deploy juju-qa-fixed-rev wait_for "juju-qa-fixed-rev" "$(idle_condition "juju-qa-fixed-rev")" # get revision to ensure it doesn't change - cs_revision=$(juju status --format json | jq -S '.applications | .["juju-qa-fixed-rev"] | .["charm-rev"]') + cs_revision=$(juju status --format json | yq '.applications | .["juju-qa-fixed-rev"] | .["charm-rev"]') juju refresh juju-qa-fixed-rev --channel edge
tests/suites/relations/relation_data_exchange.sh+1 −1 modified@@ -20,7 +20,7 @@ run_relation_data_exchange() { wait_for "dummy-source" "$(idle_condition "dummy-source" 1 0)" echo "Get the leader unit name" - non_leader_dummy_sink_unit=$(juju status dummy-sink --format json | jq -r '.applications."dummy-sink".units | to_entries[] | select(.value.leader!=true) | .key') + non_leader_dummy_sink_unit=$(juju status dummy-sink --format json | yq -r '.applications."dummy-sink".units | to_entries[] | select(.value.leader!=true) | .key') dummy_sink_relation_id=$(juju exec --unit "dummy-sink/leader" 'relation-ids source') dummy_source_relation_id=$(juju exec --unit "dummy-source/leader" 'relation-ids sink') # stop there
tests/suites/relations/relation_model_get.sh+1 −1 modified@@ -23,7 +23,7 @@ run_relation_model_get() { sink_rel_id=$(juju exec --unit dummy-source/0 "relation-ids sink" | cut -d':' -f2) echo "Check relation-model-get hook" - model_uuid=$(juju show-model --format json | jq -r '.["test-relation-model-get"]["model-uuid"]') + model_uuid=$(juju show-model --format json | yq -r '.["test-relation-model-get"]["model-uuid"]') juju exec --unit dummy-source/0 "relation-model-get -r ${sink_rel_id}" | check "${model_uuid}" echo "Setting up cross model relation"
tests/suites/resources/refresh.sh+6 −6 modified@@ -37,14 +37,14 @@ run_resource_refresh_no_new_charm_rev() { # wait for update-status wait_for "resource line one: testing four." "$(workload_status juju-qa-test 0).message" - juju resources juju-qa-test --format json | jq -S '.resources[0] | .[ "revision"] == "4"' + juju resources juju-qa-test --format json | yq '.resources[0] | .[ "revision"] == "4"' juju config juju-qa-test foo-file=false juju refresh juju-qa-test juju config juju-qa-test foo-file=true wait_for "resource line one: testing two." "$(workload_status juju-qa-test 0).message" - juju resources juju-qa-test --format json | jq -S '.resources[0] | .[ "revision"] == "2"' + juju resources juju-qa-test --format json | yq '.resources[0] | .[ "revision"] == "2"' destroy_model "test-${name}" } @@ -65,14 +65,14 @@ run_resource_refresh_no_new_charm_rev_supply_res_rev() { # wait for update-status wait_for "resource line one: testing two." "$(workload_status juju-qa-test 0).message" - juju resources juju-qa-test --format json | jq -S '.resources[0] | .[ "revision"] == "2"' + juju resources juju-qa-test --format json | yq '.resources[0] | .[ "revision"] == "2"' juju config juju-qa-test foo-file=false juju refresh juju-qa-test --resource foo-file=3 juju config juju-qa-test foo-file=true wait_for "resource line one: testing one plus one." "$(workload_status juju-qa-test 0).message" - juju resources juju-qa-test --format json | jq -S '.resources[0] | .[ "revision"] == "3"' + juju resources juju-qa-test --format json | yq '.resources[0] | .[ "revision"] == "3"' destroy_model "test-${name}" } @@ -99,8 +99,8 @@ run_resource_no_upgrade_after_upload() { wait_for "juju-qa-test" "$(idle_condition "juju-qa-test")" # check resource revision hasn't changed. 
- juju resources juju-qa-test --format json | jq -S '.resources[0] | .[ "revision"] == "0"' - juju resources juju-qa-test --format json | jq -S '.resources[0] | .[ "origin"] == "upload"' + juju resources juju-qa-test --format json | yq '.resources[0] | .[ "revision"] == "0"' + juju resources juju-qa-test --format json | yq '.resources[0] | .[ "origin"] == "upload"' destroy_model "test-${name}" }
tests/suites/secrets_iaas/cmr.sh+1 −1 modified@@ -24,7 +24,7 @@ run_secrets_cmr() { echo "Create and share a secret on the offer side" secret_uri=$(juju exec --unit dummy-source/0 -- secret-add foo=bar) - relation_id=$(juju --show-log show-unit -m model-secrets-offer dummy-source/0 --format json | jq '."dummy-source/0"."relation-info"[0]."relation-id"') + relation_id=$(juju --show-log show-unit -m model-secrets-offer dummy-source/0 --format json | yq '."dummy-source/0"."relation-info"[0]."relation-id"') juju exec --unit dummy-source/0 -- secret-grant "$secret_uri" -r "$relation_id" echo "Checking: the secret can be read by the consumer"
tests/suites/secrets_iaas/juju.sh+13 −13 modified@@ -32,18 +32,18 @@ check_secrets() { check_contains "$(juju exec --unit dummy-source/0 -- secret-get "$secret_owned_by_dummy_source")" 'owned-by: dummy-source-app' echo "Checking: secret-get by URI - metadata" - check_contains "$(juju exec --unit dummy-source/0 -- secret-info-get "$secret_owned_by_dummy_source_0" --format json | jq ".${secret_owned_by_dummy_source_0_id}.owner")" 'unit' - check_contains "$(juju exec --unit dummy-source/0 -- secret-info-get "$secret_owned_by_dummy_source" --format json | jq ".${secret_owned_by_dummy_source_id}.owner")" 'application' - check_contains "$(juju exec --unit dummy-source/0 -- secret-info-get "$secret_owned_by_dummy_source" --format json | jq ".${secret_owned_by_dummy_source_id}.revision")" '1' + check_contains "$(juju exec --unit dummy-source/0 -- secret-info-get "$secret_owned_by_dummy_source_0" --format json | yq ".${secret_owned_by_dummy_source_0_id}.owner")" 'unit' + check_contains "$(juju exec --unit dummy-source/0 -- secret-info-get "$secret_owned_by_dummy_source" --format json | yq ".${secret_owned_by_dummy_source_id}.owner")" 'application' + check_contains "$(juju exec --unit dummy-source/0 -- secret-info-get "$secret_owned_by_dummy_source" --format json | yq ".${secret_owned_by_dummy_source_id}.revision")" '1' echo "Checking: secret-get by label or consumer label - content" check_contains "$(juju exec --unit dummy-source/0 -- secret-get --label=dummy-source_0)" 'owned-by: dummy-source/0' check_contains "$(juju exec --unit dummy-source/0 -- secret-get --label=dummy-source-app)" 'owned-by: dummy-source-app' echo "Checking: secret-get by label - metadata" - check_contains "$(juju exec --unit dummy-source/0 -- secret-info-get --label=dummy-source_0 --format json | jq ".${secret_owned_by_dummy_source_0_id}.label")" 'dummy-source_0' + check_contains "$(juju exec --unit dummy-source/0 -- secret-info-get --label=dummy-source_0 --format json | yq 
".${secret_owned_by_dummy_source_0_id}.label")" 'dummy-source_0' - relation_id=$(juju --show-log show-unit dummy-source/0 --format json | jq '."dummy-source/0"."relation-info"[0]."relation-id"') + relation_id=$(juju --show-log show-unit dummy-source/0 --format json | yq '."dummy-source/0"."relation-info"[0]."relation-id"') juju exec --unit dummy-source/0 -- secret-grant "$secret_owned_by_dummy_source_0" -r "$relation_id" juju exec --unit dummy-source/0 -- secret-grant "$secret_owned_by_dummy_source" -r "$relation_id" @@ -67,7 +67,7 @@ check_secrets() { echo "Set different content for $secret_owned_by_dummy_source." juju exec --unit dummy-source/0 -- secret-set "$secret_owned_by_dummy_source_id" foo=bar - check_contains "$(juju exec --unit dummy-source/0 -- secret-info-get "$secret_owned_by_dummy_source" --format json | jq ".${secret_owned_by_dummy_source_id}.revision")" '2' + check_contains "$(juju exec --unit dummy-source/0 -- secret-info-get "$secret_owned_by_dummy_source" --format json | yq ".${secret_owned_by_dummy_source_id}.revision")" '2' check_contains "$(juju exec --unit dummy-source/0 -- secret-get --refresh "$secret_owned_by_dummy_source")" 'foo: bar' echo "Checking: secret-revoke by relation ID" @@ -80,22 +80,22 @@ check_secrets() { echo "Checking secret rotate" juju exec --unit dummy-source/0 -- secret-set "$secret_owned_by_dummy_source_0" --rotate daily - check_contains "$(juju show-secret "$secret_owned_by_dummy_source_0" --format json | jq ".[].rotation")" "daily" - original_rotate_time="$(juju show-secret "$secret_owned_by_dummy_source_0" --format json | jq ".[].rotates")" + check_contains "$(juju show-secret "$secret_owned_by_dummy_source_0" --format json | yq ".[].rotation")" "daily" + original_rotate_time="$(juju show-secret "$secret_owned_by_dummy_source_0" --format json | yq ".[].rotates")" # We set a new rotate time into the future and we need to retain # the current next rotate time. 
juju exec --unit dummy-source/0 -- secret-set "$secret_owned_by_dummy_source_0" --rotate monthly - check_contains "$(juju show-secret "$secret_owned_by_dummy_source_0" --format json | jq ".[].rotation")" "monthly" - next_rotate_time="$(juju show-secret "$secret_owned_by_dummy_source_0" --format json | jq ".[].rotates")" + check_contains "$(juju show-secret "$secret_owned_by_dummy_source_0" --format json | yq ".[].rotation")" "monthly" + next_rotate_time="$(juju show-secret "$secret_owned_by_dummy_source_0" --format json | yq ".[].rotates")" if [[ $original_rotate_time != "$next_rotate_time" ]]; then echo "secret next rotate time was updated in error" exit 1 fi # We set a new rotate time sooner than the current rotate time so we need to # update the next rotate time. juju exec --unit dummy-source/0 -- secret-set "$secret_owned_by_dummy_source_0" --rotate hourly - check_contains "$(juju show-secret "$secret_owned_by_dummy_source_0" --format json | jq ".[].rotation")" "hourly" - next_rotate_time="$(juju show-secret "$secret_owned_by_dummy_source_0" --format json | jq ".[].rotates")" + check_contains "$(juju show-secret "$secret_owned_by_dummy_source_0" --format json | yq ".[].rotation")" "hourly" + next_rotate_time="$(juju show-secret "$secret_owned_by_dummy_source_0" --format json | yq ".[].rotates")" if [[ $original_rotate_time == "$next_rotate_time" ]]; then echo "secret next rotate time was not updated" exit 1 @@ -243,7 +243,7 @@ run_obsolete_revisions() { # Check that the secret-remove hook is run for the 10 obsolete revisions. 
attempt=0 while true; do - num_hooks=$(juju show-status-log juju-qa-test/0 --format json -n 100 | jq -r "[.[] | select(.message != null) | select(.message | contains(\"running secret-remove hook\") and contains(\"${secret_id}\"))] | length") + num_hooks=$(juju show-status-log juju-qa-test/0 --format json -n 100 | yq -r "[.[] | select(.message != null) | select(.message | contains(\"running secret-remove hook\") and .message | contains(\"${secret_id}\"))] | length") if [ "$num_hooks" -eq 10 ]; then break fi
tests/suites/secrets_iaas/k8s.sh+24 −18 modified@@ -34,28 +34,34 @@ run_secrets_k8s() { } prepare_k8s() { - if ! which "microk8s" >/dev/null 2>&1; then - sudo snap install microk8s --channel 1.32-strict - sudo microk8s.enable hostpath-storage - sudo microk8s.enable rbac - sudo microk8s status --wait-ready + if ! kubectl wait --for=jsonpath='{.status.phase}'=Active ns/kube-system; then + if ! which "microk8s" >/dev/null 2>&1; then + sudo snap install microk8s --channel 1.32-strict + sudo microk8s.enable hostpath-storage + sudo microk8s.enable rbac + sudo microk8s status --wait-ready + fi + fi + if ! kubectl wait --for=jsonpath='{.status.phase}'=Active ns/kube-system; then + echo "No suitable kubernetes cluster for test" + exit 1 fi - endpoint=$(microk8s.config | yq ".clusters[0] .cluster .server") - cacert=$(microk8s.config | yq ".clusters[0] .cluster .certificate-authority-data" | base64 -d | sed 's/^/ /') + endpoint=$(kubectl config view --raw --flatten | yq ".clusters[0] .cluster .server") + cacert=$(kubectl config view --raw --flatten | yq ".clusters[0] .cluster .certificate-authority-data" | base64 -d | sed 's/^/ /') namespace=juju-secrets serviceaccount=default - microk8s.kubectl create ns ${namespace} --dry-run=client -o yaml | microk8s.kubectl apply -f - - microk8s.kubectl create --save-config -n ${namespace} serviceaccount ${serviceaccount} --dry-run=client -o yaml | microk8s.kubectl apply -f - - microk8s.kubectl create --save-config clusterrole juju-secrets --verb='*' \ - --resource=namespaces,secrets,serviceaccounts,serviceaccounts/token,clusterroles,clusterrolebindings --dry-run=client -o yaml | microk8s.kubectl apply -f - - microk8s.kubectl create --save-config clusterrolebinding juju-secrets --clusterrole=juju-secrets \ - --serviceaccount=${namespace}:${serviceaccount} --dry-run=client -o yaml | microk8s.kubectl apply -f - - microk8s.kubectl create --save-config role juju-secrets --namespace=${namespace} --verb='*' \ - 
--resource=secrets,serviceaccounts,serviceaccounts/token,roles,rolebindings --dry-run=client -o yaml | microk8s.kubectl apply -f - - microk8s.kubectl create --save-config rolebinding juju-secrets --namespace=${namespace} --role=juju-secrets \ - --serviceaccount=${namespace}:${serviceaccount} --dry-run=client -o yaml | microk8s.kubectl apply -f - - token=$(microk8s.kubectl create token ${serviceaccount} --namespace ${namespace}) + kubectl create ns ${namespace} --dry-run=client -o yaml | kubectl apply -f - + kubectl create --save-config -n ${namespace} serviceaccount ${serviceaccount} --dry-run=client -o yaml | kubectl apply -f - + kubectl create --save-config clusterrole juju-secrets --verb='*' \ + --resource=namespaces,secrets,serviceaccounts,serviceaccounts/token,clusterroles,clusterrolebindings --dry-run=client -o yaml | kubectl apply -f - + kubectl create --save-config clusterrolebinding juju-secrets --clusterrole=juju-secrets \ + --serviceaccount=${namespace}:${serviceaccount} --dry-run=client -o yaml | kubectl apply -f - + kubectl create --save-config role juju-secrets --namespace=${namespace} --verb='*' \ + --resource=secrets,serviceaccounts,serviceaccounts/token,roles,rolebindings --dry-run=client -o yaml | kubectl apply -f - + kubectl create --save-config rolebinding juju-secrets --namespace=${namespace} --role=juju-secrets \ + --serviceaccount=${namespace}:${serviceaccount} --dry-run=client -o yaml | kubectl apply -f - + token=$(kubectl create token ${serviceaccount} --namespace ${namespace}) cat >"${TEST_DIR}/k8sconfig.yaml" <<EOF endpoint: ${endpoint}
tests/suites/secrets_iaas/vault.sh+14 −14 modified@@ -47,7 +47,7 @@ run_secret_drain() { model_name='model-secrets-drain' add_model "$model_name" - vault_backend_name='myvault' + vault_backend_name='model-secrets-drain-vault-backend' juju add-secret-backend "$vault_backend_name" vault endpoint="$VAULT_ADDR" token="$VAULT_TOKEN" ca-cert="$(cat "$VAULT_CAPATH")" juju --show-log deploy jameinel-ubuntu-lite @@ -62,10 +62,10 @@ run_secret_drain() { juju model-config secret-backend="$vault_backend_name" - model_uuid=$(juju show-model $model_name --format json | jq -r ".[\"${model_name}\"][\"model-uuid\"]") + model_uuid=$(juju show-model $model_name --format json | yq -r ".[\"${model_name}\"][\"model-uuid\"]") attempt=0 - until [[ $(vault kv list -format json "${model_name}-${model_uuid: -6}" | jq length) -eq 2 ]]; do + until [[ $(vault kv list -format json "${model_name}-${model_uuid: -6}" | yq length) -eq 2 ]]; do if [[ ${attempt} -ge 30 ]]; then echo "Failed: expected all secrets get drained to vault." exit 1 @@ -77,7 +77,7 @@ run_secret_drain() { juju model-config secret-backend=auto attempt=0 - until [[ $(vault kv list -format json "${model_name}-${model_uuid: -6}" | jq length) -eq 0 ]]; do + until [[ $(vault kv list -format json "${model_name}-${model_uuid: -6}" | yq length) -eq 0 ]]; do if [[ ${attempt} -ge 30 ]]; then echo "Failed: expected all secrets get drained back to juju controller." 
exit 1 @@ -98,13 +98,13 @@ run_user_secret_drain() { prepare_vault - vault_backend_name='myvault' + vault_backend_name='user-secrets-drain-vault-backend' juju add-secret-backend "$vault_backend_name" vault endpoint="$VAULT_ADDR" token="$VAULT_TOKEN" ca-cert="$(cat "$VAULT_CAPATH")" model_name='model-user-secrets-drain' add_model "$model_name" juju --show-log model-config secret-backend="$vault_backend_name" -m "$model_name" - model_uuid=$(juju show-model $model_name --format json | jq -r ".[\"${model_name}\"][\"model-uuid\"]") + model_uuid=$(juju show-model $model_name --format json | yq -r ".[\"${model_name}\"][\"model-uuid\"]") juju --show-log deploy ubuntu-lite wait_for "active" '.applications["ubuntu-lite"] | ."application-status".current' @@ -114,7 +114,7 @@ run_user_secret_drain() { secret_short_uri=${secret_uri##*:} juju show-secret --reveal "$secret_uri" - check_contains "$(vault kv list -format json "${model_name}-${model_uuid: -6}" | jq length)" 1 + check_contains "$(vault kv list -format json "${model_name}-${model_uuid: -6}" | yq length)" 1 juju --show-log grant-secret "$secret_uri" ubuntu-lite check_contains "$(juju exec --unit ubuntu-lite/0 -- secret-get $secret_short_uri)" "owned-by: $model_name-1" @@ -127,7 +127,7 @@ run_user_secret_drain() { # ensure the user secrets are all in internal backend, no secret in vault. attempt=0 - until [[ $(vault kv list -format json "${model_name}-${model_uuid: -6}" | jq length) -eq 0 ]]; do + until [[ $(vault kv list -format json "${model_name}-${model_uuid: -6}" | yq length) -eq 0 ]]; do if [[ ${attempt} -ge 30 ]]; then echo "Failed: expected all secrets get drained back to juju controller." exit 1 @@ -141,7 +141,7 @@ run_user_secret_drain() { # ensure the user secrets are in the vault backend. 
attempt=0 - until [[ $(vault kv list -format json "${model_name}-${model_uuid: -6}" | jq length) -eq 2 ]]; do + until [[ $(vault kv list -format json "${model_name}-${model_uuid: -6}" | yq length) -eq 2 ]]; do if [[ ${attempt} -ge 30 ]]; then echo "Failed: expected all secrets get drained to vault." exit 1 @@ -173,7 +173,7 @@ prepare_vault() { juju --show-log expose vault wait_for "blocked" "$(workload_status vault 0).current" - vault_public_addr=$(juju status --format json | jq -r '.applications.vault.units."vault/0"."public-address"') + vault_public_addr=$(juju status --format json | yq -r '.applications.vault.units."vault/0"."public-address"') export VAULT_ADDR="https://${vault_public_addr}:8200" mkdir -p ~/snap/vault/common/ TMP=$(mktemp -d ~/snap/vault/common/cacert-XXXXX) @@ -202,11 +202,11 @@ prepare_vault() { export VAULT_CAPATH="$TMP/vault.pem" vault status || true vault_init_output=$(vault operator init -key-shares=5 -key-threshold=3 -format json) - vault_token=$(echo "$vault_init_output" | jq -r .root_token) + vault_token=$(echo "$vault_init_output" | yq -r .root_token) export VAULT_TOKEN="$vault_token" - unseal_key0=$(echo "$vault_init_output" | jq -r '.unseal_keys_b64[0]') - unseal_key1=$(echo "$vault_init_output" | jq -r '.unseal_keys_b64[1]') - unseal_key2=$(echo "$vault_init_output" | jq -r '.unseal_keys_b64[2]') + unseal_key0=$(echo "$vault_init_output" | yq -r '.unseal_keys_b64[0]') + unseal_key1=$(echo "$vault_init_output" | yq -r '.unseal_keys_b64[1]') + unseal_key2=$(echo "$vault_init_output" | yq -r '.unseal_keys_b64[2]') vault operator unseal "$unseal_key0" vault operator unseal "$unseal_key1"
tests/suites/secrets_k8s/cmr.sh+71 −0 added@@ -0,0 +1,71 @@ +run_secrets_cmr() { + echo + + echo "First set up a cross model relation" + add_model "model-secrets-offer" + juju --show-log deploy prometheus-k8s source --trust + juju --show-log offer source:self-metrics-endpoint + wait_for "source" "$(idle_condition "source")" + + add_model "model-secrets-consume" + juju --show-log deploy prometheus-k8s sink --trust + juju --show-log integrate sink:metrics-endpoint model-secrets-offer.source + wait_for "sink" "$(idle_condition "sink")" + + juju switch "model-secrets-offer" + wait_for "1" '.offers["source"]["active-connected-count"]' + + echo "Create and share a secret on the offer side" + secret_uri=$(juju exec --unit source/0 -- secret-add foo=bar) + relation_id=$(juju --show-log show-unit -m model-secrets-offer source/0 --format json | yq '."source/0"."relation-info" | .[] | select(."endpoint"=="self-metrics-endpoint") | ."relation-id"') + juju exec --unit source/0 -- secret-grant "$secret_uri" -r "$relation_id" + + echo "Checking: the secret can be read by the consumer" + juju switch "model-secrets-consume" + echo "Checking: secret-get by URI - consume content" + check_contains "$(juju exec --unit sink/0 -- secret-get --label mylabel "$secret_uri")" 'foo: bar' + echo "Checking: secret-get by URI - consume content" + check_contains "$(juju exec --unit sink/0 -- secret-get --label mylabel)" 'foo: bar' + + echo "Checking: add a new revision and check consumer can see it" + juju switch "model-secrets-offer" + juju exec --unit source/0 -- secret-set "$secret_uri" foo=bar2 + juju switch "model-secrets-consume" + check_contains "$(juju exec --unit sink/0 -- secret-get --label mylabel)" 'foo: bar' + check_contains "$(juju exec --unit sink/0 -- secret-get --label mylabel --peek)" 'foo: bar2' + check_contains "$(juju exec --unit sink/0 -- secret-get --label mylabel)" 'foo: bar' + check_contains "$(juju exec --unit sink/0 -- secret-get --label mylabel --refresh)" 'foo: bar2' 
+ check_contains "$(juju exec --unit sink/0 -- secret-get --label mylabel)" 'foo: bar2' + + echo "Checking: suspend relation and check access is lost" + juju switch "model-secrets-offer" + juju suspend-relation "$relation_id" + juju switch "model-secrets-consume" + check_contains "$(juju exec --unit sink/0 -- secret-get "$secret_uri" 2>&1)" 'permission denied' + echo "Checking: resume relation and access is restored" + juju switch "model-secrets-offer" + juju resume-relation "$relation_id" + juju switch "model-secrets-consume" + check_contains "$(juju exec --unit sink/0 -- secret-get --label mylabel)" 'foo: bar2' + + echo "Checking: secret-revoke by relation ID" + juju switch "model-secrets-offer" + juju exec --unit source/0 -- secret-revoke "$secret_uri" --relation "$relation_id" + juju switch "model-secrets-consume" + check_contains "$(juju exec --unit sink/0 -- secret-get "$secret_uri" 2>&1)" 'permission denied' +} + +test_secrets_cmr() { + if [ "$(skip 'test_secrets_cmr')" ]; then + echo "==> TEST SKIPPED: test_secrets_cmr" + return + fi + + ( + set_verbosity + + cd .. || exit + + run "run_secrets_cmr" + ) +}
tests/suites/secrets_k8s/k8s.sh+47 −64 modified@@ -7,31 +7,32 @@ run_secrets() { # k8s secrets are stored in an external backend. # These checks ensure the secrets are deleted when the units and app are deleted. echo "deploy an app and create an app owned secret and a unit owned secret" - juju --show-log deploy alertmanager-k8s + juju --show-log deploy alertmanager-k8s --trust wait_for "alertmanager-k8s" "$(active_idle_condition "alertmanager-k8s" 0 0)" wait_for "active" '.applications["alertmanager-k8s"] | ."application-status".current' full_uri1=$(juju exec --unit alertmanager-k8s/0 -- secret-add foo=bar) short_uri1=${full_uri1##*/} full_uri2=$(juju exec --unit alertmanager-k8s/0 -- secret-add --owner unit foo=bar2) short_uri2=${full_uri2##*/} - check_contains "$(microk8s kubectl -n "$model_name" get secrets -o json | jq -r '.items[].metadata.name | select(. == "'"${short_uri1}"'-1")')" "${short_uri1}-1" - check_contains "$(microk8s kubectl -n "$model_name" get secrets -o json | jq -r '.items[].metadata.name | select(. == "'"${short_uri2}"'-1")')" "${short_uri2}-1" + check_contains "$(kubectl -n "$model_name" get secrets -o json | yq -r '.items[].metadata.name | select(. == "'"${short_uri1}"'-1")')" "${short_uri1}-1" + check_contains "$(kubectl -n "$model_name" get secrets -o json | yq -r '.items[].metadata.name | select(. == "'"${short_uri2}"'-1")')" "${short_uri2}-1" echo "add another unit and create a unit owned secret" juju --show-log scale-application alertmanager-k8s 2 wait_for "alertmanager-k8s" "$(active_idle_condition "alertmanager-k8s" 0 1)" full_uri3=$(juju exec --unit alertmanager-k8s/1 -- secret-add --owner unit foo=bar3) short_uri3=${full_uri3##*/} - check_contains "$(microk8s kubectl -n "$model_name" get secrets -o json | jq -r '.items[].metadata.name | select(. == "'"${short_uri3}"'-1")')" "${short_uri3}-1" + check_contains "$(kubectl -n "$model_name" get secrets -o json | yq -r '.items[].metadata.name | select(. 
== "'"${short_uri3}"'-1")')" "${short_uri3}-1" echo "remove a unit and check only its secret is removed" juju --show-log scale-application alertmanager-k8s 1 - check_contains "$(microk8s kubectl -n "$model_name" get secrets -o json | jq -r '.items[].metadata.name | select(. == "'"${short_uri1}"'-1")')" "${short_uri1}-1" - check_contains "$(microk8s kubectl -n "$model_name" get secrets -o json | jq -r '.items[].metadata.name | select(. == "'"${short_uri2}"'-1")')" "${short_uri2}-1" + wait_for_unit_count "alertmanager-k8s" 1 + check_contains "$(kubectl -n "$model_name" get secrets -o json | yq -r '.items[].metadata.name | select(. == "'"${short_uri1}"'-1")')" "${short_uri1}-1" + check_contains "$(kubectl -n "$model_name" get secrets -o json | yq -r '.items[].metadata.name | select(. == "'"${short_uri2}"'-1")')" "${short_uri2}-1" attempt=0 - until [[ -z $(microk8s kubectl -n "$model_name" get secrets -o json | jq -r '.items[].metadata.name | select(. == "'"${short_uri3}"'-1")') ]]; do + until [[ -z $(kubectl -n "$model_name" get secrets -o json | yq -r '.items[].metadata.name | select(. == "'"${short_uri3}"'-1")') ]]; do if [[ ${attempt} -ge 30 ]]; then - echo "Failed: secrets were not deleted on unit removal." + echo "Failed: secrets were not deleted on unit 1 removal." exit 1 fi sleep 2 @@ -40,11 +41,12 @@ run_secrets() { echo "remove the last unit and check only the app owned secret remains" juju --show-log scale-application alertmanager-k8s 0 - check_contains "$(microk8s kubectl -n "$model_name" get secrets -o json | jq -r '.items[].metadata.name | select(. == "'"${short_uri1}"'-1")')" "${short_uri1}-1" + wait_for_unit_count "alertmanager-k8s" 0 + check_contains "$(kubectl -n "$model_name" get secrets -o json | yq -r '.items[].metadata.name | select(. == "'"${short_uri1}"'-1")')" "${short_uri1}-1" attempt=0 - until [[ -z $(microk8s kubectl -n "$model_name" get secrets -o json | jq -r '.items[].metadata.name | select(. 
== "'"${short_uri2}"'-1")') ]]; do + until [[ -z $(kubectl -n "$model_name" get secrets -o json | yq -r '.items[].metadata.name | select(. == "'"${short_uri2}"'-1")') ]]; do if [[ ${attempt} -ge 30 ]]; then - echo "Failed: secrets were not deleted on unit removal." + echo "Failed: secrets were not deleted on unit 0 removal." exit 1 fi sleep 2 @@ -54,22 +56,18 @@ run_secrets() { echo "remove the app and the app owned secret should be deleted too" juju --show-log remove-application alertmanager-k8s attempt=0 - until [[ -z $(microk8s kubectl -n "$model_name" get secrets -o json | jq -r '.items[].metadata.name | select(. == "'"${short_uri1}"'-1")') ]]; do + until [[ -z $(kubectl -n "$model_name" get secrets -o json | yq -r '.items[].metadata.name | select(. == "'"${short_uri1}"'-1")') ]]; do if [[ ${attempt} -ge 30 ]]; then - echo "Failed: secrets were not deleted on app removal." + echo "Failed: application owned secrets were not deleted on app removal." exit 1 fi sleep 2 attempt=$((attempt + 1)) done - juju --show-log deploy alertmanager-k8s hello - # TODO(anvial): remove the revision flag once we update alertmanager-k8s charm - # (https://discourse.charmhub.io/t/old-ingress-relation-removal/12944) - # or we choose an alternative pair of charms to integrate. - juju --show-log deploy nginx-ingress-integrator nginx --channel=latest/stable --revision=83 + juju --show-log deploy alertmanager-k8s hello --trust + juju --show-log deploy nginx-ingress-integrator nginx --trust --config service-hostname=hello.test juju --show-log integrate nginx hello - juju --show-log trust nginx --scope=cluster # create user secrets. 
juju --show-log add-secret mysecret owned-by="$model_name" --info "this is a user secret" @@ -100,18 +98,18 @@ run_secrets() { check_contains "$(juju exec --unit hello/0 -- secret-get $unit_owned_full_uri)" 'owned-by: hello/0' check_contains "$(juju exec --unit hello/0 -- secret-get $app_owned_full_uri)" 'owned-by: hello-app' - echo "Checking: secret-get by URI - metadata" - check_contains "$(juju exec --unit hello/0 -- secret-info-get $unit_owned_full_uri --format json | jq .${unit_owned_short_uri}.owner)" unit - check_contains "$(juju exec --unit hello/0 -- secret-info-get $app_owned_full_uri --format json | jq .${app_owned_short_uri}.owner)" application + echo "Checking: secret-info-get by URI - metadata" + check_contains "$(juju exec --unit hello/0 -- secret-info-get $unit_owned_full_uri --format json | yq ".${unit_owned_short_uri}.owner")" unit + check_contains "$(juju exec --unit hello/0 -- secret-info-get $app_owned_full_uri --format json | yq ".${app_owned_short_uri}.owner")" application echo "Checking: secret-get by label or consumer label - content" check_contains "$(juju exec --unit hello/0 -- secret-get --label=hello_0)" 'owned-by: hello/0' check_contains "$(juju exec --unit hello/0 -- secret-get --label=hello-app)" 'owned-by: hello-app' - echo "Checking: secret-get by label - metadata" - check_contains "$(juju exec --unit hello/0 -- secret-info-get --label=hello_0 --format json | jq ".${unit_owned_short_uri}.label")" hello_0 + echo "Checking: secret-info-get by label - metadata" + check_contains "$(juju exec --unit hello/0 -- secret-info-get --label=hello_0 --format json | yq ".${unit_owned_short_uri}.label")" hello_0 - relation_id=$(juju --show-log show-unit hello/0 --format json | jq '."hello/0"."relation-info"[0]."relation-id"') + relation_id=$(juju --show-log show-unit hello/0 --format json | yq '."hello/0"."relation-info"[0]."relation-id"') juju exec --unit hello/0 -- secret-grant "$unit_owned_full_uri" -r "$relation_id" juju exec --unit hello/0 
-- secret-grant "$app_owned_full_uri" -r "$relation_id" @@ -125,16 +123,8 @@ run_secrets() { check_contains "$(juju exec --unit nginx/0 -- secret-get --label=consumer_label_secret_owned_by_hello_0)" 'owned-by: hello/0' check_contains "$(juju exec --unit nginx/0 -- secret-get --label=consumer_label_secret_owned_by_hello)" 'owned-by: hello-app' - echo "Check owner unit's k8s role rules to ensure we are using the k8s secret provider" - check_contains "$(microk8s kubectl -n "$model_name" get roles/unit-hello-0 -o json | jq ".rules[] | select( has(\"resourceNames\") ) | select( .resourceNames[] | contains(\"${unit_owned_short_uri}-1\") ) | .verbs[0] ")" '*' - check_contains "$(microk8s kubectl -n "$model_name" get roles/unit-hello-0 -o json | jq ".rules[] | select( has(\"resourceNames\") ) | select( .resourceNames[] | contains(\"${app_owned_short_uri}-1\") ) | .verbs[0] ")" '*' - - # Check consumer unit's k8s role rules to ensure we are using the k8s secret provider. - check_contains "$(microk8s kubectl -n "$model_name" get roles/unit-nginx-0 -o json | jq ".rules[] | select( has(\"resourceNames\") ) | select( .resourceNames[] | contains(\"${unit_owned_short_uri}-1\") ) | .verbs[0] ")" 'get' - check_contains "$(microk8s kubectl -n "$model_name" get roles/unit-nginx-0 -o json | jq ".rules[] | select( has(\"resourceNames\") ) | select( .resourceNames[] | contains(\"${app_owned_short_uri}-1\") ) | .verbs[0] ")" 'get' - - check_contains "$(microk8s kubectl -n "$model_name" get "secrets/${unit_owned_short_uri}-1" -o json | jq -r '.data["owned-by"]' | base64 -d)" "hello/0" - check_contains "$(microk8s kubectl -n "$model_name" get "secrets/${app_owned_short_uri}-1" -o json | jq -r '.data["owned-by"]' | base64 -d)" "hello-app" + check_contains "$(kubectl -n "$model_name" get "secrets/${unit_owned_short_uri}-1" -o json | yq -r '.data["owned-by"]' | base64 -d)" "hello/0" + check_contains "$(kubectl -n "$model_name" get "secrets/${app_owned_short_uri}-1" -o json | yq -r 
'.data["owned-by"]' | base64 -d)" "hello-app" echo "Checking: secret-revoke by relation ID" juju exec --unit hello/0 -- secret-revoke "$app_owned_full_uri" --relation "$relation_id" @@ -162,7 +152,7 @@ run_user_secrets() { model_name='model-user-secrets-k8s' juju --show-log add-model "$model_name" --config secret-backend=auto - model_uuid=$(juju show-model $model_name --format json | jq -r ".[\"${model_name}\"][\"model-uuid\"]") + model_uuid=$(juju show-model $model_name --format json | yq -r ".[\"${model_name}\"][\"model-uuid\"]") juju --show-log deploy snappass-test @@ -217,7 +207,7 @@ run_user_secrets() { juju --show-log remove-secret $secret_uri check_contains "$(juju --show-log secrets --format yaml | yq length)" '0' - until [[ -z $(microk8s kubectl -n "$model_name" get secrets -o json | jq -r '.items[].metadata.name | select(. == "'"${secret_short_uri}"'-1")') ]]; do + until [[ -z $(kubectl -n "$model_name" get secrets -o json | yq -r '.items[].metadata.name | select(. == "'"${secret_short_uri}"'-1")') ]]; do if [[ ${attempt} -ge 30 ]]; then echo "Failed: user secret was not deleted." 
exit 1 @@ -232,7 +222,7 @@ run_secret_drain() { juju --show-log add-model "$model_name" prepare_vault - vault_backend_name='myvault' + vault_backend_name='secret-drain-vault-backend' juju add-secret-backend "$vault_backend_name" vault endpoint="$VAULT_ADDR" token="$VAULT_TOKEN" juju --show-log deploy snappass-test hello @@ -247,13 +237,13 @@ run_secret_drain() { juju show-secret --reveal "$unit_owned_full_uri" juju show-secret --reveal "$app_owned_full_uri" - check_contains "$(microk8s kubectl -n "$model_name" get secrets -l 'app.juju.is/created-by=hello')" "${unit_owned_short_uri}-1" - check_contains "$(microk8s kubectl -n "$model_name" get secrets -l 'app.juju.is/created-by=hello')" "${app_owned_short_uri}-1" + check_contains "$(kubectl -n "$model_name" get secrets -l 'app.juju.is/created-by=hello')" "${unit_owned_short_uri}-1" + check_contains "$(kubectl -n "$model_name" get secrets -l 'app.juju.is/created-by=hello')" "${app_owned_short_uri}-1" juju model-config secret-backend="$vault_backend_name" attempt=0 - until [[ $(microk8s kubectl -n "$model_name" get secrets -l 'app.juju.is/created-by=hello' -o json | jq '.items | length') -eq 0 ]]; do + until [[ $(kubectl -n "$model_name" get secrets -l 'app.juju.is/created-by=hello' -o json | yq '.items | length') -eq 0 ]]; do if [[ ${attempt} -ge 30 ]]; then echo "Failed: expected all secrets get drained to vault, so k8s has no secrets." 
exit 1 @@ -262,13 +252,13 @@ run_secret_drain() { attempt=$((attempt + 1)) done - model_uuid=$(juju show-model $model_name --format json | jq -r ".[\"${model_name}\"][\"model-uuid\"]") - check_contains "$(vault kv list -format json "${model_name}-${model_uuid: -6}" | jq length)" 2 + model_uuid=$(juju show-model $model_name --format json | yq -r ".[\"${model_name}\"][\"model-uuid\"]") + check_contains "$(vault kv list -format json "${model_name}-${model_uuid: -6}" | yq length)" 2 juju model-config secret-backend=auto attempt=0 - until [[ "$(microk8s kubectl -n $model_name get secrets -l 'app.juju.is/created-by=hello')" =~ ${unit_owned_short_uri}-1 ]]; do + until [[ "$(kubectl -n $model_name get secrets -l 'app.juju.is/created-by=hello')" =~ ${unit_owned_short_uri}-1 ]]; do if [[ ${attempt} -ge 30 ]]; then echo "Failed: expected secret ${unit_owned_short_uri}-1 gets drained to k8s." exit 1 @@ -278,7 +268,7 @@ run_secret_drain() { done attempt=0 - until [[ "$(microk8s kubectl -n $model_name get secrets -l 'app.juju.is/created-by=hello')" =~ ${app_owned_short_uri}-1 ]]; do + until [[ "$(kubectl -n $model_name get secrets -l 'app.juju.is/created-by=hello')" =~ ${app_owned_short_uri}-1 ]]; do if [[ ${attempt} -ge 30 ]]; then echo "Failed: expected secret ${app_owned_short_uri}-1 gets drained to k8s." 
exit 1 @@ -287,18 +277,18 @@ run_secret_drain() { attempt=$((attempt + 1)) done - check_contains "$(vault kv list -format json "${model_name}-${model_uuid: -6}" | jq length)" 0 + check_contains "$(vault kv list -format json "${model_name}-${model_uuid: -6}" | yq length)" 0 destroy_model "$model_name" } run_user_secret_drain() { model_name='model-user-secrets-k8s-drain' juju --show-log add-model "$model_name" - model_uuid=$(juju show-model $model_name --format json | jq -r ".[\"${model_name}\"][\"model-uuid\"]") + model_uuid=$(juju show-model $model_name --format json | yq -r ".[\"${model_name}\"][\"model-uuid\"]") prepare_vault - vault_backend_name='myvault' + vault_backend_name='user-secret-drain-vault-backend' juju add-secret-backend "$vault_backend_name" vault endpoint="$VAULT_ADDR" token="$VAULT_TOKEN" juju --show-log deploy snappass-test hello @@ -313,13 +303,13 @@ run_user_secret_drain() { juju --show-log grant-secret "$secret_uri" hello check_contains "$(juju exec --unit hello/0 -- secret-get $secret_short_uri)" "owned-by: $model_name-1" - check_contains "$(microk8s kubectl -n "$model_name" get secrets -l "app.juju.is/created-by=$model_uuid" -o jsonpath='{.items[*].metadata.name}')" "${secret_short_uri}-1" + check_contains "$(kubectl -n "$model_name" get secrets -l "app.juju.is/created-by=$model_uuid" -o jsonpath='{.items[*].metadata.name}')" "${secret_short_uri}-1" juju model-config secret-backend="$vault_backend_name" # ensure the user secret is removed from k8s backend. attempt=0 - until [[ $(microk8s kubectl -n "$model_name" get secrets -l "app.juju.is/created-by=$model_uuid" -o json | jq '.items | length') -eq 0 ]]; do + until [[ $(kubectl -n "$model_name" get secrets -l "app.juju.is/created-by=$model_uuid" -o json | yq '.items | length') -eq 0 ]]; do if [[ ${attempt} -ge 30 ]]; then echo "Failed: expected all secrets get drained to vault, so k8s has no secrets." 
exit 1 @@ -328,17 +318,17 @@ run_user_secret_drain() { attempt=$((attempt + 1)) done - model_uuid=$(juju show-model $model_name --format json | jq -r ".[\"${model_name}\"][\"model-uuid\"]") + model_uuid=$(juju show-model $model_name --format json | yq -r ".[\"${model_name}\"][\"model-uuid\"]") # ensure the user secret is in vault backend. - check_contains "$(vault kv list -format json "${model_name}-${model_uuid: -6}" | jq length)" 1 + check_contains "$(vault kv list -format json "${model_name}-${model_uuid: -6}" | yq length)" 1 # ensure the application can still read the user secret. check_contains "$(juju exec --unit hello/0 -- secret-get $secret_short_uri)" "owned-by: $model_name-1" juju model-config secret-backend=auto # ensure the user secret is drained back to k8s backend. attempt=0 - until [[ "$(microk8s kubectl -n $model_name get secrets -l "app.juju.is/created-by=$model_uuid")" =~ ${secret_short_uri}-1 ]]; do + until [[ "$(kubectl -n $model_name get secrets -l "app.juju.is/created-by=$model_uuid")" =~ ${secret_short_uri}-1 ]]; do if [[ ${attempt} -ge 30 ]]; then echo "Failed: expected secret ${secret_short_uri}-1 gets drained to k8s." exit 1 @@ -348,7 +338,7 @@ run_user_secret_drain() { done # ensure the user secret is removed from vault backend. - check_contains "$(vault kv list -format json "${model_name}-${model_uuid: -6}" | jq length)" 0 + check_contains "$(vault kv list -format json "${model_name}-${model_uuid: -6}" | yq length)" 0 # ensure the application can still read the user secret. check_contains "$(juju exec --unit hello/0 -- secret-get $secret_short_uri)" "owned-by: $model_name-1" @@ -361,12 +351,7 @@ run_test_add_multiple_secrets_parallel() { model_name='multiple-secrets-parallel-k8s-model' model_log_file="${TEST_DIR}/${model_name}.log" - cleanup_resources() { - # Remove files in this test in case other k8s test uses the same file names. 
- rm -f "$ctrl_log_file" "$model_log_file" - export KILL_CONTROLLER=true - } - trap cleanup_resources EXIT HUP INT TERM + export KILL_CONTROLLER=true # Verify all added secret IDs exist. verify_secrets_exist() { @@ -397,9 +382,7 @@ run_test_add_multiple_secrets_parallel() { verify_secrets_exist "$ctrl_log_file" # Remove all secrets that were added to controller. - for i in $(seq 1 100); do - juju remove-secret "test${i}" - done + seq 1 100 | xargs -P5 -I{} juju remove-secret "test{}" juju add-model "$model_name" # Check logs during juju add-secret in non-controller model for any errors. @@ -417,7 +400,7 @@ prepare_vault() { ip=$(hostname -I | awk '{print $1}') root_token='root' - timeout 45m vault server -dev -dev-listen-address="${ip}:8200" -dev-root-token-id="$root_token" & + daemon vault server -dev -dev-listen-address="${ip}:8200" -dev-root-token-id="$root_token" export VAULT_ADDR="http://${ip}:8200" export VAULT_TOKEN="$root_token"
tests/suites/secrets_k8s/task.sh+8 −5 modified@@ -6,13 +6,15 @@ test_secrets_k8s() { set_verbosity - case "${BOOTSTRAP_PROVIDER:-}" in - "k8s") + if [[ ${BOOTSTRAP_PROVIDER} != "k8s" ]]; then + echo "==> TEST SKIPPED: test_secrets_k8s test runs on k8s only" + return + fi + + case "${BOOTSTRAP_CLOUD:-}" in + "microk8s") microk8s enable ingress >/dev/null 2>&1 || true ;; - *) - echo "==> TEST SKIPPED: caas secrets tests, not a k8s provider" - ;; esac echo "==> Checking for dependencies" @@ -27,6 +29,7 @@ test_secrets_k8s() { test_user_secrets test_user_secret_drain test_add_multiple_secrets_parallel + test_secrets_cmr # Takes too long to tear down, so forcibly destroy it export KILL_CONTROLLER=true
tests/suites/sidecar/sidecar.sh+1 −1 modified@@ -60,7 +60,7 @@ test_deploy_and_force_remove_application() { check_snappass() { attempt=1 while true; do - address=$(juju status --format=json | jq -r '.applications["snappass-test"].units["snappass-test/0"].address') + address=$(juju status --format=json | yq -r '.applications["snappass-test"].units["snappass-test/0"].address') if curl "http://${address}:5000" | grep Snappass; then break fi
tests/suites/spaces_ec2/juju_bind.sh+2 −2 modified@@ -15,8 +15,8 @@ run_juju_bind() { hotplug_nic_id=$2 add_multi_nic_machine "$hotplug_nic_id" - juju_machine_id=$(juju show-machine --format json | jq -r '.["machines"] | keys[0]') - ifaces=$(juju ssh ${juju_machine_id} 'ip -j link' | jq -r '.[].ifname | select(. | startswith("en") or startswith("eth"))') + juju_machine_id=$(juju show-machine --format json | yq -r 'select(.["machines"]) | .["machines"] | keys[0]') + ifaces=$(juju ssh ${juju_machine_id} 'ip -j link' | yq -r '.[].ifname | select(. == "en*" or . == "eth*")') primary_iface=$(echo $ifaces | cut -d " " -f1) hotplug_iface=$(echo $ifaces | cut -d " " -f2) configure_multi_nic_netplan "$juju_machine_id" "$hotplug_iface"
tests/suites/spaces_ec2/machines_in_spaces.sh+1 −1 modified@@ -19,7 +19,7 @@ run_machines_in_spaces() { wait_for_machine_agent_status "2" "started" echo "Verify machines are assigned to correct spaces" - alpha_cidrs="$(juju spaces --format json | jq -r '.spaces[] | select(.name == "alpha").subnets | to_entries[] | select(.value["provider-id"] | contains("INFAN") | not) | .key')" + alpha_cidrs="$(juju spaces --format json | yq -r '.spaces[] | select(.name == "alpha").subnets | to_entries[] | select(.value["provider-id"] | contains("INFAN") | not) | .key')" assert_machine_ip_is_in_cidrs "0" "${alpha_cidrs}" machine_1_space_ip=$(assert_machine_ip_is_in_cidrs "1" "${alpha_cidrs}") machine_2_space_ip=$(assert_machine_ip_is_in_cidrs "2" "172.31.254.0/24")
tests/suites/spaces_ec2/task.sh+5 −5 modified@@ -41,16 +41,16 @@ test_spaces_ec2() { } ensure_subnet() { - isolated_subnet_id=$(aws ec2 describe-subnets --filters Name=cidr-block,Values=172.31.254.0/24 2>/dev/null | jq -r '.Subnets[0].SubnetId') + isolated_subnet_id=$(aws ec2 describe-subnets --filters Name=cidr-block,Values=172.31.254.0/24 2>/dev/null | yq -r '.Subnets[0].SubnetId') if [ "$isolated_subnet_id" != "null" ]; then cleanup_stale_nics echo "$isolated_subnet_id" return fi # Create a subnet in the default vpc - vpc_id=$(aws ec2 describe-vpcs | jq -r ".Vpcs[0].VpcId") - subnet_id=$(aws ec2 create-subnet --vpc-id "${vpc_id}" --cidr-block "172.31.254.0/24" | jq -r ".Subnet.SubnetId") + vpc_id=$(aws ec2 describe-vpcs | yq -r ".Vpcs[0].VpcId") + subnet_id=$(aws ec2 create-subnet --vpc-id "${vpc_id}" --cidr-block "172.31.254.0/24" | yq -r ".Subnet.SubnetId") if [ -z "${subnet_id}" ] || [ "${subnet_id}" == "null" ]; then echo "$(red "failed to create subnet in vpc $vpc_id")" 1>&2 exit 1 @@ -60,7 +60,7 @@ ensure_subnet() { setup_nic_for_space_tests() { isolated_subnet_id=${1} - hotplug_nic_id=$(aws ec2 create-network-interface --subnet-id "$isolated_subnet_id" --description="hot-pluggable NIC for space tests" 2>"${TEST_DIR}/create-network-interface-stderr.log" | jq -r '.NetworkInterface.NetworkInterfaceId') + hotplug_nic_id=$(aws ec2 create-network-interface --subnet-id "$isolated_subnet_id" --description="hot-pluggable NIC for space tests" 2>"${TEST_DIR}/create-network-interface-stderr.log" | yq -r '.NetworkInterface.NetworkInterfaceId') if [ -z "$hotplug_nic_id" ] || [ "$hotplug_nic_id" == "null" ]; then # shellcheck disable=SC2046 echo $(red "Unable to create extra NIC for space tests; please check that your account has permissions to create NICs. Failed with:") 1>&2 @@ -82,7 +82,7 @@ cleanup_stale_nics() { # try to delete anything older in a best effort manner. # TODO(jack-w-shaw) fix this. This: - # 1) Should use jq + # 1) Should use yq # 2) Should work. 
At the moment the created_at tag is not created, so all nics are destroyed aws ec2 describe-network-interfaces --filter Name=description,Values="hot-pluggable NIC for space tests" | grep 'NetworkInterfaceId\|Value' |
tests/suites/spaces_ec2/upgrade_charm_with_bind.sh+2 −2 modified@@ -15,8 +15,8 @@ run_upgrade_charm_with_bind() { hotplug_nic_id=$2 add_multi_nic_machine "$hotplug_nic_id" - juju_machine_id=$(juju show-machine --format json | jq -r '.["machines"] | keys[0]') - ifaces=$(juju ssh ${juju_machine_id} 'ip -j link' | jq -r '.[].ifname | select(. | startswith("en") or startswith("eth"))') + juju_machine_id=$(juju show-machine --format json | yq -r 'select(.["machines"]) | .["machines"] | keys[0]') + ifaces=$(juju ssh ${juju_machine_id} 'ip -j link' | yq -r '.[].ifname | select(. == "en*" or . == "eth*")') primary_iface=$(echo $ifaces | cut -d " " -f1) hotplug_iface=$(echo $ifaces | cut -d " " -f2) configure_multi_nic_netplan "$juju_machine_id" "$hotplug_iface"
tests/suites/spaces_ec2/util.sh+3 −3 modified@@ -7,9 +7,9 @@ add_multi_nic_machine() { hotplug_nic_id=$1 # Ensure machine is deployed to the same az as our nic - az=$(aws ec2 describe-network-interfaces --filters Name=network-interface-id,Values="$hotplug_nic_id" | jq -r ".NetworkInterfaces[0].AvailabilityZone") + az=$(aws ec2 describe-network-interfaces --filters Name=network-interface-id,Values="$hotplug_nic_id" | yq -r ".NetworkInterfaces[0].AvailabilityZone") juju add-machine --constraints zones="${az}" - juju_machine_id=$(juju show-machine --format json | jq -r '.["machines"] | keys[0]') + juju_machine_id=$(juju show-machine --format json | yq -r 'select(.["machines"]) | .["machines"] | keys[0]') echo "[+] waiting for machine ${juju_machine_id} to start..." wait_for_machine_agent_status "$juju_machine_id" "started" @@ -19,7 +19,7 @@ add_multi_nic_machine() { # shellcheck disable=SC2046,SC2086 aws ec2 attach-network-interface --device-index 1 \ --network-interface-id ${hotplug_nic_id} \ - --instance-id $(juju show-machine --format json | jq -r ".[\"machines\"] | .[\"${juju_machine_id}\"] | .[\"instance-id\"]") + --instance-id $(juju show-machine --format json | yq -r ".[\"machines\"] | .[\"${juju_machine_id}\"] | .[\"instance-id\"]") # Wait until the new NIC is UP timeout=${3:-600} # default timeout: 600s = 10m
tests/suites/spaces_gce/machines_in_spaces.sh+1 −1 modified@@ -18,7 +18,7 @@ run_machines_in_spaces() { wait_for_machine_agent_status "2" "started" echo "Verify machines are assigned to correct spaces" - alpha_cidrs="$(juju spaces --format json | jq -r '.spaces[] | select(.name == "alpha").subnets | to_entries[] | select(.value["provider-id"] | contains("INFAN") | not) | .key')" + alpha_cidrs="$(juju spaces --format json | yq -r '.spaces[] | select(.name == "alpha").subnets | to_entries[] | select(.value["provider-id"] | contains("INFAN") | not) | .key')" assert_machine_ip_is_in_cidrs "0" "${alpha_cidrs}" machine_1_space_ip=$(assert_machine_ip_is_in_cidrs "1" "${alpha_cidrs}") machine_2_space_ip=$(assert_machine_ip_is_in_cidrs "2" "10.104.0.0/20")
tests/suites/spaces_gce/task.sh+10 −7 modified@@ -32,32 +32,35 @@ test_spaces_gce() { ensure_subnets() { local network_name=$1 local region=$2 - existing_network=$(gcloud compute networks list --format json | jq -r '.[] | select(.name=="'"${network_name}"'") | .name') + existing_network=$(gcloud compute networks list --format json | network_name=$network_name yq -r '.[] | select(.name==env(network_name)) | .name') if [ "$existing_network" == "" ]; then echo "Creating VPC ${network_name} in region ${region}" gcloud compute networks create "$network_name" --subnet-mode="custom" --description "test vpc for juju qa" fi icmp_rule_name="juju-qa-test-allow-icmp" - existing_icmp_firewall=$(gcloud compute firewall-rules list --format json | jq -r '.[] | select(.network | endswith("networks/'"${network_name}"'")) | select(.name == "'"${icmp_rule_name}"'") | .name') + existing_icmp_firewall=$(gcloud compute firewall-rules list --format json | network_name=$network_name icmp_rule_name=$icmp_rule_name yq -r '.[] | select(.network == "*networks/\(env(network_name))") | select(.name == env(icmp_rule_name)) | .name') if [ "$existing_icmp_firewall" == "" ]; then echo "Creating ICMP rule ${icmp_rule_name}" gcloud compute firewall-rules create "$icmp_rule_name" --network "${network_name}" --allow="icmp" --source-ranges="0.0.0.0/0" fi ssh_rule_name="juju-qa-test-allow-ssh" - existing_ssh_firewall=$(gcloud compute firewall-rules list --format json | jq -r '.[] | select(.network | endswith("networks/'"${network_name}"'")) | select(.name == "'"${ssh_rule_name}"'") | .name') + existing_ssh_firewall=$(gcloud compute firewall-rules list --format json | network_name=$network_name ssh_rule_name=$ssh_rule_name yq -r '.[] | select(.network == "*networks/\(env(network_name))") | select(.name == env(ssh_rule_name)) | .name') if [ "$existing_ssh_firewall" == "" ]; then echo "Creating SSH rule ${ssh_rule_name}" gcloud compute firewall-rules create "$ssh_rule_name" --network "${network_name}" 
--allow="tcp:22" --source-ranges="0.0.0.0/0" fi for i in "subnet1 10.104.0.0/20" "subnet2 10.142.0.0/20"; do - set -- $i - existing_range=$(gcloud compute networks subnets list --regions "${region}" --format json | jq -r '.[] | select(.network | endswith("networks/'"${network_name}"'")) | select(.ipCidrRange=="'"${2}"'") | .ipCidrRange') + local subnet + local cidr + subnet=$(echo $i | awk -F' ' '{print $1}') + cidr=$(echo $i | awk -F' ' '{print $2}') + existing_range=$(gcloud compute networks subnets list --regions "${region}" --format json | network_name=$network_name cidr=$cidr yq -r '.[] | select(.network == "*networks/\(env(network_name))") | select(.ipCidrRange==env(cidr)) | .ipCidrRange') if [ "$existing_range" == "" ]; then - echo "Creating subnet $1 with CIDR range $2" - gcloud compute networks subnets create "${1}" --region "${region}" --network "${network_name}" --range "${2}" + echo "Creating subnet $subnet with CIDR range $cidr" + gcloud compute networks subnets create "${subnet}" --region "${region}" --network "${network_name}" --range "${cidr}" fi done }
tests/suites/static_analysis/lint_go.sh+2 −2 modified@@ -8,7 +8,7 @@ run_api_imports() { continue fi - got=$(go run ./scripts/import-inspector "$dir" 2>/dev/null | jq -r ".[]") + got=$(go run ./scripts/import-inspector "$dir" 2>/dev/null | yq -r ".[]") python3 tests/suites/static_analysis/lint_go.py -a "${allowed}" -g "${got}" || (echo "Error: API Client import failure in $dir" && exit 1) done } @@ -78,7 +78,7 @@ run_govulncheck() { echo "Ignoring vulnerabilities: ${ignoreMatcher}" allVulns=$(govulncheck -format openvex "github.com/juju/juju/...") - filteredVulns=$(echo ${allVulns} | jq -r '.statements[] | select(.status == "affected") | .vulnerability.name' | grep -vE "${ignoreMatcher}") + filteredVulns=$(echo ${allVulns} | yq -r '.statements[] | select(.status == "affected") | .vulnerability.name' | grep -vE "${ignoreMatcher}") if [[ -n ${filteredVulns} ]]; then (echo >&2 -e "\\nError: govulncheck has issues:\\n\\n${filteredVulns}")
tests/suites/static_analysis/versions.sh+1 −1 modified@@ -10,7 +10,7 @@ run_check_go_version() { check_go_version() { exit_code=0 - target_version="$(go mod edit -json | jq -r .Go | awk 'BEGIN{FS="."} {print $1"."$2}')" + target_version="$(go mod edit -json | yq -r .Go | awk 'BEGIN{FS="."} {print $1"."$2}')" snapcraft_go_juju_version="$(yq -r '.parts | .["juju"] | .["build-snaps"].[] | select(. == "go/*")' snap/snapcraft.yaml | awk -F'/' '{print $2}')" echo "${snapcraft_go_juju_version}" | grep -q "${target_version}"
tests/suites/storage/charm_storage.sh+9 −9 modified@@ -9,11 +9,11 @@ run_charm_storage() { ensure "${model_name}" "${file}" echo "Assessing default storage pools" - juju list-storage-pools -m "${model_name}" --format json | jq '.loop | .provider' | check "loop" - juju list-storage-pools -m "${model_name}" --format json | jq '.rootfs | .provider' | check "rootfs" - juju list-storage-pools -m "${model_name}" --format json | jq '.tmpfs | .provider' | check "tmpfs" + juju list-storage-pools -m "${model_name}" --format json | yq '.loop | .provider' | check "loop" + juju list-storage-pools -m "${model_name}" --format json | yq '.rootfs | .provider' | check "rootfs" + juju list-storage-pools -m "${model_name}" --format json | yq '.tmpfs | .provider' | check "tmpfs" if [ "${BOOTSTRAP_PROVIDER:-}" == "ec2" ]; then - juju list-storage-pools -m "${model_name}" --format json | jq '.["ebs-ssd"] | .provider' | check "ebs" + juju list-storage-pools -m "${model_name}" --format json | yq '.["ebs-ssd"] | .provider' | check "ebs" fi echo "Default storage pools PASSED" @@ -26,10 +26,10 @@ run_charm_storage() { # Assess the above created storage pools. 
echo "Assessing storage pool" - juju list-storage-pools -m "${model_name}" --format json | jq '.rooty | .provider' | check "rootfs" - juju list-storage-pools -m "${model_name}" --format json | jq '.tempy | .provider' | check "tmpfs" - juju list-storage-pools -m "${model_name}" --format json | jq '.loopy | .provider' | check "loop" - juju list-storage-pools -m "${model_name}" --format json | jq '.ebsy | .provider' | check "ebs" + juju list-storage-pools -m "${model_name}" --format json | yq '.rooty | .provider' | check "rootfs" + juju list-storage-pools -m "${model_name}" --format json | yq '.tempy | .provider' | check "tmpfs" + juju list-storage-pools -m "${model_name}" --format json | yq '.loopy | .provider' | check "loop" + juju list-storage-pools -m "${model_name}" --format json | yq '.ebsy | .provider' | check "ebs" echo "Storage pool PASSED" # Assess charm storage with the filesystem storage provider @@ -130,7 +130,7 @@ assess_rootfs() { wait_for_storage "attached" "$(filesystem_status 0 0).current" # assert the filesystem size requested_storage=1024 - acquired_storage=$(juju storage --format json | jq '.filesystems | .["0/0"] | select(.pool=="rootfs") | .size ') + acquired_storage=$(juju storage --format json | yq '.filesystems | .["0/0"] | select(.pool=="rootfs") | .size ') if [ "$requested_storage" -gt "$acquired_storage" ]; then echo "acquired storage size $acquired_storage should be greater than the requested storage $requested_storage" exit 1
tests/suites/storage_k8s/add-unit.sh+28 −28 modified@@ -21,9 +21,9 @@ test_add_unit_attach_storage() { wait_for_storage "attached" '.storage["pgdata/2"]["status"].current' # Capture the provisioned PersistentVolume ID. - PV_0=$(juju storage --format json | jq -r '.volumes["0"]."provider-id"') - PV_1=$(juju storage --format json | jq -r '.volumes["1"]."provider-id"') - PV_2=$(juju storage --format json | jq -r '.volumes["2"]."provider-id"') + PV_0=$(juju storage --format json | yq -r '.volumes["0"]."provider-id"') + PV_1=$(juju storage --format json | yq -r '.volumes["1"]."provider-id"') + PV_2=$(juju storage --format json | yq -r '.volumes["2"]."provider-id"') # Clean up: remove the application and associated storage (retain PV). juju remove-application postgresql-k8s --no-prompt --force @@ -35,10 +35,10 @@ test_add_unit_attach_storage() { # Prepare PersistentVolumes for reuse: set reclaim policy to Retain and remove claimRef. for pv in "${PV_0}" "${PV_1}" "${PV_2}"; do - microk8s kubectl patch pv "${pv}" -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}' - PVC=$(microk8s kubectl get pv "${pv}" -o jsonpath='{.spec.claimRef.name}') - microk8s kubectl delete pvc "${PVC}" -n "${model_name}" --ignore-not-found=true - microk8s kubectl patch pv "${pv}" --type merge -p '{"spec":{"claimRef": null}}' + kubectl patch pv "${pv}" -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}' + PVC=$(kubectl get pv "${pv}" -o jsonpath='{.spec.claimRef.name}') + kubectl delete pvc "${PVC}" -n "${model_name}" --ignore-not-found=true + kubectl patch pv "${pv}" --type merge -p '{"spec":{"claimRef": null}}' done juju add-model "${second_model_name}" @@ -59,21 +59,21 @@ test_add_unit_attach_storage() { # Verify PVs are bound and PVCs have correct labels for pv in "${PV_0}" "${PV_1}" "${PV_2}"; do - OUT=$(microk8s kubectl get pv "${pv}" -o json | jq '.status.phase') + OUT=$(kubectl get pv "${pv}" -o json | yq '.status.phase') echo "${OUT}" | check "Bound" - NEW_PVC=$(microk8s kubectl 
get pv "${pv}" -o jsonpath='{.spec.claimRef.name}') - PVC_JSON=$(microk8s kubectl get pvc -n "${second_model_name}" "${NEW_PVC}" -o json) + NEW_PVC=$(kubectl get pv "${pv}" -o jsonpath='{.spec.claimRef.name}') + PVC_JSON=$(kubectl get pvc -n "${second_model_name}" "${NEW_PVC}" -o json) - echo "${PVC_JSON}" | jq '.metadata.labels."storage.juju.is/name"' | check "pgdata" - echo "${PVC_JSON}" | jq '.metadata.labels."app.kubernetes.io/managed-by"' | check "juju" - echo "${PVC_JSON}" | jq '.metadata.annotations."juju-storage-owner"' | check "psql-k8s" + echo "${PVC_JSON}" | yq '.metadata.labels."storage.juju.is/name"' | check "pgdata" + echo "${PVC_JSON}" | yq '.metadata.labels."app.kubernetes.io/managed-by"' | check "juju" + echo "${PVC_JSON}" | yq '.metadata.annotations."juju-storage-owner"' | check "psql-k8s" done # Verify volume provider IDs match the original PVs for i in 0 1 2; do eval "expected_pv=\$PV_${i}" - OUT=$(juju storage --format json | jq ".volumes.\"${i}\".\"provider-id\"") + OUT=$(juju storage --format json | yq ".volumes.\"${i}\".\"provider-id\"") # shellcheck disable=SC2154 echo "${OUT}" | check "${expected_pv}" done @@ -103,20 +103,20 @@ test_add_unit_duplicate_pvc_exists() { wait_for_storage "attached" '.storage["pgdata/0"]["status"].current' # Capture the provisioned PersistentVolume ID. 
- PV=$(juju storage --format json | jq -r '.volumes["0"]."provider-id"') - PVC=$(microk8s kubectl get pv "${PV}" -o jsonpath='{.spec.claimRef.name}') + PV=$(juju storage --format json | yq -r '.volumes["0"]."provider-id"') + PVC=$(kubectl get pv "${PV}" -o jsonpath='{.spec.claimRef.name}') juju remove-unit postgresql-k8s --num-units 1 --force wait_for "null" '.applications."postgresql-k8s".units' # Patch PVC to have incorrect label to simulate duplicate PVC scenario - microk8s kubectl patch pvc "${PVC}" \ + kubectl patch pvc "${PVC}" \ -n "${model_name}" \ -p '{"metadata":{"labels":{"storage.juju.is/name":"not-pgdata"}}}' - # Avoid race condition of attaching storage before microk8s kubectl patching completes + # Avoid race condition of attaching storage before kubectl patching completes attempt=0 - until microk8s kubectl get pvc "${PVC}" -n "${model_name}" -o json | jq -r '.metadata.labels."storage.juju.is/name"' | grep -q "not-pgdata"; do + until kubectl get pvc "${PVC}" -n "${model_name}" -o json | yq -r '.metadata.labels."storage.juju.is/name"' | grep -q "not-pgdata"; do echo "[+] (attempt ${attempt}) waiting for PVC patch to complete" sleep "${SHORT_TIMEOUT}" attempt=$((attempt + 1)) @@ -130,11 +130,11 @@ test_add_unit_duplicate_pvc_exists() { # Should not scale due to wrong label value juju add-unit postgresql-k8s --attach-storage pgdata/0 sleep "${SHORT_TIMEOUT}" - OUT=$(microk8s kubectl get statefulset -n "${model_name}" postgresql-k8s -o jsonpath='{.spec.replicas}') + OUT=$(kubectl get statefulset -n "${model_name}" postgresql-k8s -o jsonpath='{.spec.replicas}') echo "${OUT}" | check 0 # Fix the PVC label to allow successful attachment - microk8s kubectl patch pvc "${PVC}" \ + kubectl patch pvc "${PVC}" \ -n "${model_name}" \ -p '{"metadata":{"labels":{"storage.juju.is/name":"pgdata"}}}' @@ -167,9 +167,9 @@ test_add_unit_attach_storage_scaling_race_condition() { wait_for_storage "attached" '.storage["pgdata/2"]["status"].current' # Capture the provisioned 
PersistentVolume ID. - PV_0=$(juju storage --format json | jq -r '.volumes["0"]."provider-id"') - PV_1=$(juju storage --format json | jq -r '.volumes["1"]."provider-id"') - PV_2=$(juju storage --format json | jq -r '.volumes["2"]."provider-id"') + PV_0=$(juju storage --format json | yq -r '.volumes["0"]."provider-id"') + PV_1=$(juju storage --format json | yq -r '.volumes["1"]."provider-id"') + PV_2=$(juju storage --format json | yq -r '.volumes["2"]."provider-id"') # Clean up: remove the application and associated storage (retain PV). juju remove-application postgresql-k8s --no-prompt --force @@ -181,10 +181,10 @@ test_add_unit_attach_storage_scaling_race_condition() { # Prepare PersistentVolumes for reuse: set reclaim policy to Retain and remove claimRef. for pv in "${PV_0}" "${PV_1}" "${PV_2}"; do - microk8s kubectl patch pv "${pv}" -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}' - PVC=$(microk8s kubectl get pv "${pv}" -o jsonpath='{.spec.claimRef.name}') - microk8s kubectl delete pvc "${PVC}" -n "${model_name}" --ignore-not-found=true - microk8s kubectl patch pv "${pv}" --type merge -p '{"spec":{"claimRef": null}}' + kubectl patch pv "${pv}" -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}' + PVC=$(kubectl get pv "${pv}" -o jsonpath='{.spec.claimRef.name}') + kubectl delete pvc "${PVC}" -n "${model_name}" --ignore-not-found=true + kubectl patch pv "${pv}" --type merge -p '{"spec":{"claimRef": null}}' done juju add-model "${second_model_name}"
tests/suites/storage_k8s/deploy.sh+14 −14 modified@@ -20,7 +20,7 @@ test_deploy_attach_storage() { wait_for_storage "attached" '.storage["pgdata/0"]["status"].current' # Capture the provisioned PersistentVolume ID. - PV=$(juju storage --format json | jq -r '.volumes["0"]."provider-id"') + PV=$(juju storage --format json | yq -r '.volumes["0"]."provider-id"') # Clean up: remove the application and associated storage (retain PV). juju remove-application postgresql-k8s --no-prompt @@ -29,10 +29,10 @@ test_deploy_attach_storage() { wait_for "{}" ".storage" # Clean up: make sure PersistentVolume is in available status - microk8s kubectl patch pv "${PV}" -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}' - PVC=$(microk8s kubectl get pv "${PV}" -o jsonpath='{.spec.claimRef.name}') - microk8s kubectl delete pvc "${PVC}" -n "${model_name}" - microk8s kubectl patch pv "${PV}" --type merge -p '{"spec":{"claimRef": null}}' + kubectl patch pv "${PV}" -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}' + PVC=$(kubectl get pv "${PV}" -o jsonpath='{.spec.claimRef.name}') + kubectl delete pvc "${PVC}" -n "${model_name}" + kubectl patch pv "${PV}" --type merge -p '{"spec":{"claimRef": null}}' # Import filesystem as pgdata/0 in second model. 
juju add-model "${second_model_name}" @@ -44,31 +44,31 @@ test_deploy_attach_storage() { juju deploy postgresql-k8s --channel 14/stable --trust --attach-storage pgdata/0 psql-k8s wait_for_storage "attached" '.storage["pgdata/0"]["status"].current' - OUT=$(microk8s kubectl get pv "${PV}" -o json | jq '.status.phase') + OUT=$(kubectl get pv "${PV}" -o json | yq '.status.phase') echo "${OUT}" | check "Bound" # Make sure new PV/PVC is used by the postgresql-k8s charm - NEW_PVC=$(microk8s kubectl get pv "${PV}" -o jsonpath='{.spec.claimRef.name}') + NEW_PVC=$(kubectl get pv "${PV}" -o jsonpath='{.spec.claimRef.name}') OUT=$( - microk8s kubectl get pvc -n "${second_model_name}" "${NEW_PVC}" -o json | - jq '.metadata.labels."storage.juju.is/name"' + kubectl get pvc -n "${second_model_name}" "${NEW_PVC}" -o json | + yq '.metadata.labels."storage.juju.is/name"' ) echo "${OUT}" | check "pgdata" OUT=$( - microk8s kubectl get pvc -n "${second_model_name}" "${NEW_PVC}" -o json | - jq '.metadata.labels."app.kubernetes.io/managed-by"' + kubectl get pvc -n "${second_model_name}" "${NEW_PVC}" -o json | + yq '.metadata.labels."app.kubernetes.io/managed-by"' ) echo "${OUT}" | check "juju" OUT=$( - microk8s kubectl get pvc -n "${second_model_name}" "${NEW_PVC}" -o json | - jq '.metadata.annotations."juju-storage-owner"' + kubectl get pvc -n "${second_model_name}" "${NEW_PVC}" -o json | + yq '.metadata.annotations."juju-storage-owner"' ) echo "${OUT}" | check "psql-k8s" # Make sure pv name have been update in volumes. OUT=$( - juju storage --format json | jq '.volumes."0"."provider-id"' + juju storage --format json | yq '.volumes."0"."provider-id"' ) echo "${OUT}" | check "${PV}"
tests/suites/storage_k8s/import.sh+12 −12 modified@@ -19,7 +19,7 @@ test_import_filesystem() { wait_for_storage "attached" '.storage["pgdata/0"]["status"].current' # Capture the provisioned PersistentVolume ID. - PV=$(juju storage --format json | jq -r '.volumes["0"]."provider-id"') + PV=$(juju storage --format json | yq -r '.volumes["0"]."provider-id"') # Clean up: remove the application and associated storage (retain PV). juju remove-application postgresql-k8s --no-prompt @@ -35,7 +35,7 @@ test_import_filesystem() { "importing volume \"${PV}\" with reclaim policy \"Delete\" not supported \(must be \"Retain\"\)" # Fix: update the PersistentVolume's reclaim policy to 'Retain'. - microk8s kubectl patch pv "${PV}" -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}' + kubectl patch pv "${PV}" -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}' # Attempt to import the PersistentVolume: expect failure due to existing claimRef. set +e @@ -45,9 +45,9 @@ test_import_filesystem() { "importing volume \"${PV}\" already bound to a claim not supported" # Fix: delete the PVC and remove the claimRef from the PersistentVolume. - PVC=$(microk8s kubectl get pv "${PV}" -o jsonpath='{.spec.claimRef.name}') - microk8s kubectl delete pvc "${PVC}" -n "${model_name}" - microk8s kubectl patch pv "${PV}" --type merge -p '{"spec":{"claimRef": null}}' + PVC=$(kubectl get pv "${PV}" -o jsonpath='{.spec.claimRef.name}') + kubectl delete pvc "${PVC}" -n "${model_name}" + kubectl patch pv "${PV}" --type merge -p '{"spec":{"claimRef": null}}' # Final attempt: import the PersistentVolume successfully. OUT=$(juju import-filesystem kubernetes "${PV}" pgdata 2>&1) @@ -80,7 +80,7 @@ test_force_import_filesystem() { wait_for_storage "attached" '.storage["pgdata/0"]["status"].current' # Capture the provisioned PersistentVolume ID. 
- PV=$(juju storage --format json | jq -r '.volumes["0"]."provider-id"') + PV=$(juju storage --format json | yq -r '.volumes["0"]."provider-id"') # Clean up: remove the application and associated storage (retain PV). juju remove-application postgresql-k8s --no-prompt @@ -96,9 +96,9 @@ test_force_import_filesystem() { "importing volume \"${PV}\" with reclaim policy \"Delete\" not supported \(must be \"Retain\"\)" # Test import PV which PVC not managed by juju. - PVC=$(microk8s kubectl get pv "${PV}" -o jsonpath='{.spec.claimRef.name}') - ORIGINAL_LABEL=$(microk8s kubectl get pvc "${PVC}" -n "${model_name}" -o json | jq -r '.metadata.labels["app.kubernetes.io/managed-by"]') - microk8s kubectl label pvc -n "${model_name}" "${PVC}" app.kubernetes.io/managed-by=not-juju --overwrite + PVC=$(kubectl get pv "${PV}" -o jsonpath='{.spec.claimRef.name}') + ORIGINAL_LABEL=$(kubectl get pvc "${PVC}" -n "${model_name}" -o json | yq -r '.metadata.labels["app.kubernetes.io/managed-by"]') + kubectl label pvc -n "${model_name}" "${PVC}" app.kubernetes.io/managed-by=not-juju --overwrite set +e OUT=$(juju import-filesystem kubernetes "${PV}" pgdata --force 2>&1) @@ -107,17 +107,17 @@ test_force_import_filesystem() { echo "${OUT}" | check \ "importing volume: importing PersistentVolume \"${PV}\" whose PersistentVolumeClaim is not managed by juju: unexpected storage labels" - microk8s kubectl label pvc -n "${model_name}" "${PVC}" app.kubernetes.io/managed-by="${ORIGINAL_LABEL}" --overwrite + kubectl label pvc -n "${model_name}" "${PVC}" app.kubernetes.io/managed-by="${ORIGINAL_LABEL}" --overwrite # Final attempt: import the PersistentVolume successfully. OUT=$(juju import-filesystem kubernetes "${PV}" pgdata --force 2>&1) wait_for_storage "detached" '.storage["pgdata/1"]["status"].current' # Ensure pv imported & status is available. 
- PVC=$(microk8s kubectl get pv "${PV}" -o jsonpath='{.spec.claimRef.name}') + PVC=$(kubectl get pv "${PV}" -o jsonpath='{.spec.claimRef.name}') echo "${PVC}" | check "" - RECLAIM_POLICY=$(microk8s kubectl get pv "${PV}" -o jsonpath='{.spec.persistentVolumeReclaimPolicy}') + RECLAIM_POLICY=$(kubectl get pv "${PV}" -o jsonpath='{.spec.persistentVolumeReclaimPolicy}') echo "${RECLAIM_POLICY}" | check "Retain" # Destroy the test model.
tests/suites/storage_k8s/task.sh+1 −1 modified@@ -11,7 +11,7 @@ test_storage_k8s() { echo "==> Checking for dependencies" check_dependencies juju - microk8s config >"${TEST_DIR}"/kube.conf + kubectl config view --raw --flatten >"${TEST_DIR}"/kube.conf export KUBE_CONFIG="${TEST_DIR}"/kube.conf test_import_filesystem
tests/suites/storage/persistent_storage.sh+3 −3 modified@@ -28,7 +28,7 @@ run_persistent_storage() { wait_for "active" "$(workload_status "dummy-storage" 0).current" echo "Checking total number of storage unit(s)." - assert_storage 2 ".storage | keys | length" + assert_storage 2 "select(.storage) | .storage | keys | length" echo "Checking names of storage unit(s)." assert_storage "single-blk/0" "$(label 0)" assert_storage "single-fs/1" "$(label 1)" @@ -82,7 +82,7 @@ run_persistent_storage() { assert_storage false '.storage | has("single-fs/1")' echo "Checking total number of storage unit(s)." - assert_storage 1 ".storage | keys | length" + assert_storage 1 "select(.storage) | .storage | keys | length" echo "Check for existence of single block storage" assert_storage true '.storage | has("single-blk/0")' echo "single-blk/0 found in storage list." @@ -101,7 +101,7 @@ run_persistent_storage() { # wait for current workload-status to be active wait_for "active" "$(workload_status "dummy-storage" 1).current" # assert storage unit count - assert_storage 1 ".storage | keys | length" + assert_storage 1 "select(.storage) | .storage | keys | length" echo "Checking existence of single block device storage single-blk/0." assert_storage true '.storage | has("single-blk/0")' # assert persistent setting
tests/suites/unit/unit_series.sh+2 −2 modified@@ -18,8 +18,8 @@ run_unit_set_series() { wait_for "ubuntu" "$(idle_condition "ubuntu" 0 1)" echo "Check the base for machine of added unit" - juju status --format=json | jq -r '.machines | .["1"] | .base | .name' | grep "ubuntu" - juju status --format=json | jq -r '.machines | .["1"] | .base | .channel' | grep "24.04" + juju status --format=json | yq -r '.machines | .["1"] | .base | .name' | grep "ubuntu" + juju status --format=json | yq -r '.machines | .["1"] | .base | .channel' | grep "24.04" destroy_model "unit-series" }
tests/suites/upgrade_series/base.sh+1 −1 modified@@ -37,7 +37,7 @@ assert_machine_base() { local machine expected_base actual_base machine=$1 expected_base=$2 - actual_base=$(juju status --format=json | jq -r ".machines[\"$machine\"] | (.base.name+\"@\"+.base.channel)") + actual_base=$(juju status --format=json | machine=$machine yq -r '.machines[env(machine)] | .base | "\(.name)@\(.channel)"') if [[ $expected_base == "$actual_base" ]]; then echo "Machine $machine has base $actual_base"
tests/suites/upgrade/streams.sh+9 −5 modified@@ -58,7 +58,6 @@ exec_simplestream_metadata() { --prevent-fallback \ -d "./tests/suites/upgrade/streams/" - add_clean_func "kill_server" start_server "./tests/suites/upgrade/streams/tools" # Find a routable address to the server that isn't the loopback address. @@ -73,12 +72,17 @@ exec_simplestream_metadata() { name="test-upgrade-${test_name}-stream" + local extra_opts + if [[ ${stable_version} == "3.6.14" ]]; then + extra_opts="--config juju-db-snap-channel=4.4/stable" + fi + file="${TEST_DIR}/test-upgrade-${test_name}-stream.log" ${bootstrap_juju_client} bootstrap "lxd" "${name}" \ --show-log \ --agent-version="${stable_version}" \ --bootstrap-series="${BOOTSTRAP_SERIES}" \ - --config agent-metadata-url="http://${server_address}:8666/" 2>&1 | OUTPUT "${file}" + --config agent-metadata-url="http://${server_address}:8666/" ${extra_opts:-} 2>&1 | OUTPUT "${file}" echo "${name}" >>"${TEST_DIR}/jujus" juju add-model test-upgrade-"${test_name}" @@ -87,14 +91,14 @@ exec_simplestream_metadata() { local CURRENT UPDATED - CURRENT=$(juju machines -m controller --format=json | jq -r '.machines | .["0"] | .["juju-status"] | .version') + CURRENT=$(juju machines -m controller --format=json | yq -r '.machines | .["0"] | .["juju-status"] | .version') echo "==> Current juju version ${CURRENT}" juju upgrade-controller --agent-version="${jujud_version}" attempt=0 while true; do - UPDATED=$(timeout 30 juju machines -m controller --format=json | jq -r '.machines | .["0"] | .["juju-status"] | .version' || echo "${CURRENT}") + UPDATED=$(timeout 30 juju machines -m controller --format=json | yq -r '.machines | .["0"] | .["juju-status"] | .version' || echo "${CURRENT}") if [ "$CURRENT" != "$UPDATED" ]; then break fi @@ -112,7 +116,7 @@ exec_simplestream_metadata() { juju switch test-upgrade-"${test_name}" juju upgrade-model while true; do - UPDATED=$(timeout 30 juju machines --format=json | jq -r '.machines | .["0"] | .["juju-status"] | 
.version' || echo "${CURRENT}") + UPDATED=$(timeout 30 juju machines --format=json | yq -r '.machines | .["0"] | .["juju-status"] | .version' || echo "${CURRENT}") if [ "$CURRENT" != "$UPDATED" ]; then break fi
tests/suites/user/manage.sh+16 −16 modified@@ -8,7 +8,7 @@ run_user_grant_revoke() { ensure "user-grant-revoke" "${file}" echo "Check that current user is admin" - juju whoami --format=json | jq -r '."user"' | check "admin" + juju whoami --format=json | yq -r '."user"' | check "admin" echo "Add user with read rights" juju show-user readuser 2>/dev/null || juju add-user readuser @@ -23,19 +23,19 @@ run_user_grant_revoke() { juju grant adminuser admin "user-grant-revoke" echo "Check rights for added users" - juju show-model "user-grant-revoke" --format=json | jq -r '."user-grant-revoke"."users"."readuser"."access"' | check "read" - juju show-model "user-grant-revoke" --format=json | jq -r '."user-grant-revoke"."users"."writeuser"."access"' | check "write" - juju show-model "user-grant-revoke" --format=json | jq -r '."user-grant-revoke"."users"."adminuser"."access"' | check "admin" + juju show-model "user-grant-revoke" --format=json | yq -r '."user-grant-revoke"."users"."readuser"."access"' | check "read" + juju show-model "user-grant-revoke" --format=json | yq -r '."user-grant-revoke"."users"."writeuser"."access"' | check "write" + juju show-model "user-grant-revoke" --format=json | yq -r '."user-grant-revoke"."users"."adminuser"."access"' | check "admin" echo "Revoke rights" juju revoke readuser read "user-grant-revoke" juju revoke writeuser write "user-grant-revoke" juju revoke adminuser admin "user-grant-revoke" echo "Check rights for added users after revoke" - juju show-model "user-grant-revoke" --format=json | jq -r '."user-grant-revoke"."users"."readuser"."access"' | check null - juju show-model "user-grant-revoke" --format=json | jq -r '."user-grant-revoke"."users"."writeuser"."access"' | check "read" - juju show-model "user-grant-revoke" --format=json | jq -r '."user-grant-revoke"."users"."adminuser"."access"' | check "write" + juju show-model "user-grant-revoke" --format=json | yq -r '."user-grant-revoke"."users"."readuser"."access"' | check null + juju 
show-model "user-grant-revoke" --format=json | yq -r '."user-grant-revoke"."users"."writeuser"."access"' | check "read" + juju show-model "user-grant-revoke" --format=json | yq -r '."user-grant-revoke"."users"."adminuser"."access"' | check "write" destroy_model "user-grant-revoke" } @@ -50,7 +50,7 @@ run_user_disable_enable() { ensure "user-disable-enable" "${file}" echo "Check that current user is admin" - juju whoami --format=json | jq -r '."user"' | check "admin" + juju whoami --format=json | yq -r '."user"' | check "admin" echo "Add testuser" juju show-user testuser 2>/dev/null || juju add-user testuser @@ -60,13 +60,13 @@ run_user_disable_enable() { juju disable-user testuser echo "Check testuser is disabled" - juju show-user testuser --format=json | jq -r '."disabled"' | check true + juju show-user testuser --format=json | yq -r '."disabled"' | check true echo "Enable testuser" juju enable-user testuser echo "Check testuser is enabled" - juju show-user testuser --format=json | jq -r '."disabled"' | check null + juju show-user testuser --format=json | yq -r '."disabled"' | check null destroy_model "user-disable-enable" } @@ -81,7 +81,7 @@ run_user_controller_access() { ensure "user-controller-access" "${file}" echo "Check that current user is admin" - juju whoami --format=json | jq -r '."user"' | check "admin" + juju whoami --format=json | yq -r '."user"' | check "admin" echo "Add user with login rights" juju show-user junioradmin 2>/dev/null || juju add-user junioradmin @@ -91,16 +91,16 @@ run_user_controller_access() { juju grant senioradmin superuser echo "Check rights for added users" - juju users --format=json | jq -r '.[] | select(."user-name"=="junioradmin") | ."access"' | check "login" - juju users --format=json | jq -r '.[] | select(."user-name"=="senioradmin") | ."access"' | check "superuser" + juju users --format=json | yq -r '.[] | select(."user-name"=="junioradmin") | ."access"' | check "login" + juju users --format=json | yq -r '.[] | 
select(."user-name"=="senioradmin") | ."access"' | check "superuser" echo "Revoke rights" juju revoke junioradmin login juju revoke senioradmin superuser echo "Check rights for added users after revoke" - juju users --format=json | jq -r '.[] | select(."user-name"=="junioradmin") | ."access"' | check "" - juju users --format=json | jq -r '.[] | select(."user-name"=="senioradmin") | ."access"' | check "login" + juju users --format=json | yq -r '.[] | select(."user-name"=="junioradmin") | ."access"' | check "" + juju users --format=json | yq -r '.[] | select(."user-name"=="senioradmin") | ."access"' | check "login" destroy_model "user-controller-access" } @@ -115,7 +115,7 @@ run_user_remove() { ensure "user-remove" "${file}" echo "Check that current user is admin" - juju whoami --format=json | jq -r '."user"' | check "admin" + juju whoami --format=json | yq -r '."user"' | check "admin" echo "Add testuser2" juju show-user testuser2 2>/dev/null || juju add-user testuser2
tests/suites/user/register.sh+1 −1 modified@@ -2,7 +2,7 @@ run_user_register() { echo echo "Check that current user is admin" - juju whoami --format=json | jq -r '."user"' | check "admin" + juju whoami --format=json | yq -r '."user"' | check "admin" echo "Add user with read rights" juju remove-user -y bob 2>/dev/null || true
upgrades/backend.go+7 −0 modified@@ -23,6 +23,7 @@ type StateBackend interface { AddVirtualHostKeys() error SplitMigrationStatusMessages() error PopulateApplicationStorageUniqueID() error + OpenControllerAPIPort() error } // Model is an interface providing access to the details of a model within the @@ -55,6 +56,12 @@ func (s stateBackend) SplitMigrationStatusMessages() error { return state.SplitMigrationStatusMessages(s.pool) } +// OpenControllerAPIPort runs an upgrade to open the controller api port +// on the controller units. +func (s stateBackend) OpenControllerAPIPort() error { + return state.OpenControllerAPIPort(s.pool) +} + // PopulateApplicationStorageUniqueID runs an upgrade to backfill CAAS apps // storage unique IDs. func (s stateBackend) PopulateApplicationStorageUniqueID() error {
upgrades/operations.go+1 −0 modified@@ -22,6 +22,7 @@ var stateUpgradeOperations = func() []Operation { upgradeToVersion{version.MustParse("3.6.4"), stateStepsFor364()}, upgradeToVersion{version.MustParse("3.6.5"), stateStepsFor365()}, upgradeToVersion{version.MustParse("3.6.13"), stateStepsFor3613()}, + upgradeToVersion{version.MustParse("3.6.15"), stateStepsFor3615()}, } return steps }
upgrades/steps_3615.go+17 −0 added@@ -0,0 +1,17 @@ +// Copyright 2026 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package upgrades + +// stateStepsFor3615 returns upgrade steps for Juju 3.6.15 that manipulate state directly. +func stateStepsFor3615() []Step { + return []Step{ + &upgradeStep{ + description: "open controller api port in state", + targets: []Target{DatabaseMaster}, + run: func(context Context) error { + return context.State().OpenControllerAPIPort() + }, + }, + } +}
upgrades/steps_3615_test.go+26 −0 added@@ -0,0 +1,26 @@ +// Copyright 2026 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package upgrades_test + +import ( + jc "github.com/juju/testing/checkers" + "github.com/juju/version/v2" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/testing" + "github.com/juju/juju/upgrades" +) + +var v3615 = version.MustParse("3.6.15") + +type steps3615Suite struct { + testing.BaseSuite +} + +var _ = gc.Suite(&steps3615Suite{}) + +func (s *steps3615Suite) TestPopulateApplicationStorageUniqueID(c *gc.C) { + step := findStateStep(c, v3615, "open controller api port in state") + c.Assert(step.Targets(), jc.DeepEquals, []upgrades.Target{upgrades.DatabaseMaster}) +}
upgrades/upgrade_test.go+1 −1 modified@@ -596,7 +596,7 @@ func (s *upgradeSuite) TestUpgradeOperationsOrdered(c *gc.C) { func (s *upgradeSuite) TestStateUpgradeOperationsVersions(c *gc.C) { versions := extractUpgradeVersions(c, (*upgrades.StateUpgradeOperations)()) - c.Assert(versions, gc.DeepEquals, []string{"3.6.4", "3.6.5", "3.6.13"}) + c.Assert(versions, gc.DeepEquals, []string{"3.6.4", "3.6.5", "3.6.13", "3.6.15"}) } func (s *upgradeSuite) TestUpgradeOperationsVersions(c *gc.C) {
version/version.go+1 −1 modified@@ -18,7 +18,7 @@ import ( // The presence and format of this constant is very important. // The debian/rules build recipe uses this value for the version // number of the release package. -const version = "3.6.16" +const version = "3.6.20" // UserAgentVersion defines a user agent version used for communication for // outside resources.
Vulnerability mechanics
Generated by null/stub on May 9, 2026. Inputs: CWE entries + fix-commit diffs from this CVE's patches. Citations validated against bundle.
References
4 references:
- github.com/advisories/GHSA-5cj2-rqqf-hx9p (GHSA — advisory)
- github.com/juju/juju/security/advisories/GHSA-5cj2-rqqf-hx9p (GHSA — vendor advisory, VDB entry, web)
- nvd.nist.gov/vuln/detail/CVE-2026-32694 (GHSA — advisory)
- github.com/juju/juju/commit/d06919eb03ec68156818bcc304b5fe1c39a8f9e9 (GHSA — web)
News mentions
No linked articles in our index yet (0 total).