Cache driver GetBlob() allows read access to any blob without access control check
Description
zot is an OCI image registry. Prior to 2.1.0, the cache driver GetBlob() allows read access to any blob without an access control check. If a zot accessControl policy allows users read access to some repositories but restricts read access to other repositories, and dedupe is enabled (it is enabled by default), then an attacker who knows the name of an image and the digest of a blob that they do not have read access to may maliciously read it via a second repository that they do have read access to. This attack is possible because `ImageStore.CheckBlob()` calls `checkCacheBlob()` to find the blob in a global cache by searching for the digest. If it is found, it is copied to the user-requested repository with copyBlob(). The attack may be mitigated by configuring "dedupe": false in the "storage" settings. The vulnerability is fixed in 2.1.0.
Affected packages
Versions sourced from the GitHub Security Advisory.
| Package | Affected versions | Patched versions |
|---|---|---|
| zotregistry.io/zot (Go) | < 2.1.0 | 2.1.0 |
| zotregistry.dev/zot (Go) | < 2.1.0 | 2.1.0 |
Affected products
- Range: < 2.1.0
Patches
Commit 2e5eacaa082c9aaee0220e46b — "Merge pull request from GHSA-55r9-5mx9-qq7r"
12 files changed · +492 −10
pkg/api/controller_test.go+116 −0 modified@@ -5142,6 +5142,122 @@ func TestGetUsername(t *testing.T) { }) } +func TestAuthorizationMountBlob(t *testing.T) { + Convey("Make a new controller", t, func() { + port := test.GetFreePort() + baseURL := test.GetBaseURL(port) + + conf := config.New() + conf.HTTP.Port = port + // have two users: one for user Policy, and another for default policy + username1, _ := test.GenerateRandomString() + password1, _ := test.GenerateRandomString() + username2, _ := test.GenerateRandomString() + password2, _ := test.GenerateRandomString() + username1 = strings.ToLower(username1) + username2 = strings.ToLower(username2) + + content := test.GetCredString(username1, password1) + test.GetCredString(username2, password2) + htpasswdPath := test.MakeHtpasswdFileFromString(content) + defer os.Remove(htpasswdPath) + + conf.HTTP.Auth = &config.AuthConfig{ + HTPasswd: config.AuthHTPasswd{ + Path: htpasswdPath, + }, + } + + user1Repo := fmt.Sprintf("%s/**", username1) + user2Repo := fmt.Sprintf("%s/**", username2) + + // config with all policy types, to test that the correct one is applied in each case + conf.HTTP.AccessControl = &config.AccessControlConfig{ + Repositories: config.Repositories{ + user1Repo: config.PolicyGroup{ + Policies: []config.Policy{ + { + Users: []string{username1}, + Actions: []string{ + constants.ReadPermission, + constants.CreatePermission, + }, + }, + }, + }, + user2Repo: config.PolicyGroup{ + Policies: []config.Policy{ + { + Users: []string{username2}, + Actions: []string{ + constants.ReadPermission, + constants.CreatePermission, + }, + }, + }, + }, + }, + } + + dir := t.TempDir() + + ctlr := api.NewController(conf) + ctlr.Config.Storage.RootDirectory = dir + + cm := test.NewControllerManager(ctlr) + cm.StartAndWait(port) + defer cm.StopServer() + + userClient1 := resty.New() + userClient1.SetBasicAuth(username1, password1) + + userClient2 := resty.New() + userClient2.SetBasicAuth(username2, password2) + + img := 
CreateImageWith().RandomLayers(1, 2).DefaultConfig().Build() + + repoName1 := username1 + "/" + "myrepo" + tag := "1.0" + + // upload image with user1 on repoName1 + err := UploadImageWithBasicAuth(img, baseURL, repoName1, tag, username1, password1) + So(err, ShouldBeNil) + + repoName2 := username2 + "/" + "myrepo" + + blobDigest := img.Manifest.Layers[0].Digest + + /* a HEAD request by user2 on blob digest (found in user1Repo) should return 404 + because user2 doesn't have permissions to read user1Repo */ + resp, err := userClient2.R().Head(baseURL + fmt.Sprintf("/v2/%s/blobs/%s", repoName2, blobDigest)) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, http.StatusNotFound) + + params := make(map[string]string) + params["mount"] = blobDigest.String() + + // trying to mount a blob which can be found in cache, but user doesn't have permission + // should return 202 instead of 201 + resp, err = userClient2.R().SetQueryParams(params).Post(baseURL + "/v2/" + repoName2 + "/blobs/uploads/") + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, http.StatusAccepted) + + /* a HEAD request by user1 on blob digest (found in user1Repo) should return 200 + because user1 has permission to read user1Repo */ + resp, err = userClient1.R().Head(baseURL + fmt.Sprintf("/v2/%s/blobs/%s", username1+"/"+"mysecondrepo", blobDigest)) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, http.StatusOK) + + // user2 can upload without dedupe + err = UploadImageWithBasicAuth(img, baseURL, repoName2, tag, username2, password2) + So(err, ShouldBeNil) + + // trying to mount a blob which can be found in cache and user has permission should return 201 instead of 202 + resp, err = userClient2.R().SetQueryParams(params).Post(baseURL + "/v2/" + repoName2 + "/blobs/uploads/") + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, http.StatusCreated) + }) +} + func TestAuthorizationWithOnlyAnonymousPolicy(t *testing.T) { Convey("Make a new controller", t, func() { const 
TestRepo = "my-repos/repo"
pkg/api/routes.go+74 −3 modified@@ -873,6 +873,37 @@ func (rh *RouteHandler) DeleteManifest(response http.ResponseWriter, request *ht response.WriteHeader(http.StatusAccepted) } +// canMount checks if a user has read permission on cached blobs with this specific digest. +// returns true if the user have permission to copy blob from cache. +func canMount(userAc *reqCtx.UserAccessControl, imgStore storageTypes.ImageStore, digest godigest.Digest, +) (bool, error) { + canMount := true + + // authz enabled + if userAc != nil { + canMount = false + + repos, err := imgStore.GetAllDedupeReposCandidates(digest) + if err != nil { + // first write + return false, err + } + + if len(repos) == 0 { + canMount = false + } + + // check if user can read any repo which contain this blob + for _, repo := range repos { + if userAc.Can(constants.ReadPermission, repo) { + canMount = true + } + } + } + + return canMount, nil +} + // CheckBlob godoc // @Summary Check image blob/layer // @Description Check an image's blob/layer given a digest @@ -905,7 +936,31 @@ func (rh *RouteHandler) CheckBlob(response http.ResponseWriter, request *http.Re digest := godigest.Digest(digestStr) - ok, blen, err := imgStore.CheckBlob(name, digest) + userAc, err := reqCtx.UserAcFromContext(request.Context()) + if err != nil { + response.WriteHeader(http.StatusInternalServerError) + + return + } + + userCanMount, err := canMount(userAc, imgStore, digest) + if err != nil { + rh.c.Log.Error().Err(err).Msg("unexpected error") + } + + var blen int64 + + if userCanMount { + ok, blen, err = imgStore.CheckBlob(name, digest) + } else { + var lockLatency time.Time + + imgStore.RLock(&lockLatency) + defer imgStore.RUnlock(&lockLatency) + + ok, blen, _, err = imgStore.StatBlob(name, digest) + } + if err != nil { details := zerr.GetDetails(err) if errors.Is(err, zerr.ErrBadBlobDigest) { //nolint:gocritic // errorslint conflicts with gocritic:IfElseChain @@ -1191,11 +1246,27 @@ func (rh *RouteHandler) 
CreateBlobUpload(response http.ResponseWriter, request * } mountDigest := godigest.Digest(mountDigests[0]) + + userAc, err := reqCtx.UserAcFromContext(request.Context()) + if err != nil { + response.WriteHeader(http.StatusInternalServerError) + + return + } + + userCanMount, err := canMount(userAc, imgStore, mountDigest) + if err != nil { + rh.c.Log.Error().Err(err).Msg("unexpected error") + } + // zot does not support cross mounting directly and do a workaround creating using hard link. // check blob looks for actual path (name+mountDigests[0]) first then look for cache and // if found in cache, will do hard link and if fails we will start new upload. - _, _, err := imgStore.CheckBlob(name, mountDigest) - if err != nil { + if userCanMount { + _, _, err = imgStore.CheckBlob(name, mountDigest) + } + + if err != nil || !userCanMount { upload, err := imgStore.NewBlobUpload(name) if err != nil { details := zerr.GetDetails(err)
pkg/storage/cache/boltdb.go+51 −0 modified@@ -174,6 +174,57 @@ func (d *BoltDBDriver) PutBlob(digest godigest.Digest, path string) error { return nil } +func (d *BoltDBDriver) GetAllBlobs(digest godigest.Digest) ([]string, error) { + var blobPath strings.Builder + + blobPaths := []string{} + + if err := d.db.View(func(tx *bbolt.Tx) error { + root := tx.Bucket([]byte(constants.BlobsCache)) + if root == nil { + // this is a serious failure + err := zerr.ErrCacheRootBucket + d.log.Error().Err(err).Msg("failed to access root bucket") + + return err + } + + bucket := root.Bucket([]byte(digest.String())) + if bucket != nil { + origin := bucket.Bucket([]byte(constants.OriginalBucket)) + blobPath.Write(d.getOne(origin)) + originBlob := blobPath.String() + + blobPaths = append(blobPaths, originBlob) + + deduped := bucket.Bucket([]byte(constants.DuplicatesBucket)) + if deduped != nil { + cursor := deduped.Cursor() + + for k, _ := cursor.First(); k != nil; k, _ = cursor.Next() { + var blobPath strings.Builder + + blobPath.Write(k) + + duplicateBlob := blobPath.String() + + if duplicateBlob != originBlob { + blobPaths = append(blobPaths, duplicateBlob) + } + } + + return nil + } + } + + return zerr.ErrCacheMiss + }); err != nil { + return nil, err + } + + return blobPaths, nil +} + func (d *BoltDBDriver) GetBlob(digest godigest.Digest) (string, error) { var blobPath strings.Builder
pkg/storage/cache/boltdb_test.go+43 −0 modified@@ -122,4 +122,47 @@ func TestBoltDBCache(t *testing.T) { So(err, ShouldNotBeNil) So(val, ShouldBeEmpty) }) + + Convey("Test cache.GetAllBlos()", t, func() { + dir := t.TempDir() + + log := log.NewLogger("debug", "") + So(log, ShouldNotBeNil) + + _, err := storage.Create("boltdb", "failTypeAssertion", log) + So(err, ShouldNotBeNil) + + cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{dir, "cache_test", false}, log) + So(cacheDriver, ShouldNotBeNil) + + err = cacheDriver.PutBlob("digest", "first") + So(err, ShouldBeNil) + + err = cacheDriver.PutBlob("digest", "second") + So(err, ShouldBeNil) + + err = cacheDriver.PutBlob("digest", "third") + So(err, ShouldBeNil) + + blobs, err := cacheDriver.GetAllBlobs("digest") + So(err, ShouldBeNil) + + So(blobs, ShouldResemble, []string{"first", "second", "third"}) + + err = cacheDriver.DeleteBlob("digest", "first") + So(err, ShouldBeNil) + + blobs, err = cacheDriver.GetAllBlobs("digest") + So(err, ShouldBeNil) + + So(blobs, ShouldResemble, []string{"second", "third"}) + + err = cacheDriver.DeleteBlob("digest", "third") + So(err, ShouldBeNil) + + blobs, err = cacheDriver.GetAllBlobs("digest") + So(err, ShouldBeNil) + + So(blobs, ShouldResemble, []string{"second"}) + }) }
pkg/storage/cache/cacheinterface.go+2 −0 modified@@ -11,6 +11,8 @@ type Cache interface { // Retrieves the blob matching provided digest. GetBlob(digest godigest.Digest) (string, error) + GetAllBlobs(digest godigest.Digest) ([]string, error) + // Uploads blob to cachedb. PutBlob(digest godigest.Digest, path string) error
pkg/storage/cache/dynamodb.go+36 −0 modified@@ -141,6 +141,42 @@ func (d *DynamoDBDriver) GetBlob(digest godigest.Digest) (string, error) { return out.OriginalBlobPath, nil } +func (d *DynamoDBDriver) GetAllBlobs(digest godigest.Digest) ([]string, error) { + blobPaths := []string{} + + resp, err := d.client.GetItem(context.TODO(), &dynamodb.GetItemInput{ + TableName: aws.String(d.tableName), + Key: map[string]types.AttributeValue{ + "Digest": &types.AttributeValueMemberS{Value: digest.String()}, + }, + }) + if err != nil { + d.log.Error().Err(err).Str("tableName", d.tableName).Msg("failed to get blob") + + return nil, err + } + + out := Blob{} + + if resp.Item == nil { + d.log.Debug().Err(zerr.ErrCacheMiss).Str("digest", string(digest)).Msg("failed to find blob in cache") + + return nil, zerr.ErrCacheMiss + } + + _ = attributevalue.UnmarshalMap(resp.Item, &out) + + blobPaths = append(blobPaths, out.OriginalBlobPath) + + for _, item := range out.DuplicateBlobPath { + if item != out.OriginalBlobPath { + blobPaths = append(blobPaths, item) + } + } + + return blobPaths, nil +} + func (d *DynamoDBDriver) PutBlob(digest godigest.Digest, path string) error { if path == "" { d.log.Error().Err(zerr.ErrEmptyValue).Str("digest", digest.String()).
pkg/storage/cache/dynamodb_test.go+42 −0 modified@@ -144,6 +144,48 @@ func TestDynamoDB(t *testing.T) { So(err, ShouldNotBeNil) So(val, ShouldBeEmpty) }) + + Convey("Test dynamoDB", t, func(c C) { + log := log.NewLogger("debug", "") + + cacheDriver, err := storage.Create("dynamodb", cache.DynamoDBDriverParameters{ + Endpoint: os.Getenv("DYNAMODBMOCK_ENDPOINT"), + TableName: "BlobTable", + Region: "us-east-2", + }, log) + So(cacheDriver, ShouldNotBeNil) + So(err, ShouldBeNil) + + err = cacheDriver.PutBlob("digest", "first") + So(err, ShouldBeNil) + + err = cacheDriver.PutBlob("digest", "second") + So(err, ShouldBeNil) + + err = cacheDriver.PutBlob("digest", "third") + So(err, ShouldBeNil) + + blobs, err := cacheDriver.GetAllBlobs("digest") + So(err, ShouldBeNil) + + So(blobs, ShouldResemble, []string{"first", "second", "third"}) + + err = cacheDriver.DeleteBlob("digest", "first") + So(err, ShouldBeNil) + + blobs, err = cacheDriver.GetAllBlobs("digest") + So(err, ShouldBeNil) + + So(blobs, ShouldResemble, []string{"second", "third"}) + + err = cacheDriver.DeleteBlob("digest", "third") + So(err, ShouldBeNil) + + blobs, err = cacheDriver.GetAllBlobs("digest") + So(err, ShouldBeNil) + + So(blobs, ShouldResemble, []string{"second"}) + }) } func TestDynamoDBError(t *testing.T) {
pkg/storage/imagestore/imagestore.go+31 −0 modified@@ -1114,6 +1114,37 @@ func (is *ImageStore) BlobPath(repo string, digest godigest.Digest) string { return path.Join(is.rootDir, repo, "blobs", digest.Algorithm().String(), digest.Encoded()) } +func (is *ImageStore) GetAllDedupeReposCandidates(digest godigest.Digest) ([]string, error) { + var lockLatency time.Time + + if err := digest.Validate(); err != nil { + return nil, err + } + + is.RLock(&lockLatency) + defer is.RUnlock(&lockLatency) + + blobsPaths, err := is.cache.GetAllBlobs(digest) + if err != nil { + return nil, err + } + + repos := []string{} + + for _, blobPath := range blobsPaths { + // these can be both full paths or relative paths depending on the cache options + if !is.cache.UsesRelativePaths() && path.IsAbs(blobPath) { + blobPath, _ = filepath.Rel(is.rootDir, blobPath) + } + + blobsDirIndex := strings.LastIndex(blobPath, "/blobs/") + + repos = append(repos, blobPath[:blobsDirIndex]) + } + + return repos, nil +} + /* CheckBlob verifies a blob and returns true if the blob is correct
pkg/storage/storage_test.go+70 −0 modified@@ -10,6 +10,7 @@ import ( "io" "os" "path" + "slices" "strings" "sync" "testing" @@ -132,6 +133,75 @@ func TestStorageNew(t *testing.T) { }) } +func TestGetAllDedupeReposCandidates(t *testing.T) { + for _, testcase := range testCases { + testcase := testcase + t.Run(testcase.testCaseName, func(t *testing.T) { + var imgStore storageTypes.ImageStore + if testcase.storageType == storageConstants.S3StorageDriverName { + tskip.SkipS3(t) + + uuid, err := guuid.NewV4() + if err != nil { + panic(err) + } + + testDir := path.Join("/oci-repo-test", uuid.String()) + tdir := t.TempDir() + + var store driver.StorageDriver + store, imgStore, _ = createObjectsStore(testDir, tdir) + defer cleanupStorage(store, testDir) + } else { + dir := t.TempDir() + + log := zlog.Logger{Logger: zerolog.New(os.Stdout)} + metrics := monitoring.NewMetricsServer(false, log) + cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{ + RootDir: dir, + Name: "cache", + UseRelPaths: true, + }, log) + + driver := local.New(true) + + imgStore = imagestore.NewImageStore(dir, dir, true, true, log, metrics, nil, driver, cacheDriver) + } + + Convey("Push repos with deduped blobs", t, func(c C) { + repoNames := []string{ + "first", + "second", + "repo/a", + "repo/a/b/c/d/e/f", + "repo/repo-b/blobs", + "foo/bar/baz", + "blobs/foo/bar/blobs", + "blobs", + "blobs/foo", + } + + storeController := storage.StoreController{DefaultStore: imgStore} + + image := CreateRandomImage() + + for _, repoName := range repoNames { + err := WriteImageToFileSystem(image, repoName, tag, storeController) + So(err, ShouldBeNil) + } + + randomBlobDigest := image.Manifest.Layers[0].Digest + + repos, err := imgStore.GetAllDedupeReposCandidates(randomBlobDigest) + So(err, ShouldBeNil) + slices.Sort(repoNames) + slices.Sort(repos) + So(repoNames, ShouldResemble, repos) + }) + }) + } +} + func TestStorageAPIs(t *testing.T) { for _, testcase := range testCases { testcase := testcase
pkg/storage/types/types.go+1 −0 modified@@ -63,6 +63,7 @@ type ImageStore interface { //nolint:interfacebloat GetAllBlobs(repo string) ([]string, error) PopulateStorageMetrics(interval time.Duration, sch *scheduler.Scheduler) VerifyBlobDigestValue(repo string, digest godigest.Digest) error + GetAllDedupeReposCandidates(digest godigest.Digest) ([]string, error) } type Driver interface { //nolint:interfacebloat
pkg/test/mocks/cache_mock.go+10 −0 modified@@ -6,6 +6,8 @@ type CacheMock struct { // Returns the human-readable "name" of the driver. NameFn func() string + GetAllBlobsFn func(digest godigest.Digest) ([]string, error) + // Retrieves the blob matching provided digest. GetBlobFn func(digest godigest.Digest) (string, error) @@ -68,3 +70,11 @@ func (cacheMock CacheMock) DeleteBlob(digest godigest.Digest, path string) error return nil } + +func (cacheMock CacheMock) GetAllBlobs(digest godigest.Digest) ([]string, error) { + if cacheMock.GetAllBlobsFn != nil { + return cacheMock.GetAllBlobsFn(digest) + } + + return []string{}, nil +}
pkg/test/mocks/image_store_mock.go+16 −7 modified@@ -50,13 +50,14 @@ type MockedImageStore struct { RunDedupeBlobsFn func(interval time.Duration, sch *scheduler.Scheduler) RunDedupeForDigestFn func(ctx context.Context, digest godigest.Digest, dedupe bool, duplicateBlobs []string) error - GetNextDigestWithBlobPathsFn func(repos []string, lastDigests []godigest.Digest) (godigest.Digest, []string, error) - GetAllBlobsFn func(repo string) ([]string, error) - CleanupRepoFn func(repo string, blobs []godigest.Digest, removeRepo bool) (int, error) - PutIndexContentFn func(repo string, index ispec.Index) error - PopulateStorageMetricsFn func(interval time.Duration, sch *scheduler.Scheduler) - StatIndexFn func(repo string) (bool, int64, time.Time, error) - VerifyBlobDigestValueFn func(repo string, digest godigest.Digest) error + GetNextDigestWithBlobPathsFn func(repos []string, lastDigests []godigest.Digest) (godigest.Digest, []string, error) + GetAllBlobsFn func(repo string) ([]string, error) + CleanupRepoFn func(repo string, blobs []godigest.Digest, removeRepo bool) (int, error) + PutIndexContentFn func(repo string, index ispec.Index) error + PopulateStorageMetricsFn func(interval time.Duration, sch *scheduler.Scheduler) + StatIndexFn func(repo string) (bool, int64, time.Time, error) + VerifyBlobDigestValueFn func(repo string, digest godigest.Digest) error + GetAllDedupeReposCandidatesFn func(digest godigest.Digest) ([]string, error) } func (is MockedImageStore) StatIndex(repo string) (bool, int64, time.Time, error) { @@ -419,3 +420,11 @@ func (is MockedImageStore) VerifyBlobDigestValue(repo string, digest godigest.Di return nil } + +func (is MockedImageStore) GetAllDedupeReposCandidates(digest godigest.Digest) ([]string, error) { + if is.GetAllBlobsFn != nil { + return is.GetAllDedupeReposCandidatesFn(digest) + } + + return []string{}, nil +}
Vulnerability mechanics
Generated by null/stub on May 9, 2026. Inputs: CWE entries + fix-commit diffs from this CVE's patches. Citations validated against bundle.
References
- github.com/advisories/GHSA-55r9-5mx9-qq7r (GHSA advisory)
- nvd.nist.gov/vuln/detail/CVE-2024-39897 (NVD advisory)
- github.com/project-zot/zot/commit/aaee0220e46bdadd12115ac67c19f9d3153eb1df (fix commit)
- github.com/project-zot/zot/security/advisories/GHSA-55r9-5mx9-qq7r (vendor advisory, confirmed)
News mentions
No linked articles in our index yet.