VYPR
High severity · 8.3 · NVD Advisory · Published May 14, 2024 · Updated Apr 15, 2026

CVE-2024-3727

CVE-2024-3727

Description

A flaw was found in the github.com/containers/image library. This flaw allows attackers to trigger unexpected authenticated registry accesses on behalf of a victim user, causing resource exhaustion, local path traversal, and other attacks.

Affected packages

Versions sourced from the GitHub Security Advisory.

Package | Affected versions | Patched versions
github.com/containers/image (Go)
< 5.30.1 | 5.30.1
github.com/containers/image/v5 (Go)
>= 5.30.0, < 5.30.1 | 5.30.1
github.com/containers/image/v5 (Go)
< 5.29.3 | 5.29.3

Patches

2
e89480460550

Merge pull request #2418 from mtrmac/digest-unmarshal-5.29

https://github.com/containers/image · Miloslav Trmač · May 16, 2024 · via GHSA
25 files changed · +371 −124
  • copy/progress_bars.go +5 −2 modified
    @@ -48,10 +48,13 @@ type progressBar struct {
     // As a convention, most users of progress bars should call mark100PercentComplete on full success;
     // by convention, we don't leave progress bars in partial state when fully done
     // (even if we copied much less data than anticipated).
    -func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *progressBar {
    +func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) (*progressBar, error) {
     	// shortDigestLen is the length of the digest used for blobs.
     	const shortDigestLen = 12
     
    +	if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
    +		return nil, err
    +	}
     	prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
     	// Truncate the prefix (chopping of some part of the digest) to make all progress bars aligned in a column.
     	maxPrefixLen := len("Copying blob ") + shortDigestLen
    @@ -104,7 +107,7 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.
     	return &progressBar{
     		Bar:          bar,
     		originalSize: info.Size,
    -	}
    +	}, nil
     }
     
     // printCopyInfo prints a "Copying ..." message on the copier if the output is
    
  • copy/single.go +29 −10 modified
    @@ -599,7 +599,10 @@ func (ic *imageCopier) copyConfig(ctx context.Context, src types.Image) error {
     		destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
     			progressPool := ic.c.newProgressPool()
     			defer progressPool.Wait()
    -			bar := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done")
    +			bar, err := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done")
    +			if err != nil {
    +				return types.BlobInfo{}, err
    +			}
     			defer bar.Abort(false)
     			ic.c.printCopyInfo("config", srcInfo)
     
    @@ -707,11 +710,17 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
     		}
     		if reused {
     			logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
    -			func() { // A scope for defer
    -				bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", "skipped: already exists")
    +			if err := func() error { // A scope for defer
    +				bar, err := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", "skipped: already exists")
    +				if err != nil {
    +					return err
    +				}
     				defer bar.Abort(false)
     				bar.mark100PercentComplete()
    -			}()
    +				return nil
    +			}(); err != nil {
    +				return types.BlobInfo{}, "", err
    +			}
     
     			// Throw an event that the layer has been skipped
     			if ic.c.options.Progress != nil && ic.c.options.ProgressInterval > 0 {
    @@ -730,8 +739,11 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
     	// Attempt a partial only when the source allows to retrieve a blob partially and
     	// the destination has support for it.
     	if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() {
    -		if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer
    -			bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
    +		reused, blobInfo, err := func() (bool, types.BlobInfo, error) { // A scope for defer
    +			bar, err := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
    +			if err != nil {
    +				return false, types.BlobInfo{}, err
    +			}
     			hideProgressBar := true
     			defer func() { // Note that this is not the same as defer bar.Abort(hideProgressBar); we need hideProgressBar to be evaluated lazily.
     				bar.Abort(hideProgressBar)
    @@ -751,18 +763,25 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
     				bar.mark100PercentComplete()
     				hideProgressBar = false
     				logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest)
    -				return true, updatedBlobInfoFromUpload(srcInfo, uploadedBlob)
    +				return true, updatedBlobInfoFromUpload(srcInfo, uploadedBlob), nil
     			}
     			logrus.Debugf("Failed to retrieve partial blob: %v", err)
    -			return false, types.BlobInfo{}
    -		}(); reused {
    +			return false, types.BlobInfo{}, nil
    +		}()
    +		if err != nil {
    +			return types.BlobInfo{}, "", err
    +		}
    +		if reused {
     			return blobInfo, cachedDiffID, nil
     		}
     	}
     
     	// Fallback: copy the layer, computing the diffID if we need to do so
     	return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer
    -		bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done")
    +		bar, err := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done")
    +		if err != nil {
    +			return types.BlobInfo{}, "", err
    +		}
     		defer bar.Abort(false)
     
     		srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
    
  • directory/directory_dest.go +18 −4 modified
    @@ -173,7 +173,10 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
     		}
     	}
     
    -	blobPath := d.ref.layerPath(blobDigest)
    +	blobPath, err := d.ref.layerPath(blobDigest)
    +	if err != nil {
    +		return private.UploadedBlob{}, err
    +	}
     	// need to explicitly close the file, since a rename won't otherwise not work on Windows
     	blobFile.Close()
     	explicitClosed = true
    @@ -196,7 +199,10 @@ func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, inf
     	if info.Digest == "" {
     		return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with unknown digest")
     	}
    -	blobPath := d.ref.layerPath(info.Digest)
    +	blobPath, err := d.ref.layerPath(info.Digest)
    +	if err != nil {
    +		return false, private.ReusedBlob{}, err
    +	}
     	finfo, err := os.Stat(blobPath)
     	if err != nil && os.IsNotExist(err) {
     		return false, private.ReusedBlob{}, nil
    @@ -216,7 +222,11 @@ func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, inf
     // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
     // but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
     func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error {
    -	return os.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644)
    +	path, err := d.ref.manifestPath(instanceDigest)
    +	if err != nil {
    +		return err
    +	}
    +	return os.WriteFile(path, manifest, 0644)
     }
     
     // PutSignaturesWithFormat writes a set of signatures to the destination.
    @@ -229,7 +239,11 @@ func (d *dirImageDestination) PutSignaturesWithFormat(ctx context.Context, signa
     		if err != nil {
     			return err
     		}
    -		if err := os.WriteFile(d.ref.signaturePath(i, instanceDigest), blob, 0644); err != nil {
    +		path, err := d.ref.signaturePath(i, instanceDigest)
    +		if err != nil {
    +			return err
    +		}
    +		if err := os.WriteFile(path, blob, 0644); err != nil {
     			return err
     		}
     	}
    
  • directory/directory_src.go +14 −3 modified
    @@ -55,7 +55,11 @@ func (s *dirImageSource) Close() error {
     // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
     // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
     func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
    -	m, err := os.ReadFile(s.ref.manifestPath(instanceDigest))
    +	path, err := s.ref.manifestPath(instanceDigest)
    +	if err != nil {
    +		return nil, "", err
    +	}
    +	m, err := os.ReadFile(path)
     	if err != nil {
     		return nil, "", err
     	}
    @@ -66,7 +70,11 @@ func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest
     // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
     // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
     func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
    -	r, err := os.Open(s.ref.layerPath(info.Digest))
    +	path, err := s.ref.layerPath(info.Digest)
    +	if err != nil {
    +		return nil, -1, err
    +	}
    +	r, err := os.Open(path)
     	if err != nil {
     		return nil, -1, err
     	}
    @@ -84,7 +92,10 @@ func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache
     func (s *dirImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
     	signatures := []signature.Signature{}
     	for i := 0; ; i++ {
    -		path := s.ref.signaturePath(i, instanceDigest)
    +		path, err := s.ref.signaturePath(i, instanceDigest)
    +		if err != nil {
    +			return nil, err
    +		}
     		sigBlob, err := os.ReadFile(path)
     		if err != nil {
     			if os.IsNotExist(err) {
    
  • directory/directory_test.go +4 −3 modified
    @@ -64,7 +64,7 @@ func TestGetPutManifest(t *testing.T) {
     func TestGetPutBlob(t *testing.T) {
     	computedBlob := []byte("test-blob")
     	providedBlob := []byte("provided-blob")
    -	providedDigest := digest.Digest("sha256:provided-test-digest")
    +	providedDigest := digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
     
     	ref, _ := refToTempDir(t)
     	cache := memory.New()
    @@ -113,12 +113,13 @@ func (fn readerFromFunc) Read(p []byte) (int, error) {
     // TestPutBlobDigestFailure simulates behavior on digest verification failure.
     func TestPutBlobDigestFailure(t *testing.T) {
     	const digestErrorString = "Simulated digest error"
    -	const blobDigest = digest.Digest("sha256:test-digest")
    +	const blobDigest = digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
     
     	ref, _ := refToTempDir(t)
     	dirRef, ok := ref.(dirReference)
     	require.True(t, ok)
    -	blobPath := dirRef.layerPath(blobDigest)
    +	blobPath, err := dirRef.layerPath(blobDigest)
    +	require.NoError(t, err)
     	cache := memory.New()
     
     	firstRead := true
    
  • directory/directory_transport.go +17 −8 modified
    @@ -161,25 +161,34 @@ func (ref dirReference) DeleteImage(ctx context.Context, sys *types.SystemContex
     }
     
     // manifestPath returns a path for the manifest within a directory using our conventions.
    -func (ref dirReference) manifestPath(instanceDigest *digest.Digest) string {
    +func (ref dirReference) manifestPath(instanceDigest *digest.Digest) (string, error) {
     	if instanceDigest != nil {
    -		return filepath.Join(ref.path, instanceDigest.Encoded()+".manifest.json")
    +		if err := instanceDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly.
    +			return "", err
    +		}
    +		return filepath.Join(ref.path, instanceDigest.Encoded()+".manifest.json"), nil
     	}
    -	return filepath.Join(ref.path, "manifest.json")
    +	return filepath.Join(ref.path, "manifest.json"), nil
     }
     
     // layerPath returns a path for a layer tarball within a directory using our conventions.
    -func (ref dirReference) layerPath(digest digest.Digest) string {
    +func (ref dirReference) layerPath(digest digest.Digest) (string, error) {
    +	if err := digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly.
    +		return "", err
    +	}
     	// FIXME: Should we keep the digest identification?
    -	return filepath.Join(ref.path, digest.Encoded())
    +	return filepath.Join(ref.path, digest.Encoded()), nil
     }
     
     // signaturePath returns a path for a signature within a directory using our conventions.
    -func (ref dirReference) signaturePath(index int, instanceDigest *digest.Digest) string {
    +func (ref dirReference) signaturePath(index int, instanceDigest *digest.Digest) (string, error) {
     	if instanceDigest != nil {
    -		return filepath.Join(ref.path, fmt.Sprintf(instanceDigest.Encoded()+".signature-%d", index+1))
    +		if err := instanceDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly.
    +			return "", err
    +		}
    +		return filepath.Join(ref.path, fmt.Sprintf(instanceDigest.Encoded()+".signature-%d", index+1)), nil
     	}
    -	return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1))
    +	return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1)), nil
     }
     
     // versionPath returns a path for the version file within a directory using our conventions.
    
  • directory/directory_transport_test.go +29 −7 modified
    @@ -197,8 +197,15 @@ func TestReferenceManifestPath(t *testing.T) {
     	ref, tmpDir := refToTempDir(t)
     	dirRef, ok := ref.(dirReference)
     	require.True(t, ok)
    -	assert.Equal(t, tmpDir+"/manifest.json", dirRef.manifestPath(nil))
    -	assert.Equal(t, tmpDir+"/"+dhex.Encoded()+".manifest.json", dirRef.manifestPath(&dhex))
    +	res, err := dirRef.manifestPath(nil)
    +	require.NoError(t, err)
    +	assert.Equal(t, tmpDir+"/manifest.json", res)
    +	res, err = dirRef.manifestPath(&dhex)
    +	require.NoError(t, err)
    +	assert.Equal(t, tmpDir+"/"+dhex.Encoded()+".manifest.json", res)
    +	invalidDigest := digest.Digest("sha256:../hello")
    +	_, err = dirRef.manifestPath(&invalidDigest)
    +	assert.Error(t, err)
     }
     
     func TestReferenceLayerPath(t *testing.T) {
    @@ -207,7 +214,11 @@ func TestReferenceLayerPath(t *testing.T) {
     	ref, tmpDir := refToTempDir(t)
     	dirRef, ok := ref.(dirReference)
     	require.True(t, ok)
    -	assert.Equal(t, tmpDir+"/"+hex, dirRef.layerPath("sha256:"+hex))
    +	res, err := dirRef.layerPath("sha256:" + hex)
    +	require.NoError(t, err)
    +	assert.Equal(t, tmpDir+"/"+hex, res)
    +	_, err = dirRef.layerPath(digest.Digest("sha256:../hello"))
    +	assert.Error(t, err)
     }
     
     func TestReferenceSignaturePath(t *testing.T) {
    @@ -216,10 +227,21 @@ func TestReferenceSignaturePath(t *testing.T) {
     	ref, tmpDir := refToTempDir(t)
     	dirRef, ok := ref.(dirReference)
     	require.True(t, ok)
    -	assert.Equal(t, tmpDir+"/signature-1", dirRef.signaturePath(0, nil))
    -	assert.Equal(t, tmpDir+"/signature-10", dirRef.signaturePath(9, nil))
    -	assert.Equal(t, tmpDir+"/"+dhex.Encoded()+".signature-1", dirRef.signaturePath(0, &dhex))
    -	assert.Equal(t, tmpDir+"/"+dhex.Encoded()+".signature-10", dirRef.signaturePath(9, &dhex))
    +	res, err := dirRef.signaturePath(0, nil)
    +	require.NoError(t, err)
    +	assert.Equal(t, tmpDir+"/signature-1", res)
    +	res, err = dirRef.signaturePath(9, nil)
    +	require.NoError(t, err)
    +	assert.Equal(t, tmpDir+"/signature-10", res)
    +	res, err = dirRef.signaturePath(0, &dhex)
    +	require.NoError(t, err)
    +	assert.Equal(t, tmpDir+"/"+dhex.Encoded()+".signature-1", res)
    +	res, err = dirRef.signaturePath(9, &dhex)
    +	require.NoError(t, err)
    +	assert.Equal(t, tmpDir+"/"+dhex.Encoded()+".signature-10", res)
    +	invalidDigest := digest.Digest("sha256:../hello")
    +	_, err = dirRef.signaturePath(0, &invalidDigest)
    +	assert.Error(t, err)
     }
     
     func TestReferenceVersionPath(t *testing.T) {
    
  • docker/docker_client.go +17 −3 modified
    @@ -952,6 +952,8 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
     	return c.detectPropertiesError
     }
     
    +// fetchManifest fetches a manifest for (the repo of ref) + tagOrDigest.
    +// The caller is responsible for ensuring tagOrDigest uses the expected format.
     func (c *dockerClient) fetchManifest(ctx context.Context, ref dockerReference, tagOrDigest string) ([]byte, string, error) {
     	path := fmt.Sprintf(manifestPath, reference.Path(ref.ref), tagOrDigest)
     	headers := map[string][]string{
    @@ -1034,6 +1036,9 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty
     		}
     	}
     
    +	if err := info.Digest.Validate(); err != nil { // Make sure info.Digest.String() does not contain any unexpected characters
    +		return nil, 0, err
    +	}
     	path := fmt.Sprintf(blobsPath, reference.Path(ref.ref), info.Digest.String())
     	logrus.Debugf("Downloading %s", path)
     	res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
    @@ -1097,7 +1102,10 @@ func isManifestUnknownError(err error) bool {
     // digest in ref.
     // It returns (nil, nil) if the manifest does not exist.
     func (c *dockerClient) getSigstoreAttachmentManifest(ctx context.Context, ref dockerReference, digest digest.Digest) (*manifest.OCI1, error) {
    -	tag := sigstoreAttachmentTag(digest)
    +	tag, err := sigstoreAttachmentTag(digest)
    +	if err != nil {
    +		return nil, err
    +	}
     	sigstoreRef, err := reference.WithTag(reference.TrimNamed(ref.ref), tag)
     	if err != nil {
     		return nil, err
    @@ -1130,6 +1138,9 @@ func (c *dockerClient) getSigstoreAttachmentManifest(ctx context.Context, ref do
     // getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension,
     // using the original data structures.
     func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
    +	if err := manifestDigest.Validate(); err != nil { // Make sure manifestDigest.String() does not contain any unexpected characters
    +		return nil, err
    +	}
     	path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
     	res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
     	if err != nil {
    @@ -1153,8 +1164,11 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe
     }
     
     // sigstoreAttachmentTag returns a sigstore attachment tag for the specified digest.
    -func sigstoreAttachmentTag(d digest.Digest) string {
    -	return strings.Replace(d.String(), ":", "-", 1) + ".sig"
    +func sigstoreAttachmentTag(d digest.Digest) (string, error) {
    +	if err := d.Validate(); err != nil { // Make sure d.String() doesn’t contain any unexpected characters
    +		return "", err
    +	}
    +	return strings.Replace(d.String(), ":", "-", 1) + ".sig", nil
     }
     
     // Close removes resources associated with an initialized dockerClient, if any.
    
  • docker/docker_image_dest.go +18 −4 modified
    @@ -229,6 +229,9 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
     // If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil);
     // it returns a non-nil error only on an unexpected failure.
     func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) {
    +	if err := digest.Validate(); err != nil { // Make sure digest.String() does not contain any unexpected characters
    +		return false, -1, err
    +	}
     	checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
     	logrus.Debugf("Checking %s", checkPath)
     	res, err := d.c.makeRequest(ctx, http.MethodHead, checkPath, nil, nil, v2Auth, extraScope)
    @@ -466,6 +469,7 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst
     		// particular instance.
     		refTail = instanceDigest.String()
     		// Double-check that the manifest we've been given matches the digest we've been given.
    +		// This also validates the format of instanceDigest.
     		matches, err := manifest.MatchesDigest(m, *instanceDigest)
     		if err != nil {
     			return fmt.Errorf("digesting manifest in PutManifest: %w", err)
    @@ -632,19 +636,24 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures []signature
     
     	// NOTE: Keep this in sync with docs/signature-protocols.md!
     	for i, signature := range signatures {
    -		sigURL := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
    -		err := d.putOneSignature(sigURL, signature)
    +		sigURL, err := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
     		if err != nil {
     			return err
     		}
    +		if err := d.putOneSignature(sigURL, signature); err != nil {
    +			return err
    +		}
     	}
     	// Remove any other signatures, if present.
     	// We stop at the first missing signature; if a previous deleting loop aborted
     	// prematurely, this may not clean up all of them, but one missing signature
     	// is enough for dockerImageSource to stop looking for other signatures, so that
     	// is sufficient.
     	for i := len(signatures); ; i++ {
    -		sigURL := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
    +		sigURL, err := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
    +		if err != nil {
    +			return err
    +		}
     		missing, err := d.c.deleteOneSignature(sigURL)
     		if err != nil {
     			return err
    @@ -775,8 +784,12 @@ func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context.
     	if err != nil {
     		return err
     	}
    +	attachmentTag, err := sigstoreAttachmentTag(manifestDigest)
    +	if err != nil {
    +		return err
    +	}
     	logrus.Debugf("Uploading sigstore attachment manifest")
    -	return d.uploadManifest(ctx, manifestBlob, sigstoreAttachmentTag(manifestDigest))
    +	return d.uploadManifest(ctx, manifestBlob, attachmentTag)
     }
     
     func layerMatchesSigstoreSignature(layer imgspecv1.Descriptor, mimeType string,
    @@ -892,6 +905,7 @@ func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context
     			return err
     		}
     
    +		// manifestDigest is known to be valid because it was not rejected by getExtensionsSignatures above.
     		path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), manifestDigest.String())
     		res, err := d.c.makeRequest(ctx, http.MethodPut, path, nil, bytes.NewReader(body), v2Auth, nil)
     		if err != nil {
    
  • docker/docker_image.go +6 −1 modified
    @@ -88,7 +88,12 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
     		if err = json.NewDecoder(res.Body).Decode(&tagsHolder); err != nil {
     			return nil, err
     		}
    -		tags = append(tags, tagsHolder.Tags...)
    +		for _, tag := range tagsHolder.Tags {
    +			if _, err := reference.WithTag(dr.ref, tag); err != nil { // Ensure the tag does not contain unexpected values
    +				return nil, fmt.Errorf("registry returned invalid tag %q: %w", tag, err)
    +			}
    +			tags = append(tags, tag)
    +		}
     
     		link := res.Header.Get("Link")
     		if link == "" {
    
  • docker/docker_image_src.go +16 −2 modified
    @@ -194,6 +194,9 @@ func simplifyContentType(contentType string) string {
     // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
     func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
     	if instanceDigest != nil {
    +		if err := instanceDigest.Validate(); err != nil { // Make sure instanceDigest.String() does not contain any unexpected characters
    +			return nil, "", err
    +		}
     		return s.fetchManifest(ctx, instanceDigest.String())
     	}
     	err := s.ensureManifestIsLoaded(ctx)
    @@ -203,6 +206,8 @@ func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *dig
     	return s.cachedManifest, s.cachedManifestMIMEType, nil
     }
     
    +// fetchManifest fetches a manifest for tagOrDigest.
    +// The caller is responsible for ensuring tagOrDigest uses the expected format.
     func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) {
     	return s.c.fetchManifest(ctx, s.physicalRef, tagOrDigest)
     }
    @@ -352,6 +357,9 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo,
     		return nil, nil, fmt.Errorf("external URLs not supported with GetBlobAt")
     	}
     
    +	if err := info.Digest.Validate(); err != nil { // Make sure info.Digest.String() does not contain any unexpected characters
    +		return nil, nil, err
    +	}
     	path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String())
     	logrus.Debugf("Downloading %s", path)
     	res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil)
    @@ -462,7 +470,10 @@ func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, inst
     			return nil, fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures)
     		}
     
    -		sigURL := lookasideStorageURL(s.c.signatureBase, manifestDigest, i)
    +		sigURL, err := lookasideStorageURL(s.c.signatureBase, manifestDigest, i)
    +		if err != nil {
    +			return nil, err
    +		}
     		signature, missing, err := s.getOneSignature(ctx, sigURL)
     		if err != nil {
     			return nil, err
    @@ -660,7 +671,10 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
     	}
     
     	for i := 0; ; i++ {
    -		sigURL := lookasideStorageURL(c.signatureBase, manifestDigest, i)
    +		sigURL, err := lookasideStorageURL(c.signatureBase, manifestDigest, i)
    +		if err != nil {
    +			return err
    +		}
     		missing, err := c.deleteOneSignature(sigURL)
     		if err != nil {
     			return err
    
  • docker/internal/tarfile/dest.go +10 −2 modified
    @@ -111,11 +111,19 @@ func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader,
     			return private.UploadedBlob{}, fmt.Errorf("reading Config file stream: %w", err)
     		}
     		d.config = buf
    -		if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil {
    +		configPath, err := d.archive.configPath(inputInfo.Digest)
    +		if err != nil {
    +			return private.UploadedBlob{}, err
    +		}
    +		if err := d.archive.sendFileLocked(configPath, inputInfo.Size, bytes.NewReader(buf)); err != nil {
     			return private.UploadedBlob{}, fmt.Errorf("writing Config file: %w", err)
     		}
     	} else {
    -		if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil {
    +		layerPath, err := d.archive.physicalLayerPath(inputInfo.Digest)
    +		if err != nil {
    +			return private.UploadedBlob{}, err
    +		}
    +		if err := d.archive.sendFileLocked(layerPath, inputInfo.Size, stream); err != nil {
     			return private.UploadedBlob{}, err
     		}
     	}
    
  • docker/internal/tarfile/writer.go +27 −7 modified
    @@ -95,7 +95,10 @@ func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest diges
     	if !w.legacyLayers.Contains(layerID) {
     		// Create a symlink for the legacy format, where there is one subdirectory per layer ("image").
     		// See also the comment in physicalLayerPath.
    -		physicalLayerPath := w.physicalLayerPath(layerDigest)
    +		physicalLayerPath, err := w.physicalLayerPath(layerDigest)
    +		if err != nil {
    +			return err
    +		}
     		if err := w.sendSymlinkLocked(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
     			return fmt.Errorf("creating layer symbolic link: %w", err)
     		}
    @@ -139,6 +142,9 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
     		}
     
     		// This chainID value matches the computation in docker/docker/layer.CreateChainID …
    +		if err := l.Digest.Validate(); err != nil { // This should never fail on this code path, still: make sure the chainID computation is unambiguous.
    +			return err
    +		}
     		if chainID == "" {
     			chainID = l.Digest
     		} else {
    @@ -204,12 +210,20 @@ func checkManifestItemsMatch(a, b *ManifestItem) error {
     func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Descriptor, configDigest digest.Digest, repoTags []reference.NamedTagged) error {
     	layerPaths := []string{}
     	for _, l := range layerDescriptors {
    -		layerPaths = append(layerPaths, w.physicalLayerPath(l.Digest))
    +		p, err := w.physicalLayerPath(l.Digest)
    +		if err != nil {
    +			return err
    +		}
    +		layerPaths = append(layerPaths, p)
     	}
     
     	var item *ManifestItem
    +	configPath, err := w.configPath(configDigest)
    +	if err != nil {
    +		return err
    +	}
     	newItem := ManifestItem{
    -		Config:       w.configPath(configDigest),
    +		Config:       configPath,
     		RepoTags:     []string{},
     		Layers:       layerPaths,
     		Parent:       "", // We don’t have this information
    @@ -294,21 +308,27 @@ func (w *Writer) Close() error {
     // configPath returns a path we choose for storing a config with the specified digest.
     // NOTE: This is an internal implementation detail, not a format property, and can change
     // any time.
    -func (w *Writer) configPath(configDigest digest.Digest) string {
    -	return configDigest.Hex() + ".json"
    +func (w *Writer) configPath(configDigest digest.Digest) (string, error) {
    +	if err := configDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
    +		return "", err
    +	}
    +	return configDigest.Hex() + ".json", nil
     }
     
     // physicalLayerPath returns a path we choose for storing a layer with the specified digest
     // (the actual path, i.e. a regular file, not a symlink that may be used in the legacy format).
     // NOTE: This is an internal implementation detail, not a format property, and can change
     // any time.
    -func (w *Writer) physicalLayerPath(layerDigest digest.Digest) string {
    +func (w *Writer) physicalLayerPath(layerDigest digest.Digest) (string, error) {
    +	if err := layerDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
    +		return "", err
    +	}
     	// Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
     	// writeLegacyMetadata constructs layer IDs differently from inputinfo.Digest values (as described
     	// inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
     	// tries to load every subdirectory as an image and fails if the config is missing.  So, keep the layers
     	// in the root of the tarball.
    -	return layerDigest.Hex() + ".tar"
    +	return layerDigest.Hex() + ".tar", nil
     }
     
     type tarFI struct {
    
  • docker/registries_d.go+5 2 modified
    @@ -286,8 +286,11 @@ func (ns registryNamespace) signatureTopLevel(write bool) string {
     // lookasideStorageURL returns an URL usable for accessing signature index in base with known manifestDigest.
     // base is not nil from the caller
     // NOTE: Keep this in sync with docs/signature-protocols.md!
    -func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) *url.URL {
    +func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) (*url.URL, error) {
    +	if err := manifestDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in a path with ../, so validate explicitly.
    +		return nil, err
    +	}
     	sigURL := *base
     	sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
    -	return &sigURL
    +	return &sigURL, nil
     }
    
  • docker/registries_d_test.go+8 1 modified
    @@ -8,6 +8,7 @@ import (
     	"testing"
     
     	"github.com/containers/image/v5/types"
    +	digest "github.com/opencontainers/go-digest"
     	"github.com/stretchr/testify/assert"
     	"github.com/stretchr/testify/require"
     )
    @@ -320,9 +321,15 @@ func TestLookasideStorageURL(t *testing.T) {
     		require.NoError(t, err)
     		expectedURL, err := url.Parse(c.expected)
     		require.NoError(t, err)
    -		res := lookasideStorageURL(baseURL, mdInput, c.index)
    +		res, err := lookasideStorageURL(baseURL, mdInput, c.index)
    +		require.NoError(t, err)
     		assert.Equal(t, expectedURL, res, c.expected)
     	}
    +
    +	baseURL, err := url.Parse("file:///tmp")
    +	require.NoError(t, err)
    +	_, err = lookasideStorageURL(baseURL, digest.Digest("sha256:../hello"), 0)
    +	assert.Error(t, err)
     }
     
     func TestBuiltinDefaultLookasideStorageDir(t *testing.T) {
    
  • openshift/openshift_src.go+3 0 modified
    @@ -109,6 +109,9 @@ func (s *openshiftImageSource) GetSignaturesWithFormat(ctx context.Context, inst
     		}
     		imageStreamImageName = s.imageStreamImageName
     	} else {
    +		if err := instanceDigest.Validate(); err != nil { // Make sure instanceDigest.String() does not contain any unexpected characters
    +			return nil, err
    +		}
     		imageStreamImageName = instanceDigest.String()
     	}
     	image, err := s.client.getImage(ctx, imageStreamImageName)
    
  • ostree/ostree_dest.go+10 0 modified
    @@ -345,6 +345,10 @@ func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context,
     		}
     		d.repo = repo
     	}
    +
    +	if err := info.Digest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, so validate explicitly.
    +		return false, private.ReusedBlob{}, err
    +	}
     	branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
     
     	found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
    @@ -470,12 +474,18 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er
     		return nil
     	}
     	for _, layer := range d.schema.LayersDescriptors {
    +		if err := layer.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
    +			return err
    +		}
     		hash := layer.Digest.Hex()
     		if err = checkLayer(hash); err != nil {
     			return err
     		}
     	}
     	for _, layer := range d.schema.FSLayers {
    +		if err := layer.BlobSum.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
    +			return err
    +		}
     		hash := layer.BlobSum.Hex()
     		if err = checkLayer(hash); err != nil {
     			return err
    
  • ostree/ostree_src.go+3 1 modified
    @@ -286,7 +286,9 @@ func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser,
     // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
     // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
     func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
    -
    +	if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
    +		return nil, -1, err
    +	}
     	blob := info.Digest.Hex()
     
     	// Ensure s.compressed is initialized.  It is build by LayerInfosForCopy.
    
  • pkg/blobcache/blobcache.go+9 3 modified
    @@ -78,12 +78,15 @@ func (b *BlobCache) DeleteImage(ctx context.Context, sys *types.SystemContext) e
     }
     
     // blobPath returns the path appropriate for storing a blob with digest.
    -func (b *BlobCache) blobPath(digest digest.Digest, isConfig bool) string {
    +func (b *BlobCache) blobPath(digest digest.Digest, isConfig bool) (string, error) {
    +	if err := digest.Validate(); err != nil { // Make sure digest.String() does not contain any unexpected characters
    +		return "", err
    +	}
     	baseName := digest.String()
     	if isConfig {
     		baseName += ".config"
     	}
    -	return filepath.Join(b.directory, baseName)
    +	return filepath.Join(b.directory, baseName), nil
     }
     
     // findBlob checks if we have a blob for info in cache (whether a config or not)
    @@ -95,7 +98,10 @@ func (b *BlobCache) findBlob(info types.BlobInfo) (string, int64, bool, error) {
     	}
     
     	for _, isConfig := range []bool{false, true} {
    -		path := b.blobPath(info.Digest, isConfig)
    +		path, err := b.blobPath(info.Digest, isConfig)
    +		if err != nil {
    +			return "", -1, false, err
    +		}
     		fileInfo, err := os.Stat(path)
     		if err == nil && (info.Size == -1 || info.Size == fileInfo.Size()) {
     			return path, fileInfo.Size(), isConfig, nil
    
  • pkg/blobcache/dest.go+55 38 modified
    @@ -80,47 +80,58 @@ func (d *blobCacheDestination) IgnoresEmbeddedDockerReference() bool {
     // and this new file.
     func (d *blobCacheDestination) saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os.File, compressedFilename string, compressedDigest digest.Digest, isConfig bool, alternateDigest *digest.Digest) {
     	defer wg.Done()
    -	// Decompress from and digest the reading end of that pipe.
    -	decompressed, err3 := archive.DecompressStream(decompressReader)
    +	defer decompressReader.Close()
    +
    +	succeeded := false
    +	defer func() {
    +		if !succeeded {
    +			// Remove the temporary file.
    +			if err := os.Remove(tempFile.Name()); err != nil {
    +				logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err)
    +			}
    +		}
    +	}()
    +
     	digester := digest.Canonical.Digester()
    -	if err3 == nil {
    +	if err := func() error { // A scope for defer
    +		defer tempFile.Close()
    +
    +		// Decompress from and digest the reading end of that pipe.
    +		decompressed, err := archive.DecompressStream(decompressReader)
    +		if err != nil {
    +			// Drain the pipe to keep from stalling the PutBlob() thread.
    +			if _, err2 := io.Copy(io.Discard, decompressReader); err2 != nil {
    +				logrus.Debugf("error draining the pipe: %v", err2)
    +			}
    +			return err
    +		}
    +		defer decompressed.Close()
     		// Read the decompressed data through the filter over the pipe, blocking until the
     		// writing end is closed.
    -		_, err3 = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed)
    -	} else {
    -		// Drain the pipe to keep from stalling the PutBlob() thread.
    -		if _, err := io.Copy(io.Discard, decompressReader); err != nil {
    -			logrus.Debugf("error draining the pipe: %v", err)
    -		}
    +		_, err = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed)
    +		return err
    +	}(); err != nil {
    +		return
     	}
    -	decompressReader.Close()
    -	decompressed.Close()
    -	tempFile.Close()
    +
     	// Determine the name that we should give to the uncompressed copy of the blob.
    -	decompressedFilename := d.reference.blobPath(digester.Digest(), isConfig)
    -	if err3 == nil {
    -		// Rename the temporary file.
    -		if err3 = os.Rename(tempFile.Name(), decompressedFilename); err3 != nil {
    -			logrus.Debugf("error renaming new decompressed copy of blob %q into place at %q: %v", digester.Digest().String(), decompressedFilename, err3)
    -			// Remove the temporary file.
    -			if err3 = os.Remove(tempFile.Name()); err3 != nil {
    -				logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3)
    -			}
    -		} else {
    -			*alternateDigest = digester.Digest()
    -			// Note the relationship between the two files.
    -			if err3 = ioutils.AtomicWriteFile(decompressedFilename+compressedNote, []byte(compressedDigest.String()), 0600); err3 != nil {
    -				logrus.Debugf("error noting that the compressed version of %q is %q: %v", digester.Digest().String(), compressedDigest.String(), err3)
    -			}
    -			if err3 = ioutils.AtomicWriteFile(compressedFilename+decompressedNote, []byte(digester.Digest().String()), 0600); err3 != nil {
    -				logrus.Debugf("error noting that the decompressed version of %q is %q: %v", compressedDigest.String(), digester.Digest().String(), err3)
    -			}
    -		}
    -	} else {
    -		// Remove the temporary file.
    -		if err3 = os.Remove(tempFile.Name()); err3 != nil {
    -			logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3)
    -		}
    +	decompressedFilename, err := d.reference.blobPath(digester.Digest(), isConfig)
    +	if err != nil {
    +		return
    +	}
    +	// Rename the temporary file.
    +	if err := os.Rename(tempFile.Name(), decompressedFilename); err != nil {
    +		logrus.Debugf("error renaming new decompressed copy of blob %q into place at %q: %v", digester.Digest().String(), decompressedFilename, err)
    +		return
    +	}
    +	succeeded = true
    +	*alternateDigest = digester.Digest()
    +	// Note the relationship between the two files.
    +	if err := ioutils.AtomicWriteFile(decompressedFilename+compressedNote, []byte(compressedDigest.String()), 0600); err != nil {
    +		logrus.Debugf("error noting that the compressed version of %q is %q: %v", digester.Digest().String(), compressedDigest.String(), err)
    +	}
    +	if err := ioutils.AtomicWriteFile(compressedFilename+decompressedNote, []byte(digester.Digest().String()), 0600); err != nil {
    +		logrus.Debugf("error noting that the decompressed version of %q is %q: %v", compressedDigest.String(), digester.Digest().String(), err)
     	}
     }
     
    @@ -145,7 +156,10 @@ func (d *blobCacheDestination) PutBlobWithOptions(ctx context.Context, stream io
     	needToWait := false
     	compression := archive.Uncompressed
     	if inputInfo.Digest != "" {
    -		filename := d.reference.blobPath(inputInfo.Digest, options.IsConfig)
    +		filename, err2 := d.reference.blobPath(inputInfo.Digest, options.IsConfig)
    +		if err2 != nil {
    +			return private.UploadedBlob{}, err2
    +		}
     		tempfile, err = os.CreateTemp(filepath.Dir(filename), filepath.Base(filename))
     		if err == nil {
     			stream = io.TeeReader(stream, tempfile)
    @@ -274,7 +288,10 @@ func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []
     	if err != nil {
     		logrus.Warnf("error digesting manifest %q: %v", string(manifestBytes), err)
     	} else {
    -		filename := d.reference.blobPath(manifestDigest, false)
    +		filename, err := d.reference.blobPath(manifestDigest, false)
    +		if err != nil {
    +			return err
    +		}
     		if err = ioutils.AtomicWriteFile(filename, manifestBytes, 0600); err != nil {
     			logrus.Warnf("error saving manifest as %q: %v", filename, err)
     		}
    
  • pkg/blobcache/src.go+12 4 modified
    @@ -56,7 +56,10 @@ func (s *blobCacheSource) Close() error {
     
     func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
     	if instanceDigest != nil {
    -		filename := s.reference.blobPath(*instanceDigest, false)
    +		filename, err := s.reference.blobPath(*instanceDigest, false)
    +		if err != nil {
    +			return nil, "", err
    +		}
     		manifestBytes, err := os.ReadFile(filename)
     		if err == nil {
     			s.cacheHits++
    @@ -136,8 +139,10 @@ func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest
     		replacedInfos := make([]types.BlobInfo, 0, len(infos))
     		for _, info := range infos {
     			var replaceDigest []byte
    -			var err error
    -			blobFile := s.reference.blobPath(info.Digest, false)
    +			blobFile, err := s.reference.blobPath(info.Digest, false)
    +			if err != nil {
    +				return nil, err
    +			}
     			var alternate string
     			switch s.reference.compress {
     			case types.Compress:
    @@ -148,7 +153,10 @@ func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest
     				replaceDigest, err = os.ReadFile(alternate)
     			}
     			if err == nil && digest.Digest(replaceDigest).Validate() == nil {
    -				alternate = s.reference.blobPath(digest.Digest(replaceDigest), false)
    +				alternate, err = s.reference.blobPath(digest.Digest(replaceDigest), false)
    +				if err != nil {
    +					return nil, err
    +				}
     				fileInfo, err := os.Stat(alternate)
     				if err == nil {
     					switch info.MediaType {
    
  • storage/storage_dest.go+22 10 modified
    @@ -324,6 +324,13 @@ func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context,
     // tryReusingBlobAsPending implements TryReusingBlobWithOptions for (digest, size or -1), filling s.blobDiffIDs and other metadata.
     // The caller must arrange the blob to be eventually committed using s.commitLayer().
     func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
    +	if digest == "" {
    +		return false, private.ReusedBlob{}, errors.New(`Can not check for a blob with unknown digest`)
    +	}
    +	if err := digest.Validate(); err != nil {
    +		return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
    +	}
    +
     	// lock the entire method as it executes fairly quickly
     	s.lock.Lock()
     	defer s.lock.Unlock()
    @@ -344,13 +351,6 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest,
     		}
     	}
     
    -	if digest == "" {
    -		return false, private.ReusedBlob{}, errors.New(`Can not check for a blob with unknown digest`)
    -	}
    -	if err := digest.Validate(); err != nil {
    -		return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
    -	}
    -
     	// Check if we've already cached it in a file.
     	if size, ok := s.fileSizes[digest]; ok {
     		return true, private.ReusedBlob{
    @@ -803,17 +803,25 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
     		if err != nil {
     			return fmt.Errorf("digesting top-level manifest: %w", err)
     		}
    +		key, err := manifestBigDataKey(manifestDigest)
    +		if err != nil {
    +			return err
    +		}
     		options.BigData = append(options.BigData, storage.ImageBigDataOption{
    -			Key:    manifestBigDataKey(manifestDigest),
    +			Key:    key,
     			Data:   toplevelManifest,
     			Digest: manifestDigest,
     		})
     	}
     	// Set up to save the image's manifest.  Allow looking it up by digest by using the key convention defined by the Store.
     	// Record the manifest twice: using a digest-specific key to allow references to that specific digest instance,
     	// and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers.
    +	key, err := manifestBigDataKey(s.manifestDigest)
    +	if err != nil {
    +		return err
    +	}
     	options.BigData = append(options.BigData, storage.ImageBigDataOption{
    -		Key:    manifestBigDataKey(s.manifestDigest),
    +		Key:    key,
     		Data:   s.manifest,
     		Digest: s.manifestDigest,
     	})
    @@ -831,8 +839,12 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
     		})
     	}
     	for instanceDigest, signatures := range s.signatureses {
    +		key, err := signatureBigDataKey(instanceDigest)
    +		if err != nil {
    +			return err
    +		}
     		options.BigData = append(options.BigData, storage.ImageBigDataOption{
    -			Key:    signatureBigDataKey(instanceDigest),
    +			Key:    key,
     			Data:   signatures,
     			Digest: digest.Canonical.FromBytes(signatures),
     		})
    
  • storage/storage_image.go+10 4 modified
    @@ -26,14 +26,20 @@ type storageImageCloser struct {
     // manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions.
     // If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
     // for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey
    -func manifestBigDataKey(digest digest.Digest) string {
    -	return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String()
    +func manifestBigDataKey(digest digest.Digest) (string, error) {
    +	if err := digest.Validate(); err != nil { // Make sure info.Digest.String() uses the expected format and does not collide with other BigData keys.
    +		return "", err
    +	}
    +	return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String(), nil
     }
     
     // signatureBigDataKey returns a key suitable for recording the signatures associated with the manifest with the specified digest using storage.Store.ImageBigData and related functions.
     // If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
    -func signatureBigDataKey(digest digest.Digest) string {
    -	return "signature-" + digest.Encoded()
    +func signatureBigDataKey(digest digest.Digest) (string, error) {
    +	if err := digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
    +		return "", err
    +	}
    +	return "signature-" + digest.Encoded(), nil
     }
     
     // Size() returns the previously-computed size of the image, with no error.
    
  • storage/storage_reference.go+8 2 modified
    @@ -73,7 +73,10 @@ func multiArchImageMatchesSystemContext(store storage.Store, img *storage.Image,
     	// We don't need to care about storage.ImageDigestBigDataKey because
     	// manifests lists are only stored into storage by c/image versions
     	// that know about manifestBigDataKey, and only using that key.
    -	key := manifestBigDataKey(manifestDigest)
    +	key, err := manifestBigDataKey(manifestDigest)
    +	if err != nil {
    +		return false // This should never happen, manifestDigest comes from a reference.Digested, and that validates the format.
    +	}
     	manifestBytes, err := store.ImageBigData(img.ID, key)
     	if err != nil {
     		return false
    @@ -95,7 +98,10 @@ func multiArchImageMatchesSystemContext(store storage.Store, img *storage.Image,
     	if err != nil {
     		return false
     	}
    -	key = manifestBigDataKey(chosenInstance)
    +	key, err = manifestBigDataKey(chosenInstance)
    +	if err != nil {
    +		return false
    +	}
     	_, err = store.ImageBigData(img.ID, key)
     	return err == nil // true if img.ID is based on chosenInstance.
     }
    
  • storage/storage_src.go+16 3 modified
    @@ -202,7 +202,10 @@ func (s *storageImageSource) getBlobAndLayerID(digest digest.Digest, layers []st
     // GetManifest() reads the image's manifest.
     func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, mimeType string, err error) {
     	if instanceDigest != nil {
    -		key := manifestBigDataKey(*instanceDigest)
    +		key, err := manifestBigDataKey(*instanceDigest)
    +		if err != nil {
    +			return nil, "", err
    +		}
     		blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
     		if err != nil {
     			return nil, "", fmt.Errorf("reading manifest for image instance %q: %w", *instanceDigest, err)
    @@ -214,7 +217,10 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di
     		// Prefer the manifest corresponding to the user-specified digest, if available.
     		if s.imageRef.named != nil {
     			if digested, ok := s.imageRef.named.(reference.Digested); ok {
    -				key := manifestBigDataKey(digested.Digest())
    +				key, err := manifestBigDataKey(digested.Digest())
    +				if err != nil {
    +					return nil, "", err
    +				}
     				blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
     				if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key
     					return nil, "", err
    @@ -329,7 +335,14 @@ func (s *storageImageSource) GetSignaturesWithFormat(ctx context.Context, instan
     	instance := "default instance"
     	if instanceDigest != nil {
     		signatureSizes = s.SignaturesSizes[*instanceDigest]
    -		key = signatureBigDataKey(*instanceDigest)
    +		k, err := signatureBigDataKey(*instanceDigest)
    +		if err != nil {
    +			return nil, err
    +		}
    +		key = k
    +		if err := instanceDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
    +			return nil, err
    +		}
     		instance = instanceDigest.Encoded()
     	}
     	if len(signatureSizes) > 0 {
    
132678b47bae

Merge pull request #2404 from mtrmac/digest-unmarshal-5.30

https://github.com/containers/imageMiloslav TrmačMay 9, 2024via ghsa
25 files changed · +376 129
  • copy/progress_bars.go+5 2 modified
    @@ -49,10 +49,13 @@ type progressBar struct {
     // As a convention, most users of progress bars should call mark100PercentComplete on full success;
     // by convention, we don't leave progress bars in partial state when fully done
     // (even if we copied much less data than anticipated).
    -func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *progressBar {
    +func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) (*progressBar, error) {
     	// shortDigestLen is the length of the digest used for blobs.
     	const shortDigestLen = 12
     
    +	if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
    +		return nil, err
    +	}
     	prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
     	// Truncate the prefix (chopping of some part of the digest) to make all progress bars aligned in a column.
     	maxPrefixLen := len("Copying blob ") + shortDigestLen
    @@ -105,7 +108,7 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.
     	return &progressBar{
     		Bar:          bar,
     		originalSize: info.Size,
    -	}
    +	}, nil
     }
     
     // printCopyInfo prints a "Copying ..." message on the copier if the output is
    
  • copy/single.go+29 10 modified
    @@ -606,7 +606,10 @@ func (ic *imageCopier) copyConfig(ctx context.Context, src types.Image) error {
     		destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
     			progressPool := ic.c.newProgressPool()
     			defer progressPool.Wait()
    -			bar := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done")
    +			bar, err := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done")
    +			if err != nil {
    +				return types.BlobInfo{}, err
    +			}
     			defer bar.Abort(false)
     			ic.c.printCopyInfo("config", srcInfo)
     
    @@ -738,15 +741,21 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
     		}
     		if reused {
     			logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
    -			func() { // A scope for defer
    +			if err := func() error { // A scope for defer
     				label := "skipped: already exists"
     				if reusedBlob.MatchedByTOCDigest {
     					label = "skipped: already exists (found by TOC)"
     				}
    -				bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", label)
    +				bar, err := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", label)
    +				if err != nil {
    +					return err
    +				}
     				defer bar.Abort(false)
     				bar.mark100PercentComplete()
    -			}()
    +				return nil
    +			}(); err != nil {
    +				return types.BlobInfo{}, "", err
    +			}
     
     			// Throw an event that the layer has been skipped
     			if ic.c.options.Progress != nil && ic.c.options.ProgressInterval > 0 {
    @@ -765,8 +774,11 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
     	// Attempt a partial only when the source allows to retrieve a blob partially and
     	// the destination has support for it.
     	if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() {
    -		if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer
    -			bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
    +		reused, blobInfo, err := func() (bool, types.BlobInfo, error) { // A scope for defer
    +			bar, err := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
    +			if err != nil {
    +				return false, types.BlobInfo{}, err
    +			}
     			hideProgressBar := true
     			defer func() { // Note that this is not the same as defer bar.Abort(hideProgressBar); we need hideProgressBar to be evaluated lazily.
     				bar.Abort(hideProgressBar)
    @@ -789,18 +801,25 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
     				bar.mark100PercentComplete()
     				hideProgressBar = false
     				logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest)
    -				return true, updatedBlobInfoFromUpload(srcInfo, uploadedBlob)
    +				return true, updatedBlobInfoFromUpload(srcInfo, uploadedBlob), nil
     			}
     			logrus.Debugf("Failed to retrieve partial blob: %v", err)
    -			return false, types.BlobInfo{}
    -		}(); reused {
    +			return false, types.BlobInfo{}, nil
    +		}()
    +		if err != nil {
    +			return types.BlobInfo{}, "", err
    +		}
    +		if reused {
     			return blobInfo, cachedDiffID, nil
     		}
     	}
     
     	// Fallback: copy the layer, computing the diffID if we need to do so
     	return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer
    -		bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done")
    +		bar, err := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done")
    +		if err != nil {
    +			return types.BlobInfo{}, "", err
    +		}
     		defer bar.Abort(false)
     
     		srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
    
  • directory/directory_dest.go+18 4 modified
    @@ -173,7 +173,10 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
     		}
     	}
     
    -	blobPath := d.ref.layerPath(blobDigest)
    +	blobPath, err := d.ref.layerPath(blobDigest)
    +	if err != nil {
    +		return private.UploadedBlob{}, err
    +	}
     	// need to explicitly close the file, since a rename won't otherwise not work on Windows
     	blobFile.Close()
     	explicitClosed = true
    @@ -196,7 +199,10 @@ func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, inf
     	if info.Digest == "" {
     		return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with unknown digest")
     	}
    -	blobPath := d.ref.layerPath(info.Digest)
    +	blobPath, err := d.ref.layerPath(info.Digest)
    +	if err != nil {
    +		return false, private.ReusedBlob{}, err
    +	}
     	finfo, err := os.Stat(blobPath)
     	if err != nil && os.IsNotExist(err) {
     		return false, private.ReusedBlob{}, nil
    @@ -216,7 +222,11 @@ func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, inf
     // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
     // but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
     func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error {
    -	return os.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644)
    +	path, err := d.ref.manifestPath(instanceDigest)
    +	if err != nil {
    +		return err
    +	}
    +	return os.WriteFile(path, manifest, 0644)
     }
     
     // PutSignaturesWithFormat writes a set of signatures to the destination.
    @@ -229,7 +239,11 @@ func (d *dirImageDestination) PutSignaturesWithFormat(ctx context.Context, signa
     		if err != nil {
     			return err
     		}
    -		if err := os.WriteFile(d.ref.signaturePath(i, instanceDigest), blob, 0644); err != nil {
    +		path, err := d.ref.signaturePath(i, instanceDigest)
    +		if err != nil {
    +			return err
    +		}
    +		if err := os.WriteFile(path, blob, 0644); err != nil {
     			return err
     		}
     	}
    
  • directory/directory_src.go+14 3 modified
    @@ -55,7 +55,11 @@ func (s *dirImageSource) Close() error {
     // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
     // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
     func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
    -	m, err := os.ReadFile(s.ref.manifestPath(instanceDigest))
    +	path, err := s.ref.manifestPath(instanceDigest)
    +	if err != nil {
    +		return nil, "", err
    +	}
    +	m, err := os.ReadFile(path)
     	if err != nil {
     		return nil, "", err
     	}
    @@ -66,7 +70,11 @@ func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest
     // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
     // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
     func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
    -	r, err := os.Open(s.ref.layerPath(info.Digest))
    +	path, err := s.ref.layerPath(info.Digest)
    +	if err != nil {
    +		return nil, -1, err
    +	}
    +	r, err := os.Open(path)
     	if err != nil {
     		return nil, -1, err
     	}
    @@ -84,7 +92,10 @@ func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache
     func (s *dirImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
     	signatures := []signature.Signature{}
     	for i := 0; ; i++ {
    -		path := s.ref.signaturePath(i, instanceDigest)
    +		path, err := s.ref.signaturePath(i, instanceDigest)
    +		if err != nil {
    +			return nil, err
    +		}
     		sigBlob, err := os.ReadFile(path)
     		if err != nil {
     			if os.IsNotExist(err) {
    
  • directory/directory_test.go+4 3 modified
    @@ -64,7 +64,7 @@ func TestGetPutManifest(t *testing.T) {
     func TestGetPutBlob(t *testing.T) {
     	computedBlob := []byte("test-blob")
     	providedBlob := []byte("provided-blob")
    -	providedDigest := digest.Digest("sha256:provided-test-digest")
    +	providedDigest := digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
     
     	ref, _ := refToTempDir(t)
     	cache := memory.New()
    @@ -113,12 +113,13 @@ func (fn readerFromFunc) Read(p []byte) (int, error) {
     // TestPutBlobDigestFailure simulates behavior on digest verification failure.
     func TestPutBlobDigestFailure(t *testing.T) {
     	const digestErrorString = "Simulated digest error"
    -	const blobDigest = digest.Digest("sha256:test-digest")
    +	const blobDigest = digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
     
     	ref, _ := refToTempDir(t)
     	dirRef, ok := ref.(dirReference)
     	require.True(t, ok)
    -	blobPath := dirRef.layerPath(blobDigest)
    +	blobPath, err := dirRef.layerPath(blobDigest)
    +	require.NoError(t, err)
     	cache := memory.New()
     
     	firstRead := true
    
  • directory/directory_transport.go+17 8 modified
    @@ -161,25 +161,34 @@ func (ref dirReference) DeleteImage(ctx context.Context, sys *types.SystemContex
     }
     
     // manifestPath returns a path for the manifest within a directory using our conventions.
    -func (ref dirReference) manifestPath(instanceDigest *digest.Digest) string {
    +func (ref dirReference) manifestPath(instanceDigest *digest.Digest) (string, error) {
     	if instanceDigest != nil {
    -		return filepath.Join(ref.path, instanceDigest.Encoded()+".manifest.json")
    +		if err := instanceDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly.
    +			return "", err
    +		}
    +		return filepath.Join(ref.path, instanceDigest.Encoded()+".manifest.json"), nil
     	}
    -	return filepath.Join(ref.path, "manifest.json")
    +	return filepath.Join(ref.path, "manifest.json"), nil
     }
     
     // layerPath returns a path for a layer tarball within a directory using our conventions.
    -func (ref dirReference) layerPath(digest digest.Digest) string {
    +func (ref dirReference) layerPath(digest digest.Digest) (string, error) {
    +	if err := digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly.
    +		return "", err
    +	}
     	// FIXME: Should we keep the digest identification?
    -	return filepath.Join(ref.path, digest.Encoded())
    +	return filepath.Join(ref.path, digest.Encoded()), nil
     }
     
     // signaturePath returns a path for a signature within a directory using our conventions.
    -func (ref dirReference) signaturePath(index int, instanceDigest *digest.Digest) string {
    +func (ref dirReference) signaturePath(index int, instanceDigest *digest.Digest) (string, error) {
     	if instanceDigest != nil {
    -		return filepath.Join(ref.path, fmt.Sprintf(instanceDigest.Encoded()+".signature-%d", index+1))
    +		if err := instanceDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly.
    +			return "", err
    +		}
    +		return filepath.Join(ref.path, fmt.Sprintf(instanceDigest.Encoded()+".signature-%d", index+1)), nil
     	}
    -	return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1))
    +	return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1)), nil
     }
     
     // versionPath returns a path for the version file within a directory using our conventions.
    
  • directory/directory_transport_test.go+29 7 modified
    @@ -197,8 +197,15 @@ func TestReferenceManifestPath(t *testing.T) {
     	ref, tmpDir := refToTempDir(t)
     	dirRef, ok := ref.(dirReference)
     	require.True(t, ok)
    -	assert.Equal(t, tmpDir+"/manifest.json", dirRef.manifestPath(nil))
    -	assert.Equal(t, tmpDir+"/"+dhex.Encoded()+".manifest.json", dirRef.manifestPath(&dhex))
    +	res, err := dirRef.manifestPath(nil)
    +	require.NoError(t, err)
    +	assert.Equal(t, tmpDir+"/manifest.json", res)
    +	res, err = dirRef.manifestPath(&dhex)
    +	require.NoError(t, err)
    +	assert.Equal(t, tmpDir+"/"+dhex.Encoded()+".manifest.json", res)
    +	invalidDigest := digest.Digest("sha256:../hello")
    +	_, err = dirRef.manifestPath(&invalidDigest)
    +	assert.Error(t, err)
     }
     
     func TestReferenceLayerPath(t *testing.T) {
    @@ -207,7 +214,11 @@ func TestReferenceLayerPath(t *testing.T) {
     	ref, tmpDir := refToTempDir(t)
     	dirRef, ok := ref.(dirReference)
     	require.True(t, ok)
    -	assert.Equal(t, tmpDir+"/"+hex, dirRef.layerPath("sha256:"+hex))
    +	res, err := dirRef.layerPath("sha256:" + hex)
    +	require.NoError(t, err)
    +	assert.Equal(t, tmpDir+"/"+hex, res)
    +	_, err = dirRef.layerPath(digest.Digest("sha256:../hello"))
    +	assert.Error(t, err)
     }
     
     func TestReferenceSignaturePath(t *testing.T) {
    @@ -216,10 +227,21 @@ func TestReferenceSignaturePath(t *testing.T) {
     	ref, tmpDir := refToTempDir(t)
     	dirRef, ok := ref.(dirReference)
     	require.True(t, ok)
    -	assert.Equal(t, tmpDir+"/signature-1", dirRef.signaturePath(0, nil))
    -	assert.Equal(t, tmpDir+"/signature-10", dirRef.signaturePath(9, nil))
    -	assert.Equal(t, tmpDir+"/"+dhex.Encoded()+".signature-1", dirRef.signaturePath(0, &dhex))
    -	assert.Equal(t, tmpDir+"/"+dhex.Encoded()+".signature-10", dirRef.signaturePath(9, &dhex))
    +	res, err := dirRef.signaturePath(0, nil)
    +	require.NoError(t, err)
    +	assert.Equal(t, tmpDir+"/signature-1", res)
    +	res, err = dirRef.signaturePath(9, nil)
    +	require.NoError(t, err)
    +	assert.Equal(t, tmpDir+"/signature-10", res)
    +	res, err = dirRef.signaturePath(0, &dhex)
    +	require.NoError(t, err)
    +	assert.Equal(t, tmpDir+"/"+dhex.Encoded()+".signature-1", res)
    +	res, err = dirRef.signaturePath(9, &dhex)
    +	require.NoError(t, err)
    +	assert.Equal(t, tmpDir+"/"+dhex.Encoded()+".signature-10", res)
    +	invalidDigest := digest.Digest("sha256:../hello")
    +	_, err = dirRef.signaturePath(0, &invalidDigest)
    +	assert.Error(t, err)
     }
     
     func TestReferenceVersionPath(t *testing.T) {
    
  • docker/docker_client.go+17 3 modified
    @@ -952,6 +952,8 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
     	return c.detectPropertiesError
     }
     
    +// fetchManifest fetches a manifest for (the repo of ref) + tagOrDigest.
    +// The caller is responsible for ensuring tagOrDigest uses the expected format.
     func (c *dockerClient) fetchManifest(ctx context.Context, ref dockerReference, tagOrDigest string) ([]byte, string, error) {
     	path := fmt.Sprintf(manifestPath, reference.Path(ref.ref), tagOrDigest)
     	headers := map[string][]string{
    @@ -1035,6 +1037,9 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty
     		}
     	}
     
    +	if err := info.Digest.Validate(); err != nil { // Make sure info.Digest.String() does not contain any unexpected characters
    +		return nil, 0, err
    +	}
     	path := fmt.Sprintf(blobsPath, reference.Path(ref.ref), info.Digest.String())
     	logrus.Debugf("Downloading %s", path)
     	res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
    @@ -1098,7 +1103,10 @@ func isManifestUnknownError(err error) bool {
     // digest in ref.
     // It returns (nil, nil) if the manifest does not exist.
     func (c *dockerClient) getSigstoreAttachmentManifest(ctx context.Context, ref dockerReference, digest digest.Digest) (*manifest.OCI1, error) {
    -	tag := sigstoreAttachmentTag(digest)
    +	tag, err := sigstoreAttachmentTag(digest)
    +	if err != nil {
    +		return nil, err
    +	}
     	sigstoreRef, err := reference.WithTag(reference.TrimNamed(ref.ref), tag)
     	if err != nil {
     		return nil, err
    @@ -1131,6 +1139,9 @@ func (c *dockerClient) getSigstoreAttachmentManifest(ctx context.Context, ref do
     // getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension,
     // using the original data structures.
     func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
    +	if err := manifestDigest.Validate(); err != nil { // Make sure manifestDigest.String() does not contain any unexpected characters
    +		return nil, err
    +	}
     	path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
     	res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
     	if err != nil {
    @@ -1154,8 +1165,11 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe
     }
     
     // sigstoreAttachmentTag returns a sigstore attachment tag for the specified digest.
    -func sigstoreAttachmentTag(d digest.Digest) string {
    -	return strings.Replace(d.String(), ":", "-", 1) + ".sig"
    +func sigstoreAttachmentTag(d digest.Digest) (string, error) {
    +	if err := d.Validate(); err != nil { // Make sure d.String() doesn’t contain any unexpected characters
    +		return "", err
    +	}
    +	return strings.Replace(d.String(), ":", "-", 1) + ".sig", nil
     }
     
     // Close removes resources associated with an initialized dockerClient, if any.
    
  • docker/docker_image_dest.go+18 4 modified
    @@ -230,6 +230,9 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
     // If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil);
     // it returns a non-nil error only on an unexpected failure.
     func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) {
    +	if err := digest.Validate(); err != nil { // Make sure digest.String() does not contain any unexpected characters
    +		return false, -1, err
    +	}
     	checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
     	logrus.Debugf("Checking %s", checkPath)
     	res, err := d.c.makeRequest(ctx, http.MethodHead, checkPath, nil, nil, v2Auth, extraScope)
    @@ -469,6 +472,7 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst
     		// particular instance.
     		refTail = instanceDigest.String()
     		// Double-check that the manifest we've been given matches the digest we've been given.
    +		// This also validates the format of instanceDigest.
     		matches, err := manifest.MatchesDigest(m, *instanceDigest)
     		if err != nil {
     			return fmt.Errorf("digesting manifest in PutManifest: %w", err)
    @@ -635,19 +639,24 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures []signature
     
     	// NOTE: Keep this in sync with docs/signature-protocols.md!
     	for i, signature := range signatures {
    -		sigURL := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
    -		err := d.putOneSignature(sigURL, signature)
    +		sigURL, err := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
     		if err != nil {
     			return err
     		}
    +		if err := d.putOneSignature(sigURL, signature); err != nil {
    +			return err
    +		}
     	}
     	// Remove any other signatures, if present.
     	// We stop at the first missing signature; if a previous deleting loop aborted
     	// prematurely, this may not clean up all of them, but one missing signature
     	// is enough for dockerImageSource to stop looking for other signatures, so that
     	// is sufficient.
     	for i := len(signatures); ; i++ {
    -		sigURL := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
    +		sigURL, err := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
    +		if err != nil {
    +			return err
    +		}
     		missing, err := d.c.deleteOneSignature(sigURL)
     		if err != nil {
     			return err
    @@ -778,8 +787,12 @@ func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context.
     	if err != nil {
     		return err
     	}
    +	attachmentTag, err := sigstoreAttachmentTag(manifestDigest)
    +	if err != nil {
    +		return err
    +	}
     	logrus.Debugf("Uploading sigstore attachment manifest")
    -	return d.uploadManifest(ctx, manifestBlob, sigstoreAttachmentTag(manifestDigest))
    +	return d.uploadManifest(ctx, manifestBlob, attachmentTag)
     }
     
     func layerMatchesSigstoreSignature(layer imgspecv1.Descriptor, mimeType string,
    @@ -895,6 +908,7 @@ func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context
     			return err
     		}
     
    +		// manifestDigest is known to be valid because it was not rejected by getExtensionsSignatures above.
     		path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), manifestDigest.String())
     		res, err := d.c.makeRequest(ctx, http.MethodPut, path, nil, bytes.NewReader(body), v2Auth, nil)
     		if err != nil {
    
  • docker/docker_image.go+6 1 modified
    @@ -88,7 +88,12 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
     		if err = json.NewDecoder(res.Body).Decode(&tagsHolder); err != nil {
     			return nil, err
     		}
    -		tags = append(tags, tagsHolder.Tags...)
    +		for _, tag := range tagsHolder.Tags {
    +			if _, err := reference.WithTag(dr.ref, tag); err != nil { // Ensure the tag does not contain unexpected values
    +				return nil, fmt.Errorf("registry returned invalid tag %q: %w", tag, err)
    +			}
    +			tags = append(tags, tag)
    +		}
     
     		link := res.Header.Get("Link")
     		if link == "" {
    
  • docker/docker_image_src.go+16 2 modified
    @@ -194,6 +194,9 @@ func simplifyContentType(contentType string) string {
     // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
     func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
     	if instanceDigest != nil {
    +		if err := instanceDigest.Validate(); err != nil { // Make sure instanceDigest.String() does not contain any unexpected characters
    +			return nil, "", err
    +		}
     		return s.fetchManifest(ctx, instanceDigest.String())
     	}
     	err := s.ensureManifestIsLoaded(ctx)
    @@ -203,6 +206,8 @@ func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *dig
     	return s.cachedManifest, s.cachedManifestMIMEType, nil
     }
     
    +// fetchManifest fetches a manifest for tagOrDigest.
    +// The caller is responsible for ensuring tagOrDigest uses the expected format.
     func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) {
     	return s.c.fetchManifest(ctx, s.physicalRef, tagOrDigest)
     }
    @@ -352,6 +357,9 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo,
     		return nil, nil, fmt.Errorf("external URLs not supported with GetBlobAt")
     	}
     
    +	if err := info.Digest.Validate(); err != nil { // Make sure info.Digest.String() does not contain any unexpected characters
    +		return nil, nil, err
    +	}
     	path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String())
     	logrus.Debugf("Downloading %s", path)
     	res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil)
    @@ -462,7 +470,10 @@ func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, inst
     			return nil, fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures)
     		}
     
    -		sigURL := lookasideStorageURL(s.c.signatureBase, manifestDigest, i)
    +		sigURL, err := lookasideStorageURL(s.c.signatureBase, manifestDigest, i)
    +		if err != nil {
    +			return nil, err
    +		}
     		signature, missing, err := s.getOneSignature(ctx, sigURL)
     		if err != nil {
     			return nil, err
    @@ -660,7 +671,10 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
     	}
     
     	for i := 0; ; i++ {
    -		sigURL := lookasideStorageURL(c.signatureBase, manifestDigest, i)
    +		sigURL, err := lookasideStorageURL(c.signatureBase, manifestDigest, i)
    +		if err != nil {
    +			return err
    +		}
     		missing, err := c.deleteOneSignature(sigURL)
     		if err != nil {
     			return err
    
  • docker/internal/tarfile/dest.go+10 2 modified
    @@ -111,11 +111,19 @@ func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader,
     			return private.UploadedBlob{}, fmt.Errorf("reading Config file stream: %w", err)
     		}
     		d.config = buf
    -		if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil {
    +		configPath, err := d.archive.configPath(inputInfo.Digest)
    +		if err != nil {
    +			return private.UploadedBlob{}, err
    +		}
    +		if err := d.archive.sendFileLocked(configPath, inputInfo.Size, bytes.NewReader(buf)); err != nil {
     			return private.UploadedBlob{}, fmt.Errorf("writing Config file: %w", err)
     		}
     	} else {
    -		if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil {
    +		layerPath, err := d.archive.physicalLayerPath(inputInfo.Digest)
    +		if err != nil {
    +			return private.UploadedBlob{}, err
    +		}
    +		if err := d.archive.sendFileLocked(layerPath, inputInfo.Size, stream); err != nil {
     			return private.UploadedBlob{}, err
     		}
     	}
    
  • docker/internal/tarfile/writer.go+27 7 modified
    @@ -95,7 +95,10 @@ func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest diges
     	if !w.legacyLayers.Contains(layerID) {
     		// Create a symlink for the legacy format, where there is one subdirectory per layer ("image").
     		// See also the comment in physicalLayerPath.
    -		physicalLayerPath := w.physicalLayerPath(layerDigest)
    +		physicalLayerPath, err := w.physicalLayerPath(layerDigest)
    +		if err != nil {
    +			return err
    +		}
     		if err := w.sendSymlinkLocked(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
     			return fmt.Errorf("creating layer symbolic link: %w", err)
     		}
    @@ -139,6 +142,9 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
     		}
     
     		// This chainID value matches the computation in docker/docker/layer.CreateChainID …
    +		if err := l.Digest.Validate(); err != nil { // This should never fail on this code path, still: make sure the chainID computation is unambiguous.
    +			return err
    +		}
     		if chainID == "" {
     			chainID = l.Digest
     		} else {
    @@ -204,12 +210,20 @@ func checkManifestItemsMatch(a, b *ManifestItem) error {
     func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Descriptor, configDigest digest.Digest, repoTags []reference.NamedTagged) error {
     	layerPaths := []string{}
     	for _, l := range layerDescriptors {
    -		layerPaths = append(layerPaths, w.physicalLayerPath(l.Digest))
    +		p, err := w.physicalLayerPath(l.Digest)
    +		if err != nil {
    +			return err
    +		}
    +		layerPaths = append(layerPaths, p)
     	}
     
     	var item *ManifestItem
    +	configPath, err := w.configPath(configDigest)
    +	if err != nil {
    +		return err
    +	}
     	newItem := ManifestItem{
    -		Config:       w.configPath(configDigest),
    +		Config:       configPath,
     		RepoTags:     []string{},
     		Layers:       layerPaths,
     		Parent:       "", // We don’t have this information
    @@ -294,21 +308,27 @@ func (w *Writer) Close() error {
     // configPath returns a path we choose for storing a config with the specified digest.
     // NOTE: This is an internal implementation detail, not a format property, and can change
     // any time.
    -func (w *Writer) configPath(configDigest digest.Digest) string {
    -	return configDigest.Hex() + ".json"
    +func (w *Writer) configPath(configDigest digest.Digest) (string, error) {
    +	if err := configDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
    +		return "", err
    +	}
    +	return configDigest.Hex() + ".json", nil
     }
     
     // physicalLayerPath returns a path we choose for storing a layer with the specified digest
     // (the actual path, i.e. a regular file, not a symlink that may be used in the legacy format).
     // NOTE: This is an internal implementation detail, not a format property, and can change
     // any time.
    -func (w *Writer) physicalLayerPath(layerDigest digest.Digest) string {
    +func (w *Writer) physicalLayerPath(layerDigest digest.Digest) (string, error) {
    +	if err := layerDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
    +		return "", err
    +	}
     	// Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
     	// writeLegacyMetadata constructs layer IDs differently from inputinfo.Digest values (as described
     	// inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
     	// tries to load every subdirectory as an image and fails if the config is missing.  So, keep the layers
     	// in the root of the tarball.
    -	return layerDigest.Hex() + ".tar"
    +	return layerDigest.Hex() + ".tar", nil
     }
     
     type tarFI struct {
    
  • docker/registries_d.go+5 2 modified
    @@ -286,8 +286,11 @@ func (ns registryNamespace) signatureTopLevel(write bool) string {
     // lookasideStorageURL returns an URL usable for accessing signature index in base with known manifestDigest.
     // base is not nil from the caller
     // NOTE: Keep this in sync with docs/signature-protocols.md!
    -func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) *url.URL {
    +func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) (*url.URL, error) {
    +	if err := manifestDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in a path with ../, so validate explicitly.
    +		return nil, err
    +	}
     	sigURL := *base
     	sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
    -	return &sigURL
    +	return &sigURL, nil
     }
    
  • docker/registries_d_test.go+8 1 modified
    @@ -8,6 +8,7 @@ import (
     	"testing"
     
     	"github.com/containers/image/v5/types"
    +	digest "github.com/opencontainers/go-digest"
     	"github.com/stretchr/testify/assert"
     	"github.com/stretchr/testify/require"
     )
    @@ -320,9 +321,15 @@ func TestLookasideStorageURL(t *testing.T) {
     		require.NoError(t, err)
     		expectedURL, err := url.Parse(c.expected)
     		require.NoError(t, err)
    -		res := lookasideStorageURL(baseURL, mdInput, c.index)
    +		res, err := lookasideStorageURL(baseURL, mdInput, c.index)
    +		require.NoError(t, err)
     		assert.Equal(t, expectedURL, res, c.expected)
     	}
    +
    +	baseURL, err := url.Parse("file:///tmp")
    +	require.NoError(t, err)
    +	_, err = lookasideStorageURL(baseURL, digest.Digest("sha256:../hello"), 0)
    +	assert.Error(t, err)
     }
     
     func TestBuiltinDefaultLookasideStorageDir(t *testing.T) {
    
  • openshift/openshift_src.go+3 0 modified
    @@ -109,6 +109,9 @@ func (s *openshiftImageSource) GetSignaturesWithFormat(ctx context.Context, inst
     		}
     		imageStreamImageName = s.imageStreamImageName
     	} else {
    +		if err := instanceDigest.Validate(); err != nil { // Make sure instanceDigest.String() does not contain any unexpected characters
    +			return nil, err
    +		}
     		imageStreamImageName = instanceDigest.String()
     	}
     	image, err := s.client.getImage(ctx, imageStreamImageName)
    
  • ostree/ostree_dest.go+10 0 modified
    @@ -345,6 +345,10 @@ func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context,
     		}
     		d.repo = repo
     	}
    +
    +	if err := info.Digest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, so validate explicitly.
    +		return false, private.ReusedBlob{}, err
    +	}
     	branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
     
     	found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
    @@ -470,12 +474,18 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er
     		return nil
     	}
     	for _, layer := range d.schema.LayersDescriptors {
    +		if err := layer.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
    +			return err
    +		}
     		hash := layer.Digest.Hex()
     		if err = checkLayer(hash); err != nil {
     			return err
     		}
     	}
     	for _, layer := range d.schema.FSLayers {
    +		if err := layer.BlobSum.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
    +			return err
    +		}
     		hash := layer.BlobSum.Hex()
     		if err = checkLayer(hash); err != nil {
     			return err
    
  • ostree/ostree_src.go+3 1 modified
    @@ -286,7 +286,9 @@ func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser,
     // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
     // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
     func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
    -
    +	if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
    +		return nil, -1, err
    +	}
     	blob := info.Digest.Hex()
     
     	// Ensure s.compressed is initialized.  It is build by LayerInfosForCopy.
    
  • pkg/blobcache/blobcache.go+9 3 modified
    @@ -78,12 +78,15 @@ func (b *BlobCache) DeleteImage(ctx context.Context, sys *types.SystemContext) e
     }
     
     // blobPath returns the path appropriate for storing a blob with digest.
    -func (b *BlobCache) blobPath(digest digest.Digest, isConfig bool) string {
    +func (b *BlobCache) blobPath(digest digest.Digest, isConfig bool) (string, error) {
    +	if err := digest.Validate(); err != nil { // Make sure digest.String() does not contain any unexpected characters
    +		return "", err
    +	}
     	baseName := digest.String()
     	if isConfig {
     		baseName += ".config"
     	}
    -	return filepath.Join(b.directory, baseName)
    +	return filepath.Join(b.directory, baseName), nil
     }
     
     // findBlob checks if we have a blob for info in cache (whether a config or not)
    @@ -95,7 +98,10 @@ func (b *BlobCache) findBlob(info types.BlobInfo) (string, int64, bool, error) {
     	}
     
     	for _, isConfig := range []bool{false, true} {
    -		path := b.blobPath(info.Digest, isConfig)
    +		path, err := b.blobPath(info.Digest, isConfig)
    +		if err != nil {
    +			return "", -1, false, err
    +		}
     		fileInfo, err := os.Stat(path)
     		if err == nil && (info.Size == -1 || info.Size == fileInfo.Size()) {
     			return path, fileInfo.Size(), isConfig, nil
    
  • pkg/blobcache/dest.go+55 38 modified
    @@ -79,47 +79,58 @@ func (d *blobCacheDestination) IgnoresEmbeddedDockerReference() bool {
     // and this new file.
     func (d *blobCacheDestination) saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os.File, compressedFilename string, compressedDigest digest.Digest, isConfig bool, alternateDigest *digest.Digest) {
     	defer wg.Done()
    -	// Decompress from and digest the reading end of that pipe.
    -	decompressed, err3 := archive.DecompressStream(decompressReader)
    +	defer decompressReader.Close()
    +
    +	succeeded := false
    +	defer func() {
    +		if !succeeded {
    +			// Remove the temporary file.
    +			if err := os.Remove(tempFile.Name()); err != nil {
    +				logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err)
    +			}
    +		}
    +	}()
    +
     	digester := digest.Canonical.Digester()
    -	if err3 == nil {
    +	if err := func() error { // A scope for defer
    +		defer tempFile.Close()
    +
    +		// Decompress from and digest the reading end of that pipe.
    +		decompressed, err := archive.DecompressStream(decompressReader)
    +		if err != nil {
    +			// Drain the pipe to keep from stalling the PutBlob() thread.
    +			if _, err2 := io.Copy(io.Discard, decompressReader); err2 != nil {
    +				logrus.Debugf("error draining the pipe: %v", err2)
    +			}
    +			return err
    +		}
    +		defer decompressed.Close()
     		// Read the decompressed data through the filter over the pipe, blocking until the
     		// writing end is closed.
    -		_, err3 = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed)
    -	} else {
    -		// Drain the pipe to keep from stalling the PutBlob() thread.
    -		if _, err := io.Copy(io.Discard, decompressReader); err != nil {
    -			logrus.Debugf("error draining the pipe: %v", err)
    -		}
    +		_, err = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed)
    +		return err
    +	}(); err != nil {
    +		return
     	}
    -	decompressReader.Close()
    -	decompressed.Close()
    -	tempFile.Close()
    +
     	// Determine the name that we should give to the uncompressed copy of the blob.
    -	decompressedFilename := d.reference.blobPath(digester.Digest(), isConfig)
    -	if err3 == nil {
    -		// Rename the temporary file.
    -		if err3 = os.Rename(tempFile.Name(), decompressedFilename); err3 != nil {
    -			logrus.Debugf("error renaming new decompressed copy of blob %q into place at %q: %v", digester.Digest().String(), decompressedFilename, err3)
    -			// Remove the temporary file.
    -			if err3 = os.Remove(tempFile.Name()); err3 != nil {
    -				logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3)
    -			}
    -		} else {
    -			*alternateDigest = digester.Digest()
    -			// Note the relationship between the two files.
    -			if err3 = ioutils.AtomicWriteFile(decompressedFilename+compressedNote, []byte(compressedDigest.String()), 0600); err3 != nil {
    -				logrus.Debugf("error noting that the compressed version of %q is %q: %v", digester.Digest().String(), compressedDigest.String(), err3)
    -			}
    -			if err3 = ioutils.AtomicWriteFile(compressedFilename+decompressedNote, []byte(digester.Digest().String()), 0600); err3 != nil {
    -				logrus.Debugf("error noting that the decompressed version of %q is %q: %v", compressedDigest.String(), digester.Digest().String(), err3)
    -			}
    -		}
    -	} else {
    -		// Remove the temporary file.
    -		if err3 = os.Remove(tempFile.Name()); err3 != nil {
    -			logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3)
    -		}
    +	decompressedFilename, err := d.reference.blobPath(digester.Digest(), isConfig)
    +	if err != nil {
    +		return
    +	}
    +	// Rename the temporary file.
    +	if err := os.Rename(tempFile.Name(), decompressedFilename); err != nil {
    +		logrus.Debugf("error renaming new decompressed copy of blob %q into place at %q: %v", digester.Digest().String(), decompressedFilename, err)
    +		return
    +	}
    +	succeeded = true
    +	*alternateDigest = digester.Digest()
    +	// Note the relationship between the two files.
    +	if err := ioutils.AtomicWriteFile(decompressedFilename+compressedNote, []byte(compressedDigest.String()), 0600); err != nil {
    +		logrus.Debugf("error noting that the compressed version of %q is %q: %v", digester.Digest().String(), compressedDigest.String(), err)
    +	}
    +	if err := ioutils.AtomicWriteFile(compressedFilename+decompressedNote, []byte(digester.Digest().String()), 0600); err != nil {
    +		logrus.Debugf("error noting that the decompressed version of %q is %q: %v", compressedDigest.String(), digester.Digest().String(), err)
     	}
     }
     
    @@ -144,7 +155,10 @@ func (d *blobCacheDestination) PutBlobWithOptions(ctx context.Context, stream io
     	needToWait := false
     	compression := archive.Uncompressed
     	if inputInfo.Digest != "" {
    -		filename := d.reference.blobPath(inputInfo.Digest, options.IsConfig)
    +		filename, err2 := d.reference.blobPath(inputInfo.Digest, options.IsConfig)
    +		if err2 != nil {
    +			return private.UploadedBlob{}, err2
    +		}
     		tempfile, err = os.CreateTemp(filepath.Dir(filename), filepath.Base(filename))
     		if err == nil {
     			stream = io.TeeReader(stream, tempfile)
    @@ -273,7 +287,10 @@ func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []
     	if err != nil {
     		logrus.Warnf("error digesting manifest %q: %v", string(manifestBytes), err)
     	} else {
    -		filename := d.reference.blobPath(manifestDigest, false)
    +		filename, err := d.reference.blobPath(manifestDigest, false)
    +		if err != nil {
    +			return err
    +		}
     		if err = ioutils.AtomicWriteFile(filename, manifestBytes, 0600); err != nil {
     			logrus.Warnf("error saving manifest as %q: %v", filename, err)
     		}
    
  • pkg/blobcache/src.go+12 4 modified
    @@ -56,7 +56,10 @@ func (s *blobCacheSource) Close() error {
     
     func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
     	if instanceDigest != nil {
    -		filename := s.reference.blobPath(*instanceDigest, false)
    +		filename, err := s.reference.blobPath(*instanceDigest, false)
    +		if err != nil {
    +			return nil, "", err
    +		}
     		manifestBytes, err := os.ReadFile(filename)
     		if err == nil {
     			s.cacheHits++
    @@ -136,8 +139,10 @@ func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest
     		replacedInfos := make([]types.BlobInfo, 0, len(infos))
     		for _, info := range infos {
     			var replaceDigest []byte
    -			var err error
    -			blobFile := s.reference.blobPath(info.Digest, false)
    +			blobFile, err := s.reference.blobPath(info.Digest, false)
    +			if err != nil {
    +				return nil, err
    +			}
     			var alternate string
     			switch s.reference.compress {
     			case types.Compress:
    @@ -148,7 +153,10 @@ func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest
     				replaceDigest, err = os.ReadFile(alternate)
     			}
     			if err == nil && digest.Digest(replaceDigest).Validate() == nil {
    -				alternate = s.reference.blobPath(digest.Digest(replaceDigest), false)
    +				alternate, err = s.reference.blobPath(digest.Digest(replaceDigest), false)
    +				if err != nil {
    +					return nil, err
    +				}
     				fileInfo, err := os.Stat(alternate)
     				if err == nil {
     					switch info.MediaType {
    
  • storage/storage_dest.go+27 15 modified
    @@ -361,6 +361,18 @@ func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context,
     // tryReusingBlobAsPending implements TryReusingBlobWithOptions for (blobDigest, size or -1), filling s.blobDiffIDs and other metadata.
     // The caller must arrange the blob to be eventually committed using s.commitLayer().
     func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
    +	if blobDigest == "" {
    +		return false, private.ReusedBlob{}, errors.New(`Can not check for a blob with unknown digest`)
    +	}
    +	if err := blobDigest.Validate(); err != nil {
    +		return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
    +	}
    +	if options.TOCDigest != "" {
    +		if err := options.TOCDigest.Validate(); err != nil {
    +			return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
    +		}
    +	}
    +
     	// lock the entire method as it executes fairly quickly
     	s.lock.Lock()
     	defer s.lock.Unlock()
    @@ -380,18 +392,6 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige
     		}
     	}
     
    -	if blobDigest == "" {
    -		return false, private.ReusedBlob{}, errors.New(`Can not check for a blob with unknown digest`)
    -	}
    -	if err := blobDigest.Validate(); err != nil {
    -		return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
    -	}
    -	if options.TOCDigest != "" {
    -		if err := options.TOCDigest.Validate(); err != nil {
    -			return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
    -		}
    -	}
    -
     	// Check if we have a wasn't-compressed layer in storage that's based on that blob.
     
     	// Check if we've already cached it in a file.
    @@ -1070,17 +1070,25 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
     		if err != nil {
     			return fmt.Errorf("digesting top-level manifest: %w", err)
     		}
    +		key, err := manifestBigDataKey(manifestDigest)
    +		if err != nil {
    +			return err
    +		}
     		options.BigData = append(options.BigData, storage.ImageBigDataOption{
    -			Key:    manifestBigDataKey(manifestDigest),
    +			Key:    key,
     			Data:   toplevelManifest,
     			Digest: manifestDigest,
     		})
     	}
     	// Set up to save the image's manifest.  Allow looking it up by digest by using the key convention defined by the Store.
     	// Record the manifest twice: using a digest-specific key to allow references to that specific digest instance,
     	// and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers.
    +	key, err := manifestBigDataKey(s.manifestDigest)
    +	if err != nil {
    +		return err
    +	}
     	options.BigData = append(options.BigData, storage.ImageBigDataOption{
    -		Key:    manifestBigDataKey(s.manifestDigest),
    +		Key:    key,
     		Data:   s.manifest,
     		Digest: s.manifestDigest,
     	})
    @@ -1098,8 +1106,12 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
     		})
     	}
     	for instanceDigest, signatures := range s.signatureses {
    +		key, err := signatureBigDataKey(instanceDigest)
    +		if err != nil {
    +			return err
    +		}
     		options.BigData = append(options.BigData, storage.ImageBigDataOption{
    -			Key:    signatureBigDataKey(instanceDigest),
    +			Key:    key,
     			Data:   signatures,
     			Digest: digest.Canonical.FromBytes(signatures),
     		})
    
  • storage/storage_image.go+10 4 modified
    @@ -21,14 +21,20 @@ var (
     // manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions.
     // If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
     // for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey
    -func manifestBigDataKey(digest digest.Digest) string {
    -	return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String()
    +func manifestBigDataKey(digest digest.Digest) (string, error) {
    +	if err := digest.Validate(); err != nil { // Make sure info.Digest.String() uses the expected format and does not collide with other BigData keys.
    +		return "", err
    +	}
    +	return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String(), nil
     }
     
     // signatureBigDataKey returns a key suitable for recording the signatures associated with the manifest with the specified digest using storage.Store.ImageBigData and related functions.
     // If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
    -func signatureBigDataKey(digest digest.Digest) string {
    -	return "signature-" + digest.Encoded()
    +func signatureBigDataKey(digest digest.Digest) (string, error) {
    +	if err := digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
    +		return "", err
    +	}
    +	return "signature-" + digest.Encoded(), nil
     }
     
     // storageImageMetadata is stored, as JSON, in storage.Image.Metadata
    
  • storage/storage_reference.go+8 2 modified
    @@ -73,7 +73,10 @@ func multiArchImageMatchesSystemContext(store storage.Store, img *storage.Image,
     	// We don't need to care about storage.ImageDigestBigDataKey because
     	// manifests lists are only stored into storage by c/image versions
     	// that know about manifestBigDataKey, and only using that key.
    -	key := manifestBigDataKey(manifestDigest)
    +	key, err := manifestBigDataKey(manifestDigest)
    +	if err != nil {
    +		return false // This should never happen, manifestDigest comes from a reference.Digested, and that validates the format.
    +	}
     	manifestBytes, err := store.ImageBigData(img.ID, key)
     	if err != nil {
     		return false
    @@ -95,7 +98,10 @@ func multiArchImageMatchesSystemContext(store storage.Store, img *storage.Image,
     	if err != nil {
     		return false
     	}
    -	key = manifestBigDataKey(chosenInstance)
    +	key, err = manifestBigDataKey(chosenInstance)
    +	if err != nil {
    +		return false
    +	}
     	_, err = store.ImageBigData(img.ID, key)
     	return err == nil // true if img.ID is based on chosenInstance.
     }
    
  • storage/storage_src.go+16 3 modified
    @@ -237,7 +237,10 @@ func (s *storageImageSource) getBlobAndLayerID(digest digest.Digest, layers []st
     // GetManifest() reads the image's manifest.
     func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, mimeType string, err error) {
     	if instanceDigest != nil {
    -		key := manifestBigDataKey(*instanceDigest)
    +		key, err := manifestBigDataKey(*instanceDigest)
    +		if err != nil {
    +			return nil, "", err
    +		}
     		blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
     		if err != nil {
     			return nil, "", fmt.Errorf("reading manifest for image instance %q: %w", *instanceDigest, err)
    @@ -249,7 +252,10 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di
     		// Prefer the manifest corresponding to the user-specified digest, if available.
     		if s.imageRef.named != nil {
     			if digested, ok := s.imageRef.named.(reference.Digested); ok {
    -				key := manifestBigDataKey(digested.Digest())
    +				key, err := manifestBigDataKey(digested.Digest())
    +				if err != nil {
    +					return nil, "", err
    +				}
     				blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
     				if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key
     					return nil, "", err
    @@ -385,7 +391,14 @@ func (s *storageImageSource) GetSignaturesWithFormat(ctx context.Context, instan
     	instance := "default instance"
     	if instanceDigest != nil {
     		signatureSizes = s.metadata.SignaturesSizes[*instanceDigest]
    -		key = signatureBigDataKey(*instanceDigest)
    +		k, err := signatureBigDataKey(*instanceDigest)
    +		if err != nil {
    +			return nil, err
    +		}
    +		key = k
    +		if err := instanceDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
    +			return nil, err
    +		}
     		instance = instanceDigest.Encoded()
     	}
     	if len(signatureSizes) > 0 {
    

Vulnerability mechanics

Generated by an automated analysis pipeline on May 9, 2026. Inputs: CWE entries and the fix-commit diffs from this CVE's patches. Citations were validated against the source bundle.

References

51

News mentions

0

No linked articles in our index yet.