VYPR
Moderate severity · NVD Advisory · Published Feb 4, 2026 · Updated Feb 4, 2026

apko is vulnerable to unbounded resource consumption in expandapk.Split on attacker-controlled .apk streams

CVE-2026-25122

Description

apko allows users to build and publish OCI container images built from apk packages. From version 0.14.8 to before 1.1.0, expandapk.Split drains the first gzip stream of an APK archive via io.Copy(io.Discard, gzi) without explicit bounds. With an attacker-controlled input stream, this can force large gzip inflation work and lead to resource exhaustion (availability impact). The Split function reads the first tar header, then drains the remainder of the gzip stream by reading from the gzip reader directly without any maximum uncompressed byte limit or inflate-ratio cap. A caller that parses attacker-controlled APK streams may be forced to spend excessive CPU time inflating gzip data, leading to timeouts or process slowdown. This issue has been patched in version 1.1.0.

Affected packages

Versions sourced from the GitHub Security Advisory.

Package | Affected versions | Patched versions
chainguard.dev/apko (Go)
>= 0.14.8, < 1.1.0 | 1.1.0

Affected products

1

Patches

1
2be3903fe194

Add limit readers for readers that consume data from external sources. (#2042)

https://github.com/chainguard-dev/apko · Billy Lynch · Jan 30, 2026 · via GHSA
20 files changed · +814 −25
  • internal/cli/build-cpio.go · +4 −0 · modified
    @@ -28,6 +28,7 @@ import (
     	"chainguard.dev/apko/pkg/build"
     	"chainguard.dev/apko/pkg/build/types"
     	"chainguard.dev/apko/pkg/cpio"
    +	"chainguard.dev/apko/pkg/options"
     )
     
     func buildCPIO() *cobra.Command {
    @@ -38,6 +39,7 @@ func buildCPIO() *cobra.Command {
     	var extraBuildRepos []string
     	var extraRepos []string
     	var extraPackages []string
    +	var sizeLimits options.SizeLimits
     
     	cmd := &cobra.Command{
     		Use:     "build-cpio",
    @@ -56,6 +58,7 @@ func buildCPIO() *cobra.Command {
     				build.WithBuildDate(buildDate),
     				build.WithSBOM(sbomPath),
     				build.WithArch(types.ParseArchitecture(buildArch)),
    +				build.WithSizeLimits(sizeLimits),
     			)
     		},
     	}
    @@ -67,6 +70,7 @@ func buildCPIO() *cobra.Command {
     	cmd.Flags().StringSliceVarP(&extraBuildRepos, "build-repository-append", "b", []string{}, "path to extra repositories to include")
     	cmd.Flags().StringSliceVarP(&extraRepos, "repository-append", "r", []string{}, "path to extra repositories to include")
     	cmd.Flags().StringSliceVarP(&extraPackages, "package-append", "p", []string{}, "extra packages to include")
    +	addClientLimitFlags(cmd, &sizeLimits)
     
     	return cmd
     }
    
  • internal/cli/build.go · +4 −0 · modified
    @@ -37,6 +37,7 @@ import (
     	"chainguard.dev/apko/pkg/build"
     	"chainguard.dev/apko/pkg/build/oci"
     	"chainguard.dev/apko/pkg/build/types"
    +	"chainguard.dev/apko/pkg/options"
     	"chainguard.dev/apko/pkg/sbom/generator"
     	"chainguard.dev/apko/pkg/tarfs"
     )
    @@ -58,6 +59,7 @@ func buildCmd() *cobra.Command {
     	var lockfile string
     	var includePaths []string
     	var ignoreSignatures bool
    +	var sizeLimits options.SizeLimits
     
     	cmd := &cobra.Command{
     		Use:   "build",
    @@ -116,6 +118,7 @@ Along the image, apko will generate SBOMs (software bill of materials) describin
     				build.WithTempDir(tmp),
     				build.WithIncludePaths(includePaths),
     				build.WithIgnoreSignatures(ignoreSignatures),
    +				build.WithSizeLimits(sizeLimits),
     			)
     		},
     	}
    @@ -136,6 +139,7 @@ Along the image, apko will generate SBOMs (software bill of materials) describin
     	cmd.Flags().StringVar(&lockfile, "lockfile", "", "a path to .lock.json file (e.g. produced by apko lock) that constraints versions of packages to the listed ones (default '' means no additional constraints)")
     	cmd.Flags().StringSliceVar(&includePaths, "include-paths", []string{}, "Additional include paths where to look for input files (config, base image, etc.). By default apko will search for paths only in workdir. Include paths may be absolute, or relative. Relative paths are interpreted relative to workdir. For adding extra paths for packages, use --repository-append.")
     	cmd.Flags().BoolVar(&ignoreSignatures, "ignore-signatures", false, "ignore repository signature verification")
    +	addClientLimitFlags(cmd, &sizeLimits)
     	return cmd
     }
     
    
  • internal/cli/build-minirootfs.go · +4 −0 · modified
    @@ -26,6 +26,7 @@ import (
     
     	"chainguard.dev/apko/pkg/build"
     	"chainguard.dev/apko/pkg/build/types"
    +	"chainguard.dev/apko/pkg/options"
     	"chainguard.dev/apko/pkg/tarfs"
     )
     
    @@ -38,6 +39,7 @@ func buildMinirootFS() *cobra.Command {
     	var extraBuildRepos []string
     	var extraRepos []string
     	var extraPackages []string
    +	var sizeLimits options.SizeLimits
     
     	cmd := &cobra.Command{
     		Use:     "build-minirootfs",
    @@ -57,6 +59,7 @@ func buildMinirootFS() *cobra.Command {
     				build.WithSBOM(sbomPath),
     				build.WithArch(types.ParseArchitecture(buildArch)),
     				build.WithIgnoreSignatures(ignoreSignatures),
    +				build.WithSizeLimits(sizeLimits),
     			)
     		},
     	}
    @@ -69,6 +72,7 @@ func buildMinirootFS() *cobra.Command {
     	cmd.Flags().StringSliceVarP(&extraBuildRepos, "build-repository-append", "b", []string{}, "path to extra repositories to include")
     	cmd.Flags().StringSliceVarP(&extraRepos, "repository-append", "r", []string{}, "path to extra repositories to include")
     	cmd.Flags().StringSliceVarP(&extraPackages, "package-append", "p", []string{}, "extra packages to include")
    +	addClientLimitFlags(cmd, &sizeLimits)
     
     	return cmd
     }
    
  • internal/cli/flags.go · +35 −0 · added
    @@ -0,0 +1,35 @@
    +// Copyright 2022, 2023 Chainguard, Inc.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package cli
    +
    +import (
    +	"github.com/spf13/cobra"
    +
    +	"chainguard.dev/apko/pkg/options"
    +)
    +
    +// addClientLimitFlags adds size limit flags for APK client operations (fetching indexes, expanding packages).
    +func addClientLimitFlags(cmd *cobra.Command, limits *options.SizeLimits) {
    +	defaults := options.DefaultSizeLimits()
    +
    +	cmd.Flags().Int64Var(&limits.APKIndexDecompressedMaxSize, "max-apkindex-decompressed-size", defaults.APKIndexDecompressedMaxSize,
    +		"maximum decompressed size for APKINDEX archives in bytes, protects against gzip bombs (0=default, -1=no limit)")
    +	cmd.Flags().Int64Var(&limits.APKControlMaxSize, "max-apk-control-size", defaults.APKControlMaxSize,
    +		"maximum decompressed size for APK control sections in bytes (0=default, -1=no limit)")
    +	cmd.Flags().Int64Var(&limits.APKDataMaxSize, "max-apk-data-size", defaults.APKDataMaxSize,
    +		"maximum decompressed size for APK data sections in bytes, protects against gzip bombs (0=default, -1=no limit)")
    +	cmd.Flags().Int64Var(&limits.HTTPResponseMaxSize, "max-http-response-size", defaults.HTTPResponseMaxSize,
    +		"maximum size for HTTP responses in bytes (0=default, -1=no limit)")
    +}
    
  • pkg/apk/apk/apkindex.go · +32 −2 · modified
    @@ -14,11 +14,17 @@ import (
     	"strings"
     	"text/template"
     	"time"
    +
    +	"chainguard.dev/apko/pkg/limitio"
     )
     
     const apkIndexFilename = "APKINDEX"
     const descriptionFilename = "DESCRIPTION"
     
    +// DefaultMaxAPKIndexDecompressedSize is the maximum decompressed size for APKINDEX archives (100 MB).
    +// This protects against gzip bombs where a small compressed file expands to a huge size.
    +const DefaultMaxAPKIndexDecompressedSize = 100 << 20
    +
     // Go template for generating the APKINDEX file from an ApkIndex struct
     var apkIndexTemplate = template.Must(template.New(apkIndexFilename).Funcs(
     	template.FuncMap{
    @@ -197,15 +203,39 @@ func ParsePackageIndex(apkIndexUnpacked io.Reader) ([]*Package, error) {
     	return packages, indexScanner.Err()
     }
     
    -func IndexFromArchive(archive io.ReadCloser) (*APKIndex, error) {
    +// IndexFromArchiveOption configures IndexFromArchive behavior.
    +type IndexFromArchiveOption func(*indexFromArchiveOpts)
    +
    +type indexFromArchiveOpts struct {
    +	decompressedMaxSize int64
    +}
    +
    +// WithDecompressedMaxSize sets the maximum decompressed size for the APKINDEX archive.
    +// Use 0 for default, or < 0 for unlimited.
    +func WithDecompressedMaxSize(size int64) IndexFromArchiveOption {
    +	return func(o *indexFromArchiveOpts) {
    +		o.decompressedMaxSize = size
    +	}
    +}
    +
    +// IndexFromArchive parses an APKINDEX archive. Options can be used to configure
    +// size limits to protect against gzip bombs.
    +func IndexFromArchive(archive io.ReadCloser, opts ...IndexFromArchiveOption) (*APKIndex, error) {
    +	o := &indexFromArchiveOpts{}
    +	for _, opt := range opts {
    +		opt(o)
    +	}
    +
     	gzipReader, err := gzip.NewReader(archive)
     	if err != nil {
     		return nil, err
     	}
     
     	defer gzipReader.Close()
     
    -	tarReader := tar.NewReader(gzipReader)
    +	// Wrap gzipReader with size limit, then create tar reader on top.
    +	// The limit protects against tar bombs where file headers claim huge sizes.
    +	tarReader := tar.NewReader(limitio.NewLimitedReaderWithDefault(gzipReader, o.decompressedMaxSize, DefaultMaxAPKIndexDecompressedSize))
     	apkindex := &APKIndex{}
     
     	for {
    
  • pkg/apk/apk/implementation.go · +34 −3 · modified
    @@ -53,6 +53,11 @@ import (
     	"github.com/chainguard-dev/clog"
     )
     
    +const (
    +	// DefaultHTTPResponseSize is the default maximum size for HTTP responses (2 GB).
    +	DefaultHTTPResponseSize = 2 << 30
    +)
    +
     type APK struct {
     	arch               string
     	version            string
    @@ -65,6 +70,7 @@ type APK struct {
     	noSignatureIndexes []string
     	auth               auth.Authenticator
     	packageGetter      PackageGetter
    +	sizeLimits         *SizeLimits
     
     	// filename to owning package, last write wins
     	installedFiles map[string]*Package
    @@ -74,6 +80,14 @@ type APK struct {
     	ByArch map[string]*APK
     }
     
    +// apkIndexDecompressedMaxSize returns the configured max decompressed APK index size or 0 for default.
    +func (a *APK) apkIndexDecompressedMaxSize() int64 {
    +	if a.sizeLimits != nil && a.sizeLimits.APKIndexDecompressedMaxSize != 0 {
    +		return a.sizeLimits.APKIndexDecompressedMaxSize
    +	}
    +	return 0 // use default
    +}
    +
     func New(ctx context.Context, options ...Option) (*APK, error) {
     	opt := defaultOpts()
     	for _, o := range options {
    @@ -87,17 +101,33 @@ func New(ctx context.Context, options ...Option) (*APK, error) {
     		opt.fs = apkfs.DirFS(ctx, "/")
     	}
     
    -	client := retryablehttp.NewClient()
    +	// Wrap transport with response size limiter
    +	transport := opt.transport
    +	var httpResponseMaxSize int64
    +	if opt.sizeLimits != nil {
    +		httpResponseMaxSize = opt.sizeLimits.HTTPResponseMaxSize
    +	}
    +	transport = newLimitedResponseTransport(transport, httpResponseMaxSize)
     
    -	client.HTTPClient = &http.Client{Transport: opt.transport}
    +	client := retryablehttp.NewClient()
    +	client.HTTPClient = &http.Client{Transport: transport}
     	client.Logger = clog.FromContext(ctx)
     
     	httpClient := client.StandardClient()
     
     	// Create default PackageGetter if none provided
     	packageGetter := opt.packageGetter
     	if packageGetter == nil {
    -		packageGetter = newDefaultPackageGetter(httpClient, opt.cache, opt.auth)
    +		var getterOpts []packageGetterOption
    +		if opt.sizeLimits != nil {
    +			if opt.sizeLimits.APKControlMaxSize != 0 {
    +				getterOpts = append(getterOpts, withAPKControlMaxSize(opt.sizeLimits.APKControlMaxSize))
    +			}
    +			if opt.sizeLimits.APKDataMaxSize != 0 {
    +				getterOpts = append(getterOpts, withAPKDataMaxSize(opt.sizeLimits.APKDataMaxSize))
    +			}
    +		}
    +		packageGetter = newDefaultPackageGetter(httpClient, opt.cache, opt.auth, getterOpts...)
     	}
     
     	return &APK{
    @@ -113,6 +143,7 @@ func New(ctx context.Context, options ...Option) (*APK, error) {
     		installedFiles:     map[string]*Package{},
     		auth:               opt.auth,
     		packageGetter:      packageGetter,
    +		sizeLimits:         opt.sizeLimits,
     	}, nil
     }
     
    
  • pkg/apk/apk/index.go · +16 −5 · modified
    @@ -457,7 +457,11 @@ func parseRepositoryIndex(ctx context.Context, u string, keys map[string][]byte,
     		}
     	}
     	// with a valid signature, convert it to an ApkIndex
    -	index, err := IndexFromArchive(io.NopCloser(bytes.NewReader(b)))
    +	var archiveOpts []IndexFromArchiveOption
    +	if opts.indexDecompressedMaxSize != 0 {
    +		archiveOpts = append(archiveOpts, WithDecompressedMaxSize(opts.indexDecompressedMaxSize))
    +	}
    +	index, err := IndexFromArchive(io.NopCloser(bytes.NewReader(b)), archiveOpts...)
     	if err != nil {
     		return nil, fmt.Errorf("unable to read convert repository index bytes to index struct: %w", err)
     	}
    @@ -466,10 +470,11 @@ func parseRepositoryIndex(ctx context.Context, u string, keys map[string][]byte,
     }
     
     type indexOpts struct {
    -	ignoreSignatures   bool
    -	noSignatureIndexes []string
    -	httpClient         *http.Client
    -	auth               auth.Authenticator
    +	ignoreSignatures         bool
    +	noSignatureIndexes       []string
    +	httpClient               *http.Client
    +	auth                     auth.Authenticator
    +	indexDecompressedMaxSize int64
     }
     type IndexOption func(*indexOpts)
     
    @@ -497,6 +502,12 @@ func WithIndexAuthenticator(a auth.Authenticator) IndexOption {
     	}
     }
     
    +func WithIndexDecompressedMaxSize(size int64) IndexOption {
    +	return func(o *indexOpts) {
    +		o.indexDecompressedMaxSize = size
    +	}
    +}
    +
     func redact(in string) string {
     	asURL, err := url.Parse(in)
     	if err != nil {
    
  • pkg/apk/apk/limited_transport.go · +63 −0 · added
    @@ -0,0 +1,63 @@
    +// Copyright 2026 Chainguard, Inc.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package apk
    +
    +import (
    +	"io"
    +	"net/http"
    +
    +	"chainguard.dev/apko/pkg/limitio"
    +)
    +
    +// limitedResponseTransport wraps an http.RoundTripper and limits the size of response bodies.
    +type limitedResponseTransport struct {
    +	wrapped http.RoundTripper
    +	maxSize int64
    +}
    +
    +// newLimitedResponseTransport creates a new transport that limits HTTP response body sizes.
    +// If maxSize is -1, responses are unlimited.
    +// If maxSize is 0, the default DefaultHTTPResponseSize is used.
    +func newLimitedResponseTransport(wrapped http.RoundTripper, maxSize int64) http.RoundTripper {
    +	return &limitedResponseTransport{
    +		wrapped: wrapped,
    +		maxSize: maxSize,
    +	}
    +}
    +
    +func (t *limitedResponseTransport) RoundTrip(req *http.Request) (*http.Response, error) {
    +	resp, err := t.wrapped.RoundTrip(req)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	// Wrap the response body with a limited reader
    +	resp.Body = &limitedReadCloser{
    +		ReadCloser: resp.Body,
    +		limited:    limitio.NewLimitedReaderWithDefault(resp.Body, t.maxSize, DefaultHTTPResponseSize),
    +	}
    +
    +	return resp, nil
    +}
    +
    +// limitedReadCloser wraps a ReadCloser with size limiting.
    +type limitedReadCloser struct {
    +	io.ReadCloser
    +	limited io.Reader
    +}
    +
    +func (l *limitedReadCloser) Read(p []byte) (int, error) {
    +	return l.limited.Read(p)
    +}
    
  • pkg/apk/apk/options.go · +17 −0 · modified
    @@ -38,6 +38,15 @@ type opts struct {
     	ignoreSignatures   bool
     	transport          http.RoundTripper
     	packageGetter      PackageGetter
    +	sizeLimits         *SizeLimits
    +}
    +
    +// SizeLimits configures maximum sizes for various APK operations.
    +type SizeLimits struct {
    +	APKIndexDecompressedMaxSize int64
    +	APKControlMaxSize           int64
    +	APKDataMaxSize              int64
    +	HTTPResponseMaxSize         int64
     }
     
     type Option func(*opts) error
    @@ -154,6 +163,14 @@ func WithPackageGetter(pg PackageGetter) Option {
     	}
     }
     
    +// WithSizeLimits sets size limits for APK operations.
    +func WithSizeLimits(limits *SizeLimits) Option {
    +	return func(o *opts) error {
    +		o.sizeLimits = limits
    +		return nil
    +	}
    +}
    +
     func defaultOpts() *opts {
     	return &opts{
     		arch:              ArchToAPK(runtime.GOARCH),
    
  • pkg/apk/apk/package_getter.go · +36 −6 · modified
    @@ -54,18 +54,41 @@ var globalApkCache = newFlightCache[string, *expandapk.APKExpanded]()
     // defaultPackageGetter implements the standard disk-caching behavior
     // with in-memory singleflight deduplication using a global cache.
     type defaultPackageGetter struct {
    -	client *http.Client
    -	cache  *cache
    -	auth   auth.Authenticator
    +	client            *http.Client
    +	cache             *cache
    +	auth              auth.Authenticator
    +	apkControlMaxSize int64
    +	apkDataMaxSize    int64
    +}
    +
    +// packageGetterOption is a functional option for configuring defaultPackageGetter.
    +type packageGetterOption func(*defaultPackageGetter)
    +
    +// withAPKControlMaxSize sets the maximum decompressed size for APK control sections.
    +func withAPKControlMaxSize(size int64) packageGetterOption {
    +	return func(d *defaultPackageGetter) {
    +		d.apkControlMaxSize = size
    +	}
    +}
    +
    +// withAPKDataMaxSize sets the maximum decompressed size for APK data sections.
    +func withAPKDataMaxSize(size int64) packageGetterOption {
    +	return func(d *defaultPackageGetter) {
    +		d.apkDataMaxSize = size
    +	}
     }
     
     // newDefaultPackageGetter creates a new defaultPackageGetter with the given configuration.
    -func newDefaultPackageGetter(client *http.Client, cache *cache, authenticator auth.Authenticator) *defaultPackageGetter {
    -	return &defaultPackageGetter{
    +func newDefaultPackageGetter(client *http.Client, cache *cache, authenticator auth.Authenticator, opts ...packageGetterOption) *defaultPackageGetter {
    +	d := &defaultPackageGetter{
     		client: client,
     		cache:  cache,
     		auth:   authenticator,
     	}
    +	for _, opt := range opts {
    +		opt(d)
    +	}
    +	return d
     }
     
     // GetPackage fetches and returns an expanded package.
    @@ -135,7 +158,14 @@ func (d *defaultPackageGetter) getPackageImpl(ctx context.Context, pkg Installab
     	}
     	defer rc.Close()
     
    -	exp, err := expandapk.ExpandApk(ctx, rc, cacheDir)
    +	var expandOpts []expandapk.Option
    +	if d.apkControlMaxSize != 0 {
    +		expandOpts = append(expandOpts, expandapk.WithMaxControlSize(d.apkControlMaxSize))
    +	}
    +	if d.apkDataMaxSize != 0 {
    +		expandOpts = append(expandOpts, expandapk.WithMaxDataSize(d.apkDataMaxSize))
    +	}
    +	exp, err := expandapk.ExpandApkWithOptions(ctx, rc, cacheDir, expandOpts...)
     	if err != nil {
     		return nil, fmt.Errorf("expanding %s: %w", pkg.PackageName(), err)
     	}
    
  • pkg/apk/apk/repo.go · +5 −1 · modified
    @@ -182,11 +182,15 @@ func (a *APK) GetRepositoryIndexes(ctx context.Context, ignoreSignatures bool) (
     	if a.cache != nil {
     		httpClient = a.cache.client(httpClient, true)
     	}
    -	opts := []IndexOption{WithIgnoreSignatures(ignoreSignatures),
    +	opts := []IndexOption{
    +		WithIgnoreSignatures(ignoreSignatures),
     		WithIgnoreSignatureForIndexes(a.noSignatureIndexes...),
     		WithHTTPClient(httpClient),
     		WithIndexAuthenticator(a.auth),
     	}
    +	if sz := a.apkIndexDecompressedMaxSize(); sz != 0 {
    +		opts = append(opts, WithIndexDecompressedMaxSize(sz))
    +	}
     	return GetRepositoryIndexes(ctx, repos, keys, arch, opts...)
     }
     
    
  • pkg/apk/expandapk/expandapk.go · +37 −4 · modified
    @@ -24,6 +24,7 @@ import (
     
     	"chainguard.dev/apko/pkg/apk/expandapk/tarfs"
     	"chainguard.dev/apko/pkg/apk/types"
    +	"chainguard.dev/apko/pkg/limitio"
     	"github.com/klauspost/compress/gzip"
     
     	"go.opentelemetry.io/otel"
    @@ -102,6 +103,9 @@ type APKExpanded struct {
     	PackageSize   int64
     	SignatureSize int64
     
    +	// opts contains the options used during expansion
    +	opts *Options
    +
     	sync.Mutex
     	parsedPkgInfo *types.PackageInfo
     	controlData   []byte
    @@ -145,7 +149,12 @@ func (a *APKExpanded) ControlData() ([]byte, error) {
     			return nil, err
     		}
     
    -		a.controlData, err = io.ReadAll(zr)
    +		// Apply limit: use opts.MaxControlSize if set, otherwise default.
    +		var maxSize int64
    +		if a.opts != nil {
    +			maxSize = a.opts.MaxControlSize
    +		}
    +		a.controlData, err = io.ReadAll(limitio.NewLimitedReaderWithDefault(zr, maxSize, DefaultMaxControlSize))
     		if err != nil {
     			return nil, err
     		}
    @@ -185,7 +194,14 @@ func (a *APKExpanded) PackageData() (*os.File, error) {
     	buf := pooledSlice()
     	defer slicePool.Put(buf)
     
    -	if _, err := io.CopyBuffer(uf, zr, buf); err != nil {
    +	// Wrap the gzip reader with a limit to protect against decompression bombs
    +	var maxSize int64
    +	if a.opts != nil {
    +		maxSize = a.opts.MaxDataSize
    +	}
    +	limitedZr := limitio.NewLimitedReaderWithDefault(zr, maxSize, DefaultMaxDataSize)
    +
    +	if _, err := io.CopyBuffer(uf, limitedZr, buf); err != nil {
     		return nil, fmt.Errorf("decompressing %q: %w", a.PackageFile, err)
     	}
     
    @@ -422,6 +438,19 @@ func (r *expandApkReader) EnableFastRead() {
     // Returns an APKExpanded struct containing references to the file. You *must* call APKExpanded.Close()
     // when finished to clean up the various files.
     func ExpandApk(ctx context.Context, source io.Reader, cacheDir string) (*APKExpanded, error) {
    +	return ExpandApkWithOptions(ctx, source, cacheDir)
    +}
    +
    +// ExpandApkWithOptions is like ExpandApk but accepts functional options to configure
    +// size limits for APK sections.
    +func ExpandApkWithOptions(ctx context.Context, source io.Reader, cacheDir string, opts ...Option) (*APKExpanded, error) {
    +	options := DefaultOptions()
    +	for _, opt := range opts {
    +		if err := opt(options); err != nil {
    +			return nil, fmt.Errorf("applying option: %w", err)
    +		}
    +	}
    +
     	ctx, span := otel.Tracer("go-apk").Start(ctx, "ExpandApk")
     	defer span.End()
     
    @@ -473,7 +502,7 @@ func ExpandApk(ctx context.Context, source io.Reader, cacheDir string) (*APKExpa
     		if !maxStreamsReached {
     			gzi.Multistream(false)
     
    -			if _, err := io.Copy(io.Discard, gzi); err != nil {
    +			if _, err := io.Copy(io.Discard, limitio.NewLimitedReaderWithDefault(gzi, options.MaxControlSize, DefaultMaxControlSize)); err != nil {
     				return nil, fmt.Errorf("expandApk error 3: %w", err)
     			}
     
    @@ -489,7 +518,9 @@ func ExpandApk(ctx context.Context, source io.Reader, cacheDir string) (*APKExpa
     			bw := pooledBufioWriter(tarfile)
     			defer writerPool.Put(bw)
     
    -			tr := io.TeeReader(gzi, bw)
    +			// Wrap the gzip reader with a limit to protect against decompression bombs
    +			limitedGzi := limitio.NewLimitedReaderWithDefault(gzi, options.MaxDataSize, DefaultMaxDataSize)
    +			tr := io.TeeReader(limitedGzi, bw)
     
     			if err := checkSums(ctx, tr); err != nil {
     				return nil, fmt.Errorf("checking sums: %w", err)
    @@ -561,6 +592,8 @@ func ExpandApk(ctx context.Context, source io.Reader, cacheDir string) (*APKExpa
     		PackageFile: gzipStreams[packageIndex],
     		PackageHash: hashes[packageIndex],
     		PackageSize: sizes[packageIndex],
    +
    +		opts: options,
     	}
     	if signed {
     		expanded.SignatureFile = gzipStreams[signatureIndex]
    
  • pkg/apk/expandapk/expandapk_test.go · +319 −0 · modified
    @@ -17,12 +17,19 @@ package expandapk
     import (
     	"archive/tar"
     	"bytes"
    +	"compress/gzip"
    +	"context"
    +	"errors"
    +	"io"
    +	"os"
     	"testing"
     
    +	"github.com/google/go-cmp/cmp"
     	"github.com/stretchr/testify/require"
     
     	"chainguard.dev/apko/pkg/apk/expandapk/tarfs"
     	"chainguard.dev/apko/pkg/apk/types"
    +	"chainguard.dev/apko/pkg/limitio"
     )
     
     func TestPkgInfo(t *testing.T) {
    @@ -126,3 +133,315 @@ datahash = 7d3351ac6c3ebaf18182efb5390061f50d077ce5ade60a15909d91278f70ada7
     		})
     	}
     }
    +
    +func TestSizeLimitExceededError(t *testing.T) {
    +	err := &limitio.SizeLimitExceededError{Limit: 1024}
    +	want := "size limit exceeded: limit is 1024 bytes"
    +	if diff := cmp.Diff(want, err.Error()); diff != "" {
    +		t.Errorf("SizeLimitExceededError.Error() mismatch (-want +got):\n%s", diff)
    +	}
    +}
    +
    +func TestExpandApkWithOptions(t *testing.T) {
    +	file := "testdata/hello-wolfi-2.12.1-r0.apk"
    +
    +	t.Run("default limits work", func(t *testing.T) {
    +		f, err := os.Open(file)
    +		if err != nil {
    +			t.Fatal(err)
    +		}
    +		defer f.Close()
    +
    +		exp, err := ExpandApkWithOptions(context.Background(), f, t.TempDir())
    +		if err != nil {
    +			t.Fatalf("ExpandApkWithOptions() with defaults failed: %v", err)
    +		}
    +		defer exp.Close()
    +
    +		if exp.SignatureFile == "" {
    +			t.Error("expected SignatureFile to be set")
    +		}
    +		if exp.ControlFile == "" {
    +			t.Error("expected ControlFile to be set")
    +		}
    +		if exp.PackageFile == "" {
    +			t.Error("expected PackageFile to be set")
    +		}
    +	})
    +
    +	t.Run("max size limit exceeded", func(t *testing.T) {
    +		f, err := os.Open(file)
    +		if err != nil {
    +			t.Fatal(err)
    +		}
    +		defer f.Close()
    +
    +		// Set max size to 1 byte - should fail on first stream
    +		_, err = ExpandApkWithOptions(context.Background(), f, t.TempDir(),
    +			WithMaxControlSize(1))
    +
    +		var sizeErr *limitio.SizeLimitExceededError
    +		if !errors.As(err, &sizeErr) {
    +			t.Fatalf("expected limitio.SizeLimitExceededError, got %T: %v", err, err)
    +		}
    +		if sizeErr.Limit != 1 {
    +			t.Errorf("expected limit 1, got %d", sizeErr.Limit)
    +		}
    +	})
    +
    +	t.Run("unlimited with negative one", func(t *testing.T) {
    +		f, err := os.Open(file)
    +		if err != nil {
    +			t.Fatal(err)
    +		}
    +		defer f.Close()
    +
    +		// -1 means unlimited - should succeed
    +		exp, err := ExpandApkWithOptions(context.Background(), f, t.TempDir(),
    +			WithMaxControlSize(-1))
    +		if err != nil {
    +			t.Fatalf("ExpandApkWithOptions() with unlimited failed: %v", err)
    +		}
    +		defer exp.Close()
    +	})
    +}
    +
    +func TestSplitWithOptions(t *testing.T) {
    +	file := "testdata/hello-wolfi-2.12.1-r0.apk"
    +
    +	t.Run("default limits work", func(t *testing.T) {
    +		f, err := os.Open(file)
    +		if err != nil {
    +			t.Fatal(err)
    +		}
    +		defer f.Close()
    +
    +		parts, err := SplitWithOptions(f)
    +		if err != nil {
    +			t.Fatalf("SplitWithOptions() with defaults failed: %v", err)
    +		}
    +		if len(parts) != 3 {
    +			t.Errorf("expected 3 parts, got %d", len(parts))
    +		}
    +	})
    +
    +	t.Run("max size limit exceeded", func(t *testing.T) {
    +		f, err := os.Open(file)
    +		if err != nil {
    +			t.Fatal(err)
    +		}
    +		defer f.Close()
    +
    +		_, err = SplitWithOptions(f, WithMaxControlSize(1))
    +
    +		var sizeErr *limitio.SizeLimitExceededError
    +		if !errors.As(err, &sizeErr) {
    +			t.Fatalf("expected limitio.SizeLimitExceededError, got %T: %v", err, err)
    +		}
    +		if sizeErr.Limit != 1 {
    +			t.Errorf("expected limit 1, got %d", sizeErr.Limit)
    +		}
    +	})
    +}
    +
    +func TestLimitedReader(t *testing.T) {
    +	t.Run("reads up to limit", func(t *testing.T) {
    +		data := []byte("hello world")
    +		r := limitio.NewLimitedReader(bytes.NewReader(data), 5)
    +
    +		buf := make([]byte, 10)
    +		n, err := r.Read(buf)
    +		if err != nil {
    +			t.Fatalf("unexpected error: %v", err)
    +		}
    +		if n != 5 {
    +			t.Errorf("expected to read 5 bytes, got %d", n)
    +		}
    +		if diff := cmp.Diff("hello", string(buf[:n])); diff != "" {
    +			t.Errorf("content mismatch (-want +got):\n%s", diff)
    +		}
    +	})
    +
    +	t.Run("returns error when limit exceeded", func(t *testing.T) {
    +		data := []byte("hello world")
    +		r := limitio.NewLimitedReader(bytes.NewReader(data), 5)
    +
    +		buf := make([]byte, 10)
    +		_, _ = r.Read(buf) // read first 5 bytes
    +
    +		_, err := r.Read(buf)
    +		var sizeErr *limitio.SizeLimitExceededError
    +		if !errors.As(err, &sizeErr) {
    +			t.Fatalf("expected limitio.SizeLimitExceededError, got %T: %v", err, err)
    +		}
    +		if sizeErr.Limit != 5 {
    +			t.Errorf("expected limit 5, got %d", sizeErr.Limit)
    +		}
    +	})
    +}
    +
    +// TestTarBombProtection tests that the size limits protect against tar bombs.
    +// A tar bomb is a tar archive with file headers claiming extremely large sizes,
    +// combined with highly compressible content (zeros) that creates a small compressed file.
    +func TestTarBombProtection(t *testing.T) {
    +	// Create a tar bomb: a file that's 10MB of zeros (compresses very small)
    +	// inside a tar inside gzip. This tests that the decompressed size limit works.
    +	const decompressedSize = 10 * 1024 * 1024 // 10 MB of zeros
    +	const sizeLimit = 1 * 1024 * 1024         // 1 MB limit
    +
    +	t.Run("ExpandApk catches tar bomb in data section", func(t *testing.T) {
    +		// Create a minimal APK-like structure with a tar containing large zero-filled file
    +		var apkData bytes.Buffer
    +
    +		// Create control section with minimal .PKGINFO
    +		var controlBuf bytes.Buffer
    +		controlGz := gzip.NewWriter(&controlBuf)
    +		controlTar := tar.NewWriter(controlGz)
    +
    +		pkginfo := []byte("pkgname = test\npkgver = 1.0.0-r0\n")
    +		require.NoError(t, controlTar.WriteHeader(&tar.Header{
    +			Name: ".PKGINFO",
    +			Mode: 0644,
    +			Size: int64(len(pkginfo)),
    +		}))
    +		_, err := controlTar.Write(pkginfo)
    +		require.NoError(t, err)
    +		require.NoError(t, controlTar.Close())
    +		require.NoError(t, controlGz.Close())
    +
    +		apkData.Write(controlBuf.Bytes())
    +
    +		// Create data section with tar bomb - large file of zeros
    +		var dataBuf bytes.Buffer
    +		dataGz := gzip.NewWriter(&dataBuf)
    +		dataTar := tar.NewWriter(dataGz)
    +
    +		// Create zeros buffer
    +		zeros := make([]byte, decompressedSize)
    +
    +		// Write a large file of zeros (compresses extremely well)
    +		require.NoError(t, dataTar.WriteHeader(&tar.Header{
    +			Name: "hugefile",
    +			Mode: 0644,
    +			Size: int64(decompressedSize),
    +		}))
    +		_, err = dataTar.Write(zeros)
    +		require.NoError(t, err)
    +		require.NoError(t, dataTar.Close())
    +		require.NoError(t, dataGz.Close())
    +
    +		apkData.Write(dataBuf.Bytes())
    +
    +		t.Logf("Tar bomb: %d bytes decompressed in tar, %d bytes compressed total (ratio: %.0fx)",
    +			decompressedSize, apkData.Len(), float64(decompressedSize)/float64(apkData.Len()))
    +
    +		// Try to expand with a small limit
    +		tmpDir := t.TempDir()
    +		_, err = ExpandApkWithOptions(context.Background(), bytes.NewReader(apkData.Bytes()), tmpDir,
    +			WithMaxDataSize(sizeLimit))
    +
    +		// Should fail with size limit error
    +		var sizeErr *limitio.SizeLimitExceededError
    +		if !errors.As(err, &sizeErr) {
    +			t.Fatalf("expected SizeLimitExceededError, got %T: %v", err, err)
    +		}
    +		t.Logf("Successfully caught tar bomb with limit %d", sizeErr.Limit)
    +	})
    +}
    +
    +// TestGzipBombProtection tests that the size limits protect against gzip bombs.
    +// A gzip bomb is highly compressible data that expands to a huge size.
    +func TestGzipBombProtection(t *testing.T) {
    +	// Create a gzip bomb: 10MB of zeros compresses to ~10KB but expands to 10MB
    +	// We'll set a limit of 1MB to trigger the protection
    +	const decompressedSize = 10 * 1024 * 1024 // 10 MB when decompressed
    +	const sizeLimit = 1 * 1024 * 1024         // 1 MB limit
    +
    +	// Create the gzip bomb data (zeros compress extremely well)
    +	var bombData bytes.Buffer
    +	gzw := gzip.NewWriter(&bombData)
    +	zeros := make([]byte, decompressedSize)
    +	_, err := gzw.Write(zeros)
    +	require.NoError(t, err)
    +	require.NoError(t, gzw.Close())
    +
    +	t.Logf("Gzip bomb: %d bytes compressed -> %d bytes decompressed (ratio: %.0fx)",
    +		bombData.Len(), decompressedSize, float64(decompressedSize)/float64(bombData.Len()))
    +
    +	// Test that NewLimitedReaderWithDefault catches the bomb
    +	t.Run("limitio catches gzip bomb", func(t *testing.T) {
    +		gzr, err := gzip.NewReader(bytes.NewReader(bombData.Bytes()))
    +		require.NoError(t, err)
    +		defer gzr.Close()
    +
    +		limited := limitio.NewLimitedReaderWithDefault(gzr, sizeLimit, sizeLimit)
    +
    +		// Try to read all data - should fail with size limit error
    +		_, err = io.ReadAll(limited)
    +
    +		var sizeErr *limitio.SizeLimitExceededError
    +		if !errors.As(err, &sizeErr) {
    +			t.Fatalf("expected SizeLimitExceededError, got %T: %v", err, err)
    +		}
    +		if sizeErr.Limit != sizeLimit {
    +			t.Errorf("expected limit %d, got %d", sizeLimit, sizeErr.Limit)
    +		}
    +	})
    +
    +	// Test with a fake APK structure containing a gzip bomb in the data section
    +	t.Run("ExpandApk catches gzip bomb in data section", func(t *testing.T) {
    +		// Create a minimal APK-like structure:
    +		// 1. Control section (small, valid)
    +		// 2. Data section (gzip bomb)
    +
    +		var apkData bytes.Buffer
    +
    +		// Create control section with minimal .PKGINFO
    +		var controlBuf bytes.Buffer
    +		controlGz := gzip.NewWriter(&controlBuf)
    +		controlTar := tar.NewWriter(controlGz)
    +
    +		pkginfo := []byte("pkgname = test\npkgver = 1.0.0-r0\n")
    +		require.NoError(t, controlTar.WriteHeader(&tar.Header{
    +			Name: ".PKGINFO",
    +			Mode: 0644,
    +			Size: int64(len(pkginfo)),
    +		}))
    +		_, err := controlTar.Write(pkginfo)
    +		require.NoError(t, err)
    +		require.NoError(t, controlTar.Close())
    +		require.NoError(t, controlGz.Close())
    +
    +		apkData.Write(controlBuf.Bytes())
    +
    +		// Create data section with gzip bomb (large file of zeros)
    +		var dataBuf bytes.Buffer
    +		dataGz := gzip.NewWriter(&dataBuf)
    +		dataTar := tar.NewWriter(dataGz)
    +
    +		// Write a large file of zeros
    +		require.NoError(t, dataTar.WriteHeader(&tar.Header{
    +			Name: "bigfile",
    +			Mode: 0644,
    +			Size: int64(decompressedSize),
    +		}))
    +		_, err = dataTar.Write(zeros)
    +		require.NoError(t, err)
    +		require.NoError(t, dataTar.Close())
    +		require.NoError(t, dataGz.Close())
    +
    +		apkData.Write(dataBuf.Bytes())
    +
    +		// Try to expand with a small limit
    +		tmpDir := t.TempDir()
    +		_, err = ExpandApkWithOptions(context.Background(), bytes.NewReader(apkData.Bytes()), tmpDir,
    +			WithMaxDataSize(sizeLimit))
    +
    +		// Should fail with size limit error
    +		var sizeErr *limitio.SizeLimitExceededError
    +		if !errors.As(err, &sizeErr) {
    +			t.Fatalf("expected SizeLimitExceededError, got %T: %v", err, err)
    +		}
    +		t.Logf("Successfully caught gzip bomb with limit %d", sizeErr.Limit)
    +	})
    +}
    
  • pkg/apk/expandapk/options.go+59 0 added
    @@ -0,0 +1,59 @@
    +// Copyright 2026 Chainguard, Inc.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package expandapk
    +
    +// DefaultMaxControlSize is the default maximum decompressed size for control sections (10 MB).
    +const DefaultMaxControlSize int64 = 10 << 20
    +
    +// DefaultMaxDataSize is the default maximum decompressed size for data sections (4 GB).
    +const DefaultMaxDataSize int64 = 4 << 30
    +
    +// Options configures the behavior of APK expansion operations.
    +type Options struct {
    +	// MaxControlSize is the maximum decompressed size for signature and control sections.
    +	// Use -1 for unlimited.
    +	MaxControlSize int64
    +
    +	// MaxDataSize is the maximum decompressed size for the data section.
    +	// Use -1 for unlimited.
    +	MaxDataSize int64
    +}
    +
    +// Option is a functional option for configuring Options.
    +type Option func(*Options) error
    +
    +// WithMaxControlSize sets the maximum decompressed size for signature and control sections.
    +func WithMaxControlSize(size int64) Option {
    +	return func(o *Options) error {
    +		o.MaxControlSize = size
    +		return nil
    +	}
    +}
    +
    +// WithMaxDataSize sets the maximum decompressed size for the data section.
    +func WithMaxDataSize(size int64) Option {
    +	return func(o *Options) error {
    +		o.MaxDataSize = size
    +		return nil
    +	}
    +}
    +
    +// DefaultOptions returns Options with default size limits.
    +func DefaultOptions() *Options {
    +	return &Options{
    +		MaxControlSize: DefaultMaxControlSize,
    +		MaxDataSize:    DefaultMaxDataSize,
    +	}
    +}
    
  • pkg/apk/expandapk/split.go+24 4 modified
    @@ -9,6 +9,8 @@ import (
     	"strings"
     
     	"github.com/klauspost/compress/gzip"
    +
    +	"chainguard.dev/apko/pkg/limitio"
     )
     
     // Split takes an APK reader and splits it into its constituent parts.
    @@ -22,6 +24,23 @@ import (
     // The signature and control sections are buffered in memory, while the data section is streamed
     // from the input reader.
     func Split(source io.Reader) ([]io.Reader, error) {
    +	return SplitWithOptions(source)
    +}
    +
    +// SplitWithOptions is like Split but accepts functional options to configure
    +// size limits for APK sections.
    +func SplitWithOptions(source io.Reader, opts ...Option) ([]io.Reader, error) {
    +	options := DefaultOptions()
    +	for _, opt := range opts {
    +		if err := opt(options); err != nil {
    +			return nil, fmt.Errorf("applying option: %w", err)
    +		}
    +	}
    +
    +	return splitWithOptions(source, options)
    +}
    +
    +func splitWithOptions(source io.Reader, options *Options) ([]io.Reader, error) {
     	parts := []io.Reader{}
     
     	br := bufio.NewReader(source)
    @@ -43,7 +62,7 @@ func Split(source io.Reader) ([]io.Reader, error) {
     
     	// Handle optional signature section.
     	if strings.HasPrefix(hdr.Name, ".SIGN.") {
    -		if _, err := io.Copy(io.Discard, gzi); err != nil {
    +		if _, err := io.Copy(io.Discard, limitio.NewLimitedReaderWithDefault(gzi, options.MaxControlSize, DefaultMaxControlSize)); err != nil {
     			return nil, fmt.Errorf("copying signature stream: %w", err)
     		}
     
    @@ -60,8 +79,8 @@ func Split(source io.Reader) ([]io.Reader, error) {
     	}
     
     	// There should always be a control section.
    -	if _, err := io.Copy(io.Discard, gzi); err != nil {
    -		return nil, fmt.Errorf("copying signature stream: %w", err)
    +	if _, err := io.Copy(io.Discard, limitio.NewLimitedReaderWithDefault(gzi, options.MaxControlSize, DefaultMaxControlSize)); err != nil {
    +		return nil, fmt.Errorf("copying control stream: %w", err)
     	}
     
     	parts = append(parts, bytes.NewReader(buf.Bytes()))
    @@ -71,7 +90,8 @@ func Split(source io.Reader) ([]io.Reader, error) {
     	}
     
     	// And the rest is the data section.
    -	parts = append(parts, br)
    +	dataReader := limitio.NewLimitedReaderWithDefault(br, options.MaxDataSize, DefaultMaxDataSize)
    +	parts = append(parts, dataReader)
     
     	return parts, nil
     }
    
  • pkg/build/build.go+6 0 modified
    @@ -282,6 +282,12 @@ func New(ctx context.Context, fs apkfs.FullFS, opts ...Option) (*Context, error)
     		apk.WithAuthenticator(bc.o.Auth),
     		apk.WithTransport(bc.o.Transport),
     		apk.WithPackageGetter(bc.o.PackageGetter),
    +		apk.WithSizeLimits(&apk.SizeLimits{
    +			APKIndexDecompressedMaxSize: bc.o.SizeLimits.APKIndexDecompressedMaxSize,
    +			APKControlMaxSize:           bc.o.SizeLimits.APKControlMaxSize,
    +			APKDataMaxSize:              bc.o.SizeLimits.APKDataMaxSize,
    +			HTTPResponseMaxSize:         bc.o.SizeLimits.HTTPResponseMaxSize,
    +		}),
     	}
     	// only try to pass the cache dir if one of the following is true:
     	// - the user has explicitly set a cache dir
    
  • pkg/build/options.go+9 0 modified
    @@ -26,6 +26,7 @@ import (
     	"chainguard.dev/apko/pkg/apk/apk"
     	"chainguard.dev/apko/pkg/apk/auth"
     	"chainguard.dev/apko/pkg/build/types"
    +	"chainguard.dev/apko/pkg/options"
     	"chainguard.dev/apko/pkg/sbom/generator"
     
     	"github.com/chainguard-dev/clog"
    @@ -257,3 +258,11 @@ func WithPackageGetter(pg apk.PackageGetter) Option {
     		return nil
     	}
     }
    +
    +// WithSizeLimits sets the size limits for various operations.
    +func WithSizeLimits(limits options.SizeLimits) Option {
    +	return func(bc *Context) error {
    +		bc.o.SizeLimits = limits
    +		return nil
    +	}
    +}
    
  • pkg/cpio/layer.go+2 0 modified
    @@ -24,13 +24,15 @@ import (
     	"github.com/u-root/u-root/pkg/cpio"
     )
     
    +// FromLayer converts a container layer to CPIO format.
     func FromLayer(layer v1.Layer, dest io.Writer) error {
     	// Open the filesystem layer to walk through the file.
     	u, err := layer.Uncompressed()
     	if err != nil {
     		return err
     	}
     	defer u.Close()
    +
     	tarReader := tar.NewReader(u)
     
     	w := cpio.NewDedupWriter(cpio.Newc.Writer(dest))
    
  • pkg/limitio/limitio.go+81 0 added
    @@ -0,0 +1,81 @@
    +// Copyright 2026 Chainguard, Inc.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +// Package limitio provides size-limited I/O operations to prevent unbounded reads.
    +package limitio
    +
    +import (
    +	"fmt"
    +	"io"
    +)
    +
    +// SizeLimitExceededError is returned when a read exceeds its configured size limit.
    +type SizeLimitExceededError struct {
    +	Limit int64
    +}
    +
    +func (e *SizeLimitExceededError) Error() string {
    +	return fmt.Sprintf("size limit exceeded: limit is %d bytes", e.Limit)
    +}
    +
    +// LimitedReader wraps io.LimitedReader and returns a SizeLimitExceededError when the
    +// limit is exceeded. Unlike io.LimitedReader which returns EOF, this returns a
    +// specific error to indicate the limit was exceeded.
    +type LimitedReader struct {
    +	lr       *io.LimitedReader
    +	limit    int64 // original limit for error messages
    +	exceeded bool  // true if we've determined the limit was exceeded
    +}
    +
    +// NewLimitedReader creates a new LimitedReader that will return a SizeLimitExceededError
    +// if more than limit bytes are read from r.
    +//   - If limit == -1: returns the reader unwrapped (unlimited)
    +func NewLimitedReader(r io.Reader, limit int64) io.Reader {
    +	if limit == -1 {
    +		return r
    +	}
    +	return &LimitedReader{
    +		lr:    &io.LimitedReader{R: r, N: limit},
    +		limit: limit,
    +	}
    +}
    +
    +// NewLimitedReaderWithDefault creates a LimitedReader with special handling for default values:
    +//   - If limit == -1: returns the reader unwrapped (unlimited)
    +//   - If limit == 0: uses defaultLimit
    +//   - Otherwise: uses the provided limit
    +func NewLimitedReaderWithDefault(r io.Reader, limit, defaultLimit int64) io.Reader {
    +	if limit == 0 {
    +		limit = defaultLimit
    +	}
    +	return NewLimitedReader(r, limit)
    +}
    +
    +func (l *LimitedReader) Read(p []byte) (n int, err error) {
    +	if l.exceeded {
    +		return 0, &SizeLimitExceededError{Limit: l.limit}
    +	}
    +	n, err = l.lr.Read(p)
    +	if err == io.EOF && l.lr.N <= 0 {
    +		// LimitedReader returns EOF when limit is reached, but we need to check
    +		// if there's more data available to determine if the limit was exceeded.
    +		// Try to read one more byte from the underlying reader.
    +		var buf [1]byte
    +		if nn, _ := l.lr.R.Read(buf[:]); nn > 0 {
    +			l.exceeded = true
    +			return n, &SizeLimitExceededError{Limit: l.limit}
    +		}
    +	}
    +	return n, err
    +}
    
  • pkg/options/options.go+27 0 modified
    @@ -28,6 +28,31 @@ import (
     	"chainguard.dev/apko/pkg/sbom/generator"
     )
     
    +// SizeLimits configures maximum sizes for various operations to prevent unbounded reads.
    +// A value of 0 means use the default, and a value of -1 means no limit.
    +type SizeLimits struct {
    +	// APKIndexDecompressedMaxSize is the maximum decompressed size for APKINDEX archives (default: 100 MB).
    +	// This protects against gzip bombs.
    +	APKIndexDecompressedMaxSize int64 `json:"apkIndexDecompressedMaxSize,omitempty"`
    +	// APKControlMaxSize is the maximum decompressed size for APK control sections (default: 10 MB).
    +	APKControlMaxSize int64 `json:"apkControlMaxSize,omitempty"`
    +	// APKDataMaxSize is the maximum decompressed size for APK data sections (default: 4 GB).
    +	// This protects against gzip bombs.
    +	APKDataMaxSize int64 `json:"apkDataMaxSize,omitempty"`
    +	// HTTPResponseMaxSize is the maximum size for HTTP responses (default: 2 GB).
    +	HTTPResponseMaxSize int64 `json:"httpResponseMaxSize,omitempty"`
    +}
    +
    +// DefaultSizeLimits returns SizeLimits with sensible default values.
    +func DefaultSizeLimits() SizeLimits {
    +	return SizeLimits{
    +		APKIndexDecompressedMaxSize: 100 << 20, // 100 MB
    +		APKControlMaxSize:           10 << 20,  // 10 MB
    +		APKDataMaxSize:              4 << 30,   // 4 GB
    +		HTTPResponseMaxSize:         2 << 30,   // 2 GB
    +	}
    +}
    +
     type Options struct {
     	WithVCS bool `json:"withVCS,omitempty"`
     	// ImageConfigFile might, but does not have to be a filename. It might be any abstract configuration identifier.
    @@ -59,6 +84,7 @@ type Options struct {
     	IgnoreSignatures        bool                  `json:"ignoreSignatures,omitempty"`
     	Transport               http.RoundTripper     `json:"-"`
     	PackageGetter           apk.PackageGetter     `json:"-"`
    +	SizeLimits              SizeLimits            `json:"sizeLimits,omitempty"`
     }
     
     type Auth struct{ User, Pass string }
    @@ -68,6 +94,7 @@ var Default = Options{
     	SourceDateEpoch: time.Unix(0, 0).UTC(),
     	Auth:            auth.DefaultAuthenticators,
     	SharedCache:     apk.NewCache(false),
    +	SizeLimits:      DefaultSizeLimits(),
     }
     
     // Tempdir returns the temporary directory where apko will create
    

Vulnerability mechanics

Generated by an automated analysis tool on May 9, 2026. Inputs: CWE entries + fix-commit diffs from this CVE's patches. Citations validated against bundle.

References

4

News mentions

0

No linked articles in our index yet.