VYPR
High severity · OSV Advisory · Published Dec 18, 2025 · Updated Dec 19, 2025

CVE-2025-68388

Description

Allocation of resources without limits or throttling (CWE-770) allows an unauthenticated remote attacker to cause excessive allocation (CAPEC-130) of memory and CPU via the injection of malicious IPv4 fragments, leading to a degradation of service in Packetbeat.
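
To see the weakness concretely, here is a minimal sketch of the pre-fix caching pattern, reduced and renamed from the lines the patch removes (the uint16-only keying and the TTL-derived expiry appear in the diff below; main and the flood loop are illustrative):

    package main

    import (
        "fmt"
        "time"
    )

    type fragment struct {
        data   []byte
        expire time.Time
    }

    // fragmentCache mirrors the pre-fix shape: fragment sets keyed on the
    // 16-bit IPv4 ID alone, with no cap on sets, fragments per set, or
    // total bytes held.
    type fragmentCache struct {
        collected map[uint16][]fragment
    }

    func (c *fragmentCache) add(id uint16, ttl uint8, data []byte) {
        // Expiry was derived from the attacker-controlled TTL (up to 255s),
        // so a sender could keep incomplete sets resident for minutes.
        c.collected[id] = append(c.collected[id], fragment{
            data:   append([]byte(nil), data...),
            expire: time.Now().Add(time.Duration(ttl) * time.Second),
        })
    }

    func main() {
        c := fragmentCache{collected: make(map[uint16][]fragment)}
        // Scaled-down flood: fragments that never complete a datagram. A real
        // attacker sends these at line rate with fresh IDs and maximal TTLs.
        for id := uint16(0); id < 1000; id++ {
            for i := 0; i < 10; i++ {
                c.add(id, 255, make([]byte, 1400))
            }
        }
        fmt.Printf("resident fragment sets: %d, still growing until expiry\n", len(c.collected))
    }

Because the pre-fix purge only ran once the oldest fragment had expired, sustained traffic with long TTLs kept both the memory footprint and the per-packet scan cost growing.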

Affected packages

Versions sourced from the GitHub Security Advisory.

Package                        Ecosystem   Affected versions                               Patched versions
github.com/elastic/beats       Go          >= 8.6.0, < 8.19.9                              8.19.9
github.com/elastic/beats       Go          >= 9.0.0, < 9.1.9                               9.1.9
github.com/elastic/beats       Go          >= 9.2.0, < 9.2.3                               9.2.3
github.com/elastic/beats/v7    Go          < 7.0.0-alpha2.0.20251209162832-28cfc80d2f4e    7.0.0-alpha2.0.20251209162832-28cfc80d2f4e
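
Module consumers can pick up the fix with standard Go tooling; for example, pinning the v7 pseudo-version from the table above (note that the go command expects a leading v): go get github.com/elastic/beats/v7@v7.0.0-alpha2.0.20251209162832-28cfc80d2f4e. Users of released Beats packages should move to 8.19.9, 9.1.9, or 9.2.3 as applicable.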

Affected products

1 product affected.

Patches

1 patch: commit 28cfc80d2f4e

packetbeat: Rework ip fragmentation and make it bound (#47970) (#47989)

https://github.com/elastic/beats · mergify[bot] · Dec 9, 2025 · via GHSA
3 files changed · +134 −32
  • changelog/fragments/1765279149-ipfrag2.yaml · +45 −0 · added
    @@ -0,0 +1,45 @@
    +# REQUIRED
    +# Kind can be one of:
    +# - breaking-change: a change to previously-documented behavior
    +# - deprecation: functionality that is being removed in a later release
    +# - bug-fix: fixes a problem in a previous version
    +# - enhancement: extends functionality but does not break or fix existing behavior
    +# - feature: new functionality
    +# - known-issue: problems that we are aware of in a given version
    +# - security: impacts on the security of a product or a user’s deployment.
    +# - upgrade: important information for someone upgrading from a prior version
    +# - other: does not fit into any of the other categories
    +kind: feature
    +
    +# REQUIRED for all kinds
    +# Change summary; a 80ish characters long description of the change.
    +summary: ipfrag2
    +
    +# REQUIRED for breaking-change, deprecation, known-issue
    +# Long description; in case the summary is not enough to describe the change
    +# this field accommodate a description without length limits.
    +# description:
    +
    +# REQUIRED for breaking-change, deprecation, known-issue
    +# impact:
    +
    +# REQUIRED for breaking-change, deprecation, known-issue
    +# action:
    +
    +# REQUIRED for all kinds
    +# Affected component; usually one of "elastic-agent", "fleet-server", "filebeat", "metricbeat", "auditbeat", "all", etc.
    +component:
    +
    +# AUTOMATED
    +# OPTIONAL to manually add other PR URLs
    +# PR URL: A link the PR that added the changeset.
    +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added.
    +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number.
    +# Please provide it if you are adding a fragment for a different PR.
    +# pr: https://github.com/owner/repo/1234
    +
    +# AUTOMATED
    +# OPTIONAL to manually add other issue URLs
    +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of).
    +# If not present is automatically filled by the tooling with the issue linked to the PR number.
    +# issue: https://github.com/owner/repo/1234
    
  • packetbeat/decoder/decoder.go · +59 −31 · modified
    @@ -86,7 +86,7 @@ func New(f *flows.Flows, datalink layers.LinkType, icmp4 icmp.ICMPv4Processor, i
     		flows:     f,
     		decoders:  make(map[gopacket.LayerType]gopacket.DecodingLayer),
     		icmp4Proc: icmp4, icmp6Proc: icmp6, tcpProc: tcp, udpProc: udp,
    -		fragments:          fragmentCache{collected: make(map[uint16]fragments)},
    +		fragments:          fragmentCache{collected: make(map[fragmentKey]fragments), lastPurge: time.Now()},
     		allowMismatchedEth: allowMismatchedEth,
     		logger:             logp.NewLogger("decoder"),
     	}
    @@ -200,17 +200,27 @@ func (d *Decoder) OnPacket(data []byte, ci *gopacket.CaptureInfo) {
     			} else {
     				now := time.Now()
     				const offsetMask = 1<<13 - 1 // https://datatracker.ietf.org/doc/html/rfc791#section-3.1
    +				key := fragmentKey{
    +					proto: ipv4.Protocol,
    +					id:    ipv4.Id,
    +				}
    +				if src := ipv4.SrcIP.To4(); src != nil {
    +					copy(key.src[:], src)
    +				}
    +				if dst := ipv4.DstIP.To4(); dst != nil {
    +					copy(key.dst[:], dst)
    +				}
     				f := fragment{
     					id:     ipv4.Id,
     					offset: int(ipv4.FragOffset&offsetMask) * 8,
     					data:   append(data[:0:0], data...), // Ensure that we are not aliasing data.
     					more:   ipv4.Flags&layers.IPv4MoreFragments != 0,
    -					expire: now.Add(time.Duration(ipv4.TTL) * time.Second),
    +					expire: now.Add(fragmentHold),
     				}
     				var more bool
    -				data, more, err = d.fragments.add(now, f)
    +				data, more, err = d.fragments.add(now, key, f)
     				if err != nil {
    -					d.logger.Warnf("%v src=%s dst=%s", err, ipv4.SrcIP, ipv4.DstIP)
    +					d.logger.Debugf("%v src=%s dst=%s", err, ipv4.SrcIP, ipv4.DstIP)
     					return
     				}
     				if more {
    @@ -260,17 +270,27 @@ func (d *Decoder) OnPacket(data []byte, ci *gopacket.CaptureInfo) {
     
     // fragmentCache is a TTL aware cache of IPv4 fragments to reassemble.
     type fragmentCache struct {
    -	// oldest is the expiry time of the oldest fragment.
    -	oldest time.Time
    +	// lastPurge is the last time we attempted to purge expired fragments
    +	lastPurge time.Time
     
     	// collected is the collections of fragments keyed on their
     	// IPv4 packet ID field.
    -	collected map[uint16]fragments
    +	collected map[fragmentKey]fragments
    +}
    +
    +type fragmentKey struct {
    +	src   [4]byte
    +	dst   [4]byte
    +	proto layers.IPProtocol
    +	id    uint16
     }
     
    -// maxReassemble is the maximum size that a collection of fragmented
    -// packets will be reassembled to.
    -const maxReassemble = 1e5
    +const (
    +	ipMaxLength        = 65535
    +	fragmentHold       = time.Second
    +	fragmentMaxPerFlow = 64
    +	fragmentMaxSets    = 512
    +)
     
     // add adds a new fragment to the cache. The value of now is used to expire fragments
     // and collections of fragments. If the fragment completes a set of fragments for
    @@ -279,21 +299,32 @@ const maxReassemble = 1e5
     // fragments ID set, more is returned true. Expiries and oversize reassemblies are
     // signaled via the returned error.
     // The cache is purged of expired collections before add returns.
    -func (c *fragmentCache) add(now time.Time, f fragment) (data []byte, more bool, err error) {
    -	defer c.purge(now)
    +func (c *fragmentCache) add(now time.Time, k fragmentKey, f fragment) (data []byte, more bool, err error) {
    +	c.maybePurge(now)
     
    -	collected, ok := c.collected[f.id]
    -	if ok && !collected.expire.IsZero() && now.After(collected.expire) {
    -		delete(c.collected, f.id)
    -		return nil, false, fmt.Errorf("fragments expired before reassembly ID=%d", f.id)
    +	collected, ok := c.collected[k]
    +	if !ok {
    +		collected.expire = f.expire
     	}
    -	if c.oldest.After(f.expire) {
    -		c.oldest = f.expire
    +
    +	// If this is a new tuple and we are at our limit of tuples, bail
    +	if len(c.collected)+1 >= fragmentMaxSets {
    +		delete(c.collected, k)
    +		return nil, false, fmt.Errorf("too many active fragment sets")
     	}
    -	if collected.expire.IsZero() || collected.expire.After(f.expire) {
    -		collected.expire = f.expire
    +	// If this tuple already has all fragments, bail
    +	if len(collected.fragments)+1 >= fragmentMaxPerFlow {
    +		delete(c.collected, k)
    +		return nil, false, fmt.Errorf("fragment limit exceeded for flow ID=%d", f.id)
     	}
    +	// If the datagram would exceed the max, bail
    +	if collected.bytes+len(f.data) >= ipMaxLength {
    +		delete(c.collected, k)
    +		return nil, false, fmt.Errorf("fragment bytes limit exceeded for flow ID=%d", f.id)
    +	}
    +
     	collected.fragments = append(collected.fragments, f)
    +	collected.bytes += len(f.data)
     
     	// Check whether we have all the fragments we need to do a reassembly.
     	// Do the least amount of work possible
    @@ -318,37 +349,33 @@ func (c *fragmentCache) add(now time.Time, f fragment) (data []byte, more bool,
     		}
     	}
     	if more {
    -		c.collected[f.id] = collected
    +		c.collected[k] = collected
     		return nil, true, nil
     	}
     
     	// Drop the fragments and do the reassembly.
    -	delete(c.collected, f.id)
    +	delete(c.collected, k)
     	data = collected.fragments[0].data
     	for _, f := range collected.fragments[1:] {
    -		if len(data)+len(f.data) > maxReassemble {
    -			return nil, false, fmt.Errorf("packet reconstruction would exceed limit ID=%d", f.id)
    -		}
     		data = append(data, f.data...)
     	}
     	return data, false, nil
     }
     
     // purge performs a cache expiry purge, removing all collected fragments
     // that expired before now.
    -func (c *fragmentCache) purge(now time.Time) {
    -	if c.oldest.After(now) {
    +func (c *fragmentCache) maybePurge(now time.Time) {
    +	delta := now.Sub(c.lastPurge)
    +	if delta < fragmentHold {
     		return
     	}
    -	c.oldest = now
    +	c.lastPurge = now
    +
     	for id, coll := range c.collected {
     		if now.After(coll.expire) {
     			delete(c.collected, id)
     			continue
     		}
    -		if c.oldest.After(coll.expire) {
    -			c.oldest = coll.expire
    -		}
     	}
     }
     
    @@ -357,6 +384,7 @@ type fragments struct {
     	expire    time.Time
     	fragments []fragment
     	haveFinal bool
    +	bytes     int
     }
     
     // fragment is an IPv4 packet fragment.
    
  • packetbeat/decoder/decoder_test.go · +30 −1 · modified
    @@ -22,6 +22,7 @@ package decoder
     import (
     	"strings"
     	"testing"
    +	"time"
     
     	"github.com/elastic/beats/v7/packetbeat/flows"
     	"github.com/elastic/beats/v7/packetbeat/protos"
    @@ -235,9 +236,13 @@ func TestFragment(t *testing.T) {
     	var payload []byte
     	t.Run("in_order", func(t *testing.T) {
     		d, tcp, udp := newTestDecoder(t)
    -		for _, p := range packets {
    +		for i, p := range packets {
     			d.OnPacket(p.Data(), &p.Metadata().CaptureInfo)
    +			if i == 0 {
    +				assert.Equal(t, len(d.fragments.collected), 1, "wrong fragment count")
    +			}
     		}
    +		assert.Equal(t, len(d.fragments.collected), 0, "wrong fragment count")
     
     		// Details confirmed by inspection of the test pcap with Wireshark.
     		assert.Nil(t, tcp.pkt, "unexpected non-nil TCP packet")
    @@ -268,4 +273,28 @@ func TestFragment(t *testing.T) {
     		assert.Equal(t, uint16(35873), udp.pkt.Tuple.DstPort, "unexpected destination port")
     		assert.Equal(t, udp.pkt.Payload, payload, "unexpected payload")
     	})
    +
    +	t.Run("expiry_with_hole", func(t *testing.T) {
    +		d, tcp, udp := newTestDecoder(t)
    +		// we're only gonna insert 2/3 packets
    +		assert.Equal(t, len(d.fragments.collected), 0, "UUU wrong fragment count")
    +		d.OnPacket(packets[0].Data(), &packets[0].Metadata().CaptureInfo)
    +		d.OnPacket(packets[1].Data(), &packets[1].Metadata().CaptureInfo)
    +		assert.Equal(t, 1, len(d.fragments.collected), "LLL wrong fragment count")
    +		d.fragments.maybePurge(time.Now())
    +		assert.Equal(t, 1, len(d.fragments.collected), "YYY wrong fragment count")
    +		// Now find the actual key, and make sure the 2 fragments are there
    +		for _, col := range d.fragments.collected {
    +			assert.Equal(t, 2, len(col.fragments))
    +			assert.False(t, col.haveFinal)
    +		}
    +		// Sleep so that the next time we process a complete packet, the cache is flushed of the old entries.
    +		time.Sleep(fragmentHold)
    +		d.fragments.maybePurge(time.Now())
    +		assert.Equal(t, len(d.fragments.collected), 0, "DDD wrong fragment count")
    +
    +		// Details confirmed by inspection of the test pcap with Wireshark.
    +		assert.Nil(t, tcp.pkt, "unexpected non-nil TCP packet")
    +		assert.Nil(t, udp.pkt, "unexpected non-nil UDP packet")
    +	})
     }
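
Taken together, the constants introduced in decoder.go give the cache a fixed worst-case budget no matter what arrives on the wire. The sketch below restates the three admission checks from the patched fragmentCache.add in isolation (the constants and check order mirror the diff; the admit helper, the string keys, and the probing loop in main are illustrative simplifications):

    package main

    import (
        "errors"
        "fmt"
    )

    // Limits from the patch: together they bound the cache to at most
    // fragmentMaxSets reassemblies of at most ipMaxLength bytes each.
    const (
        ipMaxLength        = 65535
        fragmentMaxPerFlow = 64
        fragmentMaxSets    = 512
    )

    type totals struct {
        frags int // fragments collected for this key so far
        bytes int // payload bytes collected for this key so far
    }

    // admit applies the same three checks the patched fragmentCache.add
    // performs before storing a fragment; on any violation the set is dropped.
    func admit(sets map[string]totals, key string, fragLen int) error {
        t := sets[key] // zero value for a previously unseen key
        if len(sets)+1 >= fragmentMaxSets {
            delete(sets, key)
            return errors.New("too many active fragment sets")
        }
        if t.frags+1 >= fragmentMaxPerFlow {
            delete(sets, key)
            return errors.New("fragment limit exceeded for flow")
        }
        if t.bytes+fragLen >= ipMaxLength {
            delete(sets, key)
            return errors.New("fragment bytes limit exceeded for flow")
        }
        sets[key] = totals{frags: t.frags + 1, bytes: t.bytes + fragLen}
        return nil
    }

    func main() {
        sets := make(map[string]totals)
        // Simulate a flood of fragments on distinct flows: admission stops
        // at the set cap instead of growing without bound.
        for i := 0; ; i++ {
            if err := admit(sets, fmt.Sprintf("flow-%d", i), 1400); err != nil {
                fmt.Printf("flow %d rejected: %v (holding %d sets)\n", i, err, len(sets))
                break
            }
        }
    }

As in the patch, hitting any limit drops the offending set outright rather than trimming it, which keeps the failure path cheap.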
    

Vulnerability mechanics

Summary of the fix-commit diff above: the pre-fix decoder cached IPv4 fragments keyed only on the 16-bit IPv4 ID, expired each set after a hold derived from the packet's attacker-controlled TTL (up to 255 seconds), and bounded nothing but the size of a single reassembly (maxReassemble = 1e5). A remote sender flooding the monitored link with never-completing fragments under fresh IDs and long TTLs could therefore grow the cache, and the cost of scanning it, without limit (CWE-770). The patch rekeys the cache on the full (source, destination, protocol, ID) tuple, replaces TTL-derived expiry with a fixed one-second fragmentHold, caps concurrent fragment sets at 512 (fragmentMaxSets), fragments per set at 64 (fragmentMaxPerFlow), and reassembled size at the IPv4 maximum of 65535 bytes (ipMaxLength), and rate-limits purge sweeps to one per hold interval. It also downgrades the per-drop log line from warning to debug, so the limits themselves cannot be used to flood the logs.
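
The expiry side changed in the same spirit: instead of tracking the single oldest expiry and scanning opportunistically, the cache now sweeps at most once per fragmentHold, bounding purge work to one map scan per second. A minimal standalone sketch of that throttle (fragmentHold and the lastPurge gate mirror the diff; the cache type here is illustrative):

    package main

    import (
        "fmt"
        "time"
    )

    const fragmentHold = time.Second // fixed hold from the patch, replacing TTL-derived expiry

    type cache struct {
        lastPurge time.Time
        expire    map[string]time.Time // per-set expiry, stamped at the first fragment
    }

    // maybePurge sweeps expired sets, but at most once per fragmentHold, so
    // an attacker cannot force a full map scan on every incoming fragment.
    func (c *cache) maybePurge(now time.Time) {
        if now.Sub(c.lastPurge) < fragmentHold {
            return
        }
        c.lastPurge = now
        for k, exp := range c.expire {
            if now.After(exp) {
                delete(c.expire, k)
            }
        }
    }

    func main() {
        start := time.Now()
        c := cache{lastPurge: start, expire: map[string]time.Time{"flow-a": start.Add(fragmentHold)}}

        c.maybePurge(start.Add(500 * time.Millisecond)) // within the hold: no sweep happens
        fmt.Println("after 0.5s:", len(c.expire), "set(s) resident")

        c.maybePurge(start.Add(2 * fragmentHold)) // past the hold: expired set is dropped
        fmt.Println("after 2.0s:", len(c.expire), "set(s) resident")
    }

Combined with the fixed one-second hold stamped on every fragment, no entry can outlive roughly two sweep intervals.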

References

4 references.

News mentions

No linked articles in our index yet.