VYPR
Moderate severity · NVD Advisory · Published Sep 3, 2025 · Updated Sep 4, 2025

Netty's BrotliDecoder is vulnerable to DoS via zip bomb style attack

CVE-2025-58057

Description

Netty is an asynchronous event-driven network application framework for rapid development of maintainable high performance protocol servers & clients. In netty-codec versions 4.1.124.Final and below, and netty-codec-compression versions 4.2.4.Final and below, when supplied with specially crafted input, BrotliDecoder and certain other decompression decoders will allocate a large number of reachable byte buffers, which can lead to denial of service. BrotliDecoder.decompress has no limit in how often it calls pull, decompressing data 64K bytes at a time. The buffers are saved in the output list, and remain reachable until OOM is hit. This is fixed in versions 4.1.125.Final of netty-codec and 4.2.5.Final of netty-codec-compression.

Affected packages

Versions sourced from the GitHub Security Advisory.

Package | Affected versions | Patched versions
io.netty:netty-codec-compression (Maven)
>= 4.2.0.Alpha1, < 4.2.5.Final4.2.5.Final
io.netty:netty-codec (Maven)
< 4.1.125.Final4.1.125.Final

Affected products

1
  • Range: <= 4.1.124.Final (fixed in 4.1.125.Final)

Patches

1
9d804c54ce96

Merge commit from fork

https://github.com/netty/netty · Norman Maurer · Sep 3, 2025 · via GHSA
13 files changed · +603 −248
  • codec-compression/src/main/java/io/netty/handler/codec/compression/BrotliDecoder.java · +21 −8 · modified
    @@ -18,7 +18,6 @@
     
     import com.aayushatharva.brotli4j.decoder.DecoderJNI;
     import io.netty.buffer.ByteBuf;
    -import io.netty.buffer.ByteBufAllocator;
     import io.netty.channel.ChannelHandlerContext;
     import io.netty.handler.codec.ByteToMessageDecoder;
     import io.netty.util.internal.ObjectUtil;
    @@ -48,6 +47,7 @@ private enum State {
         private final int inputBufferSize;
         private DecoderJNI.Wrapper decoder;
         private boolean destroyed;
    +    private boolean needsRead;
     
         /**
          * Creates a new BrotliDecoder with a default 8kB input buffer
    @@ -64,15 +64,16 @@ public BrotliDecoder(int inputBufferSize) {
             this.inputBufferSize = ObjectUtil.checkPositive(inputBufferSize, "inputBufferSize");
         }
     
    -    private ByteBuf pull(ByteBufAllocator alloc) {
    +    private void forwardOutput(ChannelHandlerContext ctx) {
             ByteBuffer nativeBuffer = decoder.pull();
             // nativeBuffer actually wraps brotli's internal buffer so we need to copy its content
    -        ByteBuf copy = alloc.buffer(nativeBuffer.remaining());
    +        ByteBuf copy = ctx.alloc().buffer(nativeBuffer.remaining());
             copy.writeBytes(nativeBuffer);
    -        return copy;
    +        needsRead = false;
    +        ctx.fireChannelRead(copy);
         }
     
    -    private State decompress(ByteBuf input, List<Object> output, ByteBufAllocator alloc) {
    +    private State decompress(ChannelHandlerContext ctx, ByteBuf input) {
             for (;;) {
                 switch (decoder.getStatus()) {
                     case DONE:
    @@ -84,7 +85,7 @@ private State decompress(ByteBuf input, List<Object> output, ByteBufAllocator al
     
                     case NEEDS_MORE_INPUT:
                         if (decoder.hasOutput()) {
    -                        output.add(pull(alloc));
    +                        forwardOutput(ctx);
                         }
     
                         if (!input.isReadable()) {
    @@ -98,7 +99,7 @@ private State decompress(ByteBuf input, List<Object> output, ByteBufAllocator al
                         break;
     
                     case NEEDS_MORE_OUTPUT:
    -                    output.add(pull(alloc));
    +                    forwardOutput(ctx);
                         break;
     
                     default:
    @@ -123,6 +124,7 @@ public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
     
         @Override
         protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
    +        needsRead = true;
             if (destroyed) {
                 // Skip data received after finished.
                 in.skipBytes(in.readableBytes());
    @@ -134,7 +136,7 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) t
             }
     
             try {
    -            State state = decompress(in, out, ctx.alloc());
    +            State state = decompress(ctx, in);
                 if (state == State.DONE) {
                     destroy();
                 } else if (state == State.ERROR) {
    @@ -170,4 +172,15 @@ public void channelInactive(ChannelHandlerContext ctx) throws Exception {
                 super.channelInactive(ctx);
             }
         }
    +
    +    @Override
    +    public void channelReadComplete(ChannelHandlerContext ctx) throws Exception {
    +        // Discard bytes of the cumulation buffer if needed.
    +        discardSomeReadBytes();
    +
    +        if (needsRead && !ctx.channel().config().isAutoRead()) {
    +            ctx.read();
    +        }
    +        ctx.fireChannelReadComplete();
    +    }
     }
    
  • codec-compression/src/main/java/io/netty/handler/codec/compression/JdkZlibDecoder.java · +29 −5 · modified
    @@ -57,6 +57,7 @@ private enum GzipState {
         private GzipState gzipState = GzipState.HEADER_START;
         private int flags = -1;
         private int xlen = -1;
    +    private boolean needsRead;
     
         private volatile boolean finished;
     
    @@ -195,6 +196,7 @@ public boolean isClosed() {
     
         @Override
         protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
    +        needsRead = true;
             if (finished) {
                 // Skip data received after finished.
                 in.skipBytes(in.readableBytes());
    @@ -263,7 +265,15 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) t
                         if (crc != null) {
                             crc.update(outArray, outIndex, outputLength);
                         }
    -                } else  if (inflater.needsDictionary()) {
    +                    if (maxAllocation == 0) {
    +                        // If we don't limit the maximum allocations we should just
    +                        // forward the buffer directly.
    +                        ByteBuf buffer = decompressed;
    +                        decompressed = null;
    +                        needsRead = false;
    +                        ctx.fireChannelRead(buffer);
    +                    }
    +                } else if (inflater.needsDictionary()) {
                         if (dictionary == null) {
                             throw new DecompressionException(
                                     "decompression failure, unable to set dictionary as non was specified");
    @@ -292,10 +302,13 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) t
             } catch (DataFormatException e) {
                 throw new DecompressionException("decompression failure", e);
             } finally {
    -            if (decompressed.isReadable()) {
    -                out.add(decompressed);
    -            } else {
    -                decompressed.release();
    +            if (decompressed != null) {
    +                if (decompressed.isReadable()) {
    +                    needsRead = false;
    +                    ctx.fireChannelRead(decompressed);
    +                } else {
    +                    decompressed.release();
    +                }
                 }
             }
         }
    @@ -517,4 +530,15 @@ private static boolean looksLikeZlib(short cmf_flg) {
             return (cmf_flg & 0x7800) == 0x7800 &&
                     cmf_flg % 31 == 0;
         }
    +
    +    @Override
    +    public void channelReadComplete(ChannelHandlerContext ctx) throws Exception {
    +        // Discard bytes of the cumulation buffer if needed.
    +        discardSomeReadBytes();
    +
    +        if (needsRead && !ctx.channel().config().isAutoRead()) {
    +            ctx.read();
    +        }
    +        ctx.fireChannelReadComplete();
    +    }
     }
    
  • codec-compression/src/main/java/io/netty/handler/codec/compression/JZlibDecoder.java · +28 −4 · modified
    @@ -28,6 +28,7 @@ public class JZlibDecoder extends ZlibDecoder {
     
         private final Inflater z = new Inflater();
         private byte[] dictionary;
    +    private boolean needsRead;
         private volatile boolean finished;
     
         /**
    @@ -131,6 +132,7 @@ public boolean isClosed() {
     
         @Override
         protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
    +        needsRead = true;
             if (finished) {
                 // Skip data received after finished.
                 in.skipBytes(in.readableBytes());
    @@ -172,6 +174,14 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) t
                         int outputLength = z.next_out_index - oldNextOutIndex;
                         if (outputLength > 0) {
                             decompressed.writerIndex(decompressed.writerIndex() + outputLength);
    +                        if (maxAllocation == 0) {
    +                            // If we don't limit the maximum allocations we should just
    +                            // forward the buffer directly.
    +                            ByteBuf buffer = decompressed;
    +                            decompressed = null;
    +                            needsRead = false;
    +                            ctx.fireChannelRead(buffer);
    +                        }
                         }
     
                         switch (resultCode) {
    @@ -202,10 +212,13 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) t
                     }
                 } finally {
                     in.skipBytes(z.next_in_index - oldNextInIndex);
    -                if (decompressed.isReadable()) {
    -                    out.add(decompressed);
    -                } else {
    -                    decompressed.release();
    +                if (decompressed != null) {
    +                    if (decompressed.isReadable()) {
    +                        needsRead = false;
    +                        ctx.fireChannelRead(decompressed);
    +                    } else {
    +                        decompressed.release();
    +                    }
                     }
                 }
             } finally {
    @@ -218,6 +231,17 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) t
             }
         }
     
    +    @Override
    +    public void channelReadComplete(ChannelHandlerContext ctx) throws Exception {
    +        // Discard bytes of the cumulation buffer if needed.
    +        discardSomeReadBytes();
    +
    +        if (needsRead && !ctx.channel().config().isAutoRead()) {
    +            ctx.read();
    +        }
    +        ctx.fireChannelReadComplete();
    +    }
    +
         @Override
         protected void decompressionBufferExhausted(ByteBuf buffer) {
             finished = true;
    
  • codec-compression/src/main/java/io/netty/handler/codec/compression/ZstdDecoder.java · +45 −6 · modified
    @@ -15,10 +15,12 @@
      */
     package io.netty.handler.codec.compression;
     
    +import com.github.luben.zstd.ZstdIOException;
     import com.github.luben.zstd.ZstdInputStreamNoFinalizer;
     import io.netty.buffer.ByteBuf;
     import io.netty.channel.ChannelHandlerContext;
     import io.netty.handler.codec.ByteToMessageDecoder;
    +import io.netty.util.internal.ObjectUtil;
     
     import java.io.Closeable;
     import java.io.IOException;
    @@ -39,9 +41,11 @@ public final class ZstdDecoder extends ByteToMessageDecoder {
             }
         }
     
    +    private final int maximumAllocationSize;
         private final MutableByteBufInputStream inputStream = new MutableByteBufInputStream();
         private ZstdInputStreamNoFinalizer zstdIs;
     
    +    private boolean needsRead;
         private State currentState = State.DECOMPRESS_DATA;
     
         /**
    @@ -52,31 +56,55 @@ private enum State {
             CORRUPTED
         }
     
    +    public ZstdDecoder() {
    +        this(4 * 1024 * 1024);
    +    }
    +
    +    public ZstdDecoder(int maximumAllocationSize) {
    +        this.maximumAllocationSize = ObjectUtil.checkPositiveOrZero(maximumAllocationSize, "maximumAllocationSize");
    +    }
    +
         @Override
         protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
    +        needsRead = true;
             try {
                 if (currentState == State.CORRUPTED) {
                     in.skipBytes(in.readableBytes());
    +
                     return;
                 }
    -            final int compressedLength = in.readableBytes();
    -
                 inputStream.current = in;
     
                 ByteBuf outBuffer = null;
    +
    +            final int compressedLength = in.readableBytes();
                 try {
    +                long uncompressedLength;
    +                if (in.isDirect()) {
    +                    uncompressedLength = com.github.luben.zstd.Zstd.getFrameContentSize(
    +                            CompressionUtil.safeNioBuffer(in, in.readerIndex(), in.readableBytes()));
    +                } else {
    +                    uncompressedLength = com.github.luben.zstd.Zstd.getFrameContentSize(
    +                            in.array(), in.readerIndex() + in.arrayOffset(), in.readableBytes());
    +                }
    +                if (uncompressedLength <= 0) {
    +                    // Let's start with the compressedLength * 2 as often we will not have everything
    +                    // we need in the in buffer and don't want to reserve too much memory.
    +                    uncompressedLength = compressedLength * 2L;
    +                }
    +
                     int w;
                     do {
                         if (outBuffer == null) {
    -                        // Let's start with the compressedLength * 2 as often we will not have everything
    -                        // we need in the in buffer and don't want to reserve too much memory.
    -                        outBuffer = ctx.alloc().heapBuffer(compressedLength * 2);
    +                        outBuffer = ctx.alloc().heapBuffer((int) (maximumAllocationSize == 0 ?
    +                                uncompressedLength : Math.min(maximumAllocationSize, uncompressedLength)));
                         }
                         do {
                             w = outBuffer.writeBytes(zstdIs, outBuffer.writableBytes());
                         } while (w != -1 && outBuffer.isWritable());
                         if (outBuffer.isReadable()) {
    -                        out.add(outBuffer);
    +                        needsRead = false;
    +                        ctx.fireChannelRead(outBuffer);
                             outBuffer = null;
                         }
                     } while (w != -1);
    @@ -93,6 +121,17 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) t
             }
         }
     
    +    @Override
    +    public void channelReadComplete(ChannelHandlerContext ctx) throws Exception {
    +        // Discard bytes of the cumulation buffer if needed.
    +        discardSomeReadBytes();
    +
    +        if (needsRead && !ctx.channel().config().isAutoRead()) {
    +            ctx.read();
    +        }
    +        ctx.fireChannelReadComplete();
    +    }
    +
         @Override
         public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
             super.handlerAdded(ctx);
    
  • codec-compression/src/test/java/io/netty/handler/codec/compression/AbstractIntegrationTest.java · +63 −0 · modified
    @@ -17,7 +17,10 @@
     
     import io.netty.buffer.ByteBuf;
     import io.netty.buffer.CompositeByteBuf;
    +import io.netty.buffer.PooledByteBufAllocator;
     import io.netty.buffer.Unpooled;
    +import io.netty.channel.ChannelHandlerContext;
    +import io.netty.channel.ChannelInboundHandlerAdapter;
     import io.netty.channel.embedded.EmbeddedChannel;
     import io.netty.util.CharsetUtil;
     import io.netty.util.ReferenceCountUtil;
    @@ -27,6 +30,7 @@
     import java.util.Arrays;
     import java.util.Random;
     
    +import static org.assertj.core.api.Assertions.assertThat;
     import static org.junit.jupiter.api.Assertions.assertEquals;
     import static org.junit.jupiter.api.Assertions.assertFalse;
     import static org.junit.jupiter.api.Assertions.assertTrue;
    @@ -179,4 +183,63 @@ protected void testIdentity(final byte[] data, boolean heapBuffer) {
                 closeChannels();
             }
         }
    +
    +    @Test
    +    public void testHugeDecompress() {
    +        int chunkSize = 1024 * 1024;
    +        int numberOfChunks = 256;
    +        int memoryLimit = chunkSize * 128;
    +
    +        EmbeddedChannel compressChannel = createEncoder();
    +        ByteBuf compressed = compressChannel.alloc().buffer();
    +        for (int i = 0; i <= numberOfChunks; i++) {
    +            if (i < numberOfChunks) {
    +                ByteBuf in = compressChannel.alloc().buffer(chunkSize);
    +                in.writeZero(chunkSize);
    +                compressChannel.writeOutbound(in);
    +            } else {
    +                compressChannel.close();
    +            }
    +            while (true) {
    +                ByteBuf buf = compressChannel.readOutbound();
    +                if (buf == null) {
    +                    break;
    +                }
    +                compressed.writeBytes(buf);
    +                buf.release();
    +            }
    +        }
    +
    +        PooledByteBufAllocator allocator = new PooledByteBufAllocator(false);
    +
    +        HugeDecompressIncomingHandler endHandler = new HugeDecompressIncomingHandler(memoryLimit);
    +        EmbeddedChannel decompressChannel = createDecoder();
    +        decompressChannel.pipeline().addLast(endHandler);
    +        decompressChannel.config().setAllocator(allocator);
    +        decompressChannel.writeInbound(compressed);
    +        decompressChannel.finishAndReleaseAll();
    +        assertEquals((long) chunkSize * numberOfChunks, endHandler.total);
    +    }
    +
    +    private static final class HugeDecompressIncomingHandler extends ChannelInboundHandlerAdapter {
    +        final int memoryLimit;
    +        long total;
    +
    +        HugeDecompressIncomingHandler(int memoryLimit) {
    +            this.memoryLimit = memoryLimit;
    +        }
    +
    +        @Override
    +        public void channelRead(ChannelHandlerContext ctx, Object msg) {
    +            ByteBuf buf = (ByteBuf) msg;
    +            total += buf.readableBytes();
    +            try {
    +                PooledByteBufAllocator allocator = (PooledByteBufAllocator) ctx.alloc();
    +                assertThat(allocator.metric().usedHeapMemory()).isLessThan(memoryLimit);
    +                assertThat(allocator.metric().usedDirectMemory()).isLessThan(memoryLimit);
    +            } finally {
    +                buf.release();
    +            }
    +        }
    +    }
     }
    
  • codec-compression/src/test/java/io/netty/handler/codec/compression/BrotliIntegrationTest.java · +37 −0 · added
    @@ -0,0 +1,37 @@
    +/*
    + * Copyright 2014 The Netty Project
    + *
    + * The Netty Project licenses this file to you under the Apache License,
    + * version 2.0 (the "License"); you may not use this file except in compliance
    + * with the License. You may obtain a copy of the License at:
    + *
    + *   https://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    + * License for the specific language governing permissions and limitations
    + * under the License.
    + */
    +package io.netty.handler.codec.compression;
    +
    +import io.netty.buffer.ByteBuf;
    +import io.netty.buffer.CompositeByteBuf;
    +import io.netty.buffer.Unpooled;
    +import io.netty.channel.embedded.EmbeddedChannel;
    +
    +import static org.junit.jupiter.api.Assertions.assertEquals;
    +import static org.junit.jupiter.api.Assertions.assertFalse;
    +
    +public class BrotliIntegrationTest extends AbstractIntegrationTest {
    +
    +    @Override
    +    protected EmbeddedChannel createEncoder() {
    +        return new EmbeddedChannel(new BrotliEncoder());
    +    }
    +
    +    @Override
    +    protected EmbeddedChannel createDecoder() {
    +        return new EmbeddedChannel(new BrotliDecoder());
    +    }
    +}
    
  • codec-compression/src/test/java/io/netty/handler/codec/compression/JdkZlibIntegrationTest.java · +31 −0 · added
    @@ -0,0 +1,31 @@
    +/*
    + * Copyright 2014 The Netty Project
    + *
    + * The Netty Project licenses this file to you under the Apache License,
    + * version 2.0 (the "License"); you may not use this file except in compliance
    + * with the License. You may obtain a copy of the License at:
    + *
    + *   https://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    + * License for the specific language governing permissions and limitations
    + * under the License.
    + */
    +package io.netty.handler.codec.compression;
    +
    +import io.netty.channel.embedded.EmbeddedChannel;
    +
    +public class JdkZlibIntegrationTest extends AbstractIntegrationTest {
    +
    +    @Override
    +    protected EmbeddedChannel createEncoder() {
    +        return new EmbeddedChannel(new JdkZlibEncoder());
    +    }
    +
    +    @Override
    +    protected EmbeddedChannel createDecoder() {
    +        return new EmbeddedChannel(new JdkZlibDecoder(0));
    +    }
    +}
    
  • codec-compression/src/test/java/io/netty/handler/codec/compression/JZlibIntegrationTest.java · +31 −0 · added
    @@ -0,0 +1,31 @@
    +/*
    + * Copyright 2014 The Netty Project
    + *
    + * The Netty Project licenses this file to you under the Apache License,
    + * version 2.0 (the "License"); you may not use this file except in compliance
    + * with the License. You may obtain a copy of the License at:
    + *
    + *   https://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    + * License for the specific language governing permissions and limitations
    + * under the License.
    + */
    +package io.netty.handler.codec.compression;
    +
    +import io.netty.channel.embedded.EmbeddedChannel;
    +
    +public class JZlibIntegrationTest extends AbstractIntegrationTest {
    +
    +    @Override
    +    protected EmbeddedChannel createEncoder() {
    +        return new EmbeddedChannel(new JZlibEncoder());
    +    }
    +
    +    @Override
    +    protected EmbeddedChannel createDecoder() {
    +        return new EmbeddedChannel(new JZlibDecoder(0));
    +    }
    +}
    
  • codec-http2/src/main/java/io/netty/handler/codec/http2/DelegatingDecompressorFrameListener.java · +78 −101 · modified
    @@ -17,10 +17,8 @@
     import io.netty.buffer.ByteBuf;
     import io.netty.buffer.Unpooled;
     import io.netty.channel.Channel;
    -import io.netty.channel.ChannelConfig;
    -import io.netty.channel.ChannelHandler;
     import io.netty.channel.ChannelHandlerContext;
    -import io.netty.channel.ChannelId;
    +import io.netty.channel.ChannelInboundHandlerAdapter;
     import io.netty.channel.embedded.EmbeddedChannel;
     import io.netty.handler.codec.ByteToMessageDecoder;
     import io.netty.handler.codec.compression.Brotli;
    @@ -125,7 +123,7 @@ public DelegatingDecompressorFrameListener(Http2Connection connection, Http2Fram
                 public void onStreamRemoved(Http2Stream stream) {
                     final Http2Decompressor decompressor = decompressor(stream);
                     if (decompressor != null) {
    -                    cleanup(decompressor);
    +                    decompressor.cleanup();
                     }
                 }
             });
    @@ -140,66 +138,7 @@ public int onDataRead(ChannelHandlerContext ctx, int streamId, ByteBuf data, int
                 // The decompressor may be null if no compatible encoding type was found in this stream's headers
                 return listener.onDataRead(ctx, streamId, data, padding, endOfStream);
             }
    -
    -        final EmbeddedChannel channel = decompressor.decompressor();
    -        final int compressedBytes = data.readableBytes() + padding;
    -        decompressor.incrementCompressedBytes(compressedBytes);
    -        try {
    -            // call retain here as it will call release after its written to the channel
    -            channel.writeInbound(data.retain());
    -            ByteBuf buf = nextReadableBuf(channel);
    -            if (buf == null && endOfStream && channel.finish()) {
    -                buf = nextReadableBuf(channel);
    -            }
    -            if (buf == null) {
    -                if (endOfStream) {
    -                    listener.onDataRead(ctx, streamId, Unpooled.EMPTY_BUFFER, padding, true);
    -                }
    -                // No new decompressed data was extracted from the compressed data. This means the application could
    -                // not be provided with data and thus could not return how many bytes were processed. We will assume
    -                // there is more data coming which will complete the decompression block. To allow for more data we
    -                // return all bytes to the flow control window (so the peer can send more data).
    -                decompressor.incrementDecompressedBytes(compressedBytes);
    -                return compressedBytes;
    -            }
    -            try {
    -                Http2LocalFlowController flowController = connection.local().flowController();
    -                decompressor.incrementDecompressedBytes(padding);
    -                for (;;) {
    -                    ByteBuf nextBuf = nextReadableBuf(channel);
    -                    boolean decompressedEndOfStream = nextBuf == null && endOfStream;
    -                    if (decompressedEndOfStream && channel.finish()) {
    -                        nextBuf = nextReadableBuf(channel);
    -                        decompressedEndOfStream = nextBuf == null;
    -                    }
    -
    -                    decompressor.incrementDecompressedBytes(buf.readableBytes());
    -                    // Immediately return the bytes back to the flow controller. ConsumedBytesConverter will convert
    -                    // from the decompressed amount which the user knows about to the compressed amount which flow
    -                    // control knows about.
    -                    flowController.consumeBytes(stream,
    -                            listener.onDataRead(ctx, streamId, buf, padding, decompressedEndOfStream));
    -                    if (nextBuf == null) {
    -                        break;
    -                    }
    -
    -                    padding = 0; // Padding is only communicated once on the first iteration.
    -                    buf.release();
    -                    buf = nextBuf;
    -                }
    -                // We consume bytes each time we call the listener to ensure if multiple frames are decompressed
    -                // that the bytes are accounted for immediately. Otherwise the user may see an inconsistent state of
    -                // flow control.
    -                return 0;
    -            } finally {
    -                buf.release();
    -            }
    -        } catch (Http2Exception e) {
    -            throw e;
    -        } catch (Throwable t) {
    -            throw streamError(stream.id(), INTERNAL_ERROR, t,
    -                    "Decompressor error detected while delegating data read on streamId %d", stream.id());
    -        }
    +        return decompressor.decompress(ctx, stream, data, padding, endOfStream);
         }
     
         @Override
    @@ -313,7 +252,7 @@ private void initDecompressor(ChannelHandlerContext ctx, int streamId, Http2Head
                 }
                 final EmbeddedChannel channel = newContentDecompressor(ctx, contentEncoding);
                 if (channel != null) {
    -                decompressor = new Http2Decompressor(channel);
    +                decompressor = new Http2Decompressor(channel, connection, listener);
                     stream.setProperty(propertyKey, decompressor);
                     // Decode the content and remove or replace the existing headers
                     // so that the message looks like a decoded message.
    @@ -345,36 +284,6 @@ Http2Decompressor decompressor(Http2Stream stream) {
             return stream == null ? null : (Http2Decompressor) stream.getProperty(propertyKey);
         }
     
    -    /**
    -     * Release remaining content from the {@link EmbeddedChannel}.
    -     *
    -     * @param decompressor The decompressor for {@code stream}
    -     */
    -    private static void cleanup(Http2Decompressor decompressor) {
    -        decompressor.decompressor().finishAndReleaseAll();
    -    }
    -
    -    /**
    -     * Read the next decompressed {@link ByteBuf} from the {@link EmbeddedChannel}
    -     * or {@code null} if one does not exist.
    -     *
    -     * @param decompressor The channel to read from
    -     * @return The next decoded {@link ByteBuf} from the {@link EmbeddedChannel} or {@code null} if one does not exist
    -     */
    -    private static ByteBuf nextReadableBuf(EmbeddedChannel decompressor) {
    -        for (;;) {
    -            final ByteBuf buf = decompressor.readInbound();
    -            if (buf == null) {
    -                return null;
    -            }
    -            if (!buf.isReadable()) {
    -                buf.release();
    -                continue;
    -            }
    -            return buf;
    -        }
    -    }
    -
         /**
          * A decorator around the local flow controller that converts consumed bytes from uncompressed to compressed.
          */
    @@ -455,32 +364,100 @@ public int initialWindowSize(Http2Stream stream) {
          */
         private static final class Http2Decompressor {
             private final EmbeddedChannel decompressor;
    +
             private int compressed;
             private int decompressed;
    +        private Http2Stream stream;
    +        private int padding;
    +        private boolean dataDecompressed;
    +        private ChannelHandlerContext targetCtx;
     
    -        Http2Decompressor(EmbeddedChannel decompressor) {
    +        Http2Decompressor(EmbeddedChannel decompressor,  Http2Connection connection, Http2FrameListener listener) {
                 this.decompressor = decompressor;
    +            this.decompressor.pipeline().addLast(new ChannelInboundHandlerAdapter() {
    +                @Override
    +                public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
    +                    ByteBuf buf = (ByteBuf) msg;
    +                    if (!buf.isReadable()) {
    +                        buf.release();
    +                        return;
    +                    }
    +                    incrementDecompressedBytes(buf.readableBytes());
    +                    // Immediately return the bytes back to the flow controller. ConsumedBytesConverter will convert
    +                    // from the decompressed amount which the user knows about to the compressed amount which flow
    +                    // control knows about.
    +                    connection.local().flowController().consumeBytes(stream,
    +                            listener.onDataRead(targetCtx, stream.id(), buf, padding, false));
    +                    padding = 0; // Padding is only communicated once on the first iteration.
    +                    buf.release();
    +
    +                    dataDecompressed = true;
    +                }
    +
    +                @Override
    +                public void channelInactive(ChannelHandlerContext ctx) throws Exception {
    +                    listener.onDataRead(targetCtx, stream.id(), Unpooled.EMPTY_BUFFER, padding, true);
    +                }
    +            });
             }
     
             /**
    -         * Responsible for taking compressed bytes in and producing decompressed bytes.
    +         * Release remaining content from the {@link EmbeddedChannel}.
              */
    -        EmbeddedChannel decompressor() {
    -            return decompressor;
    +        void cleanup() {
    +            decompressor.finishAndReleaseAll();
             }
     
    +        int decompress(ChannelHandlerContext ctx, Http2Stream stream, ByteBuf data, int padding, boolean endOfStream)
    +                throws Http2Exception {
    +            final int compressedBytes = data.readableBytes() + padding;
    +            incrementCompressedBytes(compressedBytes);
    +            try {
    +                this.stream = stream;
    +                this.padding = padding;
    +                this.dataDecompressed = false;
    +                this.targetCtx = ctx;
    +
    +                // call retain here as it will call release after its written to the channel
    +                decompressor.writeInbound(data.retain());
    +                if (endOfStream) {
    +                    decompressor.finish();
    +
    +                    if (!dataDecompressed) {
    +                        // No new decompressed data was extracted from the compressed data. This means the application
    +                        // could not be provided with data and thus could not return how many bytes were processed.
    +                        // We will assume there is more data coming which will complete the decompression block.
    +                        // To allow for more data we return all bytes to the flow control window (so the peer can
    +                        // send more data).
    +                        incrementDecompressedBytes(compressedBytes);
    +                        return compressedBytes;
    +                    }
    +                }
    +                // We consume bytes each time we call the listener to ensure if multiple frames are decompressed
    +                // that the bytes are accounted for immediately. Otherwise the user may see an inconsistent state of
    +                // flow control.
    +                return 0;
    +            } catch (Throwable t) {
    +                // Http2Exception might be thrown by writeInbound(...) or finish().
    +                if (t instanceof Http2Exception) {
    +                    throw (Http2Exception) t;
    +                }
    +                throw streamError(stream.id(), INTERNAL_ERROR, t,
    +                        "Decompressor error detected while delegating data read on streamId %d", stream.id());
    +            }
    +        }
             /**
              * Increment the number of bytes received prior to doing any decompression.
              */
    -        void incrementCompressedBytes(int delta) {
    +        private void incrementCompressedBytes(int delta) {
                 assert delta >= 0;
                 compressed += delta;
             }
     
             /**
              * Increment the number of bytes after the decompression process.
              */
    -        void incrementDecompressedBytes(int delta) {
    +        private void incrementDecompressedBytes(int delta) {
                 assert delta >= 0;
                 decompressed += delta;
             }
    
  • codec-http2/src/main/resources/META-INF/native-image/io.netty/netty-codec-http2/generated/handlers/reflect-config.json+7 0 modified
    @@ -6,6 +6,13 @@
         },
         "queryAllPublicMethods": true
       },
    +  {
    +    "name": "io.netty.handler.codec.http2.DelegatingDecompressorFrameListener$Http2Decompressor$1",
    +    "condition": {
    +      "typeReachable": "io.netty.handler.codec.http2.DelegatingDecompressorFrameListener$Http2Decompressor$1"
    +    },
    +    "queryAllPublicMethods": true
    +  },
       {
         "name": "io.netty.handler.codec.http2.Http2ChannelDuplexHandler",
         "condition": {
    
  • codec-http/src/main/java/io/netty/handler/codec/http/HttpContentDecoder.java+128 124 modified
    @@ -17,6 +17,7 @@
     
     import io.netty.buffer.ByteBuf;
     import io.netty.channel.ChannelHandlerContext;
    +import io.netty.channel.ChannelInboundHandlerAdapter;
     import io.netty.channel.embedded.EmbeddedChannel;
     import io.netty.handler.codec.CodecException;
     import io.netty.handler.codec.DecoderResult;
    @@ -52,139 +53,140 @@ public abstract class HttpContentDecoder extends MessageToMessageDecoder<HttpObj
         private EmbeddedChannel decoder;
         private boolean continueResponse;
         private boolean needRead = true;
    +    private ByteBufForwarder forwarder;
     
         public HttpContentDecoder() {
             super(HttpObject.class);
         }
     
         @Override
         protected void decode(ChannelHandlerContext ctx, HttpObject msg, List<Object> out) throws Exception {
    -        try {
    -            if (msg instanceof HttpResponse && ((HttpResponse) msg).status().code() == 100) {
    +        needRead = true;
    +        if (msg instanceof HttpResponse && ((HttpResponse) msg).status().code() == 100) {
     
    -                if (!(msg instanceof LastHttpContent)) {
    -                    continueResponse = true;
    -                }
    -                // 100-continue response must be passed through.
    -                out.add(ReferenceCountUtil.retain(msg));
    -                return;
    +            if (!(msg instanceof LastHttpContent)) {
    +                continueResponse = true;
                 }
    +            // 100-continue response must be passed through.
    +            needRead = false;
    +            ctx.fireChannelRead(ReferenceCountUtil.retain(msg));
    +            return;
    +        }
     
    -            if (continueResponse) {
    -                if (msg instanceof LastHttpContent) {
    -                    continueResponse = false;
    -                }
    -                // 100-continue response must be passed through.
    -                out.add(ReferenceCountUtil.retain(msg));
    -                return;
    +        if (continueResponse) {
    +            if (msg instanceof LastHttpContent) {
    +                continueResponse = false;
                 }
    +            // 100-continue response must be passed through.
    +            needRead = false;
    +            ctx.fireChannelRead(ReferenceCountUtil.retain(msg));
    +            return;
    +        }
     
    -            if (msg instanceof HttpMessage) {
    -                cleanup();
    -                final HttpMessage message = (HttpMessage) msg;
    -                final HttpHeaders headers = message.headers();
    +        if (msg instanceof HttpMessage) {
    +            cleanup();
    +            final HttpMessage message = (HttpMessage) msg;
    +            final HttpHeaders headers = message.headers();
     
    -                // Determine the content encoding.
    -                String contentEncoding = headers.get(HttpHeaderNames.CONTENT_ENCODING);
    -                if (contentEncoding != null) {
    -                    contentEncoding = contentEncoding.trim();
    -                } else {
    -                    String transferEncoding = headers.get(HttpHeaderNames.TRANSFER_ENCODING);
    -                    if (transferEncoding != null) {
    -                        int idx = transferEncoding.indexOf(',');
    -                        if (idx != -1) {
    -                            contentEncoding = transferEncoding.substring(0, idx).trim();
    -                        } else {
    -                            contentEncoding = transferEncoding.trim();
    -                        }
    +            // Determine the content encoding.
    +            String contentEncoding = headers.get(HttpHeaderNames.CONTENT_ENCODING);
    +            if (contentEncoding != null) {
    +                contentEncoding = contentEncoding.trim();
    +            } else {
    +                String transferEncoding = headers.get(HttpHeaderNames.TRANSFER_ENCODING);
    +                if (transferEncoding != null) {
    +                    int idx = transferEncoding.indexOf(',');
    +                    if (idx != -1) {
    +                        contentEncoding = transferEncoding.substring(0, idx).trim();
                         } else {
    -                        contentEncoding = IDENTITY;
    +                        contentEncoding = transferEncoding.trim();
                         }
    -                }
    -                decoder = newContentDecoder(contentEncoding);
    -
    -                if (decoder == null) {
    -                    if (message instanceof HttpContent) {
    -                        ((HttpContent) message).retain();
    -                    }
    -                    out.add(message);
    -                    return;
    -                }
    -
    -                // Remove content-length header:
    -                // the correct value can be set only after all chunks are processed/decoded.
    -                // If buffering is not an issue, add HttpObjectAggregator down the chain, it will set the header.
    -                // Otherwise, rely on LastHttpContent message.
    -                if (headers.contains(HttpHeaderNames.CONTENT_LENGTH)) {
    -                    headers.remove(HttpHeaderNames.CONTENT_LENGTH);
    -                    headers.set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED);
    -                }
    -                // Either it is already chunked or EOF terminated.
    -                // See https://github.com/netty/netty/issues/5892
    -
    -                // set new content encoding,
    -                CharSequence targetContentEncoding = getTargetContentEncoding(contentEncoding);
    -                if (HttpHeaderValues.IDENTITY.contentEquals(targetContentEncoding)) {
    -                    // Do NOT set the 'Content-Encoding' header if the target encoding is 'identity'
    -                    // as per: https://tools.ietf.org/html/rfc2616#section-14.11
    -                    headers.remove(HttpHeaderNames.CONTENT_ENCODING);
                     } else {
    -                    headers.set(HttpHeaderNames.CONTENT_ENCODING, targetContentEncoding);
    +                    contentEncoding = IDENTITY;
                     }
    +            }
    +            decoder = newContentDecoder(contentEncoding);
     
    +            if (decoder == null) {
                     if (message instanceof HttpContent) {
    -                    // If message is a full request or response object (headers + data), don't copy data part into out.
    -                    // Output headers only; data part will be decoded below.
    -                    // Note: "copy" object must not be an instance of LastHttpContent class,
    -                    // as this would (erroneously) indicate the end of the HttpMessage to other handlers.
    -                    HttpMessage copy;
    -                    if (message instanceof HttpRequest) {
    -                        HttpRequest r = (HttpRequest) message; // HttpRequest or FullHttpRequest
    -                        copy = new DefaultHttpRequest(r.protocolVersion(), r.method(), r.uri());
    -                    } else if (message instanceof HttpResponse) {
    -                        HttpResponse r = (HttpResponse) message; // HttpResponse or FullHttpResponse
    -                        copy = new DefaultHttpResponse(r.protocolVersion(), r.status());
    -                    } else {
    -                        throw new CodecException("Object of class " + message.getClass().getName() +
    -                                                 " is not an HttpRequest or HttpResponse");
    -                    }
    -                    copy.headers().set(message.headers());
    -                    copy.setDecoderResult(message.decoderResult());
    -                    out.add(copy);
    -                } else {
    -                    out.add(message);
    +                    ((HttpContent) message).retain();
                     }
    +                needRead = false;
    +                ctx.fireChannelRead(message);
    +                return;
    +            }
    +            decoder.pipeline().addLast(forwarder);
    +            // Remove content-length header:
    +            // the correct value can be set only after all chunks are processed/decoded.
    +            // If buffering is not an issue, add HttpObjectAggregator down the chain, it will set the header.
    +            // Otherwise, rely on LastHttpContent message.
    +            if (headers.contains(HttpHeaderNames.CONTENT_LENGTH)) {
    +                headers.remove(HttpHeaderNames.CONTENT_LENGTH);
    +                headers.set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED);
    +            }
    +            // Either it is already chunked or EOF terminated.
    +            // See https://github.com/netty/netty/issues/5892
    +
    +            // set new content encoding,
    +            CharSequence targetContentEncoding = getTargetContentEncoding(contentEncoding);
    +            if (HttpHeaderValues.IDENTITY.contentEquals(targetContentEncoding)) {
    +                // Do NOT set the 'Content-Encoding' header if the target encoding is 'identity'
    +                // as per: https://tools.ietf.org/html/rfc2616#section-14.11
    +                headers.remove(HttpHeaderNames.CONTENT_ENCODING);
    +            } else {
    +                headers.set(HttpHeaderNames.CONTENT_ENCODING, targetContentEncoding);
                 }
     
    -            if (msg instanceof HttpContent) {
    -                final HttpContent c = (HttpContent) msg;
    -                if (decoder == null) {
    -                    out.add(c.retain());
    +            if (message instanceof HttpContent) {
    +                // If message is a full request or response object (headers + data), don't copy data part into out.
    +                // Output headers only; data part will be decoded below.
    +                // Note: "copy" object must not be an instance of LastHttpContent class,
    +                // as this would (erroneously) indicate the end of the HttpMessage to other handlers.
    +                HttpMessage copy;
    +                if (message instanceof HttpRequest) {
    +                    HttpRequest r = (HttpRequest) message; // HttpRequest or FullHttpRequest
    +                    copy = new DefaultHttpRequest(r.protocolVersion(), r.method(), r.uri());
    +                } else if (message instanceof HttpResponse) {
    +                    HttpResponse r = (HttpResponse) message; // HttpResponse or FullHttpResponse
    +                    copy = new DefaultHttpResponse(r.protocolVersion(), r.status());
                     } else {
    -                    decodeContent(c, out);
    +                    throw new CodecException("Object of class " + message.getClass().getName() +
    +                                             " is not an HttpRequest or HttpResponse");
                     }
    +                copy.headers().set(message.headers());
    +                copy.setDecoderResult(message.decoderResult());
    +                needRead = false;
    +                ctx.fireChannelRead(copy);
    +            } else {
    +                needRead = false;
    +                ctx.fireChannelRead(message);
                 }
    -        } finally {
    -            needRead = out.isEmpty();
             }
    -    }
    -
    -    private void decodeContent(HttpContent c, List<Object> out) {
    -        ByteBuf content = c.content();
    -
    -        decode(content, out);
    -
    -        if (c instanceof LastHttpContent) {
    -            finishDecode(out);
     
    -            LastHttpContent last = (LastHttpContent) c;
    -            // Generate an additional chunk if the decoder produced
    -            // the last product on closure,
    -            HttpHeaders headers = last.trailingHeaders();
    -            if (headers.isEmpty()) {
    -                out.add(LastHttpContent.EMPTY_LAST_CONTENT);
    +        if (msg instanceof HttpContent) {
    +            final HttpContent c = (HttpContent) msg;
    +            if (decoder == null) {
    +                needRead = false;
    +                ctx.fireChannelRead(c.retain());
                 } else {
    -                out.add(new ComposedLastHttpContent(headers, DecoderResult.SUCCESS));
    +                // call retain here as it will call release after its written to the channel
    +                decoder.writeInbound(c.content().retain());
    +
    +                if (c instanceof LastHttpContent) {
    +                    boolean notEmpty = decoder.finish();
    +                    decoder = null;
    +                    assert !notEmpty;
    +                    LastHttpContent last = (LastHttpContent) c;
    +                    // Generate an additional chunk if the decoder produced
    +                    // the last product on closure,
    +                    HttpHeaders headers = last.trailingHeaders();
    +                    needRead = false;
    +                    if (headers.isEmpty()) {
    +                        ctx.fireChannelRead(LastHttpContent.EMPTY_LAST_CONTENT);
    +                    } else {
    +                        ctx.fireChannelRead(new ComposedLastHttpContent(headers, DecoderResult.SUCCESS));
    +                    }
    +                }
                 }
             }
         }
    @@ -242,14 +244,16 @@ public void channelInactive(ChannelHandlerContext ctx) throws Exception {
         @Override
         public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
             this.ctx = ctx;
    +        forwarder = new ByteBufForwarder(ctx);
             super.handlerAdded(ctx);
         }
     
         private void cleanup() {
             if (decoder != null) {
                 // Clean-up the previous decoder if not cleaned up correctly.
    -            decoder.finishAndReleaseAll();
    +            boolean nonEmpty = decoder.finishAndReleaseAll();
                 decoder = null;
    +            assert !nonEmpty;
             }
         }
     
    @@ -263,30 +267,30 @@ private void cleanupSafely(ChannelHandlerContext ctx) {
             }
         }
     
    -    private void decode(ByteBuf in, List<Object> out) {
    -        // call retain here as it will call release after its written to the channel
    -        decoder.writeInbound(in.retain());
    -        fetchDecoderOutput(out);
    -    }
    +    private final class ByteBufForwarder extends ChannelInboundHandlerAdapter {
    +
    +        private final ChannelHandlerContext targetCtx;
     
    -    private void finishDecode(List<Object> out) {
    -        if (decoder.finish()) {
    -            fetchDecoderOutput(out);
    +        ByteBufForwarder(ChannelHandlerContext targetCtx) {
    +            this.targetCtx = targetCtx;
             }
    -        decoder = null;
    -    }
     
    -    private void fetchDecoderOutput(List<Object> out) {
    -        for (;;) {
    -            ByteBuf buf = decoder.readInbound();
    -            if (buf == null) {
    -                break;
    -            }
    +        @Override
    +        public boolean isSharable() {
    +            // We need to mark the handler as sharable as we will add it to every EmbeddedChannel that is
    +            // generated.
    +            return true;
    +        }
    +
    +        @Override
    +        public void channelRead(ChannelHandlerContext ctx, Object msg) {
    +            ByteBuf buf = (ByteBuf) msg;
                 if (!buf.isReadable()) {
                     buf.release();
    -                continue;
    +                return;
                 }
    -            out.add(new DefaultHttpContent(buf));
    +            needRead = false;
    +            targetCtx.fireChannelRead(new DefaultHttpContent(buf));
             }
         }
     }
    
  • codec-http/src/main/resources/META-INF/native-image/io.netty/netty-codec-http/generated/handlers/reflect-config.json+7 0 modified
    @@ -48,6 +48,13 @@
         },
         "queryAllPublicMethods": true
       },
    +  {
    +    "name": "io.netty.handler.codec.http.HttpContentDecoder$ByteBufForwarder",
    +    "condition": {
    +      "typeReachable": "io.netty.handler.codec.http.HttpContentDecoder$ByteBufForwarder"
    +    },
    +    "queryAllPublicMethods": true
    +  },
       {
         "name": "io.netty.handler.codec.http.HttpContentDecompressor",
         "condition": {
    
  • codec-http/src/test/java/io/netty/handler/codec/http/HttpContentDecompressorTest.java+98 0 modified
    @@ -15,13 +15,22 @@
      */
     package io.netty.handler.codec.http;
     
    +import io.netty.buffer.AdaptiveByteBufAllocator;
    +import io.netty.buffer.ByteBuf;
    +import io.netty.buffer.PooledByteBufAllocator;
     import io.netty.buffer.Unpooled;
     import io.netty.channel.ChannelHandlerContext;
     import io.netty.channel.ChannelInboundHandlerAdapter;
     import io.netty.channel.ChannelOutboundHandlerAdapter;
     import io.netty.channel.embedded.EmbeddedChannel;
    +import io.netty.handler.codec.compression.Brotli;
    +import io.netty.handler.codec.compression.Zstd;
     import org.junit.jupiter.api.Test;
    +import org.junit.jupiter.params.ParameterizedTest;
    +import org.junit.jupiter.params.provider.MethodSource;
     
    +import java.util.ArrayList;
    +import java.util.List;
     import java.util.concurrent.atomic.AtomicInteger;
     
     import static org.junit.jupiter.api.Assertions.assertEquals;
    @@ -70,4 +79,93 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) {
             assertEquals(2, readCalled.get());
             assertFalse(channel.finishAndReleaseAll());
         }
    +
    +    static String[] encodings() {
    +        List<String> encodings = new ArrayList<>();
    +        encodings.add("gzip");
    +        encodings.add("deflate");
    +        if (Brotli.isAvailable()) {
    +            encodings.add("br");
    +        }
    +        if (Zstd.isAvailable()) {
    +            encodings.add("zstd");
    +        }
    +        encodings.add("snappy");
    +        return encodings.toArray(new String[0]);
    +    }
    +
    +    @ParameterizedTest
    +    @MethodSource("encodings")
    +    public void testZipBomb(String encoding) {
    +        int chunkSize = 1024 * 1024;
    +        int numberOfChunks = 256;
    +        int memoryLimit = chunkSize * 128;
    +
    +        EmbeddedChannel compressionChannel = new EmbeddedChannel(new HttpContentCompressor());
    +        DefaultFullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
    +        req.headers().set(HttpHeaderNames.ACCEPT_ENCODING, encoding);
    +        compressionChannel.writeInbound(req);
    +
    +        DefaultHttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
    +        response.headers().set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED);
    +        compressionChannel.writeOutbound(response);
    +
    +        for (int i = 0; i < numberOfChunks; i++) {
    +            ByteBuf buffer = compressionChannel.alloc().buffer(chunkSize);
    +            buffer.writeZero(chunkSize);
    +            compressionChannel.writeOutbound(new DefaultHttpContent(buffer));
    +        }
    +        compressionChannel.writeOutbound(LastHttpContent.EMPTY_LAST_CONTENT);
    +        compressionChannel.finish();
    +        compressionChannel.releaseInbound();
    +
    +        ByteBuf compressed = compressionChannel.alloc().buffer();
    +        HttpMessage message = null;
    +        while (true) {
    +            HttpObject obj = compressionChannel.readOutbound();
    +            if (obj == null) {
    +                break;
    +            }
    +            if (obj instanceof HttpMessage) {
    +                message = (HttpMessage) obj;
    +            }
    +            if (obj instanceof HttpContent) {
    +                HttpContent content = (HttpContent) obj;
    +                compressed.writeBytes(content.content());
    +                content.release();
    +            }
    +        }
    +
    +        PooledByteBufAllocator allocator = new PooledByteBufAllocator(false);
    +
    +        ZipBombIncomingHandler incomingHandler = new ZipBombIncomingHandler(memoryLimit);
    +        EmbeddedChannel decompressChannel = new EmbeddedChannel(new HttpContentDecompressor(0), incomingHandler);
    +        decompressChannel.config().setAllocator(allocator);
    +        decompressChannel.writeInbound(message);
    +        decompressChannel.writeInbound(new DefaultLastHttpContent(compressed));
    +
    +        assertEquals((long) chunkSize * numberOfChunks, incomingHandler.total);
    +    }
    +
    +    private static final class ZipBombIncomingHandler extends ChannelInboundHandlerAdapter {
    +        final int memoryLimit;
    +        long total;
    +
    +        ZipBombIncomingHandler(int memoryLimit) {
    +            this.memoryLimit = memoryLimit;
    +        }
    +
    +        @Override
    +        public void channelRead(ChannelHandlerContext ctx, Object msg) {
    +            PooledByteBufAllocator allocator = (PooledByteBufAllocator) ctx.alloc();
    +            assertTrue(allocator.metric().usedHeapMemory() < memoryLimit);
    +            assertTrue(allocator.metric().usedDirectMemory() < memoryLimit);
    +
    +            if (msg instanceof HttpContent) {
    +                HttpContent buf = (HttpContent) msg;
    +                total += buf.content().readableBytes();
    +                buf.release();
    +            }
    +        }
    +    }
     }
    

Vulnerability mechanics

Generated automatically on May 9, 2026. Inputs: CWE entries and the fix-commit diffs from this CVE's patches. Citations validated against the advisory bundle.

References

4

News mentions

0

No linked articles in our index yet.