VYPR
Moderate severity · NVD Advisory · Published Sep 16, 2022 · Updated Apr 23, 2025

Segfault in `QuantizedMatMul` in TensorFlow

CVE-2022-35973

Description

TensorFlow is an open source platform for machine learning. If `QuantizedMatMul` is given a nonscalar input for any of `min_a`, `max_a`, `min_b`, or `max_b`, it triggers a segfault that can be used to mount a denial-of-service attack. The issue has been patched in GitHub commit aca766ac7693bf29ed0df55ad6bfcc78f35e7f48. The fix will be included in TensorFlow 2.10.0. The commit will also be cherry-picked onto TensorFlow 2.9.1, TensorFlow 2.8.1, and TensorFlow 2.7.2, as these versions are also affected and still within the supported range. There are no known workarounds for this issue.

Affected packages

Versions sourced from the GitHub Security Advisory.

| Package | Ecosystem | Affected versions | Patched version |
|---|---|---|---|
| tensorflow | PyPI | < 2.7.2 | 2.7.2 |
| tensorflow | PyPI | >= 2.8.0, < 2.8.1 | 2.8.1 |
| tensorflow | PyPI | >= 2.9.0, < 2.9.1 | 2.9.1 |
| tensorflow-cpu | PyPI | < 2.7.2 | 2.7.2 |
| tensorflow-cpu | PyPI | >= 2.8.0, < 2.8.1 | 2.8.1 |
| tensorflow-cpu | PyPI | >= 2.9.0, < 2.9.1 | 2.9.1 |
| tensorflow-gpu | PyPI | < 2.7.2 | 2.7.2 |
| tensorflow-gpu | PyPI | >= 2.8.0, < 2.8.1 | 2.8.1 |
| tensorflow-gpu | PyPI | >= 2.9.0, < 2.9.1 | 2.9.1 |

Affected products

1

Patches

1
aca766ac7693

Fix tf.raw_ops.QuantizedMatMul vulnerability from non-scalar min/max a/b arguments.

https://github.com/tensorflow/tensorflow · Laura Pak · Aug 4, 2022 · via GHSA
2 files changed · +73 −20
  • tensorflow/core/kernels/quantized_matmul_op.cc +15 −0 modified
    @@ -20,11 +20,14 @@ limitations under the License.
     #define GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK
     #include "public/gemmlowp.h"
     #include "tensorflow/core/framework/op_kernel.h"
    +#include "tensorflow/core/framework/op_requires.h"
     #include "tensorflow/core/framework/tensor.h"
    +#include "tensorflow/core/framework/tensor_shape.h"
     #include "tensorflow/core/kernels/meta_support.h"
     #include "tensorflow/core/kernels/quantization_utils.h"
     #include "tensorflow/core/kernels/reference_gemm.h"
     #include "tensorflow/core/lib/core/errors.h"
    +#include "tensorflow/core/platform/errors.h"
     
     namespace tensorflow {
     
    @@ -75,9 +78,21 @@ class QuantizedMatMulOp : public OpKernel {
       void Compute(OpKernelContext* context) override {
         const Tensor& a = context->input(0);
         const Tensor& b = context->input(1);
    +    OP_REQUIRES(context, TensorShapeUtils::IsScalar(context->input(2).shape()),
    +                errors::InvalidArgument("min_a must be a scalar, but got shape",
    +                                        context->input(2).shape()));
         const float min_a = context->input(2).flat<float>()(0);
    +    OP_REQUIRES(context, context->input(3).NumElements() == 1,
    +                errors::InvalidArgument("max_a must be a scalar, but got shape",
    +                                        context->input(3).shape()));
         const float max_a = context->input(3).flat<float>()(0);
    +    OP_REQUIRES(context, context->input(4).NumElements() == 1,
    +                errors::InvalidArgument("min_b must be a scalar, but got shape",
    +                                        context->input(4).shape()));
         const float min_b = context->input(4).flat<float>()(0);
    +    OP_REQUIRES(context, context->input(5).NumElements() == 1,
    +                errors::InvalidArgument("max_b must be a scalar, but got shape",
    +                                        context->input(5).shape()));
         const float max_b = context->input(5).flat<float>()(0);
     
         // Make sure that we have valid quantization ranges for the input buffers.
    
  • tensorflow/core/kernels/quantized_matmul_op_test.cc +58 −20 modified
    @@ -62,10 +62,10 @@ TEST_F(QuantizedMatMulTest, Small_NoParams) {
       // | 15 | 16 | 17 | 18 |
       AddInputFromArray<quint8>(TensorShape({3, 4}),
                                 {7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
    -  AddInputFromArray<float>(TensorShape({1}), {0});
    -  AddInputFromArray<float>(TensorShape({1}), {255.0f});
    -  AddInputFromArray<float>(TensorShape({1}), {0});
    -  AddInputFromArray<float>(TensorShape({1}), {255.0f});
    +  AddInputFromArray<float>(TensorShape({}), {0});
    +  AddInputFromArray<float>(TensorShape({}), {255.0f});
    +  AddInputFromArray<float>(TensorShape({}), {0});
    +  AddInputFromArray<float>(TensorShape({}), {255.0f});
     
       TF_ASSERT_OK(RunOpKernel());
       // Here are the results we expect, from hand calculations:
    @@ -118,10 +118,10 @@ TEST_F(QuantizedMatMulTest, VerySmall_WithParams) {
       // The B matrix is:
       // |   1 |
       AddInputFromArray<quint8>(TensorShape({b_rows, b_cols}), {0});
    -  AddInputFromArray<float>(TensorShape({1}), {-12.0f});
    -  AddInputFromArray<float>(TensorShape({1}), {243.0f});
    -  AddInputFromArray<float>(TensorShape({1}), {1.0f});
    -  AddInputFromArray<float>(TensorShape({1}), {256.0f});
    +  AddInputFromArray<float>(TensorShape({}), {-12.0f});
    +  AddInputFromArray<float>(TensorShape({}), {243.0f});
    +  AddInputFromArray<float>(TensorShape({}), {1.0f});
    +  AddInputFromArray<float>(TensorShape({}), {256.0f});
       TF_ASSERT_OK(RunOpKernel());
       // We're requesting C = A.transposed() * B,
       // so we expect to get these results:
    @@ -162,12 +162,50 @@ TEST_F(QuantizedMatMulTest, VerySmall_BadRange) {
       // The B matrix is:
       // |   1 |
       AddInputFromArray<quint8>(TensorShape({b_rows, b_cols}), {0});
    -  AddInputFromArray<float>(TensorShape({1}), {-12.0f});
    -  AddInputFromArray<float>(TensorShape({1}), {243.0f});
    +  AddInputFromArray<float>(TensorShape({}), {-12.0f});
    +  AddInputFromArray<float>(TensorShape({}), {243.0f});
       // Here we set the range so that the min and max are equal, so we expect to
       // see an error when we run.
    -  AddInputFromArray<float>(TensorShape({1}), {1.0f});
    -  AddInputFromArray<float>(TensorShape({1}), {1.0f});
    +  AddInputFromArray<float>(TensorShape({}), {1.0f});
    +  AddInputFromArray<float>(TensorShape({}), {1.0f});
    +  EXPECT_EQ(::tensorflow::error::INVALID_ARGUMENT, RunOpKernel().code());
    +}
    +
    +// This test multiplies two 1x1 8bit matrices, but sets invalid quantized min
    +// and max values, so we expect to get an error
    +TEST_F(QuantizedMatMulTest, VerySmall_BadMinMax) {
    +  // These parameters reflect a typical production usage of eight-bit matmuls
    +  // in an Inception-style network.
    +  const bool transpose_a = true;
    +  const int a_rows = 1;
    +  const int a_cols = 1;
    +  const int b_rows = 1;
    +  const int b_cols = 1;
    +  const bool transpose_b = false;
    +  TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op", "QuantizedMatMul")
    +                   .Input(FakeInput(DT_QUINT8))
    +                   .Input(FakeInput(DT_QUINT8))
    +                   .Input(FakeInput(DT_FLOAT))
    +                   .Input(FakeInput(DT_FLOAT))
    +                   .Input(FakeInput(DT_FLOAT))
    +                   .Input(FakeInput(DT_FLOAT))
    +                   .Attr("Toutput", DataTypeToEnum<qint32>::v())
    +                   .Attr("transpose_a", transpose_a)
    +                   .Attr("transpose_b", transpose_b)
    +                   .Finalize(node_def()));
    +  TF_ASSERT_OK(InitOp());
    +  // The A matrix is:
    +  // |  -1 |
    +  AddInputFromArray<quint8>(TensorShape({a_rows, a_cols}), {11});
    +  // The B matrix is:
    +  // |   1 |
    +  AddInputFromArray<quint8>(TensorShape({b_rows, b_cols}), {0});
    +  // Here we set the error of a non scalar min_a value, so we expect to see an
    +  // error when we run.
    +  AddInputFromArray<float>(TensorShape({1}), {2});
    +  AddInputFromArray<float>(TensorShape({}), {243.0f});
    +  AddInputFromArray<float>(TensorShape({}), {1.0f});
    +  AddInputFromArray<float>(TensorShape({}), {256.0f});
       EXPECT_EQ(::tensorflow::error::INVALID_ARGUMENT, RunOpKernel().code());
     }
     
    @@ -233,10 +271,10 @@ TEST_F(QuantizedMatMulTest, Small_WithParams) {
                                                                    3,
                                                                    6,
                                                                });
    -  AddInputFromArray<float>(TensorShape({1}), {-12.0f});
    -  AddInputFromArray<float>(TensorShape({1}), {243.0f});
    -  AddInputFromArray<float>(TensorShape({1}), {0});
    -  AddInputFromArray<float>(TensorShape({1}), {255.0f});
    +  AddInputFromArray<float>(TensorShape({}), {-12.0f});
    +  AddInputFromArray<float>(TensorShape({}), {243.0f});
    +  AddInputFromArray<float>(TensorShape({}), {0});
    +  AddInputFromArray<float>(TensorShape({}), {255.0f});
       TF_ASSERT_OK(RunOpKernel());
       // We're requesting C = A.transposed() * B,
       // so we expect to get these results:
    @@ -326,10 +364,10 @@ TEST_F(QuantizedMatMulTest, Medium_WithParams) {
     
       AddInputFromArray<quint8>(a_quantized.shape(), a_quantized.flat<quint8>());
       AddInputFromArray<quint8>(b_quantized.shape(), b_quantized.flat<quint8>());
    -  AddInputFromArray<float>(TensorShape({1}), {a_min});
    -  AddInputFromArray<float>(TensorShape({1}), {a_max});
    -  AddInputFromArray<float>(TensorShape({1}), {b_min});
    -  AddInputFromArray<float>(TensorShape({1}), {b_max});
    +  AddInputFromArray<float>(TensorShape({}), {a_min});
    +  AddInputFromArray<float>(TensorShape({}), {a_max});
    +  AddInputFromArray<float>(TensorShape({}), {b_min});
    +  AddInputFromArray<float>(TensorShape({}), {b_max});
       TF_ASSERT_OK(RunOpKernel());
     
       Tensor expected_float(DT_FLOAT, {a_cols, b_cols});
    

Vulnerability mechanics

Generated by null/stub on May 9, 2026. Inputs: CWE entries + fix-commit diffs from this CVE's patches. Citations validated against bundle.

References

5

News mentions

0

No linked articles in our index yet.