VYPR
Moderate severity · NVD Advisory · Published Sep 16, 2022 · Updated Apr 23, 2025

`CHECK` fail in `FakeQuantWithMinMaxVarsPerChannel` in TensorFlow

CVE-2022-36019

Description

TensorFlow is an open source platform for machine learning. If FakeQuantWithMinMaxVarsPerChannel is given min or max tensors of a rank other than one, it results in a CHECK fail that can be used to trigger a denial of service attack. We have patched the issue in GitHub commit 785d67a78a1d533759fcd2f5e8d6ef778de849e0. The fix will be included in TensorFlow 2.10.0. We will also cherry-pick this commit on TensorFlow 2.9.1, TensorFlow 2.8.1, and TensorFlow 2.7.2, as these are also affected and still in supported range. There are no known workarounds for this issue.

Affected packages

Versions sourced from the GitHub Security Advisory.

| Package | Ecosystem | Affected versions | Patched version |
| --- | --- | --- | --- |
| tensorflow | PyPI | < 2.7.2 | 2.7.2 |
| tensorflow | PyPI | >= 2.8.0, < 2.8.1 | 2.8.1 |
| tensorflow | PyPI | >= 2.9.0, < 2.9.1 | 2.9.1 |
| tensorflow-cpu | PyPI | < 2.7.2 | 2.7.2 |
| tensorflow-cpu | PyPI | >= 2.8.0, < 2.8.1 | 2.8.1 |
| tensorflow-cpu | PyPI | >= 2.9.0, < 2.9.1 | 2.9.1 |
| tensorflow-gpu | PyPI | < 2.7.2 | 2.7.2 |
| tensorflow-gpu | PyPI | >= 2.8.0, < 2.8.1 | 2.8.1 |
| tensorflow-gpu | PyPI | >= 2.9.0, < 2.9.1 | 2.9.1 |

Affected products

1

Patches

1
785d67a78a1d

Fix quantize ops input validation issues.

https://github.com/tensorflow/tensorflow · Antonio Sanchez · Jul 19, 2022 · via GHSA
8 files changed · +337 −33
  • tensorflow/core/kernels/fake_quant_ops.cc · +16 −1 · modified
    @@ -24,6 +24,7 @@ limitations under the License.
     // Above is the related header but clang tidy doesn't recognize it.
     #include "tensorflow/core/framework/numeric_op.h"
     #include "tensorflow/core/framework/tensor.h"
    +#include "tensorflow/core/framework/tensor_shape.h"
     #include "tensorflow/core/lib/core/errors.h"
     #include "tensorflow/core/lib/monitoring/gauge.h"
     #include "tensorflow/core/platform/protobuf.h"
    @@ -205,6 +206,13 @@ class FakeQuantWithMinMaxVarsOp : public OpKernel {
         const Tensor& min = context->input(1);
         const Tensor& max = context->input(2);
     
    +    OP_REQUIRES(
    +        context, TensorShapeUtils::IsScalar(min.shape()),
    +        InvalidArgument("`min` must be rank 0 but is rank ", min.dims()));
    +    OP_REQUIRES(
    +        context, TensorShapeUtils::IsScalar(max.shape()),
    +        InvalidArgument("`max` must be rank 0 but is rank ", max.dims()));
    +
         Tensor* output;
         OP_REQUIRES_OK(context,
                        context->allocate_output(0, input.shape(), &output));
    @@ -342,10 +350,17 @@ class FakeQuantWithMinMaxVarsPerChannelOp : public OpKernel {
         const Tensor& input = context->input(0);
         const int depth = input.dim_size(input.dims() - 1);  // last dimension size.
         const Tensor& min = context->input(1);
    +    const Tensor& max = context->input(2);
    +
    +    OP_REQUIRES(
    +        context, TensorShapeUtils::IsVector(min.shape()),
    +        InvalidArgument("`min` must be rank 1 but is rank ", min.dims()));
         OP_REQUIRES(context, min.dim_size(0) == depth,
                     InvalidArgument("min has incorrect size, expected ", depth,
                                     " was ", min.dim_size(0)));
    -    const Tensor& max = context->input(2);
    +    OP_REQUIRES(
    +        context, TensorShapeUtils::IsVector(max.shape()),
    +        InvalidArgument("`max` must be rank 1 but is rank ", max.dims()));
         OP_REQUIRES(context, max.dim_size(0) == depth,
                     InvalidArgument("max has incorrect size, expected ", depth,
                                     " was ", max.dim_size(0)));
    
  • tensorflow/core/kernels/quantized_bias_add_op.cc · +25 −4 · modified
    @@ -20,6 +20,7 @@ limitations under the License.
     #include "tensorflow/core/framework/numeric_op.h"
     #include "tensorflow/core/framework/op_kernel.h"
     #include "tensorflow/core/framework/tensor.h"
    +#include "tensorflow/core/framework/tensor_shape.h"
     #include "tensorflow/core/kernels/meta_support.h"
     #include "tensorflow/core/kernels/ops_util.h"
     #include "tensorflow/core/kernels/quantization_utils.h"
    @@ -38,10 +39,30 @@ class QuantizedBiasAddOp : public OpKernel {
       void Compute(OpKernelContext* context) override {
         const Tensor& input = context->input(0);
         const Tensor& bias = context->input(1);
    -    const float input_min = context->input(2).flat<float>()(0);
    -    const float input_max = context->input(3).flat<float>()(0);
    -    const float bias_min = context->input(4).flat<float>()(0);
    -    const float bias_max = context->input(5).flat<float>()(0);
    +
    +    const Tensor& min_input = context->input(2);
    +    const Tensor& max_input = context->input(3);
    +    const Tensor& min_bias = context->input(4);
    +    const Tensor& max_bias = context->input(5);
    +    OP_REQUIRES(
    +        context, TensorShapeUtils::IsScalar(min_input.shape()),
    +        errors::InvalidArgument("`min_input` must be rank 0 but is rank ",
    +                                min_input.dims()));
    +    OP_REQUIRES(
    +        context, TensorShapeUtils::IsScalar(max_input.shape()),
    +        errors::InvalidArgument("`max_input` must be rank 0 but is rank ",
    +                                max_input.dims()));
    +    OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_bias.shape()),
    +                errors::InvalidArgument(
    +                    "`min_bias` must be rank 0 but is rank ", min_bias.dims()));
    +    OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_bias.shape()),
    +                errors::InvalidArgument(
    +                    "`max_bias` must be rank 0 but is rank ", max_bias.dims()));
    +
    +    const float input_min = min_input.flat<float>()(0);
    +    const float input_max = max_input.flat<float>()(0);
    +    const float bias_min = min_bias.flat<float>()(0);
    +    const float bias_max = max_bias.flat<float>()(0);
     
         OP_REQUIRES(context, TensorShapeUtils::IsMatrixOrHigher(input.shape()),
                     errors::InvalidArgument("Input tensor must be at least 2D: ",
    
  • tensorflow/core/kernels/quantized_bias_add_op_test.cc · +8 −8 · modified
    @@ -74,10 +74,10 @@ TEST_F(QuantizedBiasAddTest, Small) {
                                 input_quantized.flat<quint8>());
       AddInputFromArray<quint8>(bias_quantized.shape(),
                                 bias_quantized.flat<quint8>());
    -  AddInputFromArray<float>(TensorShape({1}), {input_min});
    -  AddInputFromArray<float>(TensorShape({1}), {input_max});
    -  AddInputFromArray<float>(TensorShape({1}), {bias_min});
    -  AddInputFromArray<float>(TensorShape({1}), {bias_max});
    +  AddInputFromArray<float>(TensorShape({}), {input_min});
    +  AddInputFromArray<float>(TensorShape({}), {input_max});
    +  AddInputFromArray<float>(TensorShape({}), {bias_min});
    +  AddInputFromArray<float>(TensorShape({}), {bias_max});
       TF_ASSERT_OK(RunOpKernel());
       const Tensor& output_quantized = *GetOutput(0);
       const float output_min = GetOutput(1)->flat<float>()(0);
    @@ -156,10 +156,10 @@ TEST_F(QuantizedBiasAddTest, RealData) {
                                 input_quantized.flat<quint8>());
       AddInputFromArray<quint8>(bias_quantized.shape(),
                                 bias_quantized.flat<quint8>());
    -  AddInputFromArray<float>(TensorShape({1}), {input_min});
    -  AddInputFromArray<float>(TensorShape({1}), {input_max});
    -  AddInputFromArray<float>(TensorShape({1}), {bias_min});
    -  AddInputFromArray<float>(TensorShape({1}), {bias_max});
    +  AddInputFromArray<float>(TensorShape({}), {input_min});
    +  AddInputFromArray<float>(TensorShape({}), {input_max});
    +  AddInputFromArray<float>(TensorShape({}), {bias_min});
    +  AddInputFromArray<float>(TensorShape({}), {bias_max});
       TF_ASSERT_OK(RunOpKernel());
       const Tensor& output_quantized = *GetOutput(0);
       const float output_min = GetOutput(1)->flat<float>()(0);
    
  • tensorflow/core/kernels/quantized_instance_norm.cc · +11 −3 · modified
    @@ -25,7 +25,7 @@ limitations under the License.
     #include "tensorflow/core/framework/op_kernel.h"
     #include "tensorflow/core/framework/register_types.h"
     #include "tensorflow/core/framework/tensor.h"
    -
    +#include "tensorflow/core/framework/tensor_shape.h"
     #include "tensorflow/core/kernels/quantization_utils.h"
     
     #ifdef USE_NEON
    @@ -274,8 +274,16 @@ class QuantizedInstanceNorm : public OpKernel {
       void Compute(OpKernelContext* context) override {
         const Tensor& input = context->input(0);
     
    -    float input_min = context->input(1).flat<float>()(0);
    -    float input_max = context->input(2).flat<float>()(0);
    +    const Tensor& x_min = context->input(1);
    +    const Tensor& x_max = context->input(2);
    +    OP_REQUIRES(context, TensorShapeUtils::IsScalar(x_min.shape()),
    +                errors::InvalidArgument("`x_min` must be rank 0 but is rank ",
    +                                        x_min.dims()));
    +    OP_REQUIRES(context, TensorShapeUtils::IsScalar(x_max.shape()),
    +                errors::InvalidArgument("`x_max` must be rank 0 but is rank ",
    +                                        x_max.dims()));
    +    float input_min = x_min.scalar<float>()();
    +    float input_max = x_max.scalar<float>()();
         float input_scale = (input_max - input_min) / 255.0f;
     
         OP_REQUIRES(context, input_min < input_max,
    
  • tensorflow/core/kernels/requantize.cc · +31 −5 · modified
    @@ -18,9 +18,11 @@ limitations under the License.
     #define EIGEN_USE_THREADS
     
     #include <math.h>
    -#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
    +
     #include "tensorflow/core/framework/op.h"
     #include "tensorflow/core/framework/op_kernel.h"
    +#include "tensorflow/core/framework/tensor.h"
    +#include "tensorflow/core/framework/tensor_shape.h"
     #include "tensorflow/core/framework/type_traits.h"
     #include "tensorflow/core/framework/types.h"
     #include "tensorflow/core/kernels/meta_support.h"
    @@ -38,10 +40,34 @@ class RequantizeOp : public OpKernel {
     
       void Compute(OpKernelContext* ctx) override {
         const Tensor& input = ctx->input(0);
    -    const float input_min_float = ctx->input(1).flat<float>()(0);
    -    const float input_max_float = ctx->input(2).flat<float>()(0);
    -    const float requested_output_min_float = ctx->input(3).flat<float>()(0);
    -    const float requested_output_max_float = ctx->input(4).flat<float>()(0);
    +
    +    const Tensor& input_min = ctx->input(1);
    +    const Tensor& input_max = ctx->input(2);
    +    const Tensor& requested_output_min = ctx->input(3);
    +    const Tensor& requested_output_max = ctx->input(4);
    +    OP_REQUIRES(
    +        ctx, TensorShapeUtils::IsScalar(input_min.shape()),
    +        errors::InvalidArgument("`input_min` must be rank 0 but is rank ",
    +                                input_min.dims()));
    +    OP_REQUIRES(
    +        ctx, TensorShapeUtils::IsScalar(input_max.shape()),
    +        errors::InvalidArgument("`input_max` must be rank 0 but is rank ",
    +                                input_max.dims()));
    +    OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(requested_output_min.shape()),
    +                errors::InvalidArgument(
    +                    "`requested_output_min` must be rank 0 but is rank ",
    +                    requested_output_min.dims()));
    +    OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(requested_output_max.shape()),
    +                errors::InvalidArgument(
    +                    "`requested_output_max` must be rank 0 but is rank ",
    +                    requested_output_max.dims()));
    +
    +    const float input_min_float = input_min.flat<float>()(0);
    +    const float input_max_float = input_max.flat<float>()(0);
    +    const float requested_output_min_float =
    +        requested_output_min.flat<float>()(0);
    +    const float requested_output_max_float =
    +        requested_output_max.flat<float>()(0);
     
         Tensor* output = nullptr;
         OP_REQUIRES_OK(ctx, ctx->allocate_output(0, input.shape(), &output));
    
  • tensorflow/core/kernels/requantize_op_test.cc · +12 −12 · modified
    @@ -53,10 +53,10 @@ TEST_F(RequantizeTest, HandCraftedRequantize) {
       // Requantize to -1 to 1.
       AddInputFromArray<qint32>(TensorShape({value_count}),
                                 {-(1 << 23), 0, (1 << 23)});
    -  AddInputFromArray<float>(TensorShape({1}), {-256.0f});
    -  AddInputFromArray<float>(TensorShape({1}), {256.0f});
    -  AddInputFromArray<float>(TensorShape({1}), {-1.0f});
    -  AddInputFromArray<float>(TensorShape({1}), {1.0f});
    +  AddInputFromArray<float>(TensorShape({}), {-256.0f});
    +  AddInputFromArray<float>(TensorShape({}), {256.0f});
    +  AddInputFromArray<float>(TensorShape({}), {-1.0f});
    +  AddInputFromArray<float>(TensorShape({}), {1.0f});
       TF_ASSERT_OK(RunOpKernel());
       Tensor expected(allocator(), DT_QUINT8, TensorShape({value_count}));
       test::FillValues<quint8>(&expected, {0, 128, 255});
    @@ -71,10 +71,10 @@ TEST_F(RequantizeTest, InvalidOutputMin) {
     
       AddInputFromArray<qint32>(TensorShape({value_count}),
                                 {-(1 << 23), 0, (1 << 23)});
    -  AddInputFromArray<float>(TensorShape({1}), {-256.0f});
    -  AddInputFromArray<float>(TensorShape({1}), {256.0f});
    -  AddInputFromArray<float>(TensorShape({1}), {0.01f});
    -  AddInputFromArray<float>(TensorShape({1}), {1.0f});
    +  AddInputFromArray<float>(TensorShape({}), {-256.0f});
    +  AddInputFromArray<float>(TensorShape({}), {256.0f});
    +  AddInputFromArray<float>(TensorShape({}), {0.01f});
    +  AddInputFromArray<float>(TensorShape({}), {1.0f});
       EXPECT_EQ("requested_output_min must be <= 0, but got 0.01",
                 RunOpKernel().error_message());
     }
    @@ -85,10 +85,10 @@ TEST_F(RequantizeTest, InvalidOutputMax) {
     
       AddInputFromArray<qint32>(TensorShape({value_count}),
                                 {-(1 << 23), 0, (1 << 23)});
    -  AddInputFromArray<float>(TensorShape({1}), {-256.0f});
    -  AddInputFromArray<float>(TensorShape({1}), {256.0f});
    -  AddInputFromArray<float>(TensorShape({1}), {-10.0f});
    -  AddInputFromArray<float>(TensorShape({1}), {-11.0f});
    +  AddInputFromArray<float>(TensorShape({}), {-256.0f});
    +  AddInputFromArray<float>(TensorShape({}), {256.0f});
    +  AddInputFromArray<float>(TensorShape({}), {-10.0f});
    +  AddInputFromArray<float>(TensorShape({}), {-11.0f});
       EXPECT_EQ(
           "requested_output_max must be >= requested_output_min, but got -11 and "
           "-10",
    
  • tensorflow/python/kernel_tests/quantization_ops/BUILD · +24 −0 · added
    @@ -0,0 +1,24 @@
    +# Tests of TensorFlow quantization ops written using the Python API.
    +
    +# buildifier: disable=same-origin-load
    +load("//tensorflow:tensorflow.bzl", "tf_py_test")
    +
    +package(
    +    default_visibility = ["//tensorflow:internal"],
    +    licenses = ["notice"],
    +)
    +
    +tf_py_test(
    +    name = "quantization_ops_test",
    +    size = "small",
    +    srcs = ["quantization_ops_test.py"],
    +    deps = [
    +        "//tensorflow/python:array_ops",
    +        "//tensorflow/python:client",
    +        "//tensorflow/python:client_testlib",
    +        "//tensorflow/python:framework",
    +        "//tensorflow/python:framework_for_generated_wrappers",
    +        "//tensorflow/python:math_ops",
    +        "//third_party/py/numpy",
    +    ],
    +)
    
  • tensorflow/python/kernel_tests/quantization_ops/quantization_ops_test.py · +210 −0 · added
    @@ -0,0 +1,210 @@
    +# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
    +#
    +# Licensed under the Apache License, Version 2.0 (the "License");
    +# you may not use this file except in compliance with the License.
    +# You may obtain a copy of the License at
    +#
    +#     http://www.apache.org/licenses/LICENSE-2.0
    +#
    +# Unless required by applicable law or agreed to in writing, software
    +# distributed under the License is distributed on an "AS IS" BASIS,
    +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +# See the License for the specific language governing permissions and
    +# limitations under the License.
    +# ==============================================================================
    +"""Tests for tf.quantize ops."""
    +import numpy as np
    +
    +from tensorflow.python.framework import constant_op
    +from tensorflow.python.framework import dtypes
    +from tensorflow.python.framework import errors
    +from tensorflow.python.framework import test_util
    +from tensorflow.python.ops import array_ops
    +from tensorflow.python.ops import math_ops
    +from tensorflow.python.ops import nn_ops
    +from tensorflow.python.platform import googletest
    +
    +
    +class FakeQuantWithMinMaxVarsOpTest(test_util.TensorFlowTestCase):
    +
    +  @test_util.run_in_graph_and_eager_modes
    +  def test_invalid_inputs(self):
    +    inputs = constant_op.constant(
    +        value=[[1.0], [2.0], [4.0]], dtype=dtypes.float32)
    +
    +    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
    +                                "must be rank 0"):
    +      self.evaluate(
    +          array_ops.fake_quant_with_min_max_vars(
    +              inputs=inputs, min=0.0, max=[[1.0], [2.0], [4.0]]))
    +
    +    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
    +                                "must be rank 0"):
    +      self.evaluate(
    +          array_ops.fake_quant_with_min_max_vars(
    +              inputs=inputs, min=[[1.0], [2.0], [4.0]], max=1.0))
    +
    +
    +class FakeQuantWithMinMaxVarsPerChannelOpTest(test_util.TensorFlowTestCase):
    +
    +  @test_util.run_in_graph_and_eager_modes
    +  def test_invalid_inputs(self):
    +    inputs = constant_op.constant(
    +        value=[[1.0], [2.0], [4.0]], dtype=dtypes.float32)
    +
    +    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
    +                                "must be rank 1"):
    +      self.evaluate(
    +          array_ops.fake_quant_with_min_max_vars_per_channel(
    +              inputs=inputs, min=[[0.0]], max=[1.0]))
    +
    +    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
    +                                "Dimensions must be equal|incorrect size"):
    +      self.evaluate(
    +          array_ops.fake_quant_with_min_max_vars_per_channel(
    +              inputs=inputs, min=[0.0, 0.1], max=[1.0]))
    +
    +    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
    +                                "must be rank 1"):
    +      self.evaluate(
    +          array_ops.fake_quant_with_min_max_vars_per_channel(
    +              inputs=inputs, min=[1.0], max=[[1.0]]))
    +
    +    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
    +                                "Dimensions must be equal|incorrect size"):
    +      self.evaluate(
    +          array_ops.fake_quant_with_min_max_vars_per_channel(
    +              inputs=inputs, min=[0.0], max=[1.0, 1.1]))
    +
    +
    +class QuantizedBiasedAddTest(test_util.TensorFlowTestCase):
    +
    +  @test_util.run_in_graph_and_eager_modes
    +  def test_invalid_inputs(self):
    +    inputs = constant_op.constant(
    +        np.int8(0), shape=[3, 3, 3, 3], dtype=dtypes.qint8)
    +    bias = constant_op.constant(np.int8(0), shape=[3], dtype=dtypes.qint8)
    +
    +    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
    +                                "must be rank 0"):
    +      self.evaluate(
    +          nn_ops.quantized_bias_add(
    +              input=inputs,
    +              bias=bias,
    +              min_input=[],
    +              max_input=1.0,
    +              min_bias=0.0,
    +              max_bias=1.0,
    +              out_type=dtypes.qint32))
    +
    +    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
    +                                "must be rank 0"):
    +      self.evaluate(
    +          nn_ops.quantized_bias_add(
    +              input=inputs,
    +              bias=bias,
    +              min_input=0.0,
    +              max_input=[],
    +              min_bias=0.0,
    +              max_bias=1.0,
    +              out_type=dtypes.qint32))
    +
    +    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
    +                                "must be rank 0"):
    +      self.evaluate(
    +          nn_ops.quantized_bias_add(
    +              input=inputs,
    +              bias=bias,
    +              min_input=0.0,
    +              max_input=1.0,
    +              min_bias=[],
    +              max_bias=1.0,
    +              out_type=dtypes.qint32))
    +
    +    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
    +                                "must be rank 0"):
    +      self.evaluate(
    +          nn_ops.quantized_bias_add(
    +              input=inputs,
    +              bias=bias,
    +              min_input=0.0,
    +              max_input=1.0,
    +              min_bias=0.0,
    +              max_bias=[],
    +              out_type=dtypes.qint32))
    +
    +
    +class QuantizedInstanceNormOpTest(test_util.TensorFlowTestCase):
    +
    +  @test_util.run_in_graph_and_eager_modes
    +  def test_invalid_inputs(self):
    +    inputs = constant_op.constant(
    +        np.uint8(0), shape=[3, 3, 3, 3], dtype=dtypes.quint8)
    +
    +    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
    +                                "must be rank 0"):
    +      self.evaluate(
    +          array_ops.quantized_instance_norm(
    +              x=inputs, x_min=0.0, x_max=[[1.0], [2.0], [4.0]]))
    +
    +    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
    +                                "must be rank 0"):
    +      self.evaluate(
    +          array_ops.quantized_instance_norm(
    +              x=inputs, x_min=[[1.0], [2.0], [4.0]], x_max=1.0))
    +
    +
    +class RequantizeOpTest(test_util.TensorFlowTestCase):
    +
    +  @test_util.run_in_graph_and_eager_modes
    +  def test_invalid_inputs(self):
    +    inputs = constant_op.constant(
    +        np.int32(0), shape=[3, 3, 3, 3], dtype=dtypes.qint32)
    +
    +    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
    +                                "must be rank 0"):
    +      self.evaluate(
    +          math_ops.requantize(
    +              input=inputs,
    +              input_min=[],
    +              input_max=1.0,
    +              requested_output_min=0.0,
    +              requested_output_max=1.0,
    +              out_type=dtypes.qint8))
    +
    +    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
    +                                "must be rank 0"):
    +      self.evaluate(
    +          math_ops.requantize(
    +              input=inputs,
    +              input_min=0.0,
    +              input_max=[],
    +              requested_output_min=0.0,
    +              requested_output_max=1.0,
    +              out_type=dtypes.qint8))
    +
    +    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
    +                                "must be rank 0"):
    +      self.evaluate(
    +          math_ops.requantize(
    +              input=inputs,
    +              input_min=0.0,
    +              input_max=1.0,
    +              requested_output_min=[],
    +              requested_output_max=1.0,
    +              out_type=dtypes.qint8))
    +
    +    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
    +                                "must be rank 0"):
    +      self.evaluate(
    +          math_ops.requantize(
    +              input=inputs,
    +              input_min=0.0,
    +              input_max=1.0,
    +              requested_output_min=0.0,
    +              requested_output_max=[],
    +              out_type=dtypes.qint8))
    +
    +
    +if __name__ == "__main__":
    +  googletest.main()
    

Vulnerability mechanics

Generated automatically on May 9, 2026. Inputs: CWE entries and fix-commit diffs from this CVE's patches. Citations validated against the source bundle.

References

5

News mentions

0

No linked articles in our index yet.