VYPR
Moderate severity · NVD Advisory · Published Aug 12, 2021 · Updated Nov 13, 2024

Infinite loop in TensorFlow Lite

CVE-2021-37686

Description

TensorFlow is an end-to-end open source platform for machine learning. In affected versions the strided slice implementation in TFLite has a logic bug which can allow an attacker to trigger an infinite loop. This arises from newly introduced support for ellipsis in axis definition. An attacker can craft a model such that ellipsis_end_idx is smaller than i (e.g., always negative). In this case, the inner loop does not increase i and the continue statement causes execution to skip over the preincrement at the end of the outer loop. We have patched the issue in GitHub commit dfa22b348b70bb89d6d6ec0ff53973bacb4f4695. TensorFlow 2.6.0 is the only affected version.

Affected packages

Versions sourced from the GitHub Security Advisory.

Package | Affected versions | Patched versions
tensorflow (PyPI) | >= 2.6.0rc0, < 2.6.0rc2 | 2.6.0rc2
tensorflow-cpu (PyPI) | >= 2.6.0rc0, < 2.6.0rc2 | 2.6.0rc2
tensorflow-gpu (PyPI) | >= 2.6.0rc0, < 2.6.0rc2 | 2.6.0rc2

Affected products

1

Patches

1
dfa22b348b70

Prevent a division by 0 in average ops.

https://github.com/tensorflow/tensorflow · Mihai Maruseac · Jul 16, 2021 · via GHSA
8 files changed · +165 −132
  • tensorflow/lite/kernels/internal/averagepool_quantized_test.cc · +8 −6 · modified
    @@ -40,12 +40,14 @@ void RunOneAveragePoolTest(const PoolParams& params,
       std::vector<int8> optimized_averagePool_output(buffer_size);
       std::vector<int8> reference_averagePool_output(buffer_size);
     
    -  reference_integer_ops::AveragePool(params, input_shape, input_data,
    -                                     output_shape,
    -                                     reference_averagePool_output.data());
    -  optimized_integer_ops::AveragePool(params, input_shape, input_data,
    -                                     output_shape,
    -                                     optimized_averagePool_output.data());
    +  bool reference_success = reference_integer_ops::AveragePool(
    +      params, input_shape, input_data, output_shape,
    +      reference_averagePool_output.data());
    +  bool optimized_success = optimized_integer_ops::AveragePool(
    +      params, input_shape, input_data, output_shape,
    +      optimized_averagePool_output.data());
    +  EXPECT_TRUE(reference_success);
    +  EXPECT_TRUE(optimized_success);
     
       for (int i = 0; i < buffer_size; i++) {
         EXPECT_TRUE(reference_averagePool_output[i] ==
    
  • tensorflow/lite/kernels/internal/optimized/integer_ops/pooling.h · +3 −1 · modified
    @@ -144,7 +144,7 @@ inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
       }
     }
     
    -inline void AveragePool(const PoolParams& params,
    +inline bool AveragePool(const PoolParams& params,
                             const RuntimeShape& input_shape, const int8* input_data,
                             const RuntimeShape& output_shape, int8* output_data) {
       ruy::profiler::ScopeLabel label("AveragePool/8bitWith32bitAccumulator");
    @@ -192,6 +192,7 @@ inline void AveragePool(const PoolParams& params,
                   std::min(params.filter_height, input_height - in_y_origin);
               const int filter_count =
                   (filter_x_end - filter_x_start) * (filter_y_end - filter_y_start);
    +          if (filter_count == 0) return false;
               memset(acc, 0, tranche_depth * sizeof(acc[0]));
               const int8* input_ptr =
                   input_data + depth_base +
    @@ -267,6 +268,7 @@ inline void AveragePool(const PoolParams& params,
           }
         }
       }
    +  return true;
     }
     
     }  // namespace optimized_integer_ops
    
  • tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h · +25 −21 · modified
    @@ -3761,7 +3761,7 @@ inline void BroadcastMul(const uint8* input1_data, const Dims<4>& input1_dims,
                    output_data, output_dims);
     }
     
    -inline void AveragePool(const float* input_data, const Dims<4>& input_dims,
    +inline bool AveragePool(const float* input_data, const Dims<4>& input_dims,
                             int stride_width, int stride_height, int pad_width,
                             int pad_height, int kwidth, int kheight,
                             float output_activation_min,
    @@ -3776,35 +3776,37 @@ inline void AveragePool(const float* input_data, const Dims<4>& input_dims,
       params.padding_values.width = pad_width;
       params.float_activation_min = output_activation_min;
       params.float_activation_max = output_activation_max;
    -  AveragePool(params, DimsToShape(input_dims), input_data,
    -              DimsToShape(output_dims), output_data);
    +  return AveragePool(params, DimsToShape(input_dims), input_data,
    +                     DimsToShape(output_dims), output_data);
     }
     
     // legacy, for compatibility with old checked-in code
     template <FusedActivationFunctionType Ac>
    -void AveragePool(const float* input_data, const Dims<4>& input_dims,
    +bool AveragePool(const float* input_data, const Dims<4>& input_dims,
                      int stride_width, int stride_height, int pad_width,
                      int pad_height, int kwidth, int kheight, float* output_data,
                      const Dims<4>& output_dims) {
       float output_activation_min, output_activation_max;
       GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
     
    -  AveragePool(input_data, input_dims, stride_width, stride_height, pad_width,
    -              pad_height, kwidth, kheight, output_activation_min,
    -              output_activation_max, output_data, output_dims);
    +  return AveragePool(input_data, input_dims, stride_width, stride_height,
    +                     pad_width, pad_height, kwidth, kheight,
    +                     output_activation_min, output_activation_max, output_data,
    +                     output_dims);
     }
     
     // legacy, for compatibility with old checked-in code
     template <FusedActivationFunctionType Ac>
    -void AveragePool(const float* input_data, const Dims<4>& input_dims, int stride,
    +bool AveragePool(const float* input_data, const Dims<4>& input_dims, int stride,
                      int pad_width, int pad_height, int filter_width,
                      int filter_height, float* output_data,
                      const Dims<4>& output_dims) {
    -  AveragePool<Ac>(input_data, input_dims, stride, stride, pad_width, pad_height,
    -                  filter_width, filter_height, output_data, output_dims);
    +  return AveragePool<Ac>(input_data, input_dims, stride, stride, pad_width,
    +                         pad_height, filter_width, filter_height, output_data,
    +                         output_dims);
     }
     
    -inline void AveragePool(const uint8* input_data, const Dims<4>& input_dims,
    +inline bool AveragePool(const uint8* input_data, const Dims<4>& input_dims,
                             int stride_width, int stride_height, int pad_width,
                             int pad_height, int filter_width, int filter_height,
                             int32 output_activation_min,
    @@ -3819,13 +3821,13 @@ inline void AveragePool(const uint8* input_data, const Dims<4>& input_dims,
       params.padding_values.width = pad_width;
       params.quantized_activation_min = output_activation_min;
       params.quantized_activation_max = output_activation_max;
    -  AveragePool(params, DimsToShape(input_dims), input_data,
    -              DimsToShape(output_dims), output_data);
    +  return AveragePool(params, DimsToShape(input_dims), input_data,
    +                     DimsToShape(output_dims), output_data);
     }
     
     // legacy, for compatibility with old checked-in code
     template <FusedActivationFunctionType Ac>
    -void AveragePool(const uint8* input_data, const Dims<4>& input_dims,
    +bool AveragePool(const uint8* input_data, const Dims<4>& input_dims,
                      int stride_width, int stride_height, int pad_width,
                      int pad_height, int filter_width, int filter_height,
                      int32 output_activation_min, int32 output_activation_max,
    @@ -3839,21 +3841,23 @@ void AveragePool(const uint8* input_data, const Dims<4>& input_dims,
         TFLITE_DCHECK_EQ(output_activation_min, 0);
         TFLITE_DCHECK_EQ(output_activation_max, 255);
       }
    -  AveragePool(input_data, input_dims, stride_width, stride_height, pad_width,
    -              pad_height, filter_width, filter_height, output_activation_min,
    -              output_activation_max, output_data, output_dims);
    +  return AveragePool(input_data, input_dims, stride_width, stride_height,
    +                     pad_width, pad_height, filter_width, filter_height,
    +                     output_activation_min, output_activation_max, output_data,
    +                     output_dims);
     }
     
     // legacy, for compatibility with old checked-in code
     template <FusedActivationFunctionType Ac>
    -void AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride,
    +bool AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride,
                      int pad_width, int pad_height, int filter_width,
                      int filter_height, int32 output_activation_min,
                      int32 output_activation_max, uint8* output_data,
                      const Dims<4>& output_dims) {
    -  AveragePool<Ac>(input_data, input_dims, stride, stride, pad_width, pad_height,
    -                  filter_width, filter_height, output_activation_min,
    -                  output_activation_max, output_data, output_dims);
    +  return AveragePool<Ac>(input_data, input_dims, stride, stride, pad_width,
    +                         pad_height, filter_width, filter_height,
    +                         output_activation_min, output_activation_max,
    +                         output_data, output_dims);
     }
     
     inline void MaxPool(const float* input_data, const Dims<4>& input_dims,
    
  • tensorflow/lite/kernels/internal/optimized/optimized_ops.h · +9 −2 · modified
    @@ -3172,7 +3172,7 @@ inline int NodeOffset(int b, int h, int w, int height, int width) {
       return (b * height + h) * width + w;
     }
     
    -inline void AveragePool(const PoolParams& params,
    +inline bool AveragePool(const PoolParams& params,
                             const RuntimeShape& input_shape,
                             const float* input_data,
                             const RuntimeShape& output_shape, float* output_data) {
    @@ -3187,6 +3187,9 @@ inline void AveragePool(const PoolParams& params,
       const int stride_height = params.stride_height;
       const int stride_width = params.stride_width;
     
    +  if (stride_height == 0) return false;
    +  if (stride_width == 0) return false;
    +
       // TODO(benoitjacob) make this a proper reference impl without Eigen!
       const auto in_mat = MapAsMatrixWithLastDimAsRows(input_data, input_shape);
       auto out_mat = MapAsMatrixWithLastDimAsRows(output_data, output_shape);
    @@ -3232,9 +3235,11 @@ inline void AveragePool(const PoolParams& params,
                                                       params.float_activation_min,
                                                       params.float_activation_max);
       }
    +
    +  return true;
     }
     
    -inline void AveragePool(const PoolParams& params,
    +inline bool AveragePool(const PoolParams& params,
                             const RuntimeShape& input_shape,
                             const uint8* input_data,
                             const RuntimeShape& output_shape, uint8* output_data) {
    @@ -3283,6 +3288,7 @@ inline void AveragePool(const PoolParams& params,
                   std::min(params.filter_height, input_height - in_y_origin);
               const int filter_count =
                   (filter_x_end - filter_x_start) * (filter_y_end - filter_y_start);
    +          if (filter_count == 0) return false;
               memset(acc, 0, tranche_depth * sizeof(acc[0]));
               const uint8* input_ptr =
                   input_data + depth_base +
    @@ -3369,6 +3375,7 @@ inline void AveragePool(const PoolParams& params,
           }
         }
       }
    +  return true;
     }
     
     inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
    
  • tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h · +6 −2 · modified
    @@ -21,7 +21,7 @@ limitations under the License.
     namespace tflite {
     namespace reference_integer_ops {
     
    -inline void AveragePool(const PoolParams& params,
    +inline bool AveragePool(const PoolParams& params,
                             const RuntimeShape& input_shape,
                             const int8_t* input_data,
                             const RuntimeShape& output_shape, int8_t* output_data) {
    @@ -66,6 +66,7 @@ inline void AveragePool(const PoolParams& params,
                   filter_count++;
                 }
               }
    +          if (filter_count == 0) return false;
               // Round to the closest integer value.
               acc = acc > 0 ? (acc + filter_count / 2) / filter_count
                             : (acc - filter_count / 2) / filter_count;
    @@ -77,6 +78,7 @@ inline void AveragePool(const PoolParams& params,
           }
         }
       }
    +  return true;
     }
     
     inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
    @@ -136,7 +138,7 @@ inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
       }
     }
     
    -inline void AveragePool(const PoolParams& params,
    +inline bool AveragePool(const PoolParams& params,
                             const RuntimeShape& input_shape,
                             const int16_t* input_data,
                             const RuntimeShape& output_shape,
    @@ -182,6 +184,7 @@ inline void AveragePool(const PoolParams& params,
                   filter_count++;
                 }
               }
    +          if (filter_count == 0) return false;
               // Round to the closest integer value.
               acc = acc > 0 ? (acc + filter_count / 2) / filter_count
                             : (acc - filter_count / 2) / filter_count;
    @@ -193,6 +196,7 @@ inline void AveragePool(const PoolParams& params,
           }
         }
       }
    +  return true;
     }
     
     inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
    
  • tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h · +25 −21 · modified
    @@ -1487,7 +1487,7 @@ void Sub(const T* input1_data, const Dims<4>& input1_dims, const T* input2_data,
           output_data);
     }
     
    -inline void AveragePool(const float* input_data, const Dims<4>& input_dims,
    +inline bool AveragePool(const float* input_data, const Dims<4>& input_dims,
                             int stride_width, int stride_height, int pad_width,
                             int pad_height, int kwidth, int kheight,
                             float output_activation_min,
    @@ -1502,8 +1502,8 @@ inline void AveragePool(const float* input_data, const Dims<4>& input_dims,
       params.padding_values.width = pad_width;
       params.float_activation_min = output_activation_min;
       params.float_activation_max = output_activation_max;
    -  AveragePool(params, DimsToShape(input_dims), input_data,
    -              DimsToShape(output_dims), output_data);
    +  return AveragePool(params, DimsToShape(input_dims), input_data,
    +                     DimsToShape(output_dims), output_data);
     }
     
     // Transitional version that will be moved shortly to legacy_reference_ops, as
    @@ -1562,29 +1562,31 @@ inline void BroadcastMul(const uint8* input1_data, const Dims<4>& input1_dims,
     
     // legacy, for compatibility with old checked-in code
     template <FusedActivationFunctionType Ac>
    -void AveragePool(const float* input_data, const Dims<4>& input_dims,
    +bool AveragePool(const float* input_data, const Dims<4>& input_dims,
                      int stride_width, int stride_height, int pad_width,
                      int pad_height, int kwidth, int kheight, float* output_data,
                      const Dims<4>& output_dims) {
       float output_activation_min, output_activation_max;
       GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
     
    -  AveragePool(input_data, input_dims, stride_width, stride_height, pad_width,
    -              pad_height, kwidth, kheight, output_activation_min,
    -              output_activation_max, output_data, output_dims);
    +  return AveragePool(input_data, input_dims, stride_width, stride_height,
    +                     pad_width, pad_height, kwidth, kheight,
    +                     output_activation_min, output_activation_max, output_data,
    +                     output_dims);
     }
     
     // legacy, for compatibility with old checked-in code
     template <FusedActivationFunctionType Ac>
    -void AveragePool(const float* input_data, const Dims<4>& input_dims, int stride,
    +bool AveragePool(const float* input_data, const Dims<4>& input_dims, int stride,
                      int pad_width, int pad_height, int filter_width,
                      int filter_height, float* output_data,
                      const Dims<4>& output_dims) {
    -  AveragePool<Ac>(input_data, input_dims, stride, stride, pad_width, pad_height,
    -                  filter_width, filter_height, output_data, output_dims);
    +  return AveragePool<Ac>(input_data, input_dims, stride, stride, pad_width,
    +                         pad_height, filter_width, filter_height, output_data,
    +                         output_dims);
     }
     
    -inline void AveragePool(const uint8* input_data, const Dims<4>& input_dims,
    +inline bool AveragePool(const uint8* input_data, const Dims<4>& input_dims,
                             int stride_width, int stride_height, int pad_width,
                             int pad_height, int filter_width, int filter_height,
                             int32 output_activation_min,
    @@ -1599,13 +1601,13 @@ inline void AveragePool(const uint8* input_data, const Dims<4>& input_dims,
       params.padding_values.width = pad_width;
       params.quantized_activation_min = output_activation_min;
       params.quantized_activation_max = output_activation_max;
    -  AveragePool(params, DimsToShape(input_dims), input_data,
    -              DimsToShape(output_dims), output_data);
    +  return AveragePool(params, DimsToShape(input_dims), input_data,
    +                     DimsToShape(output_dims), output_data);
     }
     
     // legacy, for compatibility with old checked-in code
     template <FusedActivationFunctionType Ac>
    -void AveragePool(const uint8* input_data, const Dims<4>& input_dims,
    +bool AveragePool(const uint8* input_data, const Dims<4>& input_dims,
                      int stride_width, int stride_height, int pad_width,
                      int pad_height, int filter_width, int filter_height,
                      int32 output_activation_min, int32 output_activation_max,
    @@ -1619,21 +1621,23 @@ void AveragePool(const uint8* input_data, const Dims<4>& input_dims,
         TFLITE_DCHECK_EQ(output_activation_min, 0);
         TFLITE_DCHECK_EQ(output_activation_max, 255);
       }
    -  AveragePool(input_data, input_dims, stride_width, stride_height, pad_width,
    -              pad_height, filter_width, filter_height, output_activation_min,
    -              output_activation_max, output_data, output_dims);
    +  return AveragePool(input_data, input_dims, stride_width, stride_height,
    +                     pad_width, pad_height, filter_width, filter_height,
    +                     output_activation_min, output_activation_max, output_data,
    +                     output_dims);
     }
     
     // legacy, for compatibility with old checked-in code
     template <FusedActivationFunctionType Ac>
    -void AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride,
    +bool AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride,
                      int pad_width, int pad_height, int filter_width,
                      int filter_height, int32 output_activation_min,
                      int32 output_activation_max, uint8* output_data,
                      const Dims<4>& output_dims) {
    -  AveragePool<Ac>(input_data, input_dims, stride, stride, pad_width, pad_height,
    -                  filter_width, filter_height, output_activation_min,
    -                  output_activation_max, output_data, output_dims);
    +  return AveragePool<Ac>(input_data, input_dims, stride, stride, pad_width,
    +                         pad_height, filter_width, filter_height,
    +                         output_activation_min, output_activation_max,
    +                         output_data, output_dims);
     }
     
     inline void MaxPool(const float* input_data, const Dims<4>& input_dims,
    
  • tensorflow/lite/kernels/internal/reference/pooling.h · +6 −2 · modified
    @@ -23,7 +23,7 @@ limitations under the License.
     namespace tflite {
     namespace reference_ops {
     
    -inline void AveragePool(const PoolParams& params,
    +inline bool AveragePool(const PoolParams& params,
                             const RuntimeShape& input_shape,
                             const float* input_data,
                             const RuntimeShape& output_shape, float* output_data) {
    @@ -66,6 +66,7 @@ inline void AveragePool(const PoolParams& params,
                   filter_count++;
                 }
               }
    +          if (filter_count == 0) return false;
               const float average = total / filter_count;
               output_data[Offset(output_shape, batch, out_y, out_x, channel)] =
                   ActivationFunctionWithMinMax(average, params.float_activation_min,
    @@ -74,9 +75,10 @@ inline void AveragePool(const PoolParams& params,
           }
         }
       }
    +  return true;
     }
     
    -inline void AveragePool(const PoolParams& params,
    +inline bool AveragePool(const PoolParams& params,
                             const RuntimeShape& input_shape,
                             const uint8_t* input_data,
                             const RuntimeShape& output_shape,
    @@ -122,6 +124,7 @@ inline void AveragePool(const PoolParams& params,
                   filter_count++;
                 }
               }
    +          if (filter_count == 0) return false;
               acc = (acc + filter_count / 2) / filter_count;
               acc = std::max(acc, params.quantized_activation_min);
               acc = std::min(acc, params.quantized_activation_max);
    @@ -131,6 +134,7 @@ inline void AveragePool(const PoolParams& params,
           }
         }
       }
    +  return true;
     }
     
     inline void L2Pool(const PoolParams& params, const RuntimeShape& input_shape,
    
  • tensorflow/lite/kernels/pooling.cc · +83 −77 · modified
    @@ -117,117 +117,126 @@ TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) {
     }
     
     template <KernelType kernel_type>
    -void AverageEvalFloat(TfLiteContext* context, TfLiteNode* node,
    -                      TfLitePoolParams* params, OpData* data,
    -                      const TfLiteTensor* input, TfLiteTensor* output) {
    +TfLiteStatus AverageEvalFloat(TfLiteContext* context, TfLiteNode* node,
    +                              TfLitePoolParams* params, OpData* data,
    +                              const TfLiteTensor* input, TfLiteTensor* output) {
       float activation_min, activation_max;
       CalculateActivationRange(params->activation, &activation_min,
                                &activation_max);
    -#define TF_LITE_AVERAGE_POOL(type)                                       \
    -  tflite::PoolParams op_params;                                          \
    -  op_params.stride_height = params->stride_height;                       \
    -  op_params.stride_width = params->stride_width;                         \
    -  op_params.filter_height = params->filter_height;                       \
    -  op_params.filter_width = params->filter_width;                         \
    -  op_params.padding_values.height = data->padding.height;                \
    -  op_params.padding_values.width = data->padding.width;                  \
    -  op_params.float_activation_min = activation_min;                       \
    -  op_params.float_activation_max = activation_max;                       \
    -  type::AveragePool(op_params, GetTensorShape(input),                    \
    -                    GetTensorData<float>(input), GetTensorShape(output), \
    -                    GetTensorData<float>(output))
    +#define TF_LITE_AVERAGE_POOL(type)                                            \
    +  tflite::PoolParams op_params;                                               \
    +  op_params.stride_height = params->stride_height;                            \
    +  op_params.stride_width = params->stride_width;                              \
    +  op_params.filter_height = params->filter_height;                            \
    +  op_params.filter_width = params->filter_width;                              \
    +  op_params.padding_values.height = data->padding.height;                     \
    +  op_params.padding_values.width = data->padding.width;                       \
    +  op_params.float_activation_min = activation_min;                            \
    +  op_params.float_activation_max = activation_max;                            \
    +  TF_LITE_ENSURE(context, type::AveragePool(op_params, GetTensorShape(input), \
    +                                            GetTensorData<float>(input),      \
    +                                            GetTensorShape(output),           \
    +                                            GetTensorData<float>(output)))
       if (kernel_type == kReference) {
         TF_LITE_AVERAGE_POOL(reference_ops);
       } else {
         TF_LITE_AVERAGE_POOL(optimized_ops);
       }
     #undef TF_LITE_AVERAGE_POOL
    +  return kTfLiteOk;
     }
     
     template <KernelType kernel_type>
    -void AverageEvalQuantizedUint8(TfLiteContext* context, TfLiteNode* node,
    -                               TfLitePoolParams* params, OpData* data,
    -                               const TfLiteTensor* input,
    -                               TfLiteTensor* output) {
    +TfLiteStatus AverageEvalQuantizedUint8(TfLiteContext* context, TfLiteNode* node,
    +                                       TfLitePoolParams* params, OpData* data,
    +                                       const TfLiteTensor* input,
    +                                       TfLiteTensor* output) {
       int32_t activation_min;
       int32_t activation_max;
       (void)CalculateActivationRangeQuantized(context, params->activation, output,
                                               &activation_min, &activation_max);
    -#define TF_LITE_AVERAGE_POOL(type)                                         \
    -  tflite::PoolParams op_params;                                            \
    -  op_params.stride_height = params->stride_height;                         \
    -  op_params.stride_width = params->stride_width;                           \
    -  op_params.filter_height = params->filter_height;                         \
    -  op_params.filter_width = params->filter_width;                           \
    -  op_params.padding_values.height = data->padding.height;                  \
    -  op_params.padding_values.width = data->padding.width;                    \
    -  op_params.quantized_activation_min = activation_min;                     \
    -  op_params.quantized_activation_max = activation_max;                     \
    -  type::AveragePool(op_params, GetTensorShape(input),                      \
    -                    GetTensorData<uint8_t>(input), GetTensorShape(output), \
    -                    GetTensorData<uint8_t>(output))
    +#define TF_LITE_AVERAGE_POOL(type)                                            \
    +  tflite::PoolParams op_params;                                               \
    +  op_params.stride_height = params->stride_height;                            \
    +  op_params.stride_width = params->stride_width;                              \
    +  op_params.filter_height = params->filter_height;                            \
    +  op_params.filter_width = params->filter_width;                              \
    +  op_params.padding_values.height = data->padding.height;                     \
    +  op_params.padding_values.width = data->padding.width;                       \
    +  op_params.quantized_activation_min = activation_min;                        \
    +  op_params.quantized_activation_max = activation_max;                        \
    +  TF_LITE_ENSURE(context, type::AveragePool(op_params, GetTensorShape(input), \
    +                                            GetTensorData<uint8_t>(input),    \
    +                                            GetTensorShape(output),           \
    +                                            GetTensorData<uint8_t>(output)))
       if (kernel_type == kReference) {
         TF_LITE_AVERAGE_POOL(reference_ops);
       } else {
         TF_LITE_AVERAGE_POOL(optimized_ops);
       }
     #undef TF_LITE_AVERAGE_POOL
    +  return kTfLiteOk;
     }
     
     template <KernelType kernel_type>
    -void AverageEvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node,
    -                              TfLitePoolParams* params, OpData* data,
    -                              const TfLiteTensor* input, TfLiteTensor* output) {
    +TfLiteStatus AverageEvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node,
    +                                      TfLitePoolParams* params, OpData* data,
    +                                      const TfLiteTensor* input,
    +                                      TfLiteTensor* output) {
       int32_t activation_min;
       int32_t activation_max;
     
       (void)CalculateActivationRangeQuantized(context, params->activation, output,
                                               &activation_min, &activation_max);
    -#define TF_LITE_AVERAGE_POOL(type)                                        \
    -  tflite::PoolParams op_params;                                           \
    -  op_params.stride_height = params->stride_height;                        \
    -  op_params.stride_width = params->stride_width;                          \
    -  op_params.filter_height = params->filter_height;                        \
    -  op_params.filter_width = params->filter_width;                          \
    -  op_params.padding_values.height = data->padding.height;                 \
    -  op_params.padding_values.width = data->padding.width;                   \
    -  op_params.quantized_activation_min = activation_min;                    \
    -  op_params.quantized_activation_max = activation_max;                    \
    -  type::AveragePool(op_params, GetTensorShape(input),                     \
    -                    GetTensorData<int8_t>(input), GetTensorShape(output), \
    -                    GetTensorData<int8_t>(output))
    +#define TF_LITE_AVERAGE_POOL(type)                                            \
    +  tflite::PoolParams op_params;                                               \
    +  op_params.stride_height = params->stride_height;                            \
    +  op_params.stride_width = params->stride_width;                              \
    +  op_params.filter_height = params->filter_height;                            \
    +  op_params.filter_width = params->filter_width;                              \
    +  op_params.padding_values.height = data->padding.height;                     \
    +  op_params.padding_values.width = data->padding.width;                       \
    +  op_params.quantized_activation_min = activation_min;                        \
    +  op_params.quantized_activation_max = activation_max;                        \
    +  TF_LITE_ENSURE(context, type::AveragePool(op_params, GetTensorShape(input), \
    +                                            GetTensorData<int8_t>(input),     \
    +                                            GetTensorShape(output),           \
    +                                            GetTensorData<int8_t>(output)))
       if (kernel_type == kReference) {
         TF_LITE_AVERAGE_POOL(reference_integer_ops);
       } else {
         TF_LITE_AVERAGE_POOL(optimized_integer_ops);
       }
     #undef TF_LITE_AVERAGE_POOL
    +  return kTfLiteOk;
     }
     
     template <KernelType kernel_type>
    -void AverageEvalQuantizedInt16(TfLiteContext* context, TfLiteNode* node,
    -                               TfLitePoolParams* params, OpData* data,
    -                               const TfLiteTensor* input,
    -                               TfLiteTensor* output) {
    +TfLiteStatus AverageEvalQuantizedInt16(TfLiteContext* context, TfLiteNode* node,
    +                                       TfLitePoolParams* params, OpData* data,
    +                                       const TfLiteTensor* input,
    +                                       TfLiteTensor* output) {
       int32_t activation_min;
       int32_t activation_max;
       CalculateActivationRangeQuantized(context, params->activation, output,
                                         &activation_min, &activation_max);
    -#define TF_LITE_AVERAGE_POOL(type)                                         \
    -  tflite::PoolParams op_params;                                            \
    -  op_params.stride_height = params->stride_height;                         \
    -  op_params.stride_width = params->stride_width;                           \
    -  op_params.filter_height = params->filter_height;                         \
    -  op_params.filter_width = params->filter_width;                           \
    -  op_params.padding_values.height = data->padding.height;                  \
    -  op_params.padding_values.width = data->padding.width;                    \
    -  op_params.quantized_activation_min = activation_min;                     \
    -  op_params.quantized_activation_max = activation_max;                     \
    -  type::AveragePool(op_params, GetTensorShape(input),                      \
    -                    GetTensorData<int16_t>(input), GetTensorShape(output), \
    -                    GetTensorData<int16_t>(output))
    +#define TF_LITE_AVERAGE_POOL(type)                                            \
    +  tflite::PoolParams op_params;                                               \
    +  op_params.stride_height = params->stride_height;                            \
    +  op_params.stride_width = params->stride_width;                              \
    +  op_params.filter_height = params->filter_height;                            \
    +  op_params.filter_width = params->filter_width;                              \
    +  op_params.padding_values.height = data->padding.height;                     \
    +  op_params.padding_values.width = data->padding.width;                       \
    +  op_params.quantized_activation_min = activation_min;                        \
    +  op_params.quantized_activation_max = activation_max;                        \
    +  TF_LITE_ENSURE(context, type::AveragePool(op_params, GetTensorShape(input), \
    +                                            GetTensorData<int16_t>(input),    \
    +                                            GetTensorShape(output),           \
    +                                            GetTensorData<int16_t>(output)))
       TF_LITE_AVERAGE_POOL(reference_integer_ops);
     #undef TF_LITE_AVERAGE_POOL
    +  return kTfLiteOk;
     }
     
     template <KernelType kernel_type>
    @@ -380,20 +389,17 @@ TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) {
       TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
       switch (input->type) {  // Already know in/out types are same.
         case kTfLiteFloat32:
    -      AverageEvalFloat<kernel_type>(context, node, params, data, input, output);
    -      break;
    +      return AverageEvalFloat<kernel_type>(context, node, params, data, input,
    +                                           output);
         case kTfLiteUInt8:
    -      AverageEvalQuantizedUint8<kernel_type>(context, node, params, data, input,
    -                                             output);
    -      break;
    +      return AverageEvalQuantizedUint8<kernel_type>(context, node, params, data,
    +                                                    input, output);
         case kTfLiteInt8:
    -      AverageEvalQuantizedInt8<kernel_type>(context, node, params, data, input,
    -                                            output);
    -      break;
    +      return AverageEvalQuantizedInt8<kernel_type>(context, node, params, data,
    +                                                   input, output);
         case kTfLiteInt16:
    -      AverageEvalQuantizedInt16<kernel_type>(context, node, params, data, input,
    -                                             output);
    -      break;
    +      return AverageEvalQuantizedInt16<kernel_type>(context, node, params, data,
    +                                                    input, output);
         default:
           TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.",
                              TfLiteTypeGetName(input->type));
    

Vulnerability mechanics

Generated automatically (null/stub generator) on May 9, 2026. Inputs: CWE entries and fix-commit diffs from this CVE's patches. Citations were validated against the source bundle.

References

11

News mentions

0

No linked articles in our index yet.