Out of bounds write in TFLite
Description
Tensorflow is an Open Source Machine Learning Framework. An attacker can craft a TFLite model that would cause a write outside of bounds of an array in TFLite. In fact, the attacker can override the linked list used by the memory allocator. This can be leveraged for an arbitrary write primitive under certain conditions. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
Affected packages
Versions sourced from the GitHub Security Advisory.
| Package | Affected versions | Patched versions |
|---|---|---|
| tensorflow (PyPI) | < 2.5.3 | 2.5.3 |
| tensorflow (PyPI) | >= 2.6.0, < 2.6.3 | 2.6.3 |
| tensorflow (PyPI) | >= 2.7.0, < 2.7.1 | 2.7.1 |
| tensorflow-cpu (PyPI) | < 2.5.3 | 2.5.3 |
| tensorflow-cpu (PyPI) | >= 2.6.0, < 2.6.3 | 2.6.3 |
| tensorflow-cpu (PyPI) | >= 2.7.0, < 2.7.1 | 2.7.1 |
| tensorflow-gpu (PyPI) | < 2.5.3 | 2.5.3 |
| tensorflow-gpu (PyPI) | >= 2.6.0, < 2.6.3 | 2.6.3 |
| tensorflow-gpu (PyPI) | >= 2.7.0, < 2.7.1 | 2.7.1 |
Affected products
- Range: >= 2.7.0, < 2.7.1
Patches
Commit 6c0b2b70eeee — "[lite] add validation check for sparse fully connected"
1 file changed · +48 −10
tensorflow/lite/kernels/fully_connected.cc+48 −10 modified@@ -928,6 +928,36 @@ TfLiteStatus EvalShuffledQuantized(TfLiteContext* context, TfLiteNode* node, return kTfLiteOk; } +// Verifies that sparsity values are valid given input/weight/output. +bool VerifySparsity(const RuntimeShape& weights_shape, + const RuntimeShape& input_shape, + const RuntimeShape& output_shape, + const TfLiteSparsity* sparsity) { + const int weights_dims_count = weights_shape.DimensionsCount(); + const int output_dims_count = output_shape.DimensionsCount(); + const int w0_size = sparsity->dim_metadata[0].dense_size; + const int accum_depth = weights_shape.Dims(weights_dims_count - 1); + const int output_elements = output_shape.FlatSize(); + const int input_elements = input_shape.FlatSize(); + const int batches = FlatSizeSkipDim(output_shape, output_dims_count - 1); + const int output_depth = MatchingDim(weights_shape, weights_dims_count - 2, + output_shape, output_dims_count - 1); + const int max_batch_index = batches - 1; + const int max_output = max_batch_index * output_depth + w0_size; + const int max_batch_depth = accum_depth * max_batch_index; + + // Verify output size is enough. + if (output_elements < max_output) return false; + + // Verify index from sparse in input is valid. 
+ for (int i = 0; i < sparsity->dim_metadata[1].array_indices->size; ++i) { + if (input_elements <= + max_batch_depth + sparsity->dim_metadata[1].array_indices->data[i]) + return false; + } + return true; +} + template <KernelType kernel_type> TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node, TfLiteFullyConnectedParams* params, OpData* data, @@ -968,24 +998,32 @@ TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node, "Unsupported sparse fully-connected weight format."); return kTfLiteError; } + const auto& input_shape = GetTensorShape(input); + const auto& filter_shape = GetTensorShape(filter); + const auto& output_shape = GetTensorShape(output); + const auto& bias_shape = GetTensorShape(bias); + if (!VerifySparsity(filter_shape, input_shape, output_shape, &sparsity)) { + TF_LITE_KERNEL_LOG(context, "Invalid sparse fully-connected format."); + return kTfLiteError; + } if (sparsity.dim_metadata_size == kDimMetadataSizeRandomSparse) { // Random sparse. optimized_ops::FullyConnectedSparseWeight( - sparsity, op_params, GetTensorShape(input), - GetTensorData<float>(input), GetTensorShape(filter), - GetTensorData<float>(filter), GetTensorShape(bias), - GetTensorData<float>(bias), GetTensorShape(output), - GetTensorData<float>(output)); + sparsity, op_params, // Disable formatting + input_shape, GetTensorData<float>(input), // Disable formatting + filter_shape, GetTensorData<float>(filter), // Disable formatting + bias_shape, GetTensorData<float>(bias), // Disable formatting + output_shape, GetTensorData<float>(output)); } else if (sparsity.dim_metadata_size == kDimMetadataSizeBlockSparse && sparsity.dim_metadata[2].dense_size == 4) { // Block sparse with block size of 1x4. 
optimized_ops::FullyConnectedSparseWeight1x4( - sparsity, op_params, GetTensorShape(input), - GetTensorData<float>(input), GetTensorShape(filter), - GetTensorData<float>(filter), GetTensorShape(bias), - GetTensorData<float>(bias), GetTensorShape(output), - GetTensorData<float>(output), + sparsity, op_params, // Disable formatting + input_shape, GetTensorData<float>(input), // Disable formatting + filter_shape, GetTensorData<float>(filter), // Disable formatting + bias_shape, GetTensorData<float>(bias), // Disable formatting + output_shape, GetTensorData<float>(output), CpuBackendContext::GetFromContext(context)); } else { TF_LITE_KERNEL_LOG(context,
Vulnerability mechanics
Generated by null/stub on May 9, 2026. Inputs: CWE entries + fix-commit diffs from this CVE's patches. Citations validated against bundle.
References
- github.com/advisories/GHSA-9c78-vcq7-7vxq (GHSA — ADVISORY)
- nvd.nist.gov/vuln/detail/CVE-2022-23561 (GHSA — ADVISORY)
- github.com/pypa/advisory-database/tree/main/vulns/tensorflow-cpu/PYSEC-2022-70.yaml (GHSA — WEB)
- github.com/pypa/advisory-database/tree/main/vulns/tensorflow-gpu/PYSEC-2022-125.yaml (GHSA — WEB)
- github.com/tensorflow/tensorflow/commit/6c0b2b70eeee588591680f5b7d5d38175fd7cdf6 (GHSA, x_refsource_MISC — WEB)
- github.com/tensorflow/tensorflow/security/advisories/GHSA-9c78-vcq7-7vxq (GHSA, x_refsource_CONFIRM — WEB)
News mentions
No linked articles in our index yet.