Access to invalid memory during shape inference in `Cudnn*` ops
Description
TensorFlow is an open source platform for machine learning. In affected versions the shape inference code for the Cudnn* operations in TensorFlow can be tricked into accessing invalid memory, via a heap buffer overflow. This occurs because the ranks of the input, input_h and input_c parameters are not validated, but code assumes they have certain values. The fix will be included in TensorFlow 2.7.0. We will also cherrypick this commit on TensorFlow 2.6.1, TensorFlow 2.5.2, and TensorFlow 2.4.4, as these are also affected and still in supported range.
Affected packages
Versions sourced from the GitHub Security Advisory.
| Package | Affected versions | Patched versions |
|---|---|---|
| tensorflow (PyPI) | >= 2.6.0, < 2.6.1 | 2.6.1 |
| tensorflow (PyPI) | >= 2.5.0, < 2.5.2 | 2.5.2 |
| tensorflow (PyPI) | < 2.4.4 | 2.4.4 |
| tensorflow-cpu (PyPI) | >= 2.6.0, < 2.6.1 | 2.6.1 |
| tensorflow-cpu (PyPI) | >= 2.5.0, < 2.5.2 | 2.5.2 |
| tensorflow-cpu (PyPI) | < 2.4.4 | 2.4.4 |
| tensorflow-gpu (PyPI) | >= 2.6.0, < 2.6.1 | 2.6.1 |
| tensorflow-gpu (PyPI) | >= 2.5.0, < 2.5.2 | 2.5.2 |
| tensorflow-gpu (PyPI) | < 2.4.4 | 2.4.4 |
Affected products
1. Range: >= 2.6.0, < 2.6.1
Patches
1. Commit af5fcebb37c8 — "Fix access to undefined memory during shape inference of Cudnn*."
2 files changed · +77 −0
tensorflow/core/ops/cudnn_rnn_ops.cc (+21 −0, modified)

```diff
@@ -81,11 +81,17 @@ REGISTER_OP("CudnnRNN")
     .Attr("seed2: int = 0")
     .Attr("is_training: bool = true")
     .SetShapeFn([](InferenceContext* c) {
+      ShapeHandle unused;
       auto input_shape = c->input(0);
       auto input_h_shape = c->input(1);
+      TF_RETURN_IF_ERROR(c->WithRank(input_shape, 3, &unused));
+      TF_RETURN_IF_ERROR(c->WithRank(input_h_shape, 3, &unused));
+      TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 1, &unused));
+
       auto seq_length = c->Dim(input_shape, 0);
       auto batch_size = c->Dim(input_shape, 1);
       auto num_units = c->Dim(input_h_shape, 2);
+
       string direction;
       TF_RETURN_IF_ERROR(c->GetAttr("direction", &direction));
       string rnn_mode;
@@ -124,8 +130,13 @@ REGISTER_OP("CudnnRNNV2")
     .Attr("seed2: int = 0")
     .Attr("is_training: bool = true")
     .SetShapeFn([](InferenceContext* c) {
+      ShapeHandle unused;
       auto input_shape = c->input(0);
       auto input_h_shape = c->input(1);
+      TF_RETURN_IF_ERROR(c->WithRank(input_shape, 3, &unused));
+      TF_RETURN_IF_ERROR(c->WithRank(input_h_shape, 3, &unused));
+      TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 1, &unused));
+
       auto seq_length = c->Dim(input_shape, 0);
       auto batch_size = c->Dim(input_shape, 1);
       auto num_units = c->Dim(input_h_shape, 2);
@@ -171,16 +182,26 @@ REGISTER_OP("CudnnRNNV3")
     .Attr("is_training: bool = true")
     .Attr("time_major: bool = true")
     .SetShapeFn([](InferenceContext* c) {
+      ShapeHandle unused;
       auto input_shape = c->input(0);
       auto input_h_shape = c->input(1);
       auto input_c_shape = c->input(2);
+      TF_RETURN_IF_ERROR(c->WithRank(input_shape, 3, &unused));
+      TF_RETURN_IF_ERROR(c->WithRank(input_h_shape, 3, &unused));
+      TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 1, &unused));
+      TF_RETURN_IF_ERROR(c->WithRank(c->input(4), 1, &unused));
+
       auto max_seq_length = c->Dim(input_shape, 0);
       auto batch_size = c->Dim(input_shape, 1);
       auto num_units = c->Dim(input_h_shape, 2);
+
       string direction;
       TF_RETURN_IF_ERROR(c->GetAttr("direction", &direction));
       string rnn_mode;
       TF_RETURN_IF_ERROR(c->GetAttr("rnn_mode", &rnn_mode));
+      if (rnn_mode == "lstm") {
+        TF_RETURN_IF_ERROR(c->WithRank(input_c_shape, 3, &unused));
+      }
       int dir_count = (direction == "bidirectional") ? 2 : 1;
       DimensionHandle output_size;
       TF_RETURN_IF_ERROR(c->Multiply(num_units, dir_count, &output_size));
```
tensorflow/core/ops/cudnn_rnn_ops_test.cc (+56 −0, modified)

```diff
@@ -68,6 +68,11 @@ TEST(CudnnRNNOpsTest, ForwardLstm_ShapeFn) {
                    .Attr("direction", "unidirectional")
                    .Finalize(&op.node_def));
   INFER_OK(op, input_shapes_desc, output_shapes_desc);
+  INFER_ERROR("Shape must be rank 3 ", op, "[];[?,?,?];[?,?,?];[?]");
+  INFER_ERROR("Shape must be rank 3 ", op, "[?,?,?];[];[?,?,?];[?]");
+  // Disabled because the kernel does not check shape of input_c.
+  // INFER_ERROR("Shape must be rank 3 ", op, "[?,?,?];[?,?,?];[?];[?]");
+  INFER_ERROR("Shape must be rank 1 ", op, "[?,?,?];[?,?,?];[?,?,?];[]");
 }

 TEST(CudnnRNNOpsTest, ForwardV2Lstm_ShapeFn) {
@@ -100,6 +105,11 @@ TEST(CudnnRNNOpsTest, ForwardV2Lstm_ShapeFn) {
                    .Attr("direction", "unidirectional")
                    .Finalize(&op.node_def));
   INFER_OK(op, input_shapes_desc, output_shapes_desc);
+  INFER_ERROR("Shape must be rank 3 ", op, "[];[?,?,?];[?,?,?];[?]");
+  INFER_ERROR("Shape must be rank 3 ", op, "[?,?,?];[];[?,?,?];[?]");
+  // Disabled because the kernel does not check shape of input_c.
+  // INFER_ERROR("Shape must be rank 3 ", op, "[?,?,?];[?,?,?];[?];[?]");
+  INFER_ERROR("Shape must be rank 1 ", op, "[?,?,?];[?,?,?];[?,?,?];[]");
 }

 TEST(CudnnRNNOpsTest, ForwardV3Lstm_ShapeFn) {
@@ -137,6 +147,52 @@ TEST(CudnnRNNOpsTest, ForwardV3Lstm_ShapeFn) {
                    .Attr("direction", "unidirectional")
                    .Finalize(&op.node_def));
   INFER_OK(op, input_shapes_desc, output_shapes_desc);
+  INFER_ERROR("Shape must be rank 3 ", op, "[];[?,?,?];[?,?,?];[?];[?]");
+  INFER_ERROR("Shape must be rank 3 ", op, "[?,?,?];[];[?,?,?];[?];[?]");
+  INFER_ERROR("Shape must be rank 3 ", op, "[?,?,?];[?,?,?];[];[?];[?]");
+  INFER_ERROR("Shape must be rank 1 ", op, "[?,?,?];[?,?,?];[?,?,?];[];[?]");
+  INFER_ERROR("Shape must be rank 1 ", op, "[?,?,?];[?,?,?];[?,?,?];[?];[]");
+}
+
+TEST(CudnnRNNOpsTest, ForwardV3Gru) {
+  int max_seq_length = 2;
+  int batch_size = 3;
+  int num_units = 4;
+  int num_layers = 5;
+  int dir_count = 1;
+  std::vector<int> input_shape = {max_seq_length, batch_size, num_units};
+  std::vector<int> input_h_shape = {num_layers * dir_count, batch_size,
+                                    num_units};
+  std::vector<int> input_c_shape = {num_layers * dir_count, batch_size,
+                                    num_units};
+  std::vector<int> output_shape = {max_seq_length, batch_size,
+                                   num_units * dir_count};
+  std::vector<int> seq_lengths_shape = {batch_size};
+  auto shape_to_str = [](const std::vector<int>& v) {
+    return strings::StrCat("[", absl::StrJoin(v, ","), "]");
+  };
+  string input_shapes_desc = strings::StrCat(
+      shape_to_str(input_shape), ";", shape_to_str(input_h_shape), ";",
+      shape_to_str(input_c_shape), ";", "[?]", ";",
+      shape_to_str(seq_lengths_shape));
+  string output_shapes_desc = "[d0_0,d0_1,d1_2];in1;[];?;?";
+
+  ShapeInferenceTestOp op("CudnnRNNV3");
+  TF_ASSERT_OK(NodeDefBuilder("test", "CudnnRNNV3")
+                   .Input({"input", 0, DT_FLOAT})
+                   .Input({"input_h", 0, DT_FLOAT})
+                   .Input({"input_c", 0, DT_FLOAT})
+                   .Input({"params", 0, DT_FLOAT})
+                   .Input({"sequence_lengths", 0, DT_INT32})
+                   .Attr("rnn_mode", "gru")
+                   .Attr("input_mode", "auto_select")
+                   .Attr("direction", "unidirectional")
+                   .Finalize(&op.node_def));
+  INFER_OK(op, input_shapes_desc, output_shapes_desc);
+  INFER_ERROR("Shape must be rank 3 ", op, "[];[?,?,?];[];[?];[?]");
+  INFER_ERROR("Shape must be rank 3 ", op, "[?,?,?];[];[];[?];[?]");
+  INFER_ERROR("Shape must be rank 1 ", op, "[?,?,?];[?,?,?];[];[];[?]");
+  INFER_ERROR("Shape must be rank 1 ", op, "[?,?,?];[?,?,?];[];[?];[]");
 }

 }  // end namespace tensorflow
```
Vulnerability mechanics
Generated by null/stub on May 9, 2026. Inputs: CWE entries + fix-commit diffs from this CVE's patches. Citations validated against bundle.
References (7)
- github.com/advisories/GHSA-cqv6-3phm-hcwx (GHSA advisory)
- nvd.nist.gov/vuln/detail/CVE-2021-41221 (NVD entry)
- github.com/pypa/advisory-database/tree/main/vulns/tensorflow-cpu/PYSEC-2021-630.yaml (PyPA advisory)
- github.com/pypa/advisory-database/tree/main/vulns/tensorflow-gpu/PYSEC-2021-828.yaml (PyPA advisory)
- github.com/pypa/advisory-database/tree/main/vulns/tensorflow/PYSEC-2021-413.yaml (PyPA advisory)
- github.com/tensorflow/tensorflow/commit/af5fcebb37c8b5d71c237f4e59c6477015c78ce6 (fix commit)
- github.com/tensorflow/tensorflow/security/advisories/GHSA-cqv6-3phm-hcwx (vendor advisory)
News mentions
No linked articles in our index yet (0).