Critical severity · 9.8 · OSV Advisory · Published Aug 7, 2025 · Updated Apr 15, 2026
CVE-2025-54950
Description
An out-of-bounds access vulnerability in the loading of ExecuTorch models can cause the runtime to crash and potentially result in code execution or other undesirable effects. This issue affects ExecuTorch prior to commit b6b7a16df5e7852d976d8c34c8a7e9a1b6f7d005.
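The fix commits below point at the root cause: operator kernels index the caller-supplied `Span<EValue*>` argument stack at fixed positions without first checking its length, so a malformed or truncated `.pte` program that encodes fewer arguments than a kernel expects drives those reads out of bounds. The following is a minimal, self-contained sketch of that pattern; `EValue`, `Span`, and `bad_op` here are simplified stand-ins for illustration, not ExecuTorch's real definitions.

```cpp
#include <cstddef>
#include <cstdint>

// Simplified stand-ins for the runtime types involved (illustrative only).
struct EValue { int64_t v; };

template <typename T>
struct Span {
  T* data_;
  size_t len_;
  size_t size() const { return len_; }
  T& operator[](size_t i) const { return data_[i]; }  // no bounds check
};

// Pre-patch shape of a primitive-op kernel: assumes the stack holds a, b, out.
void bad_op(Span<EValue*> stack) {
  EValue& a = *stack[0];
  EValue& b = *stack[1];
  EValue& out = *stack[2];  // out of bounds if the program encoded fewer slots
  out.v = a.v + b.v;
}

int main() {
  EValue a{3}, b{4};
  EValue* short_stack[2] = {&a, &b};      // malformed program: only two slots
  bad_op(Span<EValue*>{short_stack, 2});  // reads past the end of short_stack
}
```

What happens next depends on what sits past the end of the argument array, which is why the stated impact ranges from a crash to potential code execution.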
Affected packages
Versions sourced from the GitHub Security Advisory.
| Package | Ecosystem | Affected versions | Patched versions |
|---|---|---|---|
| executorch | PyPI | < 0.7.0 | 0.7.0 |
| org.pytorch:executorch-android | Maven | < 0.7.0 | 0.7.0 |
| github.com/pytorch/executorch | SwiftURL | < 0.7.0 | 0.7.0 |
Affected products
- Range: ciflow/binaries/all/sdym, ciflow/binaries/sdym, stable-2023-08-01, …
Patches
Commit b6b7a16df5e7: add safety check to prim kernels
4 files changed · +415 −20
kernels/prim_ops/et_copy_index.cpp · +8 −1 · modified

@@ -65,7 +65,14 @@ constexpr size_t kTensorDimensionLimit = 16;
 
 // The output of each iteration (copy_from) is copied into the copy_to tensor at
 // the specified index. This operator is supported in both ATen and lean modes.
 void et_copy_index(KernelRuntimeContext& context, Span<EValue*> stack) {
-  (void)context;
+  ET_KERNEL_CHECK_MSG(
+      context,
+      stack.size() == 3,
+      InvalidProgram,
+      /* void */,
+      "Expected %zu args, got %zu",
+      (size_t)3,
+      stack.size());
   SizesType expected_output_size[kTensorDimensionLimit];
   auto copy_to = (*stack[0]).toTensor();
kernels/prim_ops/et_view.cpp · +8 −1 · modified

@@ -66,7 +66,14 @@ bool get_view_target_size(
 } // namespace
 
 void et_view(KernelRuntimeContext& context, Span<EValue*> stack) {
-  (void)context;
+  ET_KERNEL_CHECK_MSG(
+      context,
+      stack.size() == 3,
+      InvalidProgram,
+      /* void */,
+      "Expected %zu args, got %zu",
+      (size_t)3,
+      stack.size());
   auto self = (*stack[0]).toTensor();
   auto size = (*stack[1]).toIntList();
kernels/prim_ops/register_prim_ops.cpp · +133 −17 · modified

@@ -36,7 +36,6 @@ namespace {
 }
 
 #define __NUMBER_ET_PRIM_OP_IMPL(operator, stack, context) \
-  (void)context;                                           \
   EValue& a = *stack[0];                                   \
   EValue& b = *stack[1];                                   \
   EValue& out = *stack[2];                                 \
@@ -50,11 +49,23 @@ namespace {
     out = EValue(a.toDouble() operator b.toInt());         \
   }
 
+#define __ET_PRIM_OP_NUM_ARGS_CHECK_IMPL(stack, context) \
+  ET_KERNEL_CHECK_MSG(                                    \
+      context,                                            \
+      stack.size() == 3,                                  \
+      InvalidProgram,                                     \
+      /* void */,                                         \
+      "Expected %zu args, got %zu",                       \
+      (size_t)3,                                          \
+      stack.size());
+
 #define ALGEBRA_ET_PRIM_OP(operator, stack, context) \
+  __ET_PRIM_OP_NUM_ARGS_CHECK_IMPL(stack, context)   \
   __NUMBER_ET_PRIM_OP_IMPL(operator, stack, context) \
   __ET_PRIM_OP_ERROR_IMPL(a, b, context)
 
 #define BOOLEAN_ET_PRIM_OP(operator, stack, context) \
+  __ET_PRIM_OP_NUM_ARGS_CHECK_IMPL(stack, context)   \
   __NUMBER_ET_PRIM_OP_IMPL(operator, stack, context) \
   else if (a.isBool() && b.isBool()) {               \
     out = EValue(a.toBool() operator b.toBool());    \
@@ -80,7 +91,14 @@ static Kernel prim_ops[] = {
     Kernel(
         "aten::sym_size.int",
         [](KernelRuntimeContext& context, Span<EValue*> stack) {
-          (void)context;
+          ET_KERNEL_CHECK_MSG(
+              context,
+              stack.size() == 3,
+              InvalidProgram,
+              /* void */,
+              "Expected %zu args, got %zu",
+              (size_t)3,
+              stack.size());
           EValue& self = *stack[0];
           EValue& dim = *stack[1];
           EValue& out = *stack[2];
@@ -94,7 +112,14 @@ static Kernel prim_ops[] = {
     Kernel(
         "aten::_local_scalar_dense",
         [](KernelRuntimeContext& context, Span<EValue*> stack) {
-          (void)context;
+          ET_KERNEL_CHECK_MSG(
+              context,
+              stack.size() == 2,
+              InvalidProgram,
+              /* void */,
+              "Expected %zu args, got %zu",
+              (size_t)2,
+              stack.size());
           EValue& self = *stack[0];
           EValue& out = *stack[1];
           executorch::aten::Tensor self_tensor =
@@ -113,7 +138,14 @@ static Kernel prim_ops[] = {
     Kernel(
         "aten::sym_numel",
         [](KernelRuntimeContext& context, Span<EValue*> stack) {
-          (void)context;
+          ET_KERNEL_CHECK_MSG(
+              context,
+              stack.size() == 2,
+              InvalidProgram,
+              /* void */,
+              "Expected %zu args, got %zu",
+              (size_t)2,
+              stack.size());
           EValue& self = *stack[0];
           EValue& out = *stack[1];
           executorch::aten::Tensor self_tensor =
@@ -125,7 +157,15 @@ static Kernel prim_ops[] = {
     Kernel(
         "executorch_prim::sym_max.Scalar",
         [](KernelRuntimeContext& context, Span<EValue*> stack) {
-          (void)context;
+          ET_KERNEL_CHECK_MSG(
+              context,
+              stack.size() == 3,
+              InvalidProgram,
+              /* void */,
+              "Expected %zu args, got %zu",
+              (size_t)3,
+              stack.size());
+
           EValue& a = *stack[0];
           EValue& b = *stack[1];
           EValue& out = *stack[2];
@@ -146,7 +186,14 @@ static Kernel prim_ops[] = {
     Kernel(
         "executorch_prim::sym_min.Scalar",
         [](KernelRuntimeContext& context, Span<EValue*> stack) {
-          (void)context;
+          ET_KERNEL_CHECK_MSG(
+              context,
+              stack.size() == 3,
+              InvalidProgram,
+              /* void */,
+              "Expected %zu args, got %zu",
+              (size_t)3,
+              stack.size());
           EValue& a = *stack[0];
           EValue& b = *stack[1];
           EValue& out = *stack[2];
@@ -167,7 +214,6 @@ static Kernel prim_ops[] = {
     Kernel(
         "executorch_prim::add.Scalar",
         [](KernelRuntimeContext& context, Span<EValue*> stack) {
-          (void)context;
           ALGEBRA_ET_PRIM_OP(+, stack, context);
         }),
 
@@ -197,7 +243,14 @@ static Kernel prim_ops[] = {
     Kernel(
         "executorch_prim::floordiv.Scalar",
         [](KernelRuntimeContext& context, Span<EValue*> stack) {
-          (void)context;
+          ET_KERNEL_CHECK_MSG(
+              context,
+              stack.size() == 3,
+              InvalidProgram,
+              /* void */,
+              "Expected %zu args, got %zu",
+              (size_t)3,
+              stack.size());
           EValue& a = *stack[0];
           EValue& b = *stack[1];
           EValue& out = *stack[2];
@@ -233,7 +286,14 @@ static Kernel prim_ops[] = {
         "executorch_prim::truediv.Scalar",
         [](KernelRuntimeContext& context, Span<EValue*> stack) {
           // can't use macro because of custom casting behavior
-          (void)context;
+          ET_KERNEL_CHECK_MSG(
+              context,
+              stack.size() == 3,
+              InvalidProgram,
+              /* void */,
+              "Expected %zu args, got %zu",
+              (size_t)3,
+              stack.size());
           EValue& a = *stack[0];
           EValue& b = *stack[1];
           EValue& out = *stack[2];
@@ -266,7 +326,14 @@ static Kernel prim_ops[] = {
           // can't use macro because of custom casting behavior
           // TODO: Now that we are reliably generating conversion operators,
           // we can remove the mixed type handling for other operators
-          (void)context;
+          ET_KERNEL_CHECK_MSG(
+              context,
+              stack.size() == 2,
+              InvalidProgram,
+              /* void */,
+              "Expected %zu args, got %zu",
+              (size_t)2,
+              stack.size());
           EValue& a = *stack[0];
           EValue& out = *stack[1];
           if (a.isInt()) {
@@ -318,7 +385,14 @@ static Kernel prim_ops[] = {
     Kernel(
         "executorch_prim::neg.Scalar",
         [](KernelRuntimeContext& context, Span<EValue*> stack) {
-          (void)context;
+          ET_KERNEL_CHECK_MSG(
+              context,
+              stack.size() == 2,
+              InvalidProgram,
+              /* void */,
+              "Expected %zu args, got %zu",
+              (size_t)2,
+              stack.size());
           EValue& a = *stack[0];
           EValue& out = *stack[1];
           if (a.isInt()) {
@@ -335,7 +409,14 @@ static Kernel prim_ops[] = {
     Kernel(
         "executorch_prim::floordiv.int",
         [](KernelRuntimeContext& context, Span<EValue*> stack) {
-          (void)context;
+          ET_KERNEL_CHECK_MSG(
+              context,
+              stack.size() == 3,
+              InvalidProgram,
+              /* void */,
+              "Expected %zu args, got %zu",
+              (size_t)3,
+              stack.size());
           EValue& a = *stack[0];
           EValue& b = *stack[1];
           EValue& out = *stack[2];
@@ -346,7 +427,14 @@ static Kernel prim_ops[] = {
     Kernel(
         "executorch_prim::mod.int",
         [](KernelRuntimeContext& context, Span<EValue*> stack) {
-          (void)context;
+          ET_KERNEL_CHECK_MSG(
+              context,
+              stack.size() == 3,
+              InvalidProgram,
+              /* void */,
+              "Expected %zu args, got %zu",
+              (size_t)3,
+              stack.size());
           EValue& a = *stack[0];
           EValue& b = *stack[1];
           EValue& out = *stack[2];
@@ -357,7 +445,14 @@ static Kernel prim_ops[] = {
     Kernel(
         "executorch_prim::mod.Scalar",
         [](KernelRuntimeContext& context, Span<EValue*> stack) {
-          (void)context;
+          ET_KERNEL_CHECK_MSG(
+              context,
+              stack.size() == 3,
+              InvalidProgram,
+              /* void */,
+              "Expected %zu args, got %zu",
+              (size_t)3,
+              stack.size());
           EValue& a = *stack[0];
           EValue& b = *stack[1];
           EValue& out = *stack[2];
@@ -379,7 +474,14 @@ static Kernel prim_ops[] = {
     Kernel(
         "executorch_prim::ceil.Scalar",
         [](KernelRuntimeContext& context, Span<EValue*> stack) {
-          (void)context;
+          ET_KERNEL_CHECK_MSG(
+              context,
+              stack.size() == 2,
+              InvalidProgram,
+              /* void */,
+              "Expected %zu args, got %zu",
+              (size_t)2,
+              stack.size());
           EValue& a = *stack[0];
           EValue& out = *stack[1];
           if (a.isDouble()) {
@@ -399,7 +501,14 @@ static Kernel prim_ops[] = {
     Kernel(
         "executorch_prim::round.Scalar",
         [](KernelRuntimeContext& context, Span<EValue*> stack) {
-          (void)context;
+          ET_KERNEL_CHECK_MSG(
+              context,
+              stack.size() == 2,
+              InvalidProgram,
+              /* void */,
+              "Expected %zu args, got %zu",
+              (size_t)2,
+              stack.size());
           EValue& a = *stack[0];
           EValue& out = *stack[1];
           if (a.isDouble()) {
@@ -436,7 +545,14 @@ static Kernel prim_ops[] = {
     Kernel(
         "executorch_prim::trunc.Scalar",
         [](KernelRuntimeContext& context, Span<EValue*> stack) {
-          (void)context;
+          ET_KERNEL_CHECK_MSG(
+              context,
+              stack.size() == 2,
+              InvalidProgram,
+              /* void */,
+              "Expected %zu args, got %zu",
+              (size_t)2,
+              stack.size());
           EValue& a = *stack[0];
           EValue& out = *stack[1];
           if (a.isDouble()) {
kernels/prim_ops/test/prim_ops_test.cpp · +266 −1 · modified

@@ -179,6 +179,8 @@ TEST_F(RegisterPrimOpsTest, TestAlgebraOps) {
     stack[i] = &values[i];
   }
 
+  EValue* stack2[2] = {&values[0], &values[1]};
+
   getOpsFn("executorch_prim::add.Scalar")(context_, stack);
   EXPECT_EQ(stack[2]->toInt(), 7);
 
@@ -200,7 +202,7 @@ TEST_F(RegisterPrimOpsTest, TestAlgebraOps) {
   getOpsFn("executorch_prim::mod.Scalar")(context_, stack);
   EXPECT_EQ(stack[2]->toInt(), 3);
 
-  getOpsFn("executorch_prim::sym_float.Scalar")(context_, stack);
+  getOpsFn("executorch_prim::sym_float.Scalar")(context_, stack2);
   EXPECT_FLOAT_EQ(stack[1]->toDouble(), 3.0);
 }
 
@@ -648,5 +650,268 @@ TEST_F(RegisterPrimOpsTest, TestTrunc) {
   }
 }
 
+// Test that each prim op returns InvalidProgram error when given a stack that's
+// one element shorter than expected
+TEST_F(RegisterPrimOpsTest, TestInvalidProgramErrorOnShortStack) {
+  // Test aten::sym_size.int with a stack of size 2 (missing output)
+  {
+    testing::TensorFactory<ScalarType::Int> tf;
+    Tensor self_tensor = tf.ones({3, 5});
+    EValue values[2];
+    int64_t dim = 1;
+    values[0] = EValue(self_tensor);
+    values[1] = EValue(dim);
+
+    EValue* stack[2];
+    for (size_t i = 0; i < 2; i++) {
+      stack[i] = &values[i];
+    }
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, getOpsFn("aten::sym_size.int")(context_, stack));
+    EXPECT_EQ(context_.failure_state(), torch::executor::Error::InvalidProgram);
+  }
+
+  // Test aten::sym_numel with a stack of size 1 (missing output)
+  {
+    testing::TensorFactory<ScalarType::Int> tf;
+    Tensor self_tensor = tf.ones({3, 5});
+    EValue values[1];
+    values[0] = EValue(self_tensor);
+
+    EValue* stack[1];
+    stack[0] = &values[0];
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, getOpsFn("aten::sym_numel")(context_, stack));
+    EXPECT_EQ(context_.failure_state(), torch::executor::Error::InvalidProgram);
+  }
+
+  // Test executorch_prim::sym_max.Scalar with a stack of size 2 (missing
+  // output)
+  {
+    EValue values[2];
+    int64_t a = 5;
+    int64_t b = 3;
+    values[0] = EValue(a);
+    values[1] = EValue(b);
+
+    EValue* stack[2];
+    for (size_t i = 0; i < 2; i++) {
+      stack[i] = &values[i];
+    }
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, getOpsFn("executorch_prim::sym_max.Scalar")(context_, stack));
+    EXPECT_EQ(context_.failure_state(), Error::InvalidProgram);
+  }
+
+  // Test executorch_prim::sym_min.Scalar with a stack of size 2 (missing
+  // output)
+  {
+    EValue values[2];
+    int64_t a = 5;
+    int64_t b = 3;
+    values[0] = EValue(a);
+    values[1] = EValue(b);
+
+    EValue* stack[2];
+    for (size_t i = 0; i < 2; i++) {
+      stack[i] = &values[i];
+    }
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, getOpsFn("executorch_prim::sym_min.Scalar")(context_, stack));
+    EXPECT_EQ(context_.failure_state(), Error::InvalidProgram);
+  }
+
+  // Test algebra ops with a stack of size 2 (missing output)
+  {
+    EValue values[2];
+    int64_t a = 3;
+    int64_t b = 4;
+    values[0] = EValue(a);
+    values[1] = EValue(b);
+
+    EValue* stack[2];
+    for (size_t i = 0; i < 2; i++) {
+      stack[i] = &values[i];
+    }
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, getOpsFn("executorch_prim::add.Scalar")(context_, stack));
+    EXPECT_EQ(context_.failure_state(), Error::InvalidProgram);
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, getOpsFn("executorch_prim::sub.Scalar")(context_, stack));
+    EXPECT_EQ(context_.failure_state(), Error::InvalidProgram);
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, getOpsFn("executorch_prim::mul.Scalar")(context_, stack));
+    EXPECT_EQ(context_.failure_state(), Error::InvalidProgram);
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_,
+        getOpsFn("executorch_prim::floordiv.Scalar")(context_, stack));
+    EXPECT_EQ(context_.failure_state(), Error::InvalidProgram);
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, getOpsFn("executorch_prim::truediv.Scalar")(context_, stack));
+    EXPECT_EQ(context_.failure_state(), Error::InvalidProgram);
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, getOpsFn("executorch_prim::mod.int")(context_, stack));
+    EXPECT_EQ(context_.failure_state(), Error::InvalidProgram);
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, getOpsFn("executorch_prim::mod.Scalar")(context_, stack));
+    EXPECT_EQ(context_.failure_state(), Error::InvalidProgram);
+  }
+
+  // Test executorch_prim::sym_float.Scalar with a stack of size 1 (missing
+  // output)
+  {
+    EValue values[1];
+    int64_t a = 3;
+    values[0] = EValue(a);
+
+    EValue* stack[1];
+    stack[0] = &values[0];
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_,
+        getOpsFn("executorch_prim::sym_float.Scalar")(context_, stack));
+    EXPECT_EQ(context_.failure_state(), Error::InvalidProgram);
+  }
+
+  // Test boolean ops with a stack of size 2 (missing output)
+  {
+    EValue values[2];
+    double a = 3;
+    double b = 4;
+    values[0] = EValue(a);
+    values[1] = EValue(b);
+
+    EValue* stack[2];
+    for (size_t i = 0; i < 2; i++) {
+      stack[i] = &values[i];
+    }
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, getOpsFn("executorch_prim::ge.Scalar")(context_, stack));
+    EXPECT_EQ(context_.failure_state(), Error::InvalidProgram);
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, getOpsFn("executorch_prim::gt.Scalar")(context_, stack));
+    EXPECT_EQ(context_.failure_state(), Error::InvalidProgram);
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, getOpsFn("executorch_prim::le.Scalar")(context_, stack));
+    EXPECT_EQ(context_.failure_state(), Error::InvalidProgram);
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, getOpsFn("executorch_prim::lt.Scalar")(context_, stack));
+    EXPECT_EQ(context_.failure_state(), Error::InvalidProgram);
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, getOpsFn("executorch_prim::eq.Scalar")(context_, stack));
+    EXPECT_EQ(context_.failure_state(), Error::InvalidProgram);
+  }
+
+  // Test aten::_local_scalar_dense with a stack of size 1 (missing output)
+  {
+    testing::TensorFactory<ScalarType::Int> tf;
+    Tensor self_tensor = tf.ones({1});
+    EValue values[1];
+    values[0] = EValue(self_tensor);
+
+    EValue* stack[1];
+    stack[0] = &values[0];
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, getOpsFn("aten::_local_scalar_dense")(context_, stack));
+    EXPECT_EQ(context_.failure_state(), Error::InvalidProgram);
+  }
+
+  // Test executorch_prim::neg.Scalar with a stack of size 1 (missing output)
+  {
+    EValue values[1];
+    values[0] = EValue(5.0f);
+
+    EValue* stack[1];
+    stack[0] = &values[0];
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, getOpsFn("executorch_prim::neg.Scalar")(context_, stack));
+    EXPECT_EQ(context_.failure_state(), Error::InvalidProgram);
+  }
+
+  // Test executorch_prim::et_copy_index.tensor with a stack of size 2 (missing
+  // index)
+  {
+    testing::TensorFactory<ScalarType::Int> tf;
+    auto copy_to = tf.make({2, 2}, {0, 0, 0, 0});
+    auto to_copy = tf.make({2}, {3, 4});
+
+    EValue values[2];
+    values[0] = EValue(copy_to);
+    values[1] = EValue(to_copy);
+
+    EValue* stack[2];
+    stack[0] = &values[0];
+    stack[1] = &values[1];
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_,
+        getOpsFn("executorch_prim::et_copy_index.tensor")(context_, stack));
+    EXPECT_EQ(context_.failure_state(), Error::InvalidProgram);
+  }
+
+  // Test executorch_prim::et_view.default with a stack of size 2 (missing
+  // output)
+  {
+    testing::TensorFactory<ScalarType::Int> tf;
+    auto self = tf.make({3, 2}, {1, 2, 3, 4, 5, 6});
+    auto self_evalue = EValue(self);
+
+    int64_t size[3] = {1, 3, -1};
+    EValue size_as_evals[3] = {
+        EValue(size[0]), EValue(size[1]), EValue(size[2])};
+    EValue* size_wrapped_vals[3] = {
+        &size_as_evals[0], &size_as_evals[1], &size_as_evals[2]};
+    int64_t size_unwrapped_vals[3] = {0, 0, 0};
+    EValue size_int_list_evalue = EValue(
+        BoxedEvalueList<int64_t>(size_wrapped_vals, size_unwrapped_vals, 3));
+
+    EValue* stack[2] = {&self_evalue, &size_int_list_evalue};
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_,
+        getOpsFn("executorch_prim::et_view.default")(context_, stack));
+    EXPECT_EQ(context_.failure_state(), Error::InvalidProgram);
+  }
+
+  // Test ceil, round, trunc with a stack of size 1 (missing output)
+  {
+    EValue values[1];
+    values[0] = EValue(5.5);
+
+    EValue* stack[1];
+    stack[0] = &values[0];
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, getOpsFn("executorch_prim::ceil.Scalar")(context_, stack));
+    EXPECT_EQ(context_.failure_state(), Error::InvalidProgram);
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, getOpsFn("executorch_prim::round.Scalar")(context_, stack));
+    EXPECT_EQ(context_.failure_state(), Error::InvalidProgram);
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, getOpsFn("executorch_prim::trunc.Scalar")(context_, stack));
+    EXPECT_EQ(context_.failure_state(), Error::InvalidProgram);
+  }
+}
+
 } // namespace executor
 } // namespace torch
Commit fb03b6f85596: Add safety check to generated kernels
5 files changed · +16 −2
codegen/gen.py · +8 −1 · modified

@@ -243,6 +243,10 @@ def __call__(
             argument_type_gen=argument_type_gen
         ).convert_arguments(arguments)
 
+        # +1 for the return value
+        num_boxed_args = len(binding_list) + 1
+        # This safety check does not account for optional args with default values. ET itself doesnt support default args, but when supported is added this check can be relaxed to >= # of non default arg.
+        safety_check = f"""ET_KERNEL_CHECK_MSG(context, stack.size() == {num_boxed_args}, InvalidProgram, /*void*/, \"Expected %\" ET_PRIsize_t \"args received %\" ET_PRIsize_t, (size_t){num_boxed_args}, stack.size());"""
         # for each C++ argument, generate the conversion code
         code_connector = "\n\t"
         arg_connector = ", "
@@ -292,12 +296,13 @@ def __call__(
 {indent} context.fail(torch::executor::Error::Internal);
 {indent}}}"""
         newline = "\n "
-        return "\n".join(
+        temp = "\n".join(
             [
                 f"""
 Kernel(
     "{f.namespace}::{f.func.name}",{newline + '"' + (k + '",') if k != "default" else ""}
     []({contextArg.defn()}, Span<EValue*> stack) {{
+        {safety_check}
         {code_connector.join(code_list)}
 
         {exception_boundary_begin}
@@ -313,6 +318,7 @@ def __call__(
                 for k in used_kernel_keys
             ]
        )
+        return temp
 
 
 def gen_unboxing(
@@ -534,6 +540,7 @@ def gen_headers(
         "headers": [
             "#include <executorch/runtime/core/exec_aten/exec_aten.h> // at::Tensor etc.",
             "#include <executorch/runtime/kernel/kernel_runtime_context.h>",
+            "#include <executorch/runtime/core/error.h>",
         ],
     }
     if use_aten_lib:
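To make the generated guard concrete: `num_boxed_args` counts the operator's C++ arguments plus one slot for the return value, and the `safety_check` string becomes the first statement of every generated unboxed kernel. For a hypothetical operator with two arguments (three boxed slots), the emitted entry would look roughly like the excerpt below; the operator name is made up, and this is an illustrative fragment of generated code rather than a standalone program.

```cpp
// Illustrative shape of a generated registration entry after this patch,
// for a made-up "custom_1::my_op" with two arguments plus one return slot.
Kernel(
    "custom_1::my_op",
    [](torch::executor::KernelRuntimeContext& context, Span<EValue*> stack) {
      // Guard emitted by codegen/gen.py before any stack slot is touched.
      ET_KERNEL_CHECK_MSG(context, stack.size() == 3, InvalidProgram, /*void*/,
          "Expected %" ET_PRIsize_t "args received %" ET_PRIsize_t,
          (size_t)3, stack.size());
      // ... argument unboxing and the kernel invocation follow, as before ...
    }),
```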
codegen/templates/RegisterCodegenUnboxedKernels.cpp · +1 −0 · modified

@@ -8,6 +8,7 @@
 
 #include <executorch/runtime/core/evalue.h>
 #include <executorch/runtime/core/exec_aten/exec_aten.h>
+#include <executorch/runtime/core/exec_aten/util/tensor_util.h>
 #include <executorch/runtime/core/span.h>
 #include <executorch/runtime/kernel/operator_registry.h>
 #include <executorch/runtime/platform/profiler.h>
codegen/templates/RegisterKernels.cpp · +1 −0 · modified

@@ -10,6 +10,7 @@
 // This implements register_all_kernels() API that is declared in
 // RegisterKernels.h
 #include "RegisterKernels.h"
+#include <executorch/runtime/core/exec_aten/util/tensor_util.h>
 #include "${fn_header}" // Generated Function import headers
 
 namespace torch {
codegen/test/test_executorch_gen.py · +4 −1 · modified

@@ -508,6 +508,7 @@ def test_codegen_unboxed_specialized(self) -> None:
     "custom_1::op_1",
     "v1/7;0,1,2,3|7;0,1,2,3|7;0,1,2,3",
     [](torch::executor::KernelRuntimeContext & context, Span<EValue*> stack) {
+        ET_KERNEL_CHECK_MSG(context, stack.size() == 1, InvalidProgram, /*void*/, \"Expected %\" ET_PRIsize_t \"args received %\" ET_PRIsize_t, (size_t)1, stack.size());
 """
             + """
@@ -606,6 +607,7 @@ def test_codegen_unboxed_default(self) -> None:
 Kernel(
     "custom_1::op_1",
     [](torch::executor::KernelRuntimeContext & context, Span<EValue*> stack) {
+        ET_KERNEL_CHECK_MSG(context, stack.size() == 1, InvalidProgram, /*void*/, \"Expected %\" ET_PRIsize_t \"args received %\" ET_PRIsize_t, (size_t)1, stack.size());
 """
             + """
@@ -621,7 +623,6 @@ def test_codegen_unboxed_default(self) -> None:
         ),
         """
     )
-
         self.assertEqual(expected_str, result)
 
         result = ComputeCodegenUnboxedKernels(
@@ -633,6 +634,7 @@ def test_codegen_unboxed_default(self) -> None:
 Kernel(
     "custom_1::op_1",
     [](torch::executor::KernelRuntimeContext & context, Span<EValue*> stack) {
+        ET_KERNEL_CHECK_MSG(context, stack.size() == 1, InvalidProgram, /*void*/, "Expected %" ET_PRIsize_t "args received %" ET_PRIsize_t, (size_t)1, stack.size());
 """
             + """
@@ -676,6 +678,7 @@ def test_codegen_unboxed_default_kernel_key_selected(self) -> None:
 Kernel(
     "custom_1::op_1",
     [](torch::executor::KernelRuntimeContext & context, Span<EValue*> stack) {
+        ET_KERNEL_CHECK_MSG(context, stack.size() == 1, InvalidProgram, /*void*/, "Expected %" ET_PRIsize_t "args received %" ET_PRIsize_t, (size_t)1, stack.size());
 """
             + """
shim_et/xplat/executorch/codegen/codegen.bzl · +2 −0 · modified

@@ -896,6 +896,7 @@ def executorch_generated_lib(
         exported_deps = [
             "//executorch/codegen:macros",
             "//executorch/runtime/kernel:kernel_runtime_context" + aten_suffix,
+            "//executorch/runtime/core/exec_aten/util:tensor_util" + aten_suffix,
         ],
         feature = feature,
     )
@@ -933,6 +934,7 @@ def executorch_generated_lib(
         exported_deps = [
             "//executorch/runtime/core/exec_aten:lib" + aten_suffix,
             "//executorch/runtime/kernel:kernel_runtime_context" + aten_suffix,
+            "//executorch/runtime/core/exec_aten/util:tensor_util" + aten_suffix,
         ],
         xplat_deps = xplat_deps,
         fbcode_deps = fbcode_deps,
Vulnerability mechanics
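Both patches address the same weakness. Kernels executing a loaded program receive their boxed arguments as a `Span<EValue*>` stack; before the fix, the hand-written prim-op kernels and the codegen-generated unboxed kernels dereferenced fixed indices such as `stack[0]` through `stack[2]` without verifying `stack.size()`, so a crafted or corrupted `.pte` file that declares fewer argument slots than an operator needs makes the runtime read past the end of the span. The fix inserts an `ET_KERNEL_CHECK_MSG(context, stack.size() == N, InvalidProgram, ...)` guard at the top of each kernel, so a short stack now records `InvalidProgram` on the runtime context and returns early, which is what the new tests assert. The sketch below approximates that guarded behavior with simplified stand-in types; only the size comparison and the `InvalidProgram` outcome are taken from the patches.

```cpp
#include <cstddef>
#include <cstdio>

// Simplified stand-ins for the runtime pieces involved; not ExecuTorch's APIs.
enum class Error { Ok, InvalidProgram };

struct KernelRuntimeContext {
  Error failure_state = Error::Ok;
  void fail(Error e) { failure_state = e; }
};

struct EValue { long v = 0; };

template <typename T>
struct Span {
  T* data;
  size_t len;
  size_t size() const { return len; }
  T& operator[](size_t i) const { return data[i]; }
};

// Post-patch shape of a three-slot prim op: validate the stack, then index it.
void guarded_add(KernelRuntimeContext& context, Span<EValue*> stack) {
  if (stack.size() != 3) {  // the condition ET_KERNEL_CHECK_MSG verifies
    context.fail(Error::InvalidProgram);
    return;                 // never dereference a short stack
  }
  EValue& a = *stack[0];
  EValue& b = *stack[1];
  EValue& out = *stack[2];
  out.v = a.v + b.v;
}

int main() {
  KernelRuntimeContext ctx;
  EValue a{3}, b{4};
  EValue* short_stack[2] = {&a, &b};  // malformed program: output slot missing
  guarded_add(ctx, Span<EValue*>{short_stack, 2});
  std::printf("%d\n", ctx.failure_state == Error::InvalidProgram);  // prints 1
}
```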
References
- ADVISORY: github.com/advisories/GHSA-f9hx-c6jf-3qxm (via GHSA)
- ADVISORY: nvd.nist.gov/vuln/detail/CVE-2025-54950 (via GHSA)
- WEB: github.com/pytorch/executorch/commit/b6b7a16df5e7852d976d8c34c8a7e9a1b6f7d005 (via NVD)
- WEB: github.com/pytorch/executorch/commit/fb03b6f85596a8f954d97929075335255b6a58d4 (via GHSA)
- WEB: www.facebook.com/security/advisories/cve-2025-54950 (via NVD)