
Commit fad9447

Fix allocation-size-too-big crash in prepare_input_tensors (#8233)
(Adapted from an LLM-suggested fix for a fuzzer-discovered crash.)

The crash is an allocation-size-too-big error that occurs when `prepare_input_tensors` allocates the `inputs` array and the per-tensor buffers. The root cause is the lack of bounds checking on the `num_inputs` value read from the PTE file, which lets the function attempt an arbitrarily large allocation. This is exacerbated by the fact that each input tensor is allocated separately, without ever checking the total size of all tensors.

The patch fixes the crash by bounds-checking `num_inputs` before allocating the `inputs` array and by accumulating the total size of the tensor buffers against a configurable limit, returning an error instead of attempting an oversized allocation.

Differential Revision: D68876117
1 parent e7fd150 commit fad9447
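The guard pattern the commit message describes can be shown in isolation. Below is a minimal, hypothetical sketch of validating an untrusted element count and a running byte total before allocating; the limits, names, and error handling here are illustrative and are not part of this commit (the actual patch follows in the diff):

#include <cstddef>
#include <cstdlib>
#include <vector>

// Illustrative limits; the real change exposes them via PrepareInputTensorsOptions.
constexpr size_t kMaxInputs = 1024;
constexpr size_t kMaxTotalBytes = 1024ULL * 1024 * 1024; // 1 GiB

// Allocates one buffer per input, refusing to proceed if the count or the
// accumulated size exceeds its limit. Returns an empty vector on failure.
std::vector<void*> allocate_input_buffers(const std::vector<size_t>& input_sizes) {
  std::vector<void*> buffers;
  if (input_sizes.size() > kMaxInputs) {
    return buffers; // Reject an untrusted count before doing any allocation.
  }
  size_t total = 0;
  for (size_t nbytes : input_sizes) {
    total += nbytes;
    void* p = (total > kMaxTotalBytes) ? nullptr : malloc(nbytes);
    if (p == nullptr) {
      for (void* q : buffers) {
        free(q); // Roll back partial allocations.
      }
      buffers.clear();
      return buffers;
    }
    buffers.push_back(p);
  }
  return buffers;
}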

3 files changed (+94, -5 lines)

extension/runner_util/inputs.cpp (+44, -4)
@@ -22,15 +22,36 @@ using executorch::runtime::TensorInfo;
 namespace executorch {
 namespace extension {
 
-Result<BufferCleanup> prepare_input_tensors(Method& method) {
+Result<BufferCleanup> prepare_input_tensors(
+    Method& method,
+    PrepareInputTensorsOptions options) {
   MethodMeta method_meta = method.method_meta();
   size_t num_inputs = method_meta.num_inputs();
-  size_t num_allocated = 0;
+
+  // A large number of small allocations could exhaust the heap even if the
+  // total size is smaller than the limit.
+  ET_CHECK_OR_RETURN_ERROR(
+      num_inputs <= options.max_inputs,
+      InvalidProgram,
+      "Too many inputs: %zu > %zu",
+      num_inputs,
+      options.max_inputs);
+
+  // Allocate memory for the inputs array
   void** inputs = (void**)malloc(num_inputs * sizeof(void*));
+  ET_CHECK_OR_RETURN_ERROR(
+      inputs != nullptr,
+      MemoryAllocationFailed,
+      "malloc(%zd) failed",
+      num_inputs * sizeof(void*));
 
+  // Allocate memory for each input tensor.
+  size_t total_size = 0;
+  size_t num_allocated = 0;
   for (size_t i = 0; i < num_inputs; i++) {
     auto tag = method_meta.input_tag(i);
     if (!tag.ok()) {
+      // The BufferCleanup will free the inputs when it goes out of scope.
       BufferCleanup cleanup({inputs, num_allocated});
       return tag.error();
     }
@@ -40,10 +61,29 @@ Result<BufferCleanup> prepare_input_tensors(Method& method) {
     }
     Result<TensorInfo> tensor_meta = method_meta.input_tensor_meta(i);
     if (!tensor_meta.ok()) {
+      BufferCleanup cleanup({inputs, num_allocated});
       return tensor_meta.error();
     }
     // This input is a tensor. Allocate a buffer for it.
-    void* data_ptr = malloc(tensor_meta->nbytes());
+    size_t tensor_size = tensor_meta->nbytes();
+    total_size += tensor_size;
+    if (total_size > options.max_total_allocation_size) {
+      ET_LOG(
+          Error,
+          "Allocating %zu bytes for input %zu would exceed "
+          "max_total_allocation_size %zu",
+          tensor_size,
+          i,
+          options.max_total_allocation_size);
+      BufferCleanup cleanup({inputs, num_allocated});
+      return Error::InvalidProgram;
+    }
+    void* data_ptr = malloc(tensor_size);
+    if (data_ptr == nullptr) {
+      ET_LOG(Error, "malloc(%zu) failed for input %zu", tensor_size, i);
+      BufferCleanup cleanup({inputs, num_allocated});
+      return Error::MemoryAllocationFailed;
+    }
     inputs[num_allocated++] = data_ptr;
 
     // Create the tensor and set it as the input.
@@ -52,11 +92,11 @@ Result<BufferCleanup> prepare_input_tensors(Method& method) {
     if (err != Error::Ok) {
       ET_LOG(
           Error, "Failed to prepare input %zu: 0x%" PRIx32, i, (uint32_t)err);
-      // The BufferCleanup will free the inputs when it goes out of scope.
       BufferCleanup cleanup({inputs, num_allocated});
       return err;
     }
   }
+
   return BufferCleanup({inputs, num_allocated});
 }
 

extension/runner_util/inputs.h (+20, -1)
@@ -51,18 +51,37 @@ class BufferCleanup final {
   executorch::runtime::Span<void*> buffers_;
 };
 
+/// Defines options for `prepare_input_tensors()`.
+struct PrepareInputTensorsOptions {
+  /**
+   * The maximum total size in bytes of all input tensors. If the total size of
+   * all inputs exceeds this, an error is returned. This prevents allocating too
+   * much memory if the PTE file is malformed.
+   */
+  size_t max_total_allocation_size = 1024 * 1024 * 1024;
+
+  /**
+   * The maximum number of inputs to allocate. If the number of inputs exceeds
+   * this, an error is returned. This prevents allocating too much memory if the
+   * PTE file is malformed.
+   */
+  size_t max_inputs = 1024;
+};
+
 /**
  * Allocates input tensors for the provided Method, filling them with ones. Does
  * not modify inputs that are not Tensors.
  *
  * @param[in] method The Method that owns the inputs to prepare.
+ * @param[in] options Extra options for preparing the inputs.
  *
  * @returns On success, an object that owns any allocated tensor memory. It must
  *     remain alive when calling `method->execute()`.
  * @returns An error on failure.
  */
 executorch::runtime::Result<BufferCleanup> prepare_input_tensors(
-    executorch::runtime::Method& method);
+    executorch::runtime::Method& method,
+    PrepareInputTensorsOptions options = {});
 
 namespace internal {
 /**
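As a usage sketch (not part of this commit), a caller that already holds a loaded `Method` could tighten the new limits before preparing inputs. The function name `run_with_limits` and the specific limit values are made up for illustration, and the include path assumes the usual executorch repository layout:

#include <executorch/extension/runner_util/inputs.h>

using executorch::extension::BufferCleanup;
using executorch::extension::PrepareInputTensorsOptions;
using executorch::extension::prepare_input_tensors;
using executorch::runtime::Error;
using executorch::runtime::Method;
using executorch::runtime::Result;

Error run_with_limits(Method& method) {
  // Stricter-than-default limits for a PTE file that may be malformed.
  PrepareInputTensorsOptions options;
  options.max_inputs = 16;
  options.max_total_allocation_size = 64 * 1024 * 1024; // 64 MiB

  Result<BufferCleanup> inputs = prepare_input_tensors(method, options);
  if (!inputs.ok()) {
    return inputs.error();
  }
  // The BufferCleanup owns the allocated buffers; keep it alive across execute().
  return method.execute();
}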

extension/runner_util/test/inputs_test.cpp (+30, -0)
@@ -28,6 +28,7 @@ using executorch::runtime::EValue;
 using executorch::runtime::MemoryAllocator;
 using executorch::runtime::MemoryManager;
 using executorch::runtime::Method;
+using executorch::runtime::MethodMeta;
 using executorch::runtime::Program;
 using executorch::runtime::Result;
 using executorch::runtime::Span;
@@ -100,6 +101,35 @@ TEST_F(InputsTest, Smoke) {
   // the pointers.
 }
 
+TEST_F(InputsTest, ExceedingInputCountLimitFails) {
+  // The smoke test above demonstrated that we can prepare inputs with the
+  // default limits. It should fail if we lower the max below the number of
+  // actual inputs.
+  MethodMeta method_meta = method_->method_meta();
+  size_t num_inputs = method_meta.num_inputs();
+  ASSERT_GE(num_inputs, 1);
+  executorch::extension::PrepareInputTensorsOptions options;
+  options.max_inputs = num_inputs - 1;
+
+  Result<BufferCleanup> input_buffers =
+      prepare_input_tensors(*method_, options);
+  ASSERT_NE(input_buffers.error(), Error::Ok);
+}
+
+TEST_F(InputsTest, ExceedingInputAllocationLimitFails) {
+  // The smoke test above demonstrated that we can prepare inputs with the
+  // default limits. It should fail if we lower the max below the actual
+  // allocation size.
+  executorch::extension::PrepareInputTensorsOptions options;
+  // The input tensors are float32, so 1 byte will always be smaller than any
+  // non-empty input tensor.
+  options.max_total_allocation_size = 1;
+
+  Result<BufferCleanup> input_buffers =
+      prepare_input_tensors(*method_, options);
+  ASSERT_NE(input_buffers.error(), Error::Ok);
+}
+
 TEST(BufferCleanupTest, Smoke) {
   // Returns the size of the buffer at index `i`.
   auto test_buffer_size = [](size_t i) {
