Initialize ArithmeticParams & OpDataReduce (#2337)
There are several structs, allocated either in the arena or on the stack, that are never fully initialized. Individual operations typically fill in only the fields of a struct that they need. This can lead to hard-to-find bugs where a field is read despite never having been initialized. This PR resolves two such instances found in issue #2336.
The first is in the reference ADD kernel, which uses ArithmeticParams without initializing it. This is resolved by zero-initializing the struct at the point of declaration.
The second is the OpDataReduce object created in the persistent arena for Reduce kernels. This is resolved by performing a placement new on the buffer returned from AllocatePersistentBuffer. This is a pattern we already use elsewhere for such allocations, and one we should likely adopt in all Init functions.
BUG=#2336
diff --git a/tensorflow/lite/micro/kernels/add.cc b/tensorflow/lite/micro/kernels/add.cc
index b27206c..fde047a 100644
--- a/tensorflow/lite/micro/kernels/add.cc
+++ b/tensorflow/lite/micro/kernels/add.cc
@@ -38,7 +38,7 @@
const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) {
switch (output->type) {
case kTfLiteFloat32: {
- tflite::ArithmeticParams op_params;
+ tflite::ArithmeticParams op_params = {};
SetActivationParams(data->output_activation_min_f32,
data->output_activation_max_f32, &op_params);
if (data->requires_broadcast) {
@@ -59,7 +59,7 @@
}
} break;
case kTfLiteInt32: {
- tflite::ArithmeticParams op_params;
+ tflite::ArithmeticParams op_params = {};
SetActivationParams(std::numeric_limits<int32_t>::lowest(),
std::numeric_limits<int32_t>::max(), &op_params);
if (data->requires_broadcast) {
@@ -93,7 +93,7 @@
const TfLiteEvalTensor* input1,
const TfLiteEvalTensor* input2,
TfLiteEvalTensor* output) {
- tflite::ArithmeticParams op_params;
+ tflite::ArithmeticParams op_params = {};
op_params.left_shift = data->left_shift;
op_params.input1_offset = data->input1_offset;
op_params.input1_multiplier = data->input1_multiplier;
diff --git a/tensorflow/lite/micro/kernels/reduce.cc b/tensorflow/lite/micro/kernels/reduce.cc
index ab24a82..a689d3b 100644
--- a/tensorflow/lite/micro/kernels/reduce.cc
+++ b/tensorflow/lite/micro/kernels/reduce.cc
@@ -29,7 +29,9 @@
namespace tflite {
void* InitReduce(TfLiteContext* context, const char* buffer, size_t length) {
- return context->AllocatePersistentBuffer(context, sizeof(OpDataReduce));
+ void* op_data =
+ context->AllocatePersistentBuffer(context, sizeof(OpDataReduce));
+ return new (op_data) OpDataReduce();
}
TfLiteStatus PrepareMax(TfLiteContext* context, TfLiteNode* node) {
diff --git a/tensorflow/lite/micro/kernels/reduce_common.cc b/tensorflow/lite/micro/kernels/reduce_common.cc
index 0dab49c..2c1a92a 100644
--- a/tensorflow/lite/micro/kernels/reduce_common.cc
+++ b/tensorflow/lite/micro/kernels/reduce_common.cc
@@ -74,7 +74,9 @@
TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0);
TfLiteTensor* axis = micro_context->AllocateTempInputTensor(node, 1);
+ op_data->input_zp = input->params.zero_point;
op_data->input_scale = input->params.scale;
+ op_data->output_zp = output->params.zero_point;
op_data->output_scale = output->params.scale;
op_data->num_output_elements = NumElements(output);