Unique Init, Prepare, Eval functions for kernels A-M (#2344)

Refactor the Init, Prepare, and Eval functions to have unique names for kernels whose names start with the letters A-M.

BUG=[b/313963581](https://b.corp.google.com/issues/313963581)
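
For reference, the pattern applied throughout this diff is sketched below with a hypothetical kernel `Foo` (the name, `OpData` layout, and `Register_FOO` symbol are illustrative only); each kernel's anonymous-namespace `Init`/`Prepare`/`Eval` functions gain a kernel-specific prefix and the `RegisterOp()` call is updated to match.

```c++
namespace tflite {
namespace {

struct OpData {};  // illustrative per-kernel state

// Before: void* Init(TfLiteContext* context, const char* buffer, size_t length)
void* FooInit(TfLiteContext* context, const char* buffer, size_t length) {
  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
  return context->AllocatePersistentBuffer(context, sizeof(OpData));
}

// Before: TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node)
TfLiteStatus FooPrepare(TfLiteContext* context, TfLiteNode* node) {
  return kTfLiteOk;
}

// Before: TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node)
TfLiteStatus FooEval(TfLiteContext* context, TfLiteNode* node) {
  return kTfLiteOk;
}

}  // namespace

// Registration now refers to the uniquely named functions.
TFLMRegistration Register_FOO() {
  return tflite::micro::RegisterOp(FooInit, FooPrepare, FooEval);
}

}  // namespace tflite
```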
diff --git a/tensorflow/lite/micro/kernels/batch_matmul.cc b/tensorflow/lite/micro/kernels/batch_matmul.cc
index 9ecc4fc..bd621f4 100644
--- a/tensorflow/lite/micro/kernels/batch_matmul.cc
+++ b/tensorflow/lite/micro/kernels/batch_matmul.cc
@@ -279,7 +279,8 @@
   return swapped_shape;
 }
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* BatchMatMulInit(TfLiteContext* context, const char* buffer,
+                      size_t length) {
   // This is a builtin op, so we don't use the contents in 'buffer', if any.
   // Instead, we allocate a new object to carry information from Prepare() to
   // Eval().
@@ -288,7 +289,7 @@
   return micro_context->AllocatePersistentBuffer(sizeof(OpData));
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus BatchMatMulPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
@@ -463,7 +464,7 @@
 // RHS <..., C, B> X LHS <..., B, A>
 // where output is a C X A column-oriented, which is equivalent to
 // A X C row-oriented.
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus BatchMatMulEval(TfLiteContext* context, TfLiteNode* node) {
   EvalOpContext op_context(context, node);
   OpData* op_data = op_context.op_data;
   const TfLiteEvalTensor* lhs = op_context.lhs;
@@ -550,7 +551,8 @@
 }  // namespace
 
 TFLMRegistration Register_BATCH_MATMUL() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(BatchMatMulInit, BatchMatMulPrepare,
+                                   BatchMatMulEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/batch_to_space_nd.cc b/tensorflow/lite/micro/kernels/batch_to_space_nd.cc
index 090a040..31a1c28 100644
--- a/tensorflow/lite/micro/kernels/batch_to_space_nd.cc
+++ b/tensorflow/lite/micro/kernels/batch_to_space_nd.cc
@@ -38,7 +38,7 @@
 const int kInputOutputMinDimensionNum = 3;
 const int kInputOutputMaxDimensionNum = 4;
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus BatchToSpaceNDPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
@@ -62,7 +62,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus BatchToSpaceNDEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kInputTensor);
   const TfLiteEvalTensor* block_shape =
@@ -106,7 +106,8 @@
 }  // namespace.
 
 TFLMRegistration Register_BATCH_TO_SPACE_ND() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, BatchToSpaceNDPrepare,
+                                   BatchToSpaceNDEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/call_once.cc b/tensorflow/lite/micro/kernels/call_once.cc
index 8ad1c20..65857ef 100644
--- a/tensorflow/lite/micro/kernels/call_once.cc
+++ b/tensorflow/lite/micro/kernels/call_once.cc
@@ -36,12 +36,12 @@
   bool has_run;
 };
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* CallOnceInit(TfLiteContext* context, const char* buffer, size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context, sizeof(OpData));
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus CallOncePrepare(TfLiteContext* context, TfLiteNode* node) {
   OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
   const auto* params =
       reinterpret_cast<const TfLiteCallOnceParams*>(node->builtin_data);
@@ -60,7 +60,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus CallOnceEval(TfLiteContext* context, TfLiteNode* node) {
   OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
 
   // Call once only runs one time then is a no-op for every subsequent call.
@@ -82,7 +82,7 @@
 }  // namespace.
 
 TFLMRegistration Register_CALL_ONCE() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(CallOnceInit, CallOncePrepare, CallOnceEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/cast.cc b/tensorflow/lite/micro/kernels/cast.cc
index 10d25eb..032817a 100644
--- a/tensorflow/lite/micro/kernels/cast.cc
+++ b/tensorflow/lite/micro/kernels/cast.cc
@@ -25,7 +25,7 @@
 constexpr int kInputTensor = 0;
 constexpr int kOutputTensor = 0;
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus CastPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
@@ -77,7 +77,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus CastEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kInputTensor);
   TfLiteEvalTensor* output =
@@ -111,7 +111,7 @@
 }  // namespace
 
 TFLMRegistration Register_CAST() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, CastPrepare, CastEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/ceil.cc b/tensorflow/lite/micro/kernels/ceil.cc
index 46b55e7..36139f9 100644
--- a/tensorflow/lite/micro/kernels/ceil.cc
+++ b/tensorflow/lite/micro/kernels/ceil.cc
@@ -27,7 +27,7 @@
 constexpr int kInputTensor = 0;
 constexpr int kOutputTensor = 0;
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus CeilPrepare(TfLiteContext* context, TfLiteNode* node) {
   MicroContext* micro_context = GetMicroContext(context);
 
   TfLiteTensor* input =
@@ -50,7 +50,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus CeilEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kInputTensor);
   TfLiteEvalTensor* output =
@@ -67,7 +67,7 @@
 }  // namespace
 
 TFLMRegistration Register_CEIL() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, CeilPrepare, CeilEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/comparisons.cc b/tensorflow/lite/micro/kernels/comparisons.cc
index 4056316..69b3c61 100644
--- a/tensorflow/lite/micro/kernels/comparisons.cc
+++ b/tensorflow/lite/micro/kernels/comparisons.cc
@@ -533,7 +533,7 @@
   return context->AllocatePersistentBuffer(context, sizeof(OpData));
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus ComparisonsPrepare(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
   OpData* data = static_cast<OpData*>(node->user_data);
 
@@ -580,27 +580,27 @@
 }  // namespace
 
 TFLMRegistration Register_EQUAL() {
-  return tflite::micro::RegisterOp(Init, Prepare, EqualEval);
+  return tflite::micro::RegisterOp(Init, ComparisonsPrepare, EqualEval);
 }
 
 TFLMRegistration Register_NOT_EQUAL() {
-  return tflite::micro::RegisterOp(Init, Prepare, NotEqualEval);
+  return tflite::micro::RegisterOp(Init, ComparisonsPrepare, NotEqualEval);
 }
 
 TFLMRegistration Register_GREATER() {
-  return tflite::micro::RegisterOp(Init, Prepare, GreaterEval);
+  return tflite::micro::RegisterOp(Init, ComparisonsPrepare, GreaterEval);
 }
 
 TFLMRegistration Register_GREATER_EQUAL() {
-  return tflite::micro::RegisterOp(Init, Prepare, GreaterEqualEval);
+  return tflite::micro::RegisterOp(Init, ComparisonsPrepare, GreaterEqualEval);
 }
 
 TFLMRegistration Register_LESS() {
-  return tflite::micro::RegisterOp(Init, Prepare, LessEval);
+  return tflite::micro::RegisterOp(Init, ComparisonsPrepare, LessEval);
 }
 
 TFLMRegistration Register_LESS_EQUAL() {
-  return tflite::micro::RegisterOp(Init, Prepare, LessEqualEval);
+  return tflite::micro::RegisterOp(Init, ComparisonsPrepare, LessEqualEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/concatenation.cc b/tensorflow/lite/micro/kernels/concatenation.cc
index b4a838f..57d63a9 100644
--- a/tensorflow/lite/micro/kernels/concatenation.cc
+++ b/tensorflow/lite/micro/kernels/concatenation.cc
@@ -103,12 +103,13 @@
                                tflite::micro::GetTensorData<data_type>(output));
 }
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* ConcatenationInit(TfLiteContext* context, const char* buffer,
+                        size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context, sizeof(OpData));
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus ConcatenationPrepare(TfLiteContext* context, TfLiteNode* node) {
   // This function only checks the types. Additional shape validations are
   // performed in the reference implementation called during Eval().
   const TfLiteConcatenationParams* params =
@@ -214,7 +215,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus ConcatenationEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* output_tensor =
       tflite::micro::GetEvalOutput(context, node, kOutputTensor);
   TF_LITE_ENSURE(context, output_tensor != nullptr);
@@ -252,7 +253,8 @@
 }  // namespace
 
 TFLMRegistration Register_CONCATENATION() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(ConcatenationInit, ConcatenationPrepare,
+                                   ConcatenationEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/conv.cc b/tensorflow/lite/micro/kernels/conv.cc
index 2630089..0df35fc 100644
--- a/tensorflow/lite/micro/kernels/conv.cc
+++ b/tensorflow/lite/micro/kernels/conv.cc
@@ -27,7 +27,7 @@
 namespace tflite {
 namespace {
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus ConvEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kConvInputTensor);
   const TfLiteEvalTensor* filter =
@@ -144,7 +144,7 @@
 }  // namespace
 
 TFLMRegistration Register_CONV_2D() {
-  return tflite::micro::RegisterOp(ConvInit, ConvPrepare, Eval);
+  return tflite::micro::RegisterOp(ConvInit, ConvPrepare, ConvEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/cumsum.cc b/tensorflow/lite/micro/kernels/cumsum.cc
index f62f2a5..258cf8d 100644
--- a/tensorflow/lite/micro/kernels/cumsum.cc
+++ b/tensorflow/lite/micro/kernels/cumsum.cc
@@ -104,11 +104,11 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus CumSumPrepare(TfLiteContext* context, TfLiteNode* node) {
   return CalculateOpData(context, node);
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus CumSumEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kInputTensor);
   const TfLiteEvalTensor* axis_tensor =
@@ -169,7 +169,7 @@
 }  // namespace
 
 TFLMRegistration Register_CUMSUM() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, CumSumPrepare, CumSumEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/depth_to_space.cc b/tensorflow/lite/micro/kernels/depth_to_space.cc
index 7e0a8fa..d4faf7c 100644
--- a/tensorflow/lite/micro/kernels/depth_to_space.cc
+++ b/tensorflow/lite/micro/kernels/depth_to_space.cc
@@ -93,11 +93,11 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus DepthToSpacePrepare(TfLiteContext* context, TfLiteNode* node) {
   return CalculateOpData(context, node);
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus DepthToSpaceEval(TfLiteContext* context, TfLiteNode* node) {
   auto* params =
       reinterpret_cast<TfLiteDepthToSpaceParams*>(node->builtin_data);
 
@@ -136,7 +136,8 @@
 }  // namespace
 
 TFLMRegistration Register_DEPTH_TO_SPACE() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, DepthToSpacePrepare,
+                                   DepthToSpaceEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/depthwise_conv.cc b/tensorflow/lite/micro/kernels/depthwise_conv.cc
index 398f8cd..fa55a70 100644
--- a/tensorflow/lite/micro/kernels/depthwise_conv.cc
+++ b/tensorflow/lite/micro/kernels/depthwise_conv.cc
@@ -27,12 +27,13 @@
 namespace tflite {
 namespace {
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* DepthwiseConvInit(TfLiteContext* context, const char* buffer,
+                        size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context, sizeof(OpDataConv));
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus DepthwiseConvEval(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
   TFLITE_DCHECK(node->builtin_data != nullptr);
 
@@ -143,7 +144,8 @@
 }  // namespace
 
 TFLMRegistration Register_DEPTHWISE_CONV_2D() {
-  return tflite::micro::RegisterOp(Init, DepthwiseConvPrepare, Eval);
+  return tflite::micro::RegisterOp(DepthwiseConvInit, DepthwiseConvPrepare,
+                                   DepthwiseConvEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/detection_postprocess.cc b/tensorflow/lite/micro/kernels/detection_postprocess.cc
index e807f35..fa2d4ca 100644
--- a/tensorflow/lite/micro/kernels/detection_postprocess.cc
+++ b/tensorflow/lite/micro/kernels/detection_postprocess.cc
@@ -117,7 +117,8 @@
   TfLiteQuantizationParams input_anchors;
 };
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* DetectionPostProcessInit(TfLiteContext* context, const char* buffer,
+                               size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   OpData* op_data = nullptr;
 
@@ -149,7 +150,8 @@
   return op_data;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus DetectionPostProcessPrepare(TfLiteContext* context,
+                                         TfLiteNode* node) {
   auto* op_data = static_cast<OpData*>(node->user_data);
 
   MicroContext* micro_context = GetMicroContext(context);
@@ -774,7 +776,8 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus DetectionPostProcessEval(TfLiteContext* context,
+                                      TfLiteNode* node) {
   TF_LITE_ENSURE(context, (kBatchSize == 1));
   auto* op_data = static_cast<OpData*>(node->user_data);
 
@@ -800,7 +803,9 @@
 }  // namespace
 
 TFLMRegistration* Register_DETECTION_POSTPROCESS() {
-  static TFLMRegistration r = tflite::micro::RegisterOp(Init, Prepare, Eval);
+  static TFLMRegistration r = tflite::micro::RegisterOp(
+      DetectionPostProcessInit, DetectionPostProcessPrepare,
+      DetectionPostProcessEval);
   return &r;
 }
 
diff --git a/tensorflow/lite/micro/kernels/div.cc b/tensorflow/lite/micro/kernels/div.cc
index b29686a..a80b3f2 100644
--- a/tensorflow/lite/micro/kernels/div.cc
+++ b/tensorflow/lite/micro/kernels/div.cc
@@ -65,12 +65,12 @@
   return kTfLiteOk;
 }
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* DivInit(TfLiteContext* context, const char* buffer, size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context, sizeof(OpDataDiv));
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus DivPrepare(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
   TFLITE_DCHECK(node->builtin_data != nullptr);
 
@@ -179,7 +179,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus DivEval(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->builtin_data != nullptr);
   auto* params = static_cast<TfLiteDivParams*>(node->builtin_data);
   TFLITE_DCHECK(node->user_data != nullptr);
@@ -213,7 +213,7 @@
 }  // namespace
 
 TFLMRegistration Register_DIV() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(DivInit, DivPrepare, DivEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/embedding_lookup.cc b/tensorflow/lite/micro/kernels/embedding_lookup.cc
index 77ac0e0..6a4be87 100644
--- a/tensorflow/lite/micro/kernels/embedding_lookup.cc
+++ b/tensorflow/lite/micro/kernels/embedding_lookup.cc
@@ -65,7 +65,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus EmbeddingLookUpPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
@@ -178,7 +178,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus EmbeddingLookUpEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* lookup =
       tflite::micro::GetEvalInput(context, node, kInputTensor_0);
   const TfLiteEvalTensor* value =
@@ -207,7 +207,8 @@
 }  // namespace
 
 TFLMRegistration Register_EMBEDDING_LOOKUP() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, EmbeddingLookUpPrepare,
+                                   EmbeddingLookUpEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/exp.cc b/tensorflow/lite/micro/kernels/exp.cc
index 1a2e00c..8d1da8f 100644
--- a/tensorflow/lite/micro/kernels/exp.cc
+++ b/tensorflow/lite/micro/kernels/exp.cc
@@ -27,7 +27,7 @@
 constexpr int kInputTensor = 0;
 constexpr int kOutputTensor = 0;
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus ExpPrepare(TfLiteContext* context, TfLiteNode* node) {
   MicroContext* micro_context = GetMicroContext(context);
 
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
@@ -51,7 +51,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus ExpEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kInputTensor);
   TfLiteEvalTensor* output =
@@ -73,7 +73,7 @@
 }  // namespace
 
 TFLMRegistration Register_EXP() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, ExpPrepare, ExpEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/expand_dims.cc b/tensorflow/lite/micro/kernels/expand_dims.cc
index 0c4c6ff..6bae37b 100644
--- a/tensorflow/lite/micro/kernels/expand_dims.cc
+++ b/tensorflow/lite/micro/kernels/expand_dims.cc
@@ -82,7 +82,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus ExpandDimsPrepare(TfLiteContext* context, TfLiteNode* node) {
   MicroContext* micro_context = GetMicroContext(context);
 
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
@@ -116,7 +116,7 @@
   }
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus ExpandDimsEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kInputTensor);
   TfLiteEvalTensor* output =
@@ -143,7 +143,7 @@
 }  // namespace
 
 TFLMRegistration Register_EXPAND_DIMS() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, ExpandDimsPrepare, ExpandDimsEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/fill.cc b/tensorflow/lite/micro/kernels/fill.cc
index b1b366e..1486fcb 100644
--- a/tensorflow/lite/micro/kernels/fill.cc
+++ b/tensorflow/lite/micro/kernels/fill.cc
@@ -64,7 +64,7 @@
 constexpr int kValueTensor = 1;
 constexpr int kOutputTensor = 0;
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FillPrepare(TfLiteContext* context, TfLiteNode* node) {
   MicroContext* micro_context = GetMicroContext(context);
 
   // Ensure inputs and outputs exist.
@@ -107,7 +107,7 @@
       micro::GetTensorShape(output), micro::GetTensorData<T>(output));
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FillEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* value =
       micro::GetEvalInput(context, node, kValueTensor);
   TfLiteEvalTensor* output = micro::GetEvalOutput(context, node, kOutputTensor);
@@ -134,7 +134,7 @@
 }  // namespace
 
 TFLMRegistration Register_FILL() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, FillPrepare, FillEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/floor.cc b/tensorflow/lite/micro/kernels/floor.cc
index 094c8b5..f92b7e0 100644
--- a/tensorflow/lite/micro/kernels/floor.cc
+++ b/tensorflow/lite/micro/kernels/floor.cc
@@ -26,7 +26,7 @@
 constexpr int kInputTensor = 0;
 constexpr int kOutputTensor = 0;
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FloorEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input =
       tflite::micro::GetEvalInput(context, node, kInputTensor);
   TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
@@ -42,7 +42,7 @@
 }  // namespace
 
 TFLMRegistration Register_FLOOR() {
-  return tflite::micro::RegisterOp(nullptr, nullptr, Eval);
+  return tflite::micro::RegisterOp(nullptr, nullptr, FloorEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/floor_div.cc b/tensorflow/lite/micro/kernels/floor_div.cc
index 5c00808..9adf614 100644
--- a/tensorflow/lite/micro/kernels/floor_div.cc
+++ b/tensorflow/lite/micro/kernels/floor_div.cc
@@ -57,11 +57,11 @@
   return kTfLiteOk;
 }
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* FloorDivInit(TfLiteContext* context, const char* buffer, size_t length) {
   return nullptr;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FloorDivPrepare(TfLiteContext* context, TfLiteNode* node) {
   return CalculateOpData(context, node);
 }
 
@@ -101,7 +101,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FloorDivEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input1 =
       tflite::micro::GetEvalInput(context, node, kInputTensor1);
   const TfLiteEvalTensor* input2 =
@@ -124,7 +124,7 @@
 }  // namespace
 
 TFLMRegistration Register_FLOOR_DIV() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(FloorDivInit, FloorDivPrepare, FloorDivEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/floor_mod.cc b/tensorflow/lite/micro/kernels/floor_mod.cc
index f459892..da2a7c9 100644
--- a/tensorflow/lite/micro/kernels/floor_mod.cc
+++ b/tensorflow/lite/micro/kernels/floor_mod.cc
@@ -62,11 +62,11 @@
   return kTfLiteOk;
 }
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* FloorModInit(TfLiteContext* context, const char* buffer, size_t length) {
   return nullptr;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FloorModPrepare(TfLiteContext* context, TfLiteNode* node) {
   return CalculateOpData(context, node);
 }
 
@@ -96,7 +96,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FloorModEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* input1 =
       tflite::micro::GetEvalInput(context, node, kInputTensor1);
   const TfLiteEvalTensor* input2 =
@@ -122,7 +122,7 @@
 }  // namespace
 
 TFLMRegistration Register_FLOOR_MOD() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(FloorModInit, FloorModPrepare, FloorModEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/fully_connected.cc b/tensorflow/lite/micro/kernels/fully_connected.cc
index 54576fa..65c8379 100644
--- a/tensorflow/lite/micro/kernels/fully_connected.cc
+++ b/tensorflow/lite/micro/kernels/fully_connected.cc
@@ -26,13 +26,14 @@
 namespace tflite {
 namespace {
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* FullyConnectedInit(TfLiteContext* context, const char* buffer,
+                         size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context,
                                            sizeof(OpDataFullyConnected));
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FullyConnectedPrepare(TfLiteContext* context, TfLiteNode* node) {
   MicroContext* micro_context = GetMicroContext(context);
 
   TFLITE_DCHECK(node->user_data != nullptr);
@@ -87,7 +88,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus FullyConnectedEval(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->builtin_data != nullptr);
   const auto* params =
       static_cast<const TfLiteFullyConnectedParams*>(node->builtin_data);
@@ -200,11 +201,12 @@
 }  // namespace
 
 TFLMRegistration Register_FULLY_CONNECTED() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(FullyConnectedInit, FullyConnectedPrepare,
+                                   FullyConnectedEval);
 }
 
 TFLMInferenceRegistration RegisterInference_FULLY_CONNECTED() {
-  return tflite::micro::RegisterOp(Eval);
+  return tflite::micro::RegisterOp(FullyConnectedEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/gather.cc b/tensorflow/lite/micro/kernels/gather.cc
index 9955601..a0af4c0 100644
--- a/tensorflow/lite/micro/kernels/gather.cc
+++ b/tensorflow/lite/micro/kernels/gather.cc
@@ -97,7 +97,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus GatherPrepare(TfLiteContext* context, TfLiteNode* node) {
   MicroContext* micro_context = GetMicroContext(context);
 
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
@@ -188,7 +188,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus GatherEval(TfLiteContext* context, TfLiteNode* node) {
   const auto* params =
       reinterpret_cast<const TfLiteGatherParams*>(node->builtin_data);
   const TfLiteEvalTensor* input =
@@ -218,7 +218,7 @@
 }  // namespace
 
 TFLMRegistration Register_GATHER() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, GatherPrepare, GatherEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/gather_nd.cc b/tensorflow/lite/micro/kernels/gather_nd.cc
index 3774ddd..d01af7c 100644
--- a/tensorflow/lite/micro/kernels/gather_nd.cc
+++ b/tensorflow/lite/micro/kernels/gather_nd.cc
@@ -28,7 +28,7 @@
 constexpr int kOutputTensor = 0;
 constexpr int MAX_INDICES_ND = 5;
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus GatherNdPrepare(TfLiteContext* context, TfLiteNode* node) {
   MicroContext* micro_context = GetMicroContext(context);
 
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
@@ -185,7 +185,7 @@
   return status;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus GatherNdEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteEvalTensor* params =
       tflite::micro::GetEvalInput(context, node, kParams);
   const TfLiteEvalTensor* indices =
@@ -206,7 +206,7 @@
 }  // namespace
 
 TFLMRegistration Register_GATHER_ND() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
+  return tflite::micro::RegisterOp(nullptr, GatherNdPrepare, GatherNdEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/if.cc b/tensorflow/lite/micro/kernels/if.cc
index 9143c9c..7aa8eb1 100644
--- a/tensorflow/lite/micro/kernels/if.cc
+++ b/tensorflow/lite/micro/kernels/if.cc
@@ -38,12 +38,12 @@
   int else_subgraph_index;
 };
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* IfInit(TfLiteContext* context, const char* buffer, size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context, sizeof(OpData));
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus IfPrepare(TfLiteContext* context, TfLiteNode* node) {
   OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
   const auto* params =
       reinterpret_cast<const TfLiteIfParams*>(node->builtin_data);
@@ -85,7 +85,7 @@
   return kTfLiteOk;
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus IfEval(TfLiteContext* context, TfLiteNode* node) {
   const OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
 
   tflite::MicroContext* micro_context = tflite::GetMicroContext(context);
@@ -117,7 +117,7 @@
 }  // namespace.
 
 TFLMRegistration Register_IF() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(IfInit, IfPrepare, IfEval);
 }
 
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/l2norm.cc b/tensorflow/lite/micro/kernels/l2norm.cc
index fa3601b..bde38de 100644
--- a/tensorflow/lite/micro/kernels/l2norm.cc
+++ b/tensorflow/lite/micro/kernels/l2norm.cc
@@ -33,7 +33,7 @@
 constexpr int kInputTensor = 0;
 constexpr int kOutputTensor = 0;
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus L2NormPrepare(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
   TFLITE_DCHECK(node->builtin_data != nullptr);
 
@@ -72,13 +72,13 @@
   return kTfLiteOk;
 }
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* L2NormInit(TfLiteContext* context, const char* buffer, size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context,
                                            sizeof(L2NormalizationParams));
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus L2NormEval(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
   const L2NormalizationParams& data =
       *(static_cast<const L2NormalizationParams*>(node->user_data));
@@ -132,7 +132,7 @@
 }  // namespace
 
 TFLMRegistration Register_L2NORM_REF() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(L2NormInit, L2NormPrepare, L2NormEval);
 }
 
 TFLMRegistration Register_L2_NORMALIZATION() { return Register_L2NORM_REF(); }
diff --git a/tensorflow/lite/micro/kernels/mirror_pad.cc b/tensorflow/lite/micro/kernels/mirror_pad.cc
index 4cbaf52..aa94e1b 100644
--- a/tensorflow/lite/micro/kernels/mirror_pad.cc
+++ b/tensorflow/lite/micro/kernels/mirror_pad.cc
@@ -100,7 +100,7 @@
   }
 }
 
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus MirrorPadEval(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
   TfLiteStatus status = kTfLiteOk;
   const OpDataMirrorPad* data =
@@ -161,12 +161,12 @@
   return status;
 }
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* MirrorPadInit(TfLiteContext* context, const char* buffer, size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context, sizeof(OpDataMirrorPad));
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus MirrorPadPrepare(TfLiteContext* context, TfLiteNode* node) {
   MicroContext* micro_context = GetMicroContext(context);
 
   TFLITE_DCHECK(node->user_data != nullptr);
@@ -209,7 +209,8 @@
 }  // namespace
 
 TFLMRegistration Register_MIRROR_PAD() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
+  return tflite::micro::RegisterOp(MirrorPadInit, MirrorPadPrepare,
+                                   MirrorPadEval);
 }
 
 }  // namespace tflite