Delegate kernel selection for int8 conv to our library. kelvin::opt::conv_per_channel_b8 now selects from several optimized implementations, or falls back to the generic scalar implementation. Change-Id: I26ea73368ea43728575ff1557330326399ebacf7
diff --git a/tensorflow/lite/micro/kernels/kelvin/conv.cc b/tensorflow/lite/micro/kernels/kelvin/conv.cc index 28ec1e8..2e15dd7 100644 --- a/tensorflow/lite/micro/kernels/kelvin/conv.cc +++ b/tensorflow/lite/micro/kernels/kelvin/conv.cc
@@ -136,13 +136,7 @@ } case kTfLiteInt8: { const auto params_q = ConvParamsQuantized(params, data); - bool opt = !(params_q.padding_values.width > 0 || - params_q.padding_values.height > 0 || - params_q.dilation_width_factor > 1 || - params_q.dilation_height_factor > 1); - auto fn = kelvin::opt::conv_per_channel_b8; - if (!opt) fn = reference_integer_ops::ConvPerChannel; - fn( + kelvin::opt::conv_per_channel_b8( params_q, data.per_channel_output_multiplier, data.per_channel_output_shift, tflite::micro::GetTensorShape(input),