Reduce vld calls in ConvS8D4.

Introduce a specialized ConvS8W8D4 kernel that unrolls the hot inner
loop and further reduces the number of vld calls.
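
ConvS8W8D4's hot loop keeps the eight input columns of a block in
v0..v7 and rotates them with vmv_v between filter taps, so only one
new column is loaded per tap (8 + T - 1 vld's for T taps instead of
8 * T). A minimal scalar sketch of that idea; Column, load_column and
taps are illustrative stand-ins, not the kernel's API:

    #include <array>
    #include <cstdint>
    #include <cstring>

    struct Column { int8_t data[32]; };    // stands in for a vector register

    Column load_column(const int8_t* p) {  // stands in for one vld
      Column c;
      std::memcpy(c.data, p, sizeof(c.data));
      return c;
    }

    // Adjacent taps of one stride phase share seven of their eight input
    // columns: each step shifts the window and issues a single new load.
    // The caller guarantees the window stays inside the input buffer.
    void taps(const int8_t* in, int stride, int num_taps) {
      std::array<Column, 8> w;  // v0..v7 in the kernel
      for (int i = 0; i < 8; ++i) w[i] = load_column(in + i * stride);
      for (int tap = 0; tap < num_taps; ++tap) {
        // accumulate(w) would run here (aconv_vxv in the kernel).
        if (tap + 1 == num_taps) break;
        for (int i = 0; i < 7; ++i) w[i] = w[i + 1];  // vmv_v rotation
        w[7] = load_column(in + (8 + tap) * stride);  // the one new vld
      }
    }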

Change-Id: I77c1e6e633c27801e45846e035399b10f47e9a6b
diff --git a/tflm/opt/conv_s8.cc b/tflm/opt/conv_s8.cc
index 64e22e1..95149b3 100644
--- a/tflm/opt/conv_s8.cc
+++ b/tflm/opt/conv_s8.cc
@@ -224,6 +224,13 @@
     RUN_KERNEL(kelvin::opt::ConvPerChannelD1OD24_5x5);
   }
 
+  // special case of filter_depth = 4n, stride 2, output width >= 8, pad <= 1
+  if (dilation_width_factor == 1 && dilation_height_factor == 1 &&
+      stride_width == 2 && stride_height == 2 && filter_depth % 4 == 0 &&
+      output_depth >= 8 && output_width >= 8 && pad_width <= 1) {
+    RUN_KERNEL(kelvin::opt::ConvS8W8D4);
+  }
+
   // special case of filter_depth = 4n
   if (dilation_width_factor == 1 && dilation_height_factor == 1 &&
       stride_width <= 2 && stride_height <= 2 && filter_depth % 4 == 0 &&
diff --git a/tflm/opt/conv_s8.h b/tflm/opt/conv_s8.h
index 6450537..b79bd65 100644
--- a/tflm/opt/conv_s8.h
+++ b/tflm/opt/conv_s8.h
@@ -54,6 +54,16 @@
               const tflite::RuntimeShape& bias_shape, const int32_t* bias_data,
               const tflite::RuntimeShape& output_shape, int8_t* output_data);
 
+// filter depth 4n, W >= 8
+void ConvS8W8D4(const tflite::ConvParams& params,
+                const int32_t* output_multiplier, const int32_t* output_shift,
+                const tflite::RuntimeShape& input_shape,
+                const int8_t* input_data,
+                const tflite::RuntimeShape& filter_shape,
+                const int8_t* filter_data,
+                const tflite::RuntimeShape& bias_shape, const int32_t* bias_data,
+                const tflite::RuntimeShape& output_shape, int8_t* output_data);
+
 // filter depth 32n
 void ConvS8D32(const tflite::ConvParams& params,
                const int32_t* output_multiplier, const int32_t* output_shift,
diff --git a/tflm/opt/conv_s8_d4.cc b/tflm/opt/conv_s8_d4.cc
index 26f3885..ba1b960 100644
--- a/tflm/opt/conv_s8_d4.cc
+++ b/tflm/opt/conv_s8_d4.cc
@@ -27,6 +27,7 @@
 #include "tensorflow/lite/kernels/internal/runtime_shape.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 #include "tflm/opt/conv_s8.h"
+#include "tflm/opt/conv_util.h"
 
 #define unlikely(x) (__builtin_expect(false || (x), false))
 #define likely(x) (__builtin_expect(false || (x), true))
@@ -34,7 +35,9 @@
 namespace kelvin::opt {
 namespace {
 
-void Filter_N_H_W_M(const int8_t* input, int8_t* output, int N, int H, int W, int M) {
+// Version of Filter_N_H_W_M which also pads outputs to 8 and inputs to 4.
+void PaddedFilter_N_H_W_M(const int8_t* input, int8_t* output, int N, int H,
+                          int W, int M) {
   const int8_t(&in)[8][H][W][M] = *(int8_t(*)[8][H][W][M])input;
   int8_t(&out)[H][W][M / 4][8][4] = *(int8_t(*)[H][W][M / 4][8][4]) output;
   // assert(M >= 4);
@@ -63,19 +66,6 @@
   }
 }
 
-void Swizzle(const int32_t* input, int32_t* output, int N) {
-  assert(N <= 8);
-  const int32_t(&in)[8] = *(int32_t(*)[8])input;
-  int32_t(&out)[32] = *(int32_t(*)[32]) output;
-  // Convert to accumulator swizzle pattern.
-  memset(out, 0, 32 * sizeof(int32_t));
-  int offsets[] = {0, 16, 8, 24, 1, 17, 9, 25};
-  for (int i = 0; i < N; ++i) {
-    int offset = offsets[i];
-    out[0 + offset] = out[2 + offset] = out[4 + offset] = out[6 + offset] = in[i];
-  }
-}
-
 }  // namespace
 
 void ConvS8D4(
@@ -150,10 +140,11 @@
   int out_channel = 0;
   do {
     int out_channels_this_iter = std::min(8, output_depth - out_channel);
-    Filter_N_H_W_M(filter_data + (out_channel * filter_height * filter_width *
-                                  filter_input_depth),
-                   p_swizzled_filter_data, out_channels_this_iter, filter_height, filter_width,
-                   filter_input_depth);
+    PaddedFilter_N_H_W_M(
+        filter_data + (out_channel * filter_height * filter_width *
+                       filter_input_depth),
+        p_swizzled_filter_data, out_channels_this_iter, filter_height,
+        filter_width, filter_input_depth);
     Swizzle(bias_data + out_channel, swizzled_bias_data, out_channels_this_iter);
     Swizzle(output_multiplier + out_channel, swizzled_mult_data, out_channels_this_iter);
     Swizzle(output_shift + out_channel, swizzled_shift_data, out_channels_this_iter);
@@ -354,4 +345,518 @@
     out_channel += out_channels_this_iter;
   } while (out_channel < output_depth);
 }
+
+// Optimized for width >= 8
+void ConvS8W8D4(
+    const tflite::ConvParams& params, const int32_t* output_multiplier,
+    const int32_t* output_shift, const tflite::RuntimeShape& input_shape,
+    const int8_t* input_data, const tflite::RuntimeShape& filter_shape,
+    const int8_t* filter_data, const tflite::RuntimeShape& bias_shape,
+    const int32_t* bias_data, const tflite::RuntimeShape& output_shape,
+    int8_t* output_data) {
+  // Get parameters.
+  const int32_t input_offset = params.input_offset;  // r = s(q - Z)
+  const int32_t neg_input_offset = -params.input_offset;
+  const int stride_width = params.stride_width;
+  const int stride_height = params.stride_height;
+  const int dilation_width_factor = params.dilation_width_factor;
+  const int dilation_height_factor = params.dilation_height_factor;
+  const int pad_width = params.padding_values.width;
+  const int pad_height = params.padding_values.height;
+  const int32_t output_offset = params.output_offset;
+
+  // Set min and max value of the output.
+  const int32_t output_activation_min = params.quantized_activation_min;
+  const int32_t output_activation_max = params.quantized_activation_max;
+
+  // Consistency check.
+  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
+  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
+  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
+  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
+  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
+  const int input_depth = input_shape.Dims(3);
+  const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3);
+  if (bias_data) {
+    TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
+  }
+
+  // Check dimensions of the tensors.
+  const int input_height = input_shape.Dims(1);
+  const int input_width = input_shape.Dims(2);
+  const int filter_height = filter_shape.Dims(1);
+  const int filter_width = filter_shape.Dims(2);
+  const int filter_input_depth = filter_shape.Dims(3);
+  const int groups = input_depth / filter_input_depth;
+  TFLITE_DCHECK_NE(groups, 0);
+  TFLITE_DCHECK_EQ(input_depth % filter_input_depth, 0);
+  const int filters_per_group = output_depth / groups;
+  TFLITE_DCHECK_NE(filters_per_group, 0);
+  const int output_height = output_shape.Dims(1);
+  const int output_width = output_shape.Dims(2);
+
+  union {
+    vconv_u8_t conv;
+    uint32_t raw;
+  } cmds;
+  cmds.conv.mode = 0;
+  cmds.conv.start = 0;
+  cmds.conv.stop = 7;
+  cmds.conv.sbias1 = input_offset;
+  cmds.conv.sdata1 = true;
+  cmds.conv.sbias2 = 0;
+  cmds.conv.sdata2 = true;
+
+  const size_t swizzled_filter_data_size =
+      8 * filter_height * filter_width * filter_input_depth;
+  // aligned_alloc memory must be released with free(), not delete.
+  std::unique_ptr<int8_t, decltype(&::free)> swizzled_filter_data(
+      reinterpret_cast<int8_t*>(
+          ::aligned_alloc(32, swizzled_filter_data_size)),
+      ::free);
+  int8_t* p_swizzled_filter_data = swizzled_filter_data.get();
+  int32_t swizzled_bias_data[32];
+  int32_t swizzled_mult_data[32];
+  int32_t swizzled_shift_data[32];
+
+  int out_channel = 0;
+  do {
+    int out_channels_this_iter = std::min(8, output_depth - out_channel);
+    PaddedFilter_N_H_W_M(
+        filter_data + (out_channel * filter_height * filter_width *
+                       filter_input_depth),
+        p_swizzled_filter_data, out_channels_this_iter, filter_height,
+        filter_width, filter_input_depth);
+
+    if (bias_data) {
+      Swizzle(bias_data + out_channel, swizzled_bias_data,
+              out_channels_this_iter);
+      vld_w_x_m(v44, swizzled_bias_data);
+    } else {
+      vdup_w_x_m(v44, 0);
+    }
+
+    Swizzle(output_multiplier + out_channel, swizzled_mult_data, out_channels_this_iter);
+    Swizzle(output_shift + out_channel, swizzled_shift_data, out_channels_this_iter);
+
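+    // Load swizzled multipliers and shifts; negate the shifts
+    // (v60 = 0 - v60) for the requantization pipeline below.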
+    vld_w_x_m(v56, swizzled_mult_data);
+    vld_w_x_m(v60, swizzled_shift_data);
+    vrsub_w_vx_m(v60, v60, 0);
+
+    for (int batch = 0; batch < batches; ++batch) {
+      int8_t* p_output =
+          output_data + (batch * output_height * output_width * output_depth) +
+          out_channel;
+      for (int out_y = 0; out_y < output_height; ++out_y) {
+        const int in_y_origin = (out_y * stride_height) - pad_height;
+        const int out_y_offset = (out_y * output_width * output_depth);
+        int out_x = 0;
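+        // Three x passes: (1) blocks whose first tap falls in the left
+        // padding, (2) the padding-free hot loop, (3) the right-edge
+        // remainder.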
+        while ((out_x * stride_width) < pad_width) {
+          int out_xs_this_iter = 8;
+          // 8x accumulators
+          vmv_v_m(v48, v44);
+          vmv_v_m(v52, v44);
+          acset_v(v48, v48);
+
+          int in_channel = 0;
+          while (in_channel < filter_input_depth) {
+            int in_channels_this_iter =
+                std::min(filter_input_depth - in_channel, 32);
+            // Calculate first valid filter_y
+            int filter_y = 0;
+            {
+              int in_y = in_y_origin;
+              while (in_y < 0) {
+                ++filter_y;
+                in_y += (dilation_height_factor);
+              }
+            }
+            for (; filter_y < filter_height; ++filter_y) {
+              const int y_filter_offset =
+                  (filter_y * filter_width * 8 * input_depth);
+              const int in_y = in_y_origin + dilation_height_factor * filter_y;
+              if (in_y >= input_height) {
+                break;
+              }
+              const int8_t* p_in =
+                  input_data + in_channel + (in_y * input_width * input_depth) +
+                  (batch * input_height * input_width * input_depth);
+
+              int in_x[8];
+#pragma GCC unroll 8
+              for (int i = 0; i < 8; ++i) {
+                in_x[i] = ((out_x + i) * stride_width) - pad_width;
+              }
+
+              for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
+                const int8_t* p_in_x[8];
+
+#pragma GCC unroll 8
+                for (int i = 0; i < 8; ++i) {
+                  p_in_x[i] = p_in + (in_x[i] * input_depth);
+                }
+
+                int stride = input_depth * stride_width;
+
+                if (in_x[0] < 0) {
+                  vdup_b_x(v0, -input_offset);
+                  vld_b_s_xx(v1, p_in_x[1], stride);
+                  vld_b_s_xx(v2, p_in_x[2], stride);
+                  vld_b_s_xx(v3, p_in_x[3], stride);
+                  vld_b_s_xx_m(v4, p_in_x[4], stride);
+                } else {
+                  // Inputs
+                  vld_b_s_xx_m(v0, p_in_x[0], stride);
+                  vld_b_s_xx_m(v4, p_in_x[4], stride);
+                }
+                size_t local_filter_offset = y_filter_offset +
+                                             (filter_x * 8 * input_depth) +
+                                             (in_channel * 8);
+                int8_t* p_local_filter_start =
+                    p_swizzled_filter_data + local_filter_offset;
+                vld_b_p_x_m(v8, p_local_filter_start);
+                vld_b_x_m(v12, p_local_filter_start);
+
+                cmds.conv.stop = (in_channels_this_iter / 4) - 1;
+                aconv_vxv(v48, v0, cmds, v8);
+
+#pragma GCC unroll 8
+                for (int i = 0; i < 8; ++i) {
+                  in_x[i] += dilation_width_factor;
+                }
+              }
+            }
+            in_channel += in_channels_this_iter;
+          }
+
+          vcget(v48);
+          INT32_TO_INT8_OUTPUT_PIPELINE_INPLACE2(
+              v48, v52, v56, v60, output_activation_min, output_activation_max,
+              output_offset);
+          vsraqs_b_vx(v48, v48, 0);
+          vsraqs_b_vx(v52, v52, 0);
+          int i = 0;
+          int8_t* p_out = p_output + out_y_offset + (out_x * output_depth);
+          for (; i < std::min(4, out_xs_this_iter); i++) {
+            vst_b_l_xx(v48, p_out, out_channels_this_iter);
+            p_out += output_depth;
+            vsliden_h_4_vv(v48, v48, v48);
+          }
+          for (; i < out_xs_this_iter; i++) {
+            vst_b_l_xx(v52, p_out, out_channels_this_iter);
+            p_out += output_depth;
+            vsliden_h_4_vv(v52, v52, v52);
+          }
+
+          out_x += out_xs_this_iter;
+        }  // ((out_x * stride_width) < pad_width)
+
+        // Hot loop, no x padding
+        int right_x = ((out_x + 7) * stride_width) + filter_width - pad_width;
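+        // right_x is in input coordinates; with stride 2, comparing against
+        // output_width conservatively keeps both the 8-wide output block and
+        // its input window in bounds. The remainder loop handles the rest.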
+        while (right_x < output_width) {
+          int out_xs_this_iter = 8;
+          // 8x accumulators
+          vmv_v_m(v48, v44);
+          vmv_v_m(v52, v44);
+          acset_v(v48, v48);
+          int in_channel = 0;
+          while (in_channel < filter_input_depth) {
+            int in_channels_this_iter =
+                std::min(filter_input_depth - in_channel, 32);
+            cmds.conv.stop = (in_channels_this_iter / 4) - 1;
+
+            // Calculate first valid filter_y
+            int filter_y = 0;
+            {
+              int in_y = in_y_origin;
+              while (in_y < 0) {
+                ++filter_y;
+                in_y += (dilation_height_factor);
+              }
+            }
+            for (; filter_y < filter_height; ++filter_y) {
+              const int y_filter_offset =
+                  (filter_y * filter_width * 8 * input_depth);
+              const int in_y = in_y_origin + dilation_height_factor * filter_y;
+              if (in_y >= input_height) {
+                break;
+              }
+              const int8_t* p_in =
+                  input_data + in_channel + (in_y * input_width * input_depth) +
+                  (batch * input_height * input_width * input_depth);
+
+              int in_x = (out_x * stride_width) - pad_width;
+
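+              // Process the taps in stride_width phases: within a phase the
+              // eight outputs read input columns a fixed stride apart, so
+              // registers can rotate between taps with a single new vld.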
+              for (int s = 0; s < stride_width; s++) {
+                int filter_x = s;
+                int stride = input_depth * stride_width;
+
+                const int8_t* p_in_x0 = p_in +
+                    ((in_x + filter_x) * input_depth);
+                vld_b_s_xx_m(v0, p_in_x0, stride);
+                p_in_x0 += 4 * stride;
+                vld_b_s_xx_m(v4, p_in_x0, stride);
+                p_in_x0 += 4 * stride;
+
+                {
+                  size_t local_filter_offset = y_filter_offset +
+                                              (filter_x * 8 * input_depth) +
+                                              (in_channel * 8);
+                  int8_t* p_local_filter_start =
+                      p_swizzled_filter_data + local_filter_offset;
+                  vld_b_p_x_m(v8, p_local_filter_start);
+                  vld_b_x_m(v12, p_local_filter_start);
+                }
+
+                aconv_vxv(v48, v0, cmds, v8);
+                filter_x += stride_width;
+
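+                // Unrolled 2x: each half rotates the seven still-valid input
+                // columns, loads one new column, and issues one aconv; the
+                // halves alternate between the v16/v24 and v0/v8 register
+                // groups.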
+                for (; filter_x + stride_width < filter_width;
+                       filter_x += 2 * stride_width) {
+                  // Iteration 1
+                  vmv_v(v16, v1);
+                  vmv_v(v17, v2);
+                  vmv_v(v18, v3);
+                  vmv_v(v19, v4);
+                  vmv_v(v20, v5);
+                  vmv_v(v21, v6);
+                  vmv_v(v22, v7);
+                  vld_b_l_xx(v23, p_in_x0, in_channels_this_iter);
+                  p_in_x0 += stride;
+
+                  size_t local_filter_offset0 = y_filter_offset +
+                              (filter_x * 8 * input_depth) +
+                              (in_channel * 8);
+                  int8_t* p_local_filter_start0 =
+                      p_swizzled_filter_data + local_filter_offset0;
+                  vld_b_x_m(v24, p_local_filter_start0);
+                  vld_b_x_m(v28, p_local_filter_start0 + 128);
+
+                  aconv_vxv(v48, v16, cmds, v24);
+
+                  // Iteration 2
+                  vmv_v(v0, v17);
+                  vmv_v(v1, v18);
+                  vmv_v(v2, v19);
+                  vmv_v(v3, v20);
+                  vmv_v(v4, v21);
+                  vmv_v(v5, v22);
+                  vmv_v(v6, v23);
+                  vld_b_l_xx(v7, p_in_x0, in_channels_this_iter);
+                  p_in_x0 += stride;
+
+                  size_t local_filter_offset1 = y_filter_offset +
+                              ((filter_x + stride_width) * 8 * input_depth) +
+                              (in_channel * 8);
+                  int8_t* p_local_filter_start1 =
+                      p_swizzled_filter_data + local_filter_offset1;
+                  vld_b_x_m(v8, p_local_filter_start1);
+                  vld_b_x_m(v12, p_local_filter_start1 + 128);
+
+                  aconv_vxv(v48, v0, cmds, v8);
+                }
+
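+                // Handle the final tap of this phase when one remains.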
+                for (; filter_x < filter_width; filter_x += stride_width) {
+                  // Iteration 1
+                  vmv_v(v16, v1);
+                  vmv_v(v17, v2);
+                  vmv_v(v18, v3);
+                  vmv_v(v19, v4);
+                  vmv_v(v20, v5);
+                  vmv_v(v21, v6);
+                  vmv_v(v22, v7);
+                  vld_b_l_xx(v23, p_in_x0, in_channels_this_iter);
+                  p_in_x0 += stride;
+
+                  size_t local_filter_offset = y_filter_offset +
+                              (filter_x * 8 * input_depth) +
+                              (in_channel * 8);
+                  int8_t* p_local_filter_start =
+                      p_swizzled_filter_data + local_filter_offset;
+                  vld_b_x_m(v24, p_local_filter_start);
+                  vld_b_x_m(v28, p_local_filter_start + 128);
+
+                  aconv_vxv(v48, v16, cmds, v24);
+                }
+              }
+            }
+            in_channel += in_channels_this_iter;
+          }  // while (in_channel < filter_input_depth);
+          vcget(v48);
+          INT32_TO_INT8_OUTPUT_PIPELINE_INPLACE2(
+              v48, v52, v56, v60, output_activation_min, output_activation_max,
+              output_offset);
+
+          vsraqs_b_vx(v48, v48, 0);
+          vsraqs_b_vx(v52, v52, 0);
+          int i = 0;
+          int8_t* p_out = p_output + out_y_offset + (out_x * output_depth);
+          for (; i < std::min(4, out_xs_this_iter); i++) {
+            vst_b_l_xx(v48, p_out, out_channels_this_iter);
+            p_out += output_depth;
+            vsliden_h_4_vv(v48, v48, v48);
+          }
+          for (; i < out_xs_this_iter; i++) {
+            vst_b_l_xx(v52, p_out, out_channels_this_iter);
+            p_out += output_depth;
+            vsliden_h_4_vv(v52, v52, v52);
+          }
+
+          right_x += out_xs_this_iter * stride_width;
+          out_x += out_xs_this_iter;
+        }
+
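+        // Right-edge remainder: per-column bounds checks handle any right
+        // padding.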
+        while (out_x < output_width) {
+          int out_xs_this_iter = std::min(8, output_width - out_x);
+          // 8x accumulators
+          vmv_v_m(v48, v44);
+          vmv_v_m(v52, v44);
+          acset_v(v48, v48);
+          int in_channel = 0;
+
+          while (in_channel < filter_input_depth) {
+            int in_channels_this_iter =
+                std::min(filter_input_depth - in_channel, 32);
+            // Calculate first valid filter_y
+            int filter_y = 0;
+            {
+              int in_y = in_y_origin;
+              while (in_y < 0) {
+                ++filter_y;
+                in_y += (dilation_height_factor);
+              }
+            }
+            for (; filter_y < filter_height; ++filter_y) {
+              const int y_filter_offset =
+                  (filter_y * filter_width * 8 * input_depth);
+              const int in_y = in_y_origin + dilation_height_factor * filter_y;
+              if (in_y >= input_height) {
+                break;
+              }
+              const int8_t* p_in =
+                  input_data + in_channel + (in_y * input_width * input_depth) +
+                  (batch * input_height * input_width * input_depth);
+
+              int in_x[8];
+#pragma GCC unroll 8
+              for (int i = 0; i < 8; ++i) {
+                in_x[i] = ((out_x + i) * stride_width) - pad_width;
+              }
+              for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
+                const int8_t* p_in_x[8];
+                int first_right_pad = -1;
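+                // Lowest column index that falls past the right edge, or -1.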
+
+#pragma GCC unroll 8
+                for (int i = 0; i < 8; ++i) {
+                  p_in_x[i] = p_in + (in_x[i] * input_depth);
+                }
+
+#pragma GCC unroll 8
+                for (int i = 7; i >= 0; --i) {
+                  if (in_x[i] < input_width) {
+                    break;
+                  }
+                  first_right_pad = i;
+                }
+                bool left_pad = (in_x[0] < 0);
+                bool right_pad = (first_right_pad != -1);
+
+                int stride = input_depth * stride_width;
+
+                if (unlikely(left_pad)) {
+                  vdup_b_x(v0, -input_offset);
+                  vld_b_s_xx(v1, p_in_x[1], stride);
+                  vld_b_s_xx(v2, p_in_x[2], stride);
+                  vld_b_s_xx(v3, p_in_x[3], stride);
+                  vld_b_s_xx_m(v4, p_in_x[4], stride);
+                } else if (unlikely(right_pad)) {
+                  int first_pad = std::min(first_right_pad, out_xs_this_iter);
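+                  // Intentional fallthrough: pad columns first_pad..7.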
+                  switch (first_pad) {
+                    case 0:
+                      vdup_b_x(v0, neg_input_offset);
+                    case 1:
+                      vdup_b_x(v1, neg_input_offset);
+                    case 2:
+                      vdup_b_x(v2, neg_input_offset);
+                    case 3:
+                      vdup_b_x(v3, neg_input_offset);
+                    case 4:
+                      vdup_b_x(v4, neg_input_offset);
+                    case 5:
+                      vdup_b_x(v5, neg_input_offset);
+                    case 6:
+                      vdup_b_x(v6, neg_input_offset);
+                    case 7:
+                      vdup_b_x(v7, neg_input_offset);
+                  }
+                  // Intentional fallthrough: load the remaining in-bounds
+                  // columns.
+                  switch (8 - first_pad) {
+                    case 0:
+                      vld_b_s_xx(v7, p_in_x[7], stride);
+                    case 1:
+                      vld_b_s_xx(v6, p_in_x[6], stride);
+                    case 2:
+                      vld_b_s_xx(v5, p_in_x[5], stride);
+                    case 3:
+                      vld_b_s_xx(v4, p_in_x[4], stride);
+                    case 4:
+                      vld_b_s_xx(v3, p_in_x[3], stride);
+                    case 5:
+                      vld_b_s_xx(v2, p_in_x[2], stride);
+                    case 6:
+                      vld_b_s_xx(v1, p_in_x[1], stride);
+                    case 7:
+                      vld_b_s_xx(v0, p_in_x[0], stride);
+                  }
+                } else if (likely(!left_pad && !right_pad)) {
+                  // Inputs
+                  vld_b_s_xx_m(v0, p_in_x[0], stride);
+                  vld_b_s_xx_m(v4, p_in_x[4], stride);
+                } else {
+                  vdup_b_x(v0, neg_input_offset);
+                  vdup_b_x(v7, neg_input_offset);
+                  vld_b_s_xx_m(v1, p_in_x[1], stride);
+                  vld_b_s_xx(v5, p_in_x[5], stride);
+                  vld_b_s_xx(v6, p_in_x[6], stride);
+                }
+                size_t local_filter_offset = y_filter_offset +
+                                             (filter_x * 8 * input_depth) +
+                                             (in_channel * 8);
+                int8_t* p_local_filter_start =
+                    p_swizzled_filter_data + local_filter_offset;
+                vld_b_p_x_m(v8, p_local_filter_start);
+                vld_b_x_m(v12, p_local_filter_start);
+
+                cmds.conv.stop = (in_channels_this_iter / 4) - 1;
+                aconv_vxv(v48, v0, cmds, v8);
+
+#pragma GCC unroll 8
+                for (int i = 0; i < 8; ++i) {
+                  in_x[i] += dilation_width_factor;
+                }
+              }
+            }
+            in_channel += in_channels_this_iter;
+          }  // while (in_channel < filter_input_depth);
+
+          vcget(v48);
+          INT32_TO_INT8_OUTPUT_PIPELINE_INPLACE2(
+              v48, v52, v56, v60, output_activation_min, output_activation_max,
+              output_offset);
+          vsraqs_b_vx(v48, v48, 0);
+          vsraqs_b_vx(v52, v52, 0);
+
+          int i = 0;
+          int8_t* p_out = p_output + out_y_offset + (out_x * output_depth);
+          for (; i < std::min(4, out_xs_this_iter); i++) {
+            vst_b_l_xx(v48, p_out, out_channels_this_iter);
+            p_out += output_depth;
+            vsliden_h_4_vv(v48, v48, v48);
+          }
+          for (; i < out_xs_this_iter; i++) {
+            vst_b_l_xx(v52, p_out, out_channels_this_iter);
+            p_out += output_depth;
+            vsliden_h_4_vv(v52, v52, v52);
+          }
+
+          out_x += out_xs_this_iter;
+        } // while (out_x < output_width);
+      }
+    }
+    out_channel += out_channels_this_iter;
+  } while (out_channel < output_depth);
+}
+
 }  // namespace kelvin::opt
diff --git a/tflm/opt/conv_util.h b/tflm/opt/conv_util.h
index a142c6f..34f3857 100644
--- a/tflm/opt/conv_util.h
+++ b/tflm/opt/conv_util.h
@@ -113,23 +113,15 @@
 // Swizzle values, and duplicate 4 times for stripmining.
 inline void Swizzle(const int32_t* input, int32_t* output, int N,
                     bool negate = false) {
-  const int32_t(&in)[N] = *(int32_t(*)[N])input;
-  int32_t(&out)[N * 4] = *(int32_t(*)[N * 4]) output;
+  assert(N <= 8);
+  const int32_t(&in)[8] = *(int32_t(*)[8])input;
+  int32_t(&out)[32] = *(int32_t(*)[32]) output;
   // Convert to accumulator swizzle pattern.
-  for (int i = 0; i < N / 8; ++i) {
-    int32_t* out0 = out + i * 32 + 0;
-    int32_t* out1 = out + i * 32 + 16;
-    int32_t* out2 = out + i * 32 + 8;
-    int32_t* out3 = out + i * 32 + 24;
-    for (int j = 0; j < 4; ++j) {
-      const int32_t* p_in = in + i * 8;
-      for (int k = 0; k < 2; ++k) {
-        *out0++ = *p_in++;
-        *out1++ = *p_in++;
-        *out2++ = *p_in++;
-        *out3++ = *p_in++;
-      }
-    }
+  memset(out, 0, 32 * sizeof(int32_t));
+  int offsets[] = {0, 16, 8, 24, 1, 17, 9, 25};
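+  // Replicate each channel into 4 accumulator lanes (o, o+2, o+4, o+6).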
+  for (int i = 0; i < N; ++i) {
+    int offset = offsets[i];
+    out[0 + offset] = out[2 + offset] = out[4 + offset] = out[6 + offset] = in[i];
   }
   if (negate) {
     for (int i = 0; i < N * 4; ++i) {