Rewrite ConvPerChannelD1OD24_5x5

This new approach uses the aconv instruction to perform a full matmul.
Creative data shuffling allows the implementation to achieve significantly
better useful MAC throughput.
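
As a rough sketch (illustrative only; the 8x25-by-25x8 shape is taken from
the aconv command built in the code below), one iteration gathers 8 stride-2
output columns into an 8x25 patch matrix and multiplies it against a 25x8
slice of the filter, one bank of 8 output channels at a time:

  // Scalar model of one aconv iteration over the 25-tap (5x5) filter.
  for (int p = 0; p < 8; ++p)          // output patches 0..7
    for (int od = 0; od < 8; ++od)     // one bank of output channels
      for (int k = 0; k < 25; ++k)     // filter taps
        acc[p][od] += patch[p][k] * filter[k][od];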

The activation min/max are assumed to be the int8 limits, which eliminates
the clamping instructions in postprocessing.
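
Concretely (assuming vsraqs_b_vx saturates to int8, as its use in the new
postprocessing suggests), the narrowing step already provides the clamp:

  // With the activation range equal to [-128, 127], the saturating
  // int32 -> int8 narrow is the clamp; no separate min/max ops are needed.
  vsraqs_b_vx(v48, v48, 0);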

ambient_face_2.8.0_504x378_grayscale_per_axis_simulator_iss is used in
testing.
The following items are measured on the simulator:
- Mismatch_count is 0.
- #0 CONV_2D takes 1.52M ticks (down from 4.02M, 2.64x speedup).
- Total cycle count is now 52.9M (down from 57.9M, -8.6%).

The implementation currently contains many repeated and near-repeated
sections. These can be folded together in the future to improve
readability.

The following improvements may be feasible:
- Further tune the data shuffling sections to take less time
- Rearrange instructions at bottlenecks

Change-Id: Ibfcea94988b9ece4ea98a61c139ce44e967b4fd8
diff --git a/tflm/opt/conv_s8.cc b/tflm/opt/conv_s8.cc
index 7842b0d..10e8f41 100644
--- a/tflm/opt/conv_s8.cc
+++ b/tflm/opt/conv_s8.cc
@@ -184,13 +184,13 @@
   const auto dilation_height_factor = params.dilation_height_factor;
   const auto pad_width = params.padding_values.width;
   const auto pad_height = params.padding_values.height;
-  const auto input_batch = input_shape.Dims(0);
   const auto input_height = input_shape.Dims(1);
   const auto input_width = input_shape.Dims(2);
   const auto input_depth = input_shape.Dims(3);
   const auto filter_height = filter_shape.Dims(1);
   const auto filter_width = filter_shape.Dims(2);
   const auto filter_depth = filter_shape.Dims(3);
+  const auto output_height = output_shape.Dims(1);
   const auto output_width = output_shape.Dims(2);
   const auto output_depth = output_shape.Dims(3);
 
@@ -207,8 +207,7 @@
       stride_width == 1 && dilation_height_factor == 1 &&
       dilation_width_factor == 1 && pad_height == 0 && pad_width == 0 &&
       (input_depth == filter_depth)) {
-
-    if ((input_depth == 32) && (input_batch * input_height * input_width) >= 4) {
+    if ((input_depth == 32) && (batches * input_height * input_width) >= 4) {
       RUN_KERNEL(kelvin::opt::ConvS8K1x1D32);
     }
 
@@ -227,8 +226,14 @@
   }
 
   if (input_depth == 1 && filter_width == 5 && filter_height == 5 &&
-      stride_width == 2 &&
-      output_depth == 24) {
+      stride_width == 2 && dilation_width_factor == 1 && output_depth == 24 &&
+      params.quantized_activation_min == -128 &&
+      params.quantized_activation_max == 127 &&
+      // Special limitations of the implementation.
+      pad_height < 5 && pad_width < 5 &&
+      (output_height - 1) * stride_height - pad_height < input_height &&
+      (output_width - 1) * stride_width - pad_width < input_width &&
+      input_width >= 19) {
     RUN_KERNEL(kelvin::opt::ConvPerChannelD1OD24_5x5);
   }
 
diff --git a/tflm/opt/conv_s8_d1.cc b/tflm/opt/conv_s8_d1.cc
index c03a7f5..35b90a6 100644
--- a/tflm/opt/conv_s8_d1.cc
+++ b/tflm/opt/conv_s8_d1.cc
@@ -65,394 +65,109 @@
   }
 }
 
+// Internal helper function within ConvPerChannelD1OD24_5x5.
+__attribute__((always_inline)) inline void
+ConvPerChannelD1OD24_5x5_inputshuffle() {
+  // IN: v48-v52 (modified)
+  // OUT: v0-v7
+  // CLOBBER: v22-v23, v32-v33
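+  //
+  // Each of the 8 output patches (v0-v7) packs the 25 taps that feed one
+  // output pixel: 20 bytes from the zipped rows 0-3 plus 5 bytes of row 4
+  // (see the per-patch comments below; ranges are inclusive).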
+
+  // Zips row0123 together into v48-v51.
+  vzip_b_vv(v22, v48, v50);  // Also writes v23.
+  vzip_b_vv(v32, v49, v51);  // Also writes v33.
+  vzip_b_vv(v48, v22, v32);  // Also writes v49.
+  vzip_b_vv(v50, v23, v33);  // Also writes v51 but it's unused.
+
+  vsliden_h_1_vv(v33, v52, v52);
+  vslidep_w_3_vv(v22, v48, v48);
+  vslidep_w_1_vv(v23, v48, v48);
+  // Patch 0 is row0123[0:19] with row4[0:4].
+  vsliden_w_3_vv(v0, v22, v52);  // RHS not yet rotated.
+  // Patch 1 is row0123[8:27] with row4[2:6].
+  vsliden_w_3_vv(v1, v23, v33);
+
+  vsliden_h_2_vv(v32, v52, v52);
+  vsliden_h_3_vv(v33, v52, v52);
+  vsliden_w_1_vv(v22, v48, v49);
+  vsliden_w_3_vv(v23, v48, v49);
+  // Patch 2 is row0123[16:35] with row4[4:8].
+  vsliden_w_3_vv(v2, v22, v32);
+  // Patch 3 is row0123[24:43] with row4[6:10].
+  vsliden_w_3_vv(v3, v23, v33);
+
+  vsliden_h_3_vv(v33, v32, v32);
+  vsliden_h_4_vv(v32, v52, v52);
+  vslidep_w_3_vv(v22, v49, v49);
+  vslidep_w_1_vv(v23, v49, v49);
+  // Patch 4 is row0123[32:51] with row4[8:12].
+  vsliden_w_3_vv(v4, v22, v32);
+  // Patch 5 is row0123[40:59] with row4[10:14].
+  vsliden_w_3_vv(v5, v23, v33);
+
+  vsliden_h_3_vv(v33, v32, v32);
+  vsliden_w_3_vv(v32, v52, v52);
+  vsliden_w_1_vv(v22, v49, v50);
+  vsliden_w_3_vv(v23, v49, v50);
+  // Patch 6 is row0123[48:67] with row4[12:16].
+  vsliden_w_3_vv(v6, v22, v32);
+  // Patch 7 is row0123[56:75] with row4[14:18].
+  vsliden_w_3_vv(v7, v23, v33);
+}
+
+// Internal helper function within ConvPerChannelD1OD24_5x5.
+__attribute__((always_inline)) inline void ConvPerChannelD1OD24_5x5_postproc(
+    const int32_t* output_multiplier, const int32_t* output_shift,
+    int32_t output_offset, int32_t output_activation_min,
+    int32_t output_activation_max, int8_t* out_ptr_col0, int8_t* out_ptr_col4) {
+  // IN: acc and params
+  // OUT: memory, see out_ptr_*
+  // CLOBBER: v22-v23, v32-v33, v48-v55
+
+  // Retrieves results.
+  vcget(v48);  // v48-v55 is written.
+
+  // Postprocessing and output.
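+  // vdmulh_w_rn (saturating doubling multiply-high, rounded) followed by
+  // vsha_w_r (rounding arithmetic shift by -output_shift) is the standard
+  // TFLite per-channel requantization of the int32 accumulators.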
+  vevnodd_w_vv(v22, v48, v52);  // Also writes v23.
+  vevnodd_w_vv(v32, v49, v53);  // Also writes v33.
+  vdmulh_w_rn_vx(v22, v22, output_multiplier[0]);
+  vdmulh_w_rn_vx(v23, v23, output_multiplier[4]);
+  vdmulh_w_rn_vx(v32, v32, output_multiplier[2]);
+  vdmulh_w_rn_vx(v33, v33, output_multiplier[6]);
+  vsha_w_r_vx(v22, v22, -output_shift[0]);
+  vsha_w_r_vx(v23, v23, -output_shift[4]);
+  vsha_w_r_vx(v32, v32, -output_shift[2]);
+  vsha_w_r_vx(v33, v33, -output_shift[6]);
+  vzip_w_vv(v22, v22, v23);  // Also writes v23.
+  vzip_w_vv(v32, v32, v33);  // Also writes v33.
+  vmvp_vv(v48, v22, v32);    // Also writes v49.
+  vmvp_vv(v52, v23, v33);    // Also writes v53.
+
+  vevnodd_w_vv(v22, v50, v54);  // Also writes v23.
+  vevnodd_w_vv(v32, v51, v55);  // Also writes v33.
+  vdmulh_w_rn_vx(v22, v22, output_multiplier[1]);
+  vdmulh_w_rn_vx(v23, v23, output_multiplier[5]);
+  vdmulh_w_rn_vx(v32, v32, output_multiplier[3]);
+  vdmulh_w_rn_vx(v33, v33, output_multiplier[7]);
+  vsha_w_r_vx(v22, v22, -output_shift[1]);
+  vsha_w_r_vx(v23, v23, -output_shift[5]);
+  vsha_w_r_vx(v32, v32, -output_shift[3]);
+  vsha_w_r_vx(v33, v33, -output_shift[7]);
+  vzip_w_vv(v22, v22, v23);  // Also writes v23.
+  vzip_w_vv(v32, v32, v33);  // Also writes v33.
+  vmvp_vv(v50, v22, v32);    // Also writes v51.
+  vmvp_vv(v54, v23, v33);    // Also writes v55.
+
+  vadd_w_vx_m(v48, v48, output_offset);
+  vadd_w_vx_m(v52, v52, output_offset);
+
+  vsraqs_b_vx(v48, v48, 0);
+  vsraqs_b_vx(v52, v52, 0);
+  vstq_b_s_xx(v48, out_ptr_col0, /*output_depth=*/24);
+  vstq_b_s_xx(v52, out_ptr_col4, /*output_depth=*/24);
+}
+
 }  // namespace
 
-#define FLT_0_0 v0
-#define FLT_0_1 v3
-#define FLT_0_2 v6
-#define FLT_0_3 v9
-#define FLT_0_4 v12
-
-#define FLT_1_0 v1
-#define FLT_1_1 v4
-#define FLT_1_2 v7
-#define FLT_1_3 v10
-#define FLT_1_4 v13
-
-#define FLT_2_0 v2
-#define FLT_2_1 v5
-#define FLT_2_2 v8
-#define FLT_2_3 v11
-#define FLT_2_4 v14
-
-#define FLT_3_0 v15
-#define FLT_3_1 v16
-#define FLT_3_2 v17
-#define FLT_3_3 v18
-#define FLT_3_4 v19
-
-#define FLT_HOLE v20
-#define FLT_4_0 v21
-#define FLT_4_1 v22
-#define FLT_4_2 v23
-#define FLT_4_3 v24
-#define FLT_4_4 v25
-
-#define INPUT_0_0 v26
-#define INPUT_0_1 v29
-#define INPUT_0_2 v32
-#define INPUT_0_3 v35
-#define INPUT_0_4 v38
-
-#define INPUT_1_0 v27
-#define INPUT_1_1 v30
-#define INPUT_1_2 v33
-#define INPUT_1_3 v36
-#define INPUT_1_4 v39
-
-#define INPUT_2_0 v28
-#define INPUT_2_1 v31
-#define INPUT_2_2 v34
-#define INPUT_2_3 v37
-#define INPUT_2_4 v40
-
-#define INPUT_3_0 v41
-#define INPUT_3_1 v42
-#define INPUT_3_2 v43
-#define INPUT_3_3 v44
-#define INPUT_3_4 v45
-
-#define INPUT_4_0 v46
-#define INPUT_4_1 v47
-#define INPUT_4_2 v48
-#define INPUT_4_3 v49
-#define INPUT_4_4 v50
-
-#define CALCULATE_IN_X(in_x_origin)                        \
-  {                                                        \
-    _Pragma("GCC unroll 5") for (int i = 0; i < 5; ++i) {  \
-      in_x[i] = in_x_origin + (dilation_width_factor * i); \
-    }                                                      \
-  }
-
-#define CALCULATE_IN_Y(in_y_origin)                         \
-  {                                                         \
-    _Pragma("GCC unroll 5") for (int i = 0; i < 5; ++i) {   \
-      in_y[i] = in_y_origin + (dilation_height_factor * i); \
-    }                                                       \
-  }
-
-#define PAD_ROW_0(input_offset)   \
-  {                               \
-    vdup_b_x(INPUT_0_0, -input_offset); \
-    vdup_b_x(INPUT_0_1, -input_offset); \
-    vdup_b_x(INPUT_0_2, -input_offset); \
-    vdup_b_x(INPUT_0_3, -input_offset); \
-    vdup_b_x(INPUT_0_4, -input_offset); \
-  }
-#define PAD_ROW_1(input_offset)   \
-  {                               \
-    vdup_b_x(INPUT_1_0, -input_offset); \
-    vdup_b_x(INPUT_1_1, -input_offset); \
-    vdup_b_x(INPUT_1_2, -input_offset); \
-    vdup_b_x(INPUT_1_3, -input_offset); \
-    vdup_b_x(INPUT_1_4, -input_offset); \
-  }
-#define PAD_ROW_2(input_offset)   \
-  {                            \
-    vdup_b_x(INPUT_2_0, -input_offset); \
-    vdup_b_x(INPUT_2_1, -input_offset); \
-    vdup_b_x(INPUT_2_2, -input_offset); \
-    vdup_b_x(INPUT_2_3, -input_offset); \
-    vdup_b_x(INPUT_2_4, -input_offset); \
-  }
-#define PAD_ROW_3(input_offset)   \
-  {                               \
-    vdup_b_x(INPUT_3_0, -input_offset); \
-    vdup_b_x(INPUT_3_1, -input_offset); \
-    vdup_b_x(INPUT_3_2, -input_offset); \
-    vdup_b_x(INPUT_3_3, -input_offset); \
-    vdup_b_x(INPUT_3_4, -input_offset); \
-  }
-#define PAD_ROW_4(input_offset)   \
-  {                               \
-    vdup_b_x(INPUT_4_0, -input_offset); \
-    vdup_b_x(INPUT_4_1, -input_offset); \
-    vdup_b_x(INPUT_4_2, -input_offset); \
-    vdup_b_x(INPUT_4_3, -input_offset); \
-    vdup_b_x(INPUT_4_4, -input_offset); \
-  }
-
-#define LOAD_ROW_0(p_input, input_width, in_y, in_x)         \
-  {                                                          \
-    const int8_t* p_row = p_input + (in_y[0] * input_width); \
-    vdup_b_x(INPUT_0_0, *(p_row + in_x[0]));                       \
-    vdup_b_x(INPUT_0_1, *(p_row + in_x[1]));                       \
-    vdup_b_x(INPUT_0_2, *(p_row + in_x[2]));                       \
-    vdup_b_x(INPUT_0_3, *(p_row + in_x[3]));                       \
-    vdup_b_x(INPUT_0_4, *(p_row + in_x[4]));                       \
-  }
-
-#define LOAD_ROW_1(p_input, input_width, in_y, in_x)         \
-  {                                                          \
-    const int8_t* p_row = p_input + (in_y[1] * input_width); \
-    vdup_b_x(INPUT_1_0, *(p_row + in_x[0]));                       \
-    vdup_b_x(INPUT_1_1, *(p_row + in_x[1]));                       \
-    vdup_b_x(INPUT_1_2, *(p_row + in_x[2]));                       \
-    vdup_b_x(INPUT_1_3, *(p_row + in_x[3]));                       \
-    vdup_b_x(INPUT_1_4, *(p_row + in_x[4]));                       \
-  }
-
-#define LOAD_ROW_2(p_input, input_width, in_y, in_x)         \
-  {                                                          \
-    const int8_t* p_row = p_input + (in_y[2] * input_width); \
-    vdup_b_x(INPUT_2_0, *(p_row + in_x[0]));                       \
-    vdup_b_x(INPUT_2_1, *(p_row + in_x[1]));                       \
-    vdup_b_x(INPUT_2_2, *(p_row + in_x[2]));                       \
-    vdup_b_x(INPUT_2_3, *(p_row + in_x[3]));                       \
-    vdup_b_x(INPUT_2_4, *(p_row + in_x[4]));                       \
-  }
-
-#define LOAD_ROW_3(p_input, input_width, in_y, in_x)         \
-  {                                                          \
-    const int8_t* p_row = p_input + (in_y[3] * input_width); \
-    vdup_b_x(INPUT_3_0, *(p_row + in_x[0]));                       \
-    vdup_b_x(INPUT_3_1, *(p_row + in_x[1]));                       \
-    vdup_b_x(INPUT_3_2, *(p_row + in_x[2]));                       \
-    vdup_b_x(INPUT_3_3, *(p_row + in_x[3]));                       \
-    vdup_b_x(INPUT_3_4, *(p_row + in_x[4]));                       \
-  }
-
-#define LOAD_ROW_4(p_input, input_width, in_y, in_x)         \
-  {                                                          \
-    const int8_t* p_row = p_input + (in_y[4] * input_width); \
-    vdup_b_x(INPUT_4_0, *(p_row + in_x[0]));                       \
-    vdup_b_x(INPUT_4_1, *(p_row + in_x[1]));                       \
-    vdup_b_x(INPUT_4_2, *(p_row + in_x[2]));                       \
-    vdup_b_x(INPUT_4_3, *(p_row + in_x[3]));                       \
-    vdup_b_x(INPUT_4_4, *(p_row + in_x[4]));                       \
-  }
-
-#define H_PAD_OR_LOAD_ROW_0(p_input, input_width, input_offset, in_y, in_x) \
-  if (in_x[0] >= 0 && in_x[4] < input_width) {                              \
-    LOAD_ROW_0(p_input, input_width, in_y, in_x);                           \
-  } else {                                                                  \
-    const int8_t* p_row = p_input + (in_y[0] * input_width);                \
-    if (in_x[0] < 0 || in_x[0] >= input_width) {                            \
-      vdup_b_x(INPUT_0_0, -input_offset);                                         \
-    } else {                                                                \
-      vdup_b_x(INPUT_0_0, *(p_row + in_x[0]));                                    \
-    }                                                                       \
-    if (in_x[1] < 0 || in_x[1] >= input_width) {                            \
-      vdup_b_x(INPUT_0_1, -input_offset);                                         \
-    } else {                                                                \
-      vdup_b_x(INPUT_0_1, *(p_row + in_x[1]));                                    \
-    }                                                                       \
-    if (in_x[2] < 0 || in_x[2] >= input_width) {                            \
-      vdup_b_x(INPUT_0_2, -input_offset);                                         \
-    } else {                                                                \
-      vdup_b_x(INPUT_0_2, *(p_row + in_x[2]));                                    \
-    }                                                                       \
-    if (in_x[3] < 0 || in_x[3] >= input_width) {                            \
-      vdup_b_x(INPUT_0_3, -input_offset);                                         \
-    } else {                                                                \
-      vdup_b_x(INPUT_0_3, *(p_row + in_x[3]));                                    \
-    }                                                                       \
-    if (in_x[4] < 0 || in_x[4] >= input_width) {                            \
-      vdup_b_x(INPUT_0_4, -input_offset);                                         \
-    } else {                                                                \
-      vdup_b_x(INPUT_0_4, *(p_row + in_x[4]));                                    \
-    }                                                                       \
-  }
-
-#define H_PAD_OR_LOAD_ROW_1(p_input, input_width, input_offset, in_y, in_x) \
-  if (in_x[0] >= 0 && in_x[4] < input_width) {                              \
-    LOAD_ROW_1(p_input, input_width, in_y, in_x);                           \
-  } else {                                                                  \
-    const int8_t* p_row = p_input + (in_y[1] * input_width);                \
-    if (in_x[0] < 0 || in_x[0] >= input_width) {                            \
-      vdup_b_x(INPUT_1_0, -input_offset);                                         \
-    } else {                                                                \
-      vdup_b_x(INPUT_1_0, *(p_row + in_x[0]));                                    \
-    }                                                                       \
-    if (in_x[1] < 0 || in_x[1] >= input_width) {                            \
-      vdup_b_x(INPUT_1_1, -input_offset);                                         \
-    } else {                                                                \
-      vdup_b_x(INPUT_1_1, *(p_row + in_x[1]));                                    \
-    }                                                                       \
-    if (in_x[2] < 0 || in_x[2] >= input_width) {                            \
-      vdup_b_x(INPUT_1_2, -input_offset);                                         \
-    } else {                                                                \
-      vdup_b_x(INPUT_1_2, *(p_row + in_x[2]));                                    \
-    }                                                                       \
-    if (in_x[3] < 0 || in_x[3] >= input_width) {                            \
-      vdup_b_x(INPUT_1_3, -input_offset);                                         \
-    } else {                                                                \
-      vdup_b_x(INPUT_1_3, *(p_row + in_x[3]));                                    \
-    }                                                                       \
-    if (in_x[4] < 0 || in_x[4] >= input_width) {                            \
-      vdup_b_x(INPUT_1_4, -input_offset);                                         \
-    } else {                                                                \
-      vdup_b_x(INPUT_1_4, *(p_row + in_x[4]));                                    \
-    }                                                                       \
-  }
-
-#define H_PAD_OR_LOAD_ROW_2(p_input, input_width, input_offset, in_y, in_x) \
-  if (in_x[0] >= 0 && in_x[4] < input_width) {                              \
-    LOAD_ROW_2(p_input, input_width, in_y, in_x);                           \
-  } else {                                                                  \
-    const int8_t* p_row = p_input + (in_y[2] * input_width);                \
-    if (in_x[0] < 0 || in_x[0] >= input_width) {                            \
-      vdup_b_x(INPUT_2_0, -input_offset);                                         \
-    } else {                                                                \
-      vdup_b_x(INPUT_2_0, *(p_row + in_x[0]));                                    \
-    }                                                                       \
-    if (in_x[1] < 0 || in_x[1] >= input_width) {                            \
-      vdup_b_x(INPUT_2_1, -input_offset);                                         \
-    } else {                                                                \
-      vdup_b_x(INPUT_2_1, *(p_row + in_x[1]));                                    \
-    }                                                                       \
-    if (in_x[2] < 0 || in_x[2] >= input_width) {                            \
-      vdup_b_x(INPUT_2_2, -input_offset);                                         \
-    } else {                                                                \
-      vdup_b_x(INPUT_2_2, *(p_row + in_x[2]));                                    \
-    }                                                                       \
-    if (in_x[3] < 0 || in_x[3] >= input_width) {                            \
-      vdup_b_x(INPUT_2_3, -input_offset);                                         \
-    } else {                                                                \
-      vdup_b_x(INPUT_2_3, *(p_row + in_x[3]));                                    \
-    }                                                                       \
-    if (in_x[4] < 0 || in_x[4] >= input_width) {                            \
-      vdup_b_x(INPUT_2_4, -input_offset);                                         \
-    } else {                                                                \
-      vdup_b_x(INPUT_2_4, *(p_row + in_x[4]));                                    \
-    }                                                                       \
-  }
-
-#define H_PAD_OR_LOAD_ROW_3(p_input, input_width, input_offset, in_y, in_x) \
-  if (in_x[0] >= 0 && in_x[4] < input_width) {                              \
-    LOAD_ROW_3(p_input, input_width, in_y, in_x);                           \
-  } else {                                                                  \
-    const int8_t* p_row = p_input + (in_y[3] * input_width);                \
-    if (in_x[0] < 0 || in_x[0] >= input_width) {                            \
-      vdup_b_x(INPUT_3_0, -input_offset);                                         \
-    } else {                                                                \
-      vdup_b_x(INPUT_3_0, *(p_row + in_x[0]));                                    \
-    }                                                                       \
-    if (in_x[1] < 0 || in_x[1] >= input_width) {                            \
-      vdup_b_x(INPUT_3_1, -input_offset);                                         \
-    } else {                                                                \
-      vdup_b_x(INPUT_3_1, *(p_row + in_x[1]));                                    \
-    }                                                                       \
-    if (in_x[2] < 0 || in_x[2] >= input_width) {                            \
-      vdup_b_x(INPUT_3_2, -input_offset);                                         \
-    } else {                                                                \
-      vdup_b_x(INPUT_3_2, *(p_row + in_x[2]));                                    \
-    }                                                                       \
-    if (in_x[3] < 0 || in_x[3] >= input_width) {                            \
-      vdup_b_x(INPUT_3_3, -input_offset);                                         \
-    } else {                                                                \
-      vdup_b_x(INPUT_3_3, *(p_row + in_x[3]));                                    \
-    }                                                                       \
-    if (in_x[4] < 0 || in_x[4] >= input_width) {                            \
-      vdup_b_x(INPUT_3_4, -input_offset);                                         \
-    } else {                                                                \
-      vdup_b_x(INPUT_3_4, *(p_row + in_x[4]));                                    \
-    }                                                                       \
-  }
-
-#define H_PAD_OR_LOAD_ROW_4(p_input, input_width, input_offset, in_y, in_x) \
-  if (in_x[0] >= 0 && in_x[4] < input_width) {                              \
-    LOAD_ROW_4(p_input, input_width, in_y, in_x);                           \
-  } else {                                                                  \
-    const int8_t* p_row = p_input + (in_y[4] * input_width);                \
-    if (in_x[0] < 0 || in_x[0] >= input_width) {                            \
-      vdup_b_x(INPUT_4_0, -input_offset);                                         \
-    } else {                                                                \
-      vdup_b_x(INPUT_4_0, *(p_row + in_x[0]));                                    \
-    }                                                                       \
-    if (in_x[1] < 0 || in_x[1] >= input_width) {                            \
-      vdup_b_x(INPUT_4_1, -input_offset);                                         \
-    } else {                                                                \
-      vdup_b_x(INPUT_4_1, *(p_row + in_x[1]));                                    \
-    }                                                                       \
-    if (in_x[2] < 0 || in_x[2] >= input_width) {                            \
-      vdup_b_x(INPUT_4_2, -input_offset);                                         \
-    } else {                                                                \
-      vdup_b_x(INPUT_4_2, *(p_row + in_x[2]));                                    \
-    }                                                                       \
-    if (in_x[3] < 0 || in_x[3] >= input_width) {                            \
-      vdup_b_x(INPUT_4_3, -input_offset);                                         \
-    } else {                                                                \
-      vdup_b_x(INPUT_4_3, *(p_row + in_x[3]));                                    \
-    }                                                                       \
-    if (in_x[4] < 0 || in_x[4] >= input_width) {                            \
-      vdup_b_x(INPUT_4_4, -input_offset);                                         \
-    } else {                                                                \
-      vdup_b_x(INPUT_4_4, *(p_row + in_x[4]));                                    \
-    }                                                                       \
-  }
-
-#define _H_PAD_OR_LOAD_ROW(row, p_input, input_width, input_offset, in_y, \
-                           in_x)                                          \
-  H_PAD_OR_LOAD_ROW_##row(p_input, input_width, input_offset, in_y, in_x);
-
-#define _PAD_OR_LOAD_ROW(row, p_input, input_height, input_width, in_y, in_x,  \
-                         input_offset)                                         \
-  {                                                                            \
-    if (in_y[row] < 0 || in_y[row] >= input_height) {                          \
-      PAD_ROW_##row(input_offset);                                             \
-    } else {                                                                   \
-      _H_PAD_OR_LOAD_ROW(row, p_input, input_width, input_offset, in_y, in_x); \
-    }                                                                          \
-  }
-
-#define PAD_OR_LOAD_ROW_0(p_input, input_height, input_width, in_y, in_x, \
-                          input_offset)                                   \
-  _PAD_OR_LOAD_ROW(0, p_input, input_height, input_width, in_y, in_x,     \
-                   input_offset);
-#define PAD_OR_LOAD_ROW_1(p_input, input_height, input_width, in_y, in_x, \
-                          input_offset)                                   \
-  _PAD_OR_LOAD_ROW(1, p_input, input_height, input_width, in_y, in_x,     \
-                   input_offset);
-#define PAD_OR_LOAD_ROW_2(p_input, input_height, input_width, in_y, in_x, \
-                          input_offset)                                   \
-  _PAD_OR_LOAD_ROW(2, p_input, input_height, input_width, in_y, in_x,     \
-                   input_offset);
-#define PAD_OR_LOAD_ROW_3(p_input, input_height, input_width, in_y, in_x, \
-                          input_offset)                                   \
-  _PAD_OR_LOAD_ROW(3, p_input, input_height, input_width, in_y, in_x,     \
-                   input_offset);
-#define PAD_OR_LOAD_ROW_4(p_input, input_height, input_width, in_y, in_x, \
-                          input_offset)                                   \
-  _PAD_OR_LOAD_ROW(4, p_input, input_height, input_width, in_y, in_x,     \
-                   input_offset);
-
-#define COMPUTE(cmds, swizzled_bias_data) \
-  {                                       \
-    vld_w_x_m(v60, swizzled_bias_data);   \
-    adwinit_v(v60, v60);                  \
-    adwconv_vxv(v60, INPUT_0_0, cmds, FLT_0_0);      \
-    adwconv_vxv(v60, INPUT_0_1, cmds, FLT_0_1);      \
-    adwconv_vxv(v60, INPUT_0_2, cmds, FLT_0_2);      \
-    adwconv_vxv(v60, INPUT_0_3, cmds, FLT_0_3);      \
-    adwconv_vxv(v60, INPUT_0_4, cmds, FLT_0_4);     \
-    adwconv_vxv(v60, INPUT_3_0, cmds, FLT_3_0);     \
-    adwconv_vxv(v60, INPUT_3_3, cmds, FLT_3_3);     \
-    adwconv_vxv(v60, INPUT_3_4, cmds, FLT_HOLE);     \
-    vdwconv_vxv(v60, INPUT_4_2, cmds, FLT_4_2);     \
-  }
-
-#define OUTPUT(output_activation_min, output_activation_max, output_offset, \
-               local_output_data, n_channels)                               \
-  {                                                                         \
-    INT32_TO_INT8_OUTPUT_PIPELINE_INPLACE(                                  \
-        v60, v52, v56, output_activation_min, output_activation_max,        \
-        output_offset);                                                     \
-    vsraqs_b_vx(v60, v60, 0);                                               \
-    vst_b_l_xx(v60, local_output_data, n_channels);                         \
-  }
-
// Estimated count of arithmetic ops: 58.297 M ops, equivalently 29.148 M MACs
 void ConvPerChannelD1OD24_5x5(
     const tflite::ConvParams& params, const int32_t* output_multiplier,
@@ -463,27 +178,33 @@
     int8_t* output_data) {
   // Get parameters.
   const int32_t input_offset = params.input_offset;  // r = s(q - Z)
-  const int stride_width = params.stride_width;
+  constexpr int stride_width = 2;
+  TFLITE_DCHECK_EQ(params.stride_width, stride_width);
   const int stride_height = params.stride_height;
-  const int dilation_width_factor = params.dilation_width_factor;
+  constexpr int dilation_width_factor = 1;
+  TFLITE_DCHECK_EQ(params.dilation_width_factor, dilation_width_factor);
   const int dilation_height_factor = params.dilation_height_factor;
   const int pad_width = params.padding_values.width;
   const int pad_height = params.padding_values.height;
   const int32_t output_offset = params.output_offset;
 
   // Set min and max value of the output.
-  const int32_t output_activation_min = params.quantized_activation_min;
-  const int32_t output_activation_max = params.quantized_activation_max;
+  constexpr int32_t output_activation_min = -128;
+  TFLITE_DCHECK_EQ(params.quantized_activation_min, output_activation_min);
+  constexpr int32_t output_activation_max = 127;
+  TFLITE_DCHECK_EQ(params.quantized_activation_max, output_activation_max);
 
   // Consistency check.
   TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
   TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
   TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
   TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
-  // const int batches = tflite::MatchingDim(input_shape, 0, output_shape, 0);
-  const int input_depth = input_shape.Dims(3);
-  const int output_depth =
-      tflite::MatchingDim(filter_shape, 0, output_shape, 3);
+  const int batches = tflite::MatchingDim(input_shape, 0, output_shape, 0);
+  constexpr int input_depth = 1;
+  TFLITE_DCHECK_EQ(input_shape.Dims(3), input_depth);
+  constexpr const int output_depth = 24;
+  TFLITE_DCHECK_EQ(tflite::MatchingDim(filter_shape, 0, output_shape, 3),
+                   output_depth);
   if (bias_data) {
     TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
   }
@@ -491,472 +212,1242 @@
   // Check dimensions of the tensors.
   const int input_height = input_shape.Dims(1);
   const int input_width = input_shape.Dims(2);
-  const int filter_height = filter_shape.Dims(1);
-  const int filter_width = filter_shape.Dims(2);
-  const int filter_input_depth = filter_shape.Dims(3);
-  const int groups = input_depth / filter_input_depth;
-  TFLITE_DCHECK_NE(groups, 0);
-  TFLITE_DCHECK_EQ(input_depth % filter_input_depth, 0);
-  const int filters_per_group = output_depth / groups;
-  TFLITE_DCHECK_NE(filters_per_group, 0);
+  constexpr int filter_height = 5;
+  TFLITE_DCHECK_EQ(filter_shape.Dims(1), filter_height);
+  constexpr int filter_width = 5;
+  TFLITE_DCHECK_EQ(filter_shape.Dims(2), filter_width);
+  // Input depth is 1 so filter input depth must be 1.
+  TFLITE_DCHECK_EQ(filter_shape.Dims(3), 1);
   const int output_height = output_shape.Dims(1);
   const int output_width = output_shape.Dims(2);
 
-  // Scratch pads to juggle data
-  const size_t swizzled_filter_data_size = 24 * filter_height * filter_width;
-  std::unique_ptr<int8_t> swizzled_filter_data(reinterpret_cast<int8_t*>(
-      ::aligned_alloc(32, swizzled_filter_data_size)));
-  int32_t swizzled_bias_data[32];
-  int32_t swizzled_output_multiplier[32];
-  int32_t swizzled_output_shift[32];
-  // Transpose filter for easy loading
-  for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
-    for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
-      for (int i = 0; i < 24; i++) {
-        int filter_location =
-            (filter_y * filter_width * 24) + (filter_x * 24) + i;
-        swizzled_filter_data.get()[filter_location] =
-            filter_data[tflite::Offset(filter_shape, i, filter_y, filter_x, 0)];
-      }
-    }
-  }
+  constexpr int patches_per_iteration = 8;
+  constexpr int load_width =
+      1 + (patches_per_iteration /*=8*/ - 1) * stride_width /*=2*/ +
+      (filter_width /*=5*/ - 1) * dilation_width_factor /*=1*/;
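+  // i.e. load_width = 1 + 7*2 + 4*1 = 19: one iteration of 8 output columns
+  // reads 19 consecutive input pixels (hence the input_width >= 19 check at
+  // the dispatch site).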
 
-  const int8_t* p_flt_0 = swizzled_filter_data.get() + (0 * filter_width * 24);
-  const int8_t* p_flt_1 = swizzled_filter_data.get() + (1 * filter_width * 24);
-  const int8_t* p_flt_2 = swizzled_filter_data.get() + (2 * filter_width * 24);
-  const int8_t* p_flt_3 = swizzled_filter_data.get() + (3 * filter_width * 24);
-  const int8_t* p_flt_4 = swizzled_filter_data.get() + (4 * filter_width * 24);
-  vld_b_lp_xx(FLT_0_0, p_flt_0, 24);
-  vld_b_lp_xx(FLT_0_1, p_flt_0, 24);
-  vld_b_lp_xx(FLT_0_2, p_flt_0, 24);
-  vld_b_lp_xx(FLT_0_3, p_flt_0, 24);
-  vld_b_lp_xx(FLT_0_4, p_flt_0, 24);
+  // This optimized path requires that every output pixel read at least
+  // one input pixel.
+  TFLITE_DCHECK_LT(pad_height, 5);
+  TFLITE_DCHECK_LT(pad_width, 5);
+  TFLITE_DCHECK_LT((output_height - 1) * stride_height - pad_height,
+                   input_height);
+  TFLITE_DCHECK_LT((output_width - 1) * stride_width /*=2*/ - pad_width,
+                   input_width);
+  // This optimized path is complex enough already; we don't handle a load
+  // that needs padding on both sides.
+  TFLITE_DCHECK_GE(input_width * input_depth /*=1*/, load_width);
 
-  vld_b_lp_xx(FLT_1_0, p_flt_1, 24);
-  vld_b_lp_xx(FLT_1_1, p_flt_1, 24);
-  vld_b_lp_xx(FLT_1_2, p_flt_1, 24);
-  vld_b_lp_xx(FLT_1_3, p_flt_1, 24);
-  vld_b_lp_xx(FLT_1_4, p_flt_1, 24);
+  // Hopefully this lambda helps the compiler get as much done statically
+  // as possible.
+  auto make_aconv_cmd = [](int in_off) {
+    union {
+      vconv_u8_t cmd;
+      uint32_t raw;
+    } cmd;
+    vconv_u8_t conv_cmd = {
+        .mode = 0,
+        .start = 0,
+        .stop = 6,  // We're doing [8,25]x[25,8], which takes 7 ticks.
+        .sbias1 = in_off,
+        .sdata1 = true,
+        .sbias2 = 0,
+        .sdata2 = true,
+    };
+    cmd.cmd = conv_cmd;
+    return cmd.raw;
+  };
+  auto batch_start_offset = [](int b, int height, int width, int depth) {
+    return b * height * width * depth;
+  };
+  auto row_start_offset = [](int y, int width, int depth) {
+    return y * width * depth;
+  };
+  const uint32_t aconv_cmd = make_aconv_cmd(input_offset);
 
-  vld_b_lp_xx(FLT_2_0, p_flt_2, 24);
-  vld_b_lp_xx(FLT_2_1, p_flt_2, 24);
-  vld_b_lp_xx(FLT_2_2, p_flt_2, 24);
-  vld_b_lp_xx(FLT_2_3, p_flt_2, 24);
-  vld_b_lp_xx(FLT_2_4, p_flt_2, 24);
-
-  vld_b_lp_xx(FLT_3_0, p_flt_3, 24);
-  vld_b_lp_xx(FLT_3_1, p_flt_3, 24);
-  vld_b_lp_xx(FLT_3_2, p_flt_3, 24);
-  vld_b_lp_xx(FLT_3_3, p_flt_3, 24);
-  vld_b_lp_xx(FLT_3_4, p_flt_3, 24);
-
-  vdup_b_x(FLT_HOLE, 0);
-  vld_b_lp_xx(FLT_4_0, p_flt_4, 24);
-  vld_b_lp_xx(FLT_4_1, p_flt_4, 24);
-  vld_b_lp_xx(FLT_4_2, p_flt_4, 24);
-  vld_b_lp_xx(FLT_4_3, p_flt_4, 24);
-  vld_b_lp_xx(FLT_4_4, p_flt_4, 24);
-
-  union {
-    vdwconv_u8_t dwconv;
-    uint32_t raw;
-  } cmds;
-  cmds.raw = 0;
-  cmds.dwconv.sdata1 = true;
-  cmds.dwconv.sbias1 = input_offset;
-  cmds.dwconv.sdata2 = true;
-  cmds.dwconv.sbias2 = 0;
-  cmds.dwconv.mode = 0;
-  cmds.dwconv.sparsity = 0;
-  cmds.dwconv.regbase = 0;
-  int out_channel = 0;
-  int n_channels = 24;
-
-  memset(swizzled_bias_data, 0, 32 * sizeof(uint32_t));
-  JumptableSwizzle(bias_data + out_channel, swizzled_bias_data, n_channels);
-  memset(swizzled_output_multiplier, 0, 32 * sizeof(uint32_t));
-  JumptableSwizzle(output_multiplier + out_channel, swizzled_output_multiplier,
-                   n_channels);
-  JumptableSwizzle(output_shift + out_channel, swizzled_output_shift,
-                   n_channels);
-  vld_w_x_m(v52, swizzled_output_multiplier);
-  vld_w_x_m(v56, swizzled_output_shift);
-  vrsub_w_vx_m(v56, v56, 0);
-
-  int8_t* local_output_data = output_data + out_channel;
-  int in_y[5];
-  int in_x[7];
   int out_y = 0;
-  const int8_t* p_input = input_data;
-  // Handle top row padding
-  for (; out_y < pad_height; ++out_y) {
-    int out_x = 0;
-    const int in_y_origin = (out_y * stride_height) - pad_height;
-    CALCULATE_IN_Y(in_y_origin);
-    // Left padding required
-    for (; out_x < pad_width; ++out_x) {
-      const int in_x_origin = (out_x * stride_width) - pad_width;
-      CALCULATE_IN_X(in_x_origin);
-      PAD_OR_LOAD_ROW_0(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_1(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_2(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_3(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_4(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      COMPUTE(cmds, swizzled_bias_data);
-      OUTPUT(output_activation_min, output_activation_max, output_offset,
-             local_output_data, n_channels);
-      local_output_data += output_depth;
+  int in_y_min = -pad_height;
+  int in_y_max = in_y_min + (filter_height - 1) * dilation_height_factor;
+
+  // Reg Map:
+  // v0-v7  : input patches[0..7]
+  // v8-v15 : weight for OD[0..7]
+  // v16-v21: bias for OD[0..23]; each reg is an int32x4 stored twice
+  // v22-v23: temp
+  // v24-v31: weight for OD[8..15]
+  // v32-v34: temp
+  // v35-v39: unused
+  // v40-v47: weight for OD[16..23]
+  // v48-v55: temp
+  // v56-v63: unused
+
+  // Temp reg usage:
+  // - input loading
+  //   - v22-v23: additional loading buffer.
+  //   - v32-v34: additional loading buffer.
+  //   - v48-v52: input for use in next step.
+  //   - v53: scratch pad.
+  // - input handling
+  //   - v22-v23: scratch pad.
+  //   - v32-v33: scratch pad.
+  //   - v48-v52: ready-to-use input, with horizontal padding applied where
+  //     needed.
+  // - aconv
+  //   - v48-v55: prepare and set acc.
+  // - output
+  //   - v22-v23: scratch pad.
+  //   - v32-v33: scratch pad.
+  //   - v48-v55: get acc and run postproc in place.
+
+  if (bias_data) {
+    int32_t od_param_buffer[48] __attribute__((aligned(32)));
+    // Preloads bias.
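+    // The (0,2,1,3) interleave, stored twice per 8 words, matches the
+    // accumulator lane order that the vevnodd/vzip sequence in
+    // ConvPerChannelD1OD24_5x5_postproc untangles.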
+    for (int i = 0; i < 6; i++) {
+      od_param_buffer[(8 * i) + 0] = bias_data[(4 * i) + 0];
+      od_param_buffer[(8 * i) + 1] = bias_data[(4 * i) + 2];
+      od_param_buffer[(8 * i) + 2] = bias_data[(4 * i) + 1];
+      od_param_buffer[(8 * i) + 3] = bias_data[(4 * i) + 3];
+      od_param_buffer[(8 * i) + 4] = bias_data[(4 * i) + 0];
+      od_param_buffer[(8 * i) + 5] = bias_data[(4 * i) + 2];
+      od_param_buffer[(8 * i) + 6] = bias_data[(4 * i) + 1];
+      od_param_buffer[(8 * i) + 7] = bias_data[(4 * i) + 3];
     }
-    // No side padding
-    for (; out_x < (output_width - pad_width); ++out_x) {
-      const int in_x_origin = (out_x * stride_width) - pad_width;
-      CALCULATE_IN_X(in_x_origin);
-      PAD_OR_LOAD_ROW_0(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_1(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_2(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_3(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_4(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      COMPUTE(cmds, swizzled_bias_data);
-      OUTPUT(output_activation_min, output_activation_max, output_offset,
-             local_output_data, n_channels);
-      local_output_data += output_depth;
-    }
-    // Right padding required
-    for (; out_x < output_width; ++out_x) {
-      const int in_x_origin = (out_x * stride_width) - pad_width;
-      CALCULATE_IN_X(in_x_origin);
-      PAD_OR_LOAD_ROW_0(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_1(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_2(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_3(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_4(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      COMPUTE(cmds, swizzled_bias_data);
-      OUTPUT(output_activation_min, output_activation_max, output_offset,
-             local_output_data, n_channels);
-      local_output_data += output_depth;
-    }
+    vld_b_x_m(v16, &od_param_buffer[0]);
+    vld_b_x(v20, &od_param_buffer[32]);
+    vld_b_x(v21, &od_param_buffer[40]);
+  } else {
+    vdup_b_x_m(v16, 0);
+    vdup_b_x(v20, 0);
+    vdup_b_x(v21, 0);
   }
 
-  // No height padding
-  for (; out_y < (output_height - pad_height); ++out_y) {
-    const int in_y_origin = (out_y * stride_height) - pad_height;
-    CALCULATE_IN_Y(in_y_origin);
-    // Left padding
-    int out_x = 0;
-    for (; out_x < pad_width; ++out_x) {
-      const int in_x_origin = (out_x * stride_width) - pad_width;
-      CALCULATE_IN_X(in_x_origin);
-      PAD_OR_LOAD_ROW_0(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_1(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_2(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_3(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_4(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      COMPUTE(cmds, swizzled_bias_data);
-      OUTPUT(output_activation_min, output_activation_max, output_offset,
-             local_output_data, n_channels);
-      local_output_data += output_depth;
-    }
-    for (; out_x + 2 <= (output_width - pad_width); out_x += 2) {
-      const int in_x_origin = (out_x * stride_width) - pad_width;
-
-      #pragma GCC unroll 7
-      for (int i = 0; i < 7; ++i) {
-        in_x[i] = in_x_origin + (dilation_width_factor * i);
+  {
+    // Prepares weight data for preloading. The arrangement is irregular.
+    // Logically, each OD takes the following representation:
+    // A0 B0 C0 D0 A1 B1 C1 D1 ... A4 B4 C4 D4 E0 E1 E2 E3 E4 0 (x7)
+    // Every 8 ODs are then zipped together into 8 regs (A0*8, B0*8, ...).
+    int8_t filter_regs[output_depth /*=24*/ * 32] __attribute__((aligned(32)));
+    ::memset(filter_regs, 0, output_depth /*=24*/ * 32);
+    for (int ch = 0; ch < output_depth /*=24*/; ++ch) {
+      const int regbank = ch / 8;
+      const int ch_tail = ch & 0x7;
+      for (int y = 0; y < 4; ++y) {  // Row 4 will be handled separately.
+        for (int x = 0; x < 5; ++x) {
+          filter_regs[regbank * 256 + x * 32 + ch_tail * 4 + y] =
+              filter_data[tflite::Offset(filter_shape, ch, y, x, 0)];
+        }
       }
-      const int8_t* p_rows[5];
-      #pragma GCC unroll 5
-      for (int i = 0; i < 5; ++i) {
-        p_rows[i] = p_input + (in_y[i] * input_width);
+      // Reg 5 in each bank is 8ch x E0-E3.
+      for (int x = 0; x < 4; ++x) {
+        filter_regs[regbank * 256 + 5 * 32 + ch_tail * 4 + x] =
+            filter_data[tflite::Offset(filter_shape, ch, 4, x, 0)];
       }
-
-      vdup_b_x(INPUT_0_0, *(p_rows[0] + in_x[0]));
-      vdup_b_x(INPUT_0_1, *(p_rows[0] + in_x[1]));
-      vdup_b_x(INPUT_0_2, *(p_rows[0] + in_x[2]));
-      vdup_b_x(INPUT_0_3, *(p_rows[0] + in_x[3]));
-      vdup_b_x(INPUT_0_4, *(p_rows[0] + in_x[4]));
-
-      vdup_b_x(INPUT_1_0, *(p_rows[1] + in_x[0]));
-      vdup_b_x(INPUT_1_1, *(p_rows[1] + in_x[1]));
-      vdup_b_x(INPUT_1_2, *(p_rows[1] + in_x[2]));
-      vdup_b_x(INPUT_1_3, *(p_rows[1] + in_x[3]));
-      vdup_b_x(INPUT_1_4, *(p_rows[1] + in_x[4]));
-
-      vdup_b_x(INPUT_2_0, *(p_rows[2] + in_x[0]));
-      vdup_b_x(INPUT_2_1, *(p_rows[2] + in_x[1]));
-      vdup_b_x(INPUT_2_2, *(p_rows[2] + in_x[2]));
-      vdup_b_x(INPUT_2_3, *(p_rows[2] + in_x[3]));
-      vdup_b_x(INPUT_2_4, *(p_rows[2] + in_x[4]));
-
-      vdup_b_x(INPUT_3_0, *(p_rows[3] + in_x[0]));
-      vdup_b_x(INPUT_3_1, *(p_rows[3] + in_x[1]));
-      vdup_b_x(INPUT_3_2, *(p_rows[3] + in_x[2]));
-      vdup_b_x(INPUT_3_3, *(p_rows[3] + in_x[3]));
-      vdup_b_x(INPUT_3_4, *(p_rows[3] + in_x[4]));
-
-      vdup_b_x(INPUT_4_0, *(p_rows[4] + in_x[0]));
-      vdup_b_x(INPUT_4_1, *(p_rows[4] + in_x[1]));
-      vdup_b_x(INPUT_4_2, *(p_rows[4] + in_x[2]));
-      vdup_b_x(INPUT_4_3, *(p_rows[4] + in_x[3]));
-      vdup_b_x(INPUT_4_4, *(p_rows[4] + in_x[4]));
-
-      vld_w_x_m(v60, swizzled_bias_data);
-      adwinit_v(v60, v60);
-      adwconv_vxv(v60, INPUT_0_0, cmds, FLT_0_0);
-      adwconv_vxv(v60, INPUT_0_1, cmds, FLT_0_1);
-      adwconv_vxv(v60, INPUT_0_2, cmds, FLT_0_2);
-      adwconv_vxv(v60, INPUT_0_3, cmds, FLT_0_3);
-      adwconv_vxv(v60, INPUT_0_4, cmds, FLT_0_4);
-      adwconv_vxv(v60, INPUT_3_0, cmds, FLT_3_0);
-      adwconv_vxv(v60, INPUT_3_3, cmds, FLT_3_3);
-      adwconv_vxv(v60, INPUT_3_4, cmds, FLT_HOLE);
-      vdwconv_vxv(v60, INPUT_4_2, cmds, FLT_4_2);
-      vmv_v(INPUT_0_0, v60);
-      vmv_v(INPUT_1_0, v61);
-      vmv_v(INPUT_2_0, v62);
-      vmv_v(INPUT_0_1, v63);
-
-      vdup_b_x(INPUT_3_0, *(p_rows[3] + in_x[5]));
-      vdup_b_x(INPUT_3_1, *(p_rows[3] + in_x[6]));
-
-      vmv_v(INPUT_4_0, INPUT_4_2);
-      vmv_v(INPUT_4_1, INPUT_4_3);
-      vmv_v(INPUT_4_2, INPUT_4_4);
-      vdup_b_x(INPUT_4_3, *(p_rows[4] + in_x[5]));
-      vdup_b_x(INPUT_4_4, *(p_rows[4] + in_x[6]));
-
-      vld_w_x_m(v60, swizzled_bias_data);
-      adwinit_v(v60, v60);
-      adwconv_vxv(v60, INPUT_0_2, cmds, FLT_0_0);
-      adwconv_vxv(v60, INPUT_0_3, cmds, FLT_0_1);
-
-      vmv_v(INPUT_0_2, INPUT_0_0);
-      vmv_v(INPUT_1_2, INPUT_1_0);
-      vmv_v(INPUT_2_2, INPUT_2_0);
-      vmv_v(INPUT_0_3, INPUT_0_1);
-
-      vdup_b_x(INPUT_0_0, *(p_rows[0] + in_x[5]));
-      vdup_b_x(INPUT_0_1, *(p_rows[0] + in_x[6]));
-      vdup_b_x(INPUT_1_0, *(p_rows[1] + in_x[5]));
-      vdup_b_x(INPUT_1_1, *(p_rows[1] + in_x[6]));
-      vdup_b_x(INPUT_2_0, *(p_rows[2] + in_x[5]));
-      vdup_b_x(INPUT_2_1, *(p_rows[2] + in_x[6]));
-
-      adwconv_vxv(v60, INPUT_0_4, cmds, FLT_0_2);
-      adwconv_vxv(v60, INPUT_0_0, cmds, FLT_0_3);
-      adwconv_vxv(v60, INPUT_0_1, cmds, FLT_0_4);
-      adwconv_vxv(v60, INPUT_3_2, cmds, FLT_3_0);
-      adwconv_vxv(v60, INPUT_3_0, cmds, FLT_3_3);
-      adwconv_vxv(v60, INPUT_3_4, cmds, FLT_HOLE);
-      vdwconv_vxv(v60, INPUT_4_2, cmds, FLT_4_2);
-      INT32_TO_INT8_OUTPUT_PIPELINE_INPLACE2(
-        v60, INPUT_0_2, v52, v56, output_activation_min,
-        output_activation_max, output_offset
-      );
-      vsraqs_b_vx(INPUT_0_2, INPUT_0_2, 0);
-      vst_b_l_xx(INPUT_0_2, local_output_data, n_channels);
-      local_output_data += output_depth;
-      vsraqs_b_vx(v60, v60, 0);
-      vst_b_l_xx(v60, local_output_data, n_channels);
-      local_output_data += output_depth;
+      // Reg 6 in each bank is 8ch x E4.
+      filter_regs[regbank * 256 + 6 * 32 + ch_tail * 4] =
+          filter_data[tflite::Offset(filter_shape, ch, 4, 4, 0)];
+      // Reg 7 is unused and should be zero (SBZ).
     }
-    for (; out_x < (output_width - pad_width); ++out_x) {
-      const int in_x_origin = (out_x * stride_width) - pad_width;
-
-      CALCULATE_IN_X(in_x_origin);
-      LOAD_ROW_0(p_input, input_width, in_y, in_x);
-      LOAD_ROW_1(p_input, input_width, in_y, in_x);
-      LOAD_ROW_2(p_input, input_width, in_y, in_x);
-      LOAD_ROW_3(p_input, input_width, in_y, in_x);
-      LOAD_ROW_4(p_input, input_width, in_y, in_x);
-      COMPUTE(cmds, swizzled_bias_data);
-      OUTPUT(output_activation_min, output_activation_max, output_offset,
-             local_output_data, n_channels);
-      local_output_data += output_depth;
-    }
-    // Right padding
-    for (; out_x < output_width; ++out_x) {
-      const int in_x_origin = (out_x * stride_width) - pad_width;
-      CALCULATE_IN_X(in_x_origin);
-      PAD_OR_LOAD_ROW_0(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_1(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_2(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_3(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_4(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      COMPUTE(cmds, swizzled_bias_data);
-      OUTPUT(output_activation_min, output_activation_max, output_offset,
-             local_output_data, n_channels);
-      local_output_data += output_depth;
-    }
+    vld_b_x_m(v8, &filter_regs[0 * 32]);
+    vld_b_x_m(v12, &filter_regs[4 * 32]);
+    vld_b_x_m(v24, &filter_regs[8 * 32]);
+    vld_b_x_m(v28, &filter_regs[12 * 32]);
+    vld_b_x_m(v40, &filter_regs[16 * 32]);
+    vld_b_x_m(v44, &filter_regs[20 * 32]);
   }
 
-  // Handle bottom row padding
-  for (; out_y < output_height; ++out_y) {
-    const int in_y_origin = (out_y * stride_height) - pad_height;
-    CALCULATE_IN_Y(in_y_origin);
-    int out_x = 0;
-    for (; out_x < pad_width; ++out_x) {
-      const int in_x_origin = (out_x * stride_width) - pad_width;
-      CALCULATE_IN_X(in_x_origin);
-      PAD_OR_LOAD_ROW_0(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_1(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_2(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_3(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_4(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      COMPUTE(cmds, swizzled_bias_data);
-      OUTPUT(output_activation_min, output_activation_max, output_offset,
-             local_output_data, n_channels);
-      local_output_data += output_depth;
+  for (int batch = 0; batch < batches; ++batch) {
+    const int8_t* const p_input = &input_data[batch_start_offset(
+        batch, input_height, input_width, input_depth /*=1*/)];
+    int8_t* const p_output = &output_data[batch_start_offset(
+        batch, output_height, output_width, output_depth /*=24*/)];
+
+    // Top loop.
+    for (; in_y_min < 0;
+         ++out_y, in_y_min += stride_height, in_y_max += stride_height) {
+      int out_x_min = 0;
+      int out_x_max = patches_per_iteration /*=8*/ - 1;
+      int in_x_min = -pad_width;
+      int in_x_max = in_x_min + load_width - 1;
+      // Top padding is active so we're not going to read the first row.
+      const int8_t* in_ptr_row1 =
+          &p_input[row_start_offset(in_y_min + 1 * dilation_height_factor,
+                                    input_width, input_depth /*=1*/)];
+      const int8_t* in_ptr_row2 =
+          &p_input[row_start_offset(in_y_min + 2 * dilation_height_factor,
+                                    input_width, input_depth /*=1*/)];
+      const int8_t* in_ptr_row3 =
+          &p_input[row_start_offset(in_y_min + 3 * dilation_height_factor,
+                                    input_width, input_depth /*=1*/)];
+      const int8_t* in_ptr_row4 = &p_input[row_start_offset(
+          in_y_max, input_width, input_depth /*=1*/)];
+      int8_t* out_ptr_col0 = &p_output[row_start_offset(out_y, output_width,
+                                                        output_depth /*=24*/)];
+      int8_t* out_ptr_col4 = out_ptr_col0 + 4 * output_depth /*=24*/;
+
+      // Top left corner.
+      // This can happen at most once.
+      if (pad_width > 0) {
+        // Same as in_x_max + 1.
+        const int true_load_width = load_width - pad_width;
+
+        // Loads all needed rows.
+        switch (-in_y_min) {
+          case 1:
+            vld_b_l_xx(v23, in_ptr_row1, true_load_width);
+            [[fallthrough]];
+          case 2:
+            vld_b_l_xx(v32, in_ptr_row2, true_load_width);
+            [[fallthrough]];
+          case 3:
+            vld_b_l_xx(v33, in_ptr_row3, true_load_width);
+            [[fallthrough]];
+          case 4:
+            vld_b_l_xx(v34, in_ptr_row4, true_load_width);
+            break;
+          default:
+            __builtin_unreachable();
+        }
+        // Fills padding rows after the loads, to improve parallelism.
+        // v53 is an extra padding-only row for use in horizontal padding.
+        vdup_b_x(v53, -input_offset);
+        switch (in_y_min) {
+          case -4:
+            vmv_v(v33, v53);
+            [[fallthrough]];
+          case -3:
+            vmv_v(v32, v53);
+            [[fallthrough]];
+          case -2:
+            vmv_v(v23, v53);
+            [[fallthrough]];
+          case -1:
+            // The first row is vertical padding; no need to pad it
+            // horizontally.
+            vmv_v(v48, v53);
+            break;
+          default:
+            __builtin_unreachable();
+        }
+
+        // Applies left padding as needed.
+        // Can't pass pad_width directly because vslide* takes an immediate.
+        // Can't use the vx encoding because a scalar is only accepted on the
+        // RHS. v53 is -input_offset broadcast to all lanes.
+        switch (pad_width) {
+          case 1:
+            vslidep_b_1_vv(v49, v53, v23);
+            vslidep_b_1_vv(v50, v53, v32);
+            vslidep_b_1_vv(v51, v53, v33);
+            vslidep_b_1_vv(v52, v53, v34);
+            break;
+          case 2:
+            vslidep_b_2_vv(v49, v53, v23);
+            vslidep_b_2_vv(v50, v53, v32);
+            vslidep_b_2_vv(v51, v53, v33);
+            vslidep_b_2_vv(v52, v53, v34);
+            break;
+          case 3:
+            vslidep_b_3_vv(v49, v53, v23);
+            vslidep_b_3_vv(v50, v53, v32);
+            vslidep_b_3_vv(v51, v53, v33);
+            vslidep_b_3_vv(v52, v53, v34);
+            break;
+          case 4:
+            vslidep_b_4_vv(v49, v53, v23);
+            vslidep_b_4_vv(v50, v53, v32);
+            vslidep_b_4_vv(v51, v53, v33);
+            vslidep_b_4_vv(v52, v53, v34);
+            break;
+          default:
+            __builtin_unreachable();
+        }
+
+        // Rearranges input data into place.
+        ConvPerChannelD1OD24_5x5_inputshuffle();
+
+        // Computes OD[0..7]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v16, v17);  // Also writes v49.
+        vmvp_vv(v50, v16, v17);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v8);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(output_multiplier, output_shift,
+                                          output_offset, output_activation_min,
+                                          output_activation_max, out_ptr_col0,
+                                          out_ptr_col4);
+
+        // Computes OD[8..15]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v18, v19);  // Also writes v49.
+        vmvp_vv(v50, v18, v19);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v24);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(
+            output_multiplier + 8, output_shift + 8, output_offset,
+            output_activation_min, output_activation_max, out_ptr_col0 + 8,
+            out_ptr_col4 + 8);
+
+        // Computes OD[16..23]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v20, v21);  // Also writes v49.
+        vmvp_vv(v50, v20, v21);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v40);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(
+            output_multiplier + 16, output_shift + 16, output_offset,
+            output_activation_min, output_activation_max, out_ptr_col0 + 16,
+            out_ptr_col4 + 16);
+
+        // Proceed to next block.
+        out_x_min += patches_per_iteration /*=8*/;
+        out_x_max += patches_per_iteration /*=8*/;
+        in_x_min += patches_per_iteration /*=8*/ * stride_width /*=2*/;
+        in_x_max += patches_per_iteration /*=8*/ * stride_width /*=2*/;
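+        // The row pointers advance by pad_width less than in_x_min does,
+        // since this block's loads started at x = 0 rather than x = -pad_width.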
+        in_ptr_row1 +=
+            (patches_per_iteration /*=8*/ * stride_width /*=2*/ - pad_width) *
+            input_depth /*=1*/;
+        in_ptr_row2 +=
+            (patches_per_iteration /*=8*/ * stride_width /*=2*/ - pad_width) *
+            input_depth /*=1*/;
+        in_ptr_row3 +=
+            (patches_per_iteration /*=8*/ * stride_width /*=2*/ - pad_width) *
+            input_depth /*=1*/;
+        in_ptr_row4 +=
+            (patches_per_iteration /*=8*/ * stride_width /*=2*/ - pad_width) *
+            input_depth /*=1*/;
+        out_ptr_col0 += patches_per_iteration /*=8*/ * output_depth /*=24*/;
+        out_ptr_col4 += patches_per_iteration /*=8*/ * output_depth /*=24*/;
+      }
+
+      // Top edge.
+      while (out_x_max < output_width && in_x_max < input_width) {
+        // Loads all rows that we're not padding.
+        switch (-in_y_min) {
+          case 1:
+            vld_b_l_xx(v49, in_ptr_row1, load_width);
+            [[fallthrough]];
+          case 2:
+            vld_b_l_xx(v50, in_ptr_row2, load_width);
+            [[fallthrough]];
+          case 3:
+            vld_b_l_xx(v51, in_ptr_row3, load_width);
+            [[fallthrough]];
+          case 4:
+            vld_b_l_xx(v52, in_ptr_row4, load_width);
+            break;
+          default:
+            __builtin_unreachable();
+        }
+        // Fills padding rows.
+        switch (in_y_min) {
+          case -4:
+            vdup_b_x(v51, -input_offset);
+            [[fallthrough]];
+          case -3:
+            vdup_b_x(v50, -input_offset);
+            [[fallthrough]];
+          case -2:
+            vdup_b_x(v49, -input_offset);
+            [[fallthrough]];
+          case -1:
+            vdup_b_x(v48, -input_offset);
+            break;
+          default:
+            __builtin_unreachable();
+        }
+
+        // Rearranges input data into place.
+        ConvPerChannelD1OD24_5x5_inputshuffle();
+
+        // Computes OD[0..7]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v16, v17);  // Also writes v49.
+        vmvp_vv(v50, v16, v17);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v8);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(output_multiplier, output_shift,
+                                          output_offset, output_activation_min,
+                                          output_activation_max, out_ptr_col0,
+                                          out_ptr_col4);
+
+        // Computes OD[8..15]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v18, v19);  // Also writes v49.
+        vmvp_vv(v50, v18, v19);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v24);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(
+            output_multiplier + 8, output_shift + 8, output_offset,
+            output_activation_min, output_activation_max, out_ptr_col0 + 8,
+            out_ptr_col4 + 8);
+
+        // Computes OD[16..23]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v20, v21);  // Also writes v49.
+        vmvp_vv(v50, v20, v21);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v40);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(
+            output_multiplier + 16, output_shift + 16, output_offset,
+            output_activation_min, output_activation_max, out_ptr_col0 + 16,
+            out_ptr_col4 + 16);
+
+        // Proceed to next block.
+        out_x_min += patches_per_iteration /*=8*/;
+        out_x_max += patches_per_iteration /*=8*/;
+        in_x_min += patches_per_iteration /*=8*/ * stride_width /*=2*/;
+        in_x_max += patches_per_iteration /*=8*/ * stride_width /*=2*/;
+        in_ptr_row1 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        in_ptr_row2 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        in_ptr_row3 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        in_ptr_row4 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        out_ptr_col0 += patches_per_iteration /*=8*/ * output_depth /*=24*/;
+        out_ptr_col4 += patches_per_iteration /*=8*/ * output_depth /*=24*/;
+      }
+
+      // Top right corner.
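+      // Handles the remaining, possibly partial, blocks of the row: the
+      // outputs and/or the input loads would run past the right edge here.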
+      while (out_x_min < output_width) {
+        const int true_patches =
+            std::min(output_width - out_x_min, patches_per_iteration);
+        const int true_load_width = std::max(input_width - in_x_min, 0);
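+        // true_patches: output columns still to produce in this row.
+        // true_load_width: input bytes still in bounds; everything past
+        // them is right padding.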
+
+        // Prepare the selector vector for right padding.
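+        // Lanes holding loaded data get selector = 1; lanes past the image
+        // get 0 and have vsel_b_vx substitute -input_offset below.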
+        {
+          int8_t selector[32];
+          memset(selector, 1, true_load_width);
+          memset(selector + true_load_width, 0, 32 - true_load_width);
+          vld_b_x(v53, selector);
+        }
+        // Loads all needed rows and applies right padding.
+        switch (-in_y_min) {
+          case 1:
+            vld_b_l_xx(v49, in_ptr_row1, true_load_width);
+            vsel_b_vx(v49, v53, -input_offset);
+            [[fallthrough]];
+          case 2:
+            vld_b_l_xx(v50, in_ptr_row2, true_load_width);
+            vsel_b_vx(v50, v53, -input_offset);
+            [[fallthrough]];
+          case 3:
+            vld_b_l_xx(v51, in_ptr_row3, true_load_width);
+            vsel_b_vx(v51, v53, -input_offset);
+            [[fallthrough]];
+          case 4:
+            vld_b_l_xx(v52, in_ptr_row4, true_load_width);
+            vsel_b_vx(v52, v53, -input_offset);
+            break;
+          default:
+            __builtin_unreachable();
+        }
+        // Fills padding rows.
+        switch (in_y_min) {
+          case -4:
+            vdup_b_x(v51, -input_offset);
+            [[fallthrough]];
+          case -3:
+            vdup_b_x(v50, -input_offset);
+            [[fallthrough]];
+          case -2:
+            vdup_b_x(v49, -input_offset);
+            [[fallthrough]];
+          case -1:
+            vdup_b_x(v48, -input_offset);
+            break;
+          default:
+            __builtin_unreachable();
+        }
+
+        // Rearranges input data into place.
+        ConvPerChannelD1OD24_5x5_inputshuffle();
+
+        // We added enough padding to complete a full block, but VSTQ
+        // cannot have a write-limiter attached. To work around this, we
+        // store to a large-enough buffer and then copy only what's needed.
+        int8_t vstq_buffer[patches_per_iteration /*=8*/ * output_depth /*=24*/]
+            __attribute__((aligned(32)));
+        int8_t* temp_out_ptr_col0 = &vstq_buffer[0];
+        int8_t* temp_out_ptr_col4 = &vstq_buffer[4 * output_depth /*=24*/];
+
+        // Computes OD[0..7]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v16, v17);  // Also writes v49.
+        vmvp_vv(v50, v16, v17);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v8);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(output_multiplier, output_shift,
+                                          output_offset, output_activation_min,
+                                          output_activation_max,
+                                          temp_out_ptr_col0, temp_out_ptr_col4);
+
+        // Computes OD[8..15]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v18, v19);  // Also writes v49.
+        vmvp_vv(v50, v18, v19);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v24);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(
+            output_multiplier + 8, output_shift + 8, output_offset,
+            output_activation_min, output_activation_max, temp_out_ptr_col0 + 8,
+            temp_out_ptr_col4 + 8);
+
+        // Computes OD[16..23]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v20, v21);  // Also writes v49.
+        vmvp_vv(v50, v20, v21);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v40);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(
+            output_multiplier + 16, output_shift + 16, output_offset,
+            output_activation_min, output_activation_max,
+            temp_out_ptr_col0 + 16, temp_out_ptr_col4 + 16);
+
+        // Copies useful results back.
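+        // A single memcpy suffices because both four-patch halves are
+        // contiguous in vstq_buffer.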
+        ::memcpy(out_ptr_col0, temp_out_ptr_col0,
+                 true_patches * output_depth /*=24*/);
+
+        // Proceed to next block.
+        // It is easier to use patches_per_iteration instead of true_patches
+        // here because the former is constexpr. The two values differ only
+        // when we've just finished the last block of the row.
+        out_x_min += patches_per_iteration /*=8*/;
+        out_x_max += patches_per_iteration /*=8*/;
+        in_x_min += patches_per_iteration /*=8*/ * stride_width /*=2*/;
+        in_x_max += patches_per_iteration /*=8*/ * stride_width /*=2*/;
+        in_ptr_row1 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        in_ptr_row2 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        in_ptr_row3 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        in_ptr_row4 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        out_ptr_col0 += patches_per_iteration /*=8*/ * output_depth /*=24*/;
+        out_ptr_col4 += patches_per_iteration /*=8*/ * output_depth /*=24*/;
+      }
     }
-    for (; out_x < (output_width - pad_width); ++out_x) {
-      const int in_x_origin = (out_x * stride_width) - pad_width;
-      CALCULATE_IN_X(in_x_origin);
-      PAD_OR_LOAD_ROW_0(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_1(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_2(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_3(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_4(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      COMPUTE(cmds, swizzled_bias_data);
-      OUTPUT(output_activation_min, output_activation_max, output_offset,
-             local_output_data, n_channels);
-      local_output_data += output_depth;
+
+    // Main loop (no vertical padding).
+    for (; out_y < output_height && in_y_max < input_height;
+         ++out_y, in_y_min += stride_height, in_y_max += stride_height) {
+      int out_x_min = 0;
+      int out_x_max = patches_per_iteration /*=8*/ - 1;
+      int in_x_min = -pad_width;
+      int in_x_max = in_x_min + load_width - 1;
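+      // Each block computes 8 output columns; at stride 2 with a 5-wide
+      // filter, the block reads (8 - 1) * 2 + 5 = 19 input bytes per row.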
+      const int8_t* in_ptr_row0 = &p_input[row_start_offset(
+          in_y_min, input_width, input_depth /*=1*/)];
+      const int8_t* in_ptr_row1 =
+          &p_input[row_start_offset(in_y_min + 1 * dilation_height_factor,
+                                    input_width, input_depth /*=1*/)];
+      const int8_t* in_ptr_row2 =
+          &p_input[row_start_offset(in_y_min + 2 * dilation_height_factor,
+                                    input_width, input_depth /*=1*/)];
+      const int8_t* in_ptr_row3 =
+          &p_input[row_start_offset(in_y_min + 3 * dilation_height_factor,
+                                    input_width, input_depth /*=1*/)];
+      const int8_t* in_ptr_row4 = &p_input[row_start_offset(
+          in_y_max, input_width, input_depth /*=1*/)];
+      int8_t* out_ptr_col0 = &p_output[row_start_offset(out_y, output_width,
+                                                        output_depth /*=24*/)];
+      int8_t* out_ptr_col4 = out_ptr_col0 + 4 * output_depth /*=24*/;
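+      // postproc stores the block's first four patches through out_ptr_col0
+      // and the last four through out_ptr_col4.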
+
+      // Left edge.
+      // This happens at most once per row.
+      if (pad_width > 0) {
+        // Same as in_x_max + 1.
+        const int true_load_width = load_width - pad_width;
+
+        // Loads all needed rows.
+        vld_b_l_xx(v22, in_ptr_row0, true_load_width);
+        vld_b_l_xx(v23, in_ptr_row1, true_load_width);
+        vld_b_l_xx(v32, in_ptr_row2, true_load_width);
+        vld_b_l_xx(v33, in_ptr_row3, true_load_width);
+        vld_b_l_xx(v34, in_ptr_row4, true_load_width);
+
+        // Applies left padding as needed.
+        vdup_b_x(v53, -input_offset);
+        // Can't pass pad_width directly: vslide* takes an immediate amount.
+        // Can't use the vx encoding because a scalar operand is only
+        // accepted on the RHS.
+        // v53 is -input_offset broadcast to all lanes.
+        switch (pad_width) {
+          case 1:
+            vslidep_b_1_vv(v48, v53, v22);
+            vslidep_b_1_vv(v49, v53, v23);
+            vslidep_b_1_vv(v50, v53, v32);
+            vslidep_b_1_vv(v51, v53, v33);
+            vslidep_b_1_vv(v52, v53, v34);
+            break;
+          case 2:
+            vslidep_b_2_vv(v48, v53, v22);
+            vslidep_b_2_vv(v49, v53, v23);
+            vslidep_b_2_vv(v50, v53, v32);
+            vslidep_b_2_vv(v51, v53, v33);
+            vslidep_b_2_vv(v52, v53, v34);
+            break;
+          case 3:
+            vslidep_b_3_vv(v48, v53, v22);
+            vslidep_b_3_vv(v49, v53, v23);
+            vslidep_b_3_vv(v50, v53, v32);
+            vslidep_b_3_vv(v51, v53, v33);
+            vslidep_b_3_vv(v52, v53, v34);
+            break;
+          case 4:
+            vslidep_b_4_vv(v48, v53, v22);
+            vslidep_b_4_vv(v49, v53, v23);
+            vslidep_b_4_vv(v50, v53, v32);
+            vslidep_b_4_vv(v51, v53, v33);
+            vslidep_b_4_vv(v52, v53, v34);
+            break;
+          default:
+            __builtin_unreachable();
+        }
+
+        // Rearranges input data into place.
+        ConvPerChannelD1OD24_5x5_inputshuffle();
+
+        // Computes OD[0..7]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v16, v17);  // Also writes v49.
+        vmvp_vv(v50, v16, v17);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v8);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(output_multiplier, output_shift,
+                                          output_offset, output_activation_min,
+                                          output_activation_max, out_ptr_col0,
+                                          out_ptr_col4);
+
+        // Computes OD[8..15]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v18, v19);  // Also writes v49.
+        vmvp_vv(v50, v18, v19);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v24);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(
+            output_multiplier + 8, output_shift + 8, output_offset,
+            output_activation_min, output_activation_max, out_ptr_col0 + 8,
+            out_ptr_col4 + 8);
+
+        // Computes OD[16..23]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v20, v21);  // Also writes v49.
+        vmvp_vv(v50, v20, v21);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v40);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(
+            output_multiplier + 16, output_shift + 16, output_offset,
+            output_activation_min, output_activation_max, out_ptr_col0 + 16,
+            out_ptr_col4 + 16);
+
+        // Proceed to next block.
+        out_x_min += patches_per_iteration /*=8*/;
+        out_x_max += patches_per_iteration /*=8*/;
+        in_x_min += patches_per_iteration /*=8*/ * stride_width /*=2*/;
+        in_x_max += patches_per_iteration /*=8*/ * stride_width /*=2*/;
+        in_ptr_row0 +=
+            (patches_per_iteration /*=8*/ * stride_width /*=2*/ - pad_width) *
+            input_depth /*=1*/;
+        in_ptr_row1 +=
+            (patches_per_iteration /*=8*/ * stride_width /*=2*/ - pad_width) *
+            input_depth /*=1*/;
+        in_ptr_row2 +=
+            (patches_per_iteration /*=8*/ * stride_width /*=2*/ - pad_width) *
+            input_depth /*=1*/;
+        in_ptr_row3 +=
+            (patches_per_iteration /*=8*/ * stride_width /*=2*/ - pad_width) *
+            input_depth /*=1*/;
+        in_ptr_row4 +=
+            (patches_per_iteration /*=8*/ * stride_width /*=2*/ - pad_width) *
+            input_depth /*=1*/;
+        out_ptr_col0 += patches_per_iteration /*=8*/ * output_depth /*=24*/;
+        out_ptr_col4 += patches_per_iteration /*=8*/ * output_depth /*=24*/;
+      }
+
+      // Center.
+      while (out_x_max < output_width && in_x_max < input_width) {
+        // Loads all needed rows.
+        // TODO(davidgao): all these reads are misaligned.
+        //   Padding the input image may give further speedup.
+        vld_b_l_xx(v48, in_ptr_row0, load_width);
+        vld_b_l_xx(v49, in_ptr_row1, load_width);
+        vld_b_l_xx(v50, in_ptr_row2, load_width);
+        vld_b_l_xx(v51, in_ptr_row3, load_width);
+        vld_b_l_xx(v52, in_ptr_row4, load_width);
+
+        // Rearranges input data into place.
+        ConvPerChannelD1OD24_5x5_inputshuffle();
+
+        // Computes OD[0..7]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v16, v17);  // Also writes v49.
+        vmvp_vv(v50, v16, v17);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v8);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(output_multiplier, output_shift,
+                                          output_offset, output_activation_min,
+                                          output_activation_max, out_ptr_col0,
+                                          out_ptr_col4);
+
+        // Computes OD[8..15]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v18, v19);  // Also writes v49.
+        vmvp_vv(v50, v18, v19);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v24);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(
+            output_multiplier + 8, output_shift + 8, output_offset,
+            output_activation_min, output_activation_max, out_ptr_col0 + 8,
+            out_ptr_col4 + 8);
+
+        // Computes OD[16..23]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v20, v21);  // Also writes v49.
+        vmvp_vv(v50, v20, v21);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v40);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(
+            output_multiplier + 16, output_shift + 16, output_offset,
+            output_activation_min, output_activation_max, out_ptr_col0 + 16,
+            out_ptr_col4 + 16);
+
+        // Proceed to next block.
+        out_x_min += patches_per_iteration /*=8*/;
+        out_x_max += patches_per_iteration /*=8*/;
+        in_x_min += patches_per_iteration /*=8*/ * stride_width /*=2*/;
+        in_x_max += patches_per_iteration /*=8*/ * stride_width /*=2*/;
+        in_ptr_row0 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        in_ptr_row1 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        in_ptr_row2 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        in_ptr_row3 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        in_ptr_row4 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        out_ptr_col0 += patches_per_iteration /*=8*/ * output_depth /*=24*/;
+        out_ptr_col4 += patches_per_iteration /*=8*/ * output_depth /*=24*/;
+      }
+
+      // Right edge.
+      while (out_x_min < output_width) {
+        const int true_patches =
+            std::min(output_width - out_x_min, patches_per_iteration);
+        const int true_load_width = std::max(input_width - in_x_min, 0);
+
+        // Prepare the selector vector for right padding.
+        {
+          int8_t selector[32];
+          memset(selector, 1, true_load_width);
+          memset(selector + true_load_width, 0, 32 - true_load_width);
+          vld_b_x(v53, selector);
+        }
+        // Loads all needed rows and applies right padding.
+        vld_b_l_xx(v48, in_ptr_row0, true_load_width);
+        vld_b_l_xx(v49, in_ptr_row1, true_load_width);
+        vld_b_l_xx(v50, in_ptr_row2, true_load_width);
+        vld_b_l_xx(v51, in_ptr_row3, true_load_width);
+        vld_b_l_xx(v52, in_ptr_row4, true_load_width);
+        vsel_b_vx(v48, v53, -input_offset);
+        vsel_b_vx(v49, v53, -input_offset);
+        vsel_b_vx(v50, v53, -input_offset);
+        vsel_b_vx(v51, v53, -input_offset);
+        vsel_b_vx(v52, v53, -input_offset);
+
+        // Rearranges input data into place.
+        ConvPerChannelD1OD24_5x5_inputshuffle();
+
+        // We added enough padding to complete a full block, but VSTQ
+        // cannot have a write-limiter attached. To work around this, we
+        // store to a large-enough buffer and then copy only what's needed.
+        int8_t vstq_buffer[patches_per_iteration /*=8*/ * output_depth /*=24*/]
+            __attribute__((aligned(32)));
+        int8_t* temp_out_ptr_col0 = &vstq_buffer[0];
+        int8_t* temp_out_ptr_col4 = &vstq_buffer[4 * output_depth /*=24*/];
+
+        // Computes OD[0..7]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v16, v17);  // Also writes v49.
+        vmvp_vv(v50, v16, v17);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v8);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(output_multiplier, output_shift,
+                                          output_offset, output_activation_min,
+                                          output_activation_max,
+                                          temp_out_ptr_col0, temp_out_ptr_col4);
+
+        // Computes OD[8..15]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v18, v19);  // Also writes v49.
+        vmvp_vv(v50, v18, v19);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v24);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(
+            output_multiplier + 8, output_shift + 8, output_offset,
+            output_activation_min, output_activation_max, temp_out_ptr_col0 + 8,
+            temp_out_ptr_col4 + 8);
+
+        // Computes OD[16..23]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v20, v21);  // Also writes v49.
+        vmvp_vv(v50, v20, v21);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v40);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(
+            output_multiplier + 16, output_shift + 16, output_offset,
+            output_activation_min, output_activation_max,
+            temp_out_ptr_col0 + 16, temp_out_ptr_col4 + 16);
+
+        // Copies useful results back.
+        // TODO(davidgao): this could use some vector copying.
+        ::memcpy(out_ptr_col0, temp_out_ptr_col0,
+                 true_patches * output_depth /*=24*/);
+
+        // Proceed to next block.
+        // It is easier to use patches_per_iteration instead of true_patches
+        // here because the former is constexpr. The two values differ only
+        // when we've just finished the last block of the row.
+        out_x_min += patches_per_iteration /*=8*/;
+        out_x_max += patches_per_iteration /*=8*/;
+        in_x_min += patches_per_iteration /*=8*/ * stride_width /*=2*/;
+        in_x_max += patches_per_iteration /*=8*/ * stride_width /*=2*/;
+        in_ptr_row0 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        in_ptr_row1 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        in_ptr_row2 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        in_ptr_row3 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        in_ptr_row4 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        out_ptr_col0 += patches_per_iteration /*=8*/ * output_depth /*=24*/;
+        out_ptr_col4 += patches_per_iteration /*=8*/ * output_depth /*=24*/;
+      }
     }
-    for (; out_x < output_width; ++out_x) {
-      const int in_x_origin = (out_x * stride_width) - pad_width;
-      CALCULATE_IN_X(in_x_origin);
-      PAD_OR_LOAD_ROW_0(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_1(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_2(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_3(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      PAD_OR_LOAD_ROW_4(p_input, input_height, input_width, in_y, in_x,
-                        input_offset);
-      COMPUTE(cmds, swizzled_bias_data);
-      OUTPUT(output_activation_min, output_activation_max, output_offset,
-             local_output_data, n_channels);
-      local_output_data += output_depth;
+
+    // Bottom loop (vertical padding at the bottom).
+    for (; out_y < output_height;
+         ++out_y, in_y_min += stride_height, in_y_max += stride_height) {
+      int out_x_min = 0;
+      int out_x_max = patches_per_iteration /*=8*/ - 1;
+      int in_x_min = -pad_width;
+      int in_x_max = in_x_min + load_width - 1;
+      // Bottom padding is active, so the last row is never read.
+      const int8_t* in_ptr_row0 = &p_input[row_start_offset(
+          in_y_min, input_width, input_depth /*=1*/)];
+      const int8_t* in_ptr_row1 = &p_input[row_start_offset(
+          in_y_min + 1, input_width, input_depth /*=1*/)];
+      const int8_t* in_ptr_row2 = &p_input[row_start_offset(
+          in_y_min + 2, input_width, input_depth /*=1*/)];
+      const int8_t* in_ptr_row3 = &p_input[row_start_offset(
+          in_y_min + 3, input_width, input_depth /*=1*/)];
+      int8_t* out_ptr_col0 = &p_output[row_start_offset(out_y, output_width,
+                                                        output_depth /*=24*/)];
+      int8_t* out_ptr_col4 = out_ptr_col0 + 4 * output_depth /*=24*/;
+
+      // Bottom left corner.
+      // This happens at most once per row.
+      if (pad_width > 0) {
+        // Same as in_x_max + 1.
+        const int true_load_width = load_width - pad_width;
+
+        // Loads all needed rows.
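+        // in_y_max - input_height counts the rows besides row 4 that fall
+        // below the image (row 4 is always padding in the bottom loop), so
+        // falling through loads exactly the rows still in bounds.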
+        switch (in_y_max - input_height) {
+          case 0:
+            vld_b_l_xx(v33, in_ptr_row3, true_load_width);
+            [[fallthrough]];
+          case 1:
+            vld_b_l_xx(v32, in_ptr_row2, true_load_width);
+            [[fallthrough]];
+          case 2:
+            vld_b_l_xx(v23, in_ptr_row1, true_load_width);
+            [[fallthrough]];
+          case 3:
+            vld_b_l_xx(v22, in_ptr_row0, true_load_width);
+            break;
+          default:
+            __builtin_unreachable();
+        }
+        // Fills padding rows.
+        vdup_b_x(v53, -input_offset);
+        switch (input_height - in_y_max) {
+          case -3:
+            vmv_v(v23, v53);
+            [[fallthrough]];
+          case -2:
+            vmv_v(v32, v53);
+            [[fallthrough]];
+          case -1:
+            vmv_v(v33, v53);
+            [[fallthrough]];
+          case 0:
+            // The last row is always padding-only.
+            vmv_v(v52, v53);
+            break;
+          default:
+            __builtin_unreachable();
+        }
+
+        // Applies left padding as needed.
+        // Can't pass pad_width directly: vslide* takes an immediate amount.
+        // Can't use the vx encoding because a scalar operand is only
+        // accepted on the RHS.
+        // v53 is -input_offset broadcast to all lanes.
+        switch (pad_width) {
+          case 1:
+            vslidep_b_1_vv(v48, v53, v22);
+            vslidep_b_1_vv(v49, v53, v23);
+            vslidep_b_1_vv(v50, v53, v32);
+            vslidep_b_1_vv(v51, v53, v33);
+            break;
+          case 2:
+            vslidep_b_2_vv(v48, v53, v22);
+            vslidep_b_2_vv(v49, v53, v23);
+            vslidep_b_2_vv(v50, v53, v32);
+            vslidep_b_2_vv(v51, v53, v33);
+            break;
+          case 3:
+            vslidep_b_3_vv(v48, v53, v22);
+            vslidep_b_3_vv(v49, v53, v23);
+            vslidep_b_3_vv(v50, v53, v32);
+            vslidep_b_3_vv(v51, v53, v33);
+            break;
+          case 4:
+            vslidep_b_4_vv(v48, v53, v22);
+            vslidep_b_4_vv(v49, v53, v23);
+            vslidep_b_4_vv(v50, v53, v32);
+            vslidep_b_4_vv(v51, v53, v33);
+            break;
+          default:
+            __builtin_unreachable();
+        }
+
+        // Rearranges input data into place.
+        ConvPerChannelD1OD24_5x5_inputshuffle();
+
+        // Computes OD[0..7]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v16, v17);  // Also writes v49.
+        vmvp_vv(v50, v16, v17);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v8);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(output_multiplier, output_shift,
+                                          output_offset, output_activation_min,
+                                          output_activation_max, out_ptr_col0,
+                                          out_ptr_col4);
+
+        // Computes OD[8..15]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v18, v19);  // Also writes v49.
+        vmvp_vv(v50, v18, v19);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v24);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(
+            output_multiplier + 8, output_shift + 8, output_offset,
+            output_activation_min, output_activation_max, out_ptr_col0 + 8,
+            out_ptr_col4 + 8);
+
+        // Computes OD[16..23]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v20, v21);  // Also writes v49.
+        vmvp_vv(v50, v20, v21);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v40);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(
+            output_multiplier + 16, output_shift + 16, output_offset,
+            output_activation_min, output_activation_max, out_ptr_col0 + 16,
+            out_ptr_col4 + 16);
+
+        // Proceed to next block.
+        out_x_min += patches_per_iteration /*=8*/;
+        out_x_max += patches_per_iteration /*=8*/;
+        in_x_min += patches_per_iteration /*=8*/ * stride_width /*=2*/;
+        in_x_max += patches_per_iteration /*=8*/ * stride_width /*=2*/;
+        in_ptr_row0 +=
+            (patches_per_iteration /*=8*/ * stride_width /*=2*/ - pad_width) *
+            input_depth /*=1*/;
+        in_ptr_row1 +=
+            (patches_per_iteration /*=8*/ * stride_width /*=2*/ - pad_width) *
+            input_depth /*=1*/;
+        in_ptr_row2 +=
+            (patches_per_iteration /*=8*/ * stride_width /*=2*/ - pad_width) *
+            input_depth /*=1*/;
+        in_ptr_row3 +=
+            (patches_per_iteration /*=8*/ * stride_width /*=2*/ - pad_width) *
+            input_depth /*=1*/;
+        out_ptr_col0 += patches_per_iteration /*=8*/ * output_depth /*=24*/;
+        out_ptr_col4 += patches_per_iteration /*=8*/ * output_depth /*=24*/;
+      }
+
+      // Bottom edge.
+      while (out_x_max < output_width && in_x_max < input_width) {
+        // Loads all needed rows.
+        switch (in_y_max - input_height) {
+          case 0:
+            vld_b_l_xx(v51, in_ptr_row3, load_width);
+            [[fallthrough]];
+          case 1:
+            vld_b_l_xx(v50, in_ptr_row2, load_width);
+            [[fallthrough]];
+          case 2:
+            vld_b_l_xx(v49, in_ptr_row1, load_width);
+            [[fallthrough]];
+          case 3:
+            vld_b_l_xx(v48, in_ptr_row0, load_width);
+            break;
+          default:
+            __builtin_unreachable();
+        }
+        // Fills padding rows.
+        switch (input_height - in_y_max) {
+          case -3:
+            vdup_b_x(v49, -input_offset);
+            [[fallthrough]];
+          case -2:
+            vdup_b_x(v50, -input_offset);
+            [[fallthrough]];
+          case -1:
+            vdup_b_x(v51, -input_offset);
+            [[fallthrough]];
+          case 0:
+            vdup_b_x(v52, -input_offset);
+            break;
+          default:
+            __builtin_unreachable();
+        }
+
+        // Rearranges input data into place.
+        ConvPerChannelD1OD24_5x5_inputshuffle();
+
+        // Computes OD[0..7]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v16, v17);  // Also writes v49.
+        vmvp_vv(v50, v16, v17);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v8);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(output_multiplier, output_shift,
+                                          output_offset, output_activation_min,
+                                          output_activation_max, out_ptr_col0,
+                                          out_ptr_col4);
+
+        // Computes OD[8..15]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v18, v19);  // Also writes v49.
+        vmvp_vv(v50, v18, v19);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v24);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(
+            output_multiplier + 8, output_shift + 8, output_offset,
+            output_activation_min, output_activation_max, out_ptr_col0 + 8,
+            out_ptr_col4 + 8);
+
+        // Computes OD[16..23]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v20, v21);  // Also writes v49.
+        vmvp_vv(v50, v20, v21);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v40);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(
+            output_multiplier + 16, output_shift + 16, output_offset,
+            output_activation_min, output_activation_max, out_ptr_col0 + 16,
+            out_ptr_col4 + 16);
+
+        // Proceed to next block.
+        out_x_min += patches_per_iteration /*=8*/;
+        out_x_max += patches_per_iteration /*=8*/;
+        in_x_min += patches_per_iteration /*=8*/ * stride_width /*=2*/;
+        in_x_max += patches_per_iteration /*=8*/ * stride_width /*=2*/;
+        in_ptr_row0 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        in_ptr_row1 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        in_ptr_row2 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        in_ptr_row3 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        out_ptr_col0 += patches_per_iteration /*=8*/ * output_depth /*=24*/;
+        out_ptr_col4 += patches_per_iteration /*=8*/ * output_depth /*=24*/;
+      }
+
+      // Bottom right corner.
+      while (out_x_min < output_width) {
+        const int true_patches =
+            std::min(output_width - out_x_min, patches_per_iteration);
+        const int true_load_width = std::max(input_width - in_x_min, 0);
+
+        // Prepare the selector vector for right padding.
+        {
+          int8_t selector[32];
+          memset(selector, 1, true_load_width);
+          memset(selector + true_load_width, 0, 32 - true_load_width);
+          vld_b_x(v53, selector);
+        }
+        // Loads all needed rows and applies right padding.
+        switch (in_y_max - input_height) {
+          case 0:
+            vld_b_l_xx(v51, in_ptr_row3, true_load_width);
+            vsel_b_vx(v51, v53, -input_offset);
+            [[fallthrough]];
+          case 1:
+            vld_b_l_xx(v50, in_ptr_row2, true_load_width);
+            vsel_b_vx(v50, v53, -input_offset);
+            [[fallthrough]];
+          case 2:
+            vld_b_l_xx(v49, in_ptr_row1, true_load_width);
+            vsel_b_vx(v49, v53, -input_offset);
+            [[fallthrough]];
+          case 3:
+            vld_b_l_xx(v48, in_ptr_row0, true_load_width);
+            vsel_b_vx(v48, v53, -input_offset);
+            break;
+          default:
+            __builtin_unreachable();
+        }
+        // Fills padding rows.
+        switch (input_height - in_y_max) {
+          case -3:
+            vdup_b_x(v49, -input_offset);
+            [[fallthrough]];
+          case -2:
+            vdup_b_x(v50, -input_offset);
+            [[fallthrough]];
+          case -1:
+            vdup_b_x(v51, -input_offset);
+            [[fallthrough]];
+          case 0:
+            vdup_b_x(v52, -input_offset);
+            break;
+          default:
+            __builtin_unreachable();
+        }
+
+        // Rearranges input data into place.
+        ConvPerChannelD1OD24_5x5_inputshuffle();
+
+        // We added enough padding to complete a full block, but VSTQ
+        // cannot have a write-limiter attached. To work around this, we
+        // store to a large-enough buffer and then copy only what's needed.
+        int8_t vstq_buffer[patches_per_iteration /*=8*/ * output_depth /*=24*/]
+            __attribute__((aligned(32)));
+        int8_t* temp_out_ptr_col0 = &vstq_buffer[0];
+        int8_t* temp_out_ptr_col4 = &vstq_buffer[4 * output_depth /*=24*/];
+
+        // Computes OD[0..7]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v16, v17);  // Also writes v49.
+        vmvp_vv(v50, v16, v17);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v8);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(output_multiplier, output_shift,
+                                          output_offset, output_activation_min,
+                                          output_activation_max,
+                                          temp_out_ptr_col0, temp_out_ptr_col4);
+
+        // Computes OD[8..15]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v18, v19);  // Also writes v49.
+        vmvp_vv(v50, v18, v19);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v24);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(
+            output_multiplier + 8, output_shift + 8, output_offset,
+            output_activation_min, output_activation_max, temp_out_ptr_col0 + 8,
+            temp_out_ptr_col4 + 8);
+
+        // Computes OD[16..23]
+        // Initializes the accumulators from bias.
+        vmvp_vv(v48, v20, v21);  // Also writes v49.
+        vmvp_vv(v50, v20, v21);  // Also writes v51.
+        vmv_v_m(v52, v48);
+        actr_v(v48, v48);  // v48 is read but not written.
+        // Performs matmul.
+        aconv_vxv(v48, v0, aconv_cmd, v40);  // v48 is not actually written.
+        ConvPerChannelD1OD24_5x5_postproc(
+            output_multiplier + 16, output_shift + 16, output_offset,
+            output_activation_min, output_activation_max,
+            temp_out_ptr_col0 + 16, temp_out_ptr_col4 + 16);
+
+        // Copies useful results back.
+        ::memcpy(out_ptr_col0, temp_out_ptr_col0,
+                 true_patches * output_depth /*=24*/);
+
+        // Proceed to next block.
+        // It is easier to use patches_per_iteration instead of true_patches
+        // here because the former is constexpr. The two values differ only
+        // when we've just finished the last block of the row.
+        out_x_min += patches_per_iteration /*=8*/;
+        out_x_max += patches_per_iteration /*=8*/;
+        in_x_min += patches_per_iteration /*=8*/ * stride_width /*=2*/;
+        in_x_max += patches_per_iteration /*=8*/ * stride_width /*=2*/;
+        in_ptr_row0 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        in_ptr_row1 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        in_ptr_row2 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        in_ptr_row3 += (patches_per_iteration /*=8*/ * stride_width /*=2*/) *
+                       input_depth /*=1*/;
+        out_ptr_col0 += patches_per_iteration /*=8*/ * output_depth /*=24*/;
+        out_ptr_col4 += patches_per_iteration /*=8*/ * output_depth /*=24*/;
+      }
     }
   }
 }
 
-#undef PAD_OR_LOAD_ROW_0
-#undef PAD_OR_LOAD_ROW_1
-#undef PAD_OR_LOAD_ROW_2
-#undef PAD_OR_LOAD_ROW_3
-#undef PAD_OR_LOAD_ROW_4
-#undef _PAD_OR_LOAD_ROW
-#undef _H_PAD_OR_LOAD_ROW
-#undef H_PAD_OR_LOAD_ROW_0
-#undef H_PAD_OR_LOAD_ROW_1
-#undef H_PAD_OR_LOAD_ROW_2
-#undef H_PAD_OR_LOAD_ROW_3
-#undef H_PAD_OR_LOAD_ROW_4
-#undef LOAD_ROW_0
-#undef LOAD_ROW_1
-#undef LOAD_ROW_2
-#undef LOAD_ROW_3
-#undef LOAD_ROW_4
-#undef PAD_ROW_0
-#undef PAD_ROW_1
-#undef PAD_ROW_2
-#undef PAD_ROW_3
-#undef PAD_ROW_4
-#undef CALCULATE_IN_X
-#undef CALCULATE_IN_Y
-#undef INPUT_0_0
-#undef INPUT_0_1
-#undef INPUT_0_2
-#undef INPUT_0_3
-#undef INPUT_0_4
-#undef INPUT_1_0
-#undef INPUT_1_1
-#undef INPUT_1_2
-#undef INPUT_1_3
-#undef INPUT_1_4
-#undef INPUT_2_0
-#undef INPUT_2_1
-#undef INPUT_2_2
-#undef INPUT_2_3
-#undef INPUT_2_4
-#undef INPUT_3_0
-#undef INPUT_3_1
-#undef INPUT_3_2
-#undef INPUT_3_3
-#undef INPUT_3_4
-#undef INPUT_4_0
-#undef INPUT_4_1
-#undef INPUT_4_2
-#undef INPUT_4_3
-#undef INPUT_4_4
-#undef INPUT_0_5
-#undef INPUT_1_5
-#undef INPUT_2_5
-#undef INPUT_3_5
-#undef INPUT_4_5
-#undef FLT_0_0
-#undef FLT_0_1
-#undef FLT_0_2
-#undef FLT_0_3
-#undef FLT_0_4
-#undef FLT_1_0
-#undef FLT_1_1
-#undef FLT_1_2
-#undef FLT_1_3
-#undef FLT_1_4
-#undef FLT_2_0
-#undef FLT_2_1
-#undef FLT_2_2
-#undef FLT_2_3
-#undef FLT_2_4
-#undef FLT_3_0
-#undef FLT_3_1
-#undef FLT_3_2
-#undef FLT_3_3
-#undef FLT_3_4
-#undef FLT_HOLE
-#undef FLT_4_0
-#undef FLT_4_1
-#undef FLT_4_2
-#undef FLT_4_3
-#undef FLT_4_4
-
 void ConvPerChannelD1(
     const tflite::ConvParams& params, const int32_t* output_multiplier,
     const int32_t* output_shift, const tflite::RuntimeShape& input_shape,
@@ -1106,4 +1597,4 @@
   }
 }
 
-}  // namespace kelvin::opt
\ No newline at end of file
+}  // namespace kelvin::opt