Fix processing of preprocessing flags. (#12029)
Fixes https://github.com/iree-org/iree/issues/12028
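
Adds a compiler_driver test exercising --iree-preprocessing-pass-pipeline end to end.
For reference, the flag is invoked as in the new test's RUN line, roughly as follows
(the input filename here is only a placeholder; all flags are taken verbatim from the test):

    iree-compile --iree-hal-target-backends=llvm-cpu --output-format=vm-asm \
        --iree-preprocessing-pass-pipeline="builtin.module(func.func(iree-preprocessing-convert-conv2d-to-img2col,iree-preprocessing-pad-linalg-ops{pad-size=16}))" \
        input.mlir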
diff --git a/tests/compiler_driver/BUILD b/tests/compiler_driver/BUILD
index 91d73f8..858e0b8 100644
--- a/tests/compiler_driver/BUILD
+++ b/tests/compiler_driver/BUILD
@@ -20,6 +20,7 @@
[
"executable_benchmarks.mlir",
"hal_executable.mlir",
+ "preprocessing_flags.mlir",
"smoketest.mlir",
"streams.mlir",
],
diff --git a/tests/compiler_driver/CMakeLists.txt b/tests/compiler_driver/CMakeLists.txt
index 2e20917..676ded9 100644
--- a/tests/compiler_driver/CMakeLists.txt
+++ b/tests/compiler_driver/CMakeLists.txt
@@ -16,6 +16,7 @@
SRCS
"executable_benchmarks.mlir"
"hal_executable.mlir"
+ "preprocessing_flags.mlir"
"smoketest.mlir"
"streams.mlir"
TOOLS
diff --git a/tests/compiler_driver/preprocessing_flags.mlir b/tests/compiler_driver/preprocessing_flags.mlir
new file mode 100644
index 0000000..e2cb536
--- /dev/null
+++ b/tests/compiler_driver/preprocessing_flags.mlir
@@ -0,0 +1,25 @@
+// RUN: iree-compile --iree-hal-target-backends=llvm-cpu --output-format=vm-asm \
+// RUN: --iree-preprocessing-pass-pipeline="builtin.module(func.func(iree-preprocessing-convert-conv2d-to-img2col,iree-preprocessing-pad-linalg-ops{pad-size=16}))" \
+// RUN: --mlir-print-ir-after=iree-preprocessing-convert-conv2d-to-img2col --mlir-print-ir-after=iree-preprocessing-pad-linalg-ops %s 2>&1 \
+// RUN: | FileCheck %s
+
+func.func @test(%arg0 : tensor<10x20xf32>, %arg1 : tensor<20x30xf32>, %arg2 : tensor<10x30xf32>) -> tensor<10x30xf32> {
+ %0 = linalg.matmul ins(%arg0, %arg1 : tensor<10x20xf32>, tensor<20x30xf32>)
+ outs(%arg2 : tensor<10x30xf32>) -> tensor<10x30xf32>
+ return %0 : tensor<10x30xf32>
+}
+// Just check that the preprocessing passes run and that the compilation finishes.
+// CHECK: ConvertConv2DToImg2Col (iree-preprocessing-convert-conv2d-to-img2col)
+// CHECK: PadLinalgOps (iree-preprocessing-pad-linalg-ops)
+// CHECK-DAG: %[[ARG0:.+]] = hal.tensor.import %{{[a-zA-Z0-9]+}} : !hal.buffer_view -> tensor<10x20xf32>
+// CHECK-DAG: %[[ARG1:.+]] = hal.tensor.import %{{[a-zA-Z0-9]+}} : !hal.buffer_view -> tensor<20x30xf32>
+// CHECK-DAG: %[[ARG2:.+]] = hal.tensor.import %{{[a-zA-Z0-9]+}} : !hal.buffer_view -> tensor<10x30xf32>
+// CHECK-DAG: %[[PAD0:.+]] = tensor.pad %[[ARG0]] low[0, 0] high[6, 12]
+// CHECK-DAG: %[[PAD1:.+]] = tensor.pad %[[ARG1]] low[0, 0] high[12, 2]
+// CHECK-DAG: %[[PAD2:.+]] = tensor.pad %[[ARG2]] low[0, 0] high[6, 2]
+// CHECK: %[[PADDED:.+]] = linalg.matmul
+// CHECK-SAME: ins(%[[PAD0]], %[[PAD1]] :
+// CHECK-SAME: outs(%[[PAD2]] :
+// CHECK: %[[SLICE:.+]] = tensor.extract_slice %[[PADDED]][0, 0] [10, 30] [1, 1]
+// CHECK: hal.tensor.export %[[SLICE]]
+// CHECK: vm.module
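
For reference, the CHECK lines above anticipate IR of roughly the following shape after
iree-preprocessing-pad-linalg-ops pads each matmul operand up to the next multiple of 16
(a hand-written sketch, not actual compiler output; the SSA names and the zero-fill pad
bodies are illustrative):

    %cst = arith.constant 0.000000e+00 : f32
    %pad0 = tensor.pad %arg0 low[0, 0] high[6, 12] {
    ^bb0(%i: index, %j: index):
      tensor.yield %cst : f32
    } : tensor<10x20xf32> to tensor<16x32xf32>
    %pad1 = tensor.pad %arg1 low[0, 0] high[12, 2] {
    ^bb0(%i: index, %j: index):
      tensor.yield %cst : f32
    } : tensor<20x30xf32> to tensor<32x32xf32>
    %pad2 = tensor.pad %arg2 low[0, 0] high[6, 2] {
    ^bb0(%i: index, %j: index):
      tensor.yield %cst : f32
    } : tensor<10x30xf32> to tensor<16x32xf32>
    %padded = linalg.matmul ins(%pad0, %pad1 : tensor<16x32xf32>, tensor<32x32xf32>)
                            outs(%pad2 : tensor<16x32xf32>) -> tensor<16x32xf32>
    %slice = tensor.extract_slice %padded[0, 0] [10, 30] [1, 1] : tensor<16x32xf32> to tensor<10x30xf32>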