[GlobalOpt] Disable pack->expand_shape propagation. (#17739)
The pack->expand_shape propagation was not enabled upstream until this commit:
https://github.com/llvm/llvm-project/commit/a945f55d3e6af6be6648fb92a20c80e88e3fc2b2
That upstream change triggers a bug in IREE, so this revision disables the
propagation.
See https://github.com/iree-org/iree/issues/17734 for more details.
---------
Signed-off-by: hanhanW <hanhan0912@gmail.com>
diff --git a/compiler/src/iree/compiler/GlobalOptimization/DataLayoutPropagation.cpp b/compiler/src/iree/compiler/GlobalOptimization/DataLayoutPropagation.cpp
index 9119ed1..43d6800 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/DataLayoutPropagation.cpp
+++ b/compiler/src/iree/compiler/GlobalOptimization/DataLayoutPropagation.cpp
@@ -26,7 +26,10 @@
linalg::populateDataLayoutPropagationPatterns(patterns, [](Operation *op) {
// Currently only bubble up/push down pack/unpack through collapse/expand
// shape ops.
- return isa<tensor::CollapseShapeOp, tensor::ExpandShapeOp>(op);
+ // TODO(#17734): The propagation through expand_shape ops is broken.
+ // Enable the propagation once we find it useful and the upstream issue is
+ // fixed.
+ return isa<tensor::CollapseShapeOp>(op);
});
if (failed(applyPatternsAndFoldGreedily(funcOp, std::move(patterns)))) {
funcOp.emitOpError("folding patterns failed");
diff --git a/compiler/src/iree/compiler/GlobalOptimization/test/data_layout_propagation.mlir b/compiler/src/iree/compiler/GlobalOptimization/test/data_layout_propagation.mlir
index 556cfed..bd262cf 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/test/data_layout_propagation.mlir
+++ b/compiler/src/iree/compiler/GlobalOptimization/test/data_layout_propagation.mlir
@@ -27,9 +27,12 @@
// CHECK-LABEL: func.func @push_down_unpack_through_expand
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]
-// CHECK: %[[C0:.+]] = arith.constant 0 : index
-// CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0, 1], [2], [3], [4]] output_shape {{.*}} : tensor<?x32x8x8xf32> into tensor<?x32x32x8x8xf32>
-// CHECK: %[[DIM:.+]] = tensor.dim %[[EXPANDED]], %[[C0]] : tensor<?x32x32x8x8xf32>
-// CHECK: %[[EMPTY:.+]] = tensor.empty(%[[DIM]]) : tensor<?x256x256xf32>
-// CHECK: %[[UNPACK:.+]] = tensor.unpack %[[EXPANDED:.+]] outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [8, 8] into %[[EMPTY]] : tensor<?x32x32x8x8xf32> -> tensor<?x256x256xf32>
-// CHECK: return %[[UNPACK]] : tensor<?x256x256xf32>
+// TODO(#17734): Flip the check after we have better control function support.
+// CHECK: tensor.unpack
+// CHECK: tensor.expand_shape
+// NO-CHECK: %[[C0:.+]] = arith.constant 0 : index
+// NO-CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0, 1], [2], [3], [4]] output_shape {{.*}} : tensor<?x32x8x8xf32> into tensor<?x32x32x8x8xf32>
+// NO-CHECK: %[[DIM:.+]] = tensor.dim %[[EXPANDED]], %[[C0]] : tensor<?x32x32x8x8xf32>
+// NO-CHECK: %[[EMPTY:.+]] = tensor.empty(%[[DIM]]) : tensor<?x256x256xf32>
+// NO-CHECK: %[[UNPACK:.+]] = tensor.unpack %[[EXPANDED:.+]] outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [8, 8] into %[[EMPTY]] : tensor<?x32x32x8x8xf32> -> tensor<?x256x256xf32>
+// NO-CHECK: return %[[UNPACK]] : tensor<?x256x256xf32>