Disable folding slice op with its consumers. (#4559)

After Linalg fusion on tensors, we sometimes push up some
linalg.tensor_reshape ops, which produces `slice + reshape + generic`
sequences. We do not yet know how to handle this kind of case
correctly. Disable the folding for now to unblock other work.
diff --git a/iree/compiler/Dialect/Flow/Transforms/DispatchConfig.cpp b/iree/compiler/Dialect/Flow/Transforms/DispatchConfig.cpp
index 008e1eb..0d7f275 100644
--- a/iree/compiler/Dialect/Flow/Transforms/DispatchConfig.cpp
+++ b/iree/compiler/Dialect/Flow/Transforms/DispatchConfig.cpp
@@ -224,15 +224,15 @@
 // TODO(b/144530470): replace with tablegen attributes/interfaces.
 bool OpDispatchPolicy::isUnsupportedFusionOp(Operation *op) {
   return isa<linalg::IndexedGenericOp, linalg::GenericOp, mhlo::ConcatenateOp,
-             mhlo::ConvOp, mhlo::PadOp, mhlo::ReduceOp, mhlo::ReduceWindowOp>(
-             op) ||
+             mhlo::ConvOp, mhlo::PadOp, mhlo::ReduceOp, mhlo::ReduceWindowOp,
+             mhlo::SliceOp>(op) ||
          (!clEnableConsumerOnlyFusion &&
           isa<mhlo::DotOp, mhlo::DotGeneralOp>(op)) ||
          isLeafOnlyOp(op);
 }
 
 bool OpDispatchPolicy::isLeafOnlyOp(Operation *op) {
-  return isa<mhlo::SliceOp, mhlo::TorchIndexSelectOp>(op);
+  return isa<mhlo::TorchIndexSelectOp>(op);
 }
 
 }  // namespace Flow
diff --git a/iree/compiler/Dialect/Flow/Transforms/test/fold_compatible_dispatch_regions.mlir b/iree/compiler/Dialect/Flow/Transforms/test/fold_compatible_dispatch_regions.mlir
index e0ba42b..292b280 100644
--- a/iree/compiler/Dialect/Flow/Transforms/test/fold_compatible_dispatch_regions.mlir
+++ b/iree/compiler/Dialect/Flow/Transforms/test/fold_compatible_dispatch_regions.mlir
@@ -135,36 +135,6 @@
 
 // -----
 
-// Test if the op that only can be a leaf op fuse with consumer but not
-// producer. This test use a dummy workload to test on leaf only op
-// functionality.
-module {
-  func @leafOnlyOp(%arg0: tensor<3x4xi32>, %arg1: tensor<1x2xi32>) -> tensor<1x2xi32> {
-    %c0 = constant 0 : index
-    %0 = flow.dispatch.region[%c0 : index](%arg2 = %arg0 : tensor<3x4xi32>) -> tensor<3x4xi32> {
-      %3 = mhlo.add %arg2, %arg2 : tensor<3x4xi32>
-      flow.return %3 : tensor<3x4xi32>
-    }
-    %1 = flow.dispatch.region[%c0 : index](%arg2 = %0 : tensor<3x4xi32>) -> tensor<1x2xi32> {
-      %3 = "mhlo.slice"(%arg2) {limit_indices = dense<[2, 3]> : tensor<2xi64>, start_indices = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} : (tensor<3x4xi32>) -> tensor<1x2xi32>
-      flow.return %3 : tensor<1x2xi32>
-    }
-    %2 = flow.dispatch.region[%c0 : index](%arg2 = %1 : tensor<1x2xi32>, %arg3 = %arg1 : tensor<1x2xi32>) -> tensor<1x2xi32> {
-      %3 = mhlo.multiply %arg2, %arg3 : tensor<1x2xi32>
-      flow.return %3 : tensor<1x2xi32>
-    }
-    return %2 : tensor<1x2xi32>
-  }
-}
-// CHECK-LABEL: func @leafOnlyOp
-//       CHECK: flow.dispatch.region
-//  CHECK-NEXT:   mhlo.add
-//       CHECK: flow.dispatch.region
-//  CHECK-NEXT:   mhlo.slice
-//  CHECK-NEXT:   mhlo.multiply
-
-// -----
-
 module {
   func @torch_index_select_producer(%arg0: tensor<5x1x5xi32>,
                                     %arg1: tensor<2xi32>) -> tensor<2x1x5xi32> {
diff --git a/iree/test/e2e/structural/BUILD b/iree/test/e2e/structural/BUILD
index fd9d7af..9ffecf2 100644
--- a/iree/test/e2e/structural/BUILD
+++ b/iree/test/e2e/structural/BUILD
@@ -33,7 +33,6 @@
     srcs = [
         "gather_add.mlir",
         "matmul_add.mlir",
-        "slice_add.mlir",
     ],
     driver = "vulkan",
     target_backend = "vulkan-spirv",
@@ -44,7 +43,6 @@
     srcs = [
         "gather_add.mlir",
         "matmul_add.mlir",
-        "slice_add.mlir",
     ],
     driver = "dylib",
     target_backend = "dylib-llvm-aot",
diff --git a/iree/test/e2e/structural/CMakeLists.txt b/iree/test/e2e/structural/CMakeLists.txt
index db8ed1d..4932aa1 100644
--- a/iree/test/e2e/structural/CMakeLists.txt
+++ b/iree/test/e2e/structural/CMakeLists.txt
@@ -32,7 +32,6 @@
   SRCS
     "gather_add.mlir"
     "matmul_add.mlir"
-    "slice_add.mlir"
   TARGET_BACKEND
     "vulkan-spirv"
   DRIVER
@@ -45,7 +44,6 @@
   SRCS
     "gather_add.mlir"
     "matmul_add.mlir"
-    "slice_add.mlir"
   TARGET_BACKEND
     "dylib-llvm-aot"
   DRIVER
diff --git a/iree/test/e2e/structural/slice_add.mlir b/iree/test/e2e/structural/slice_add.mlir
deleted file mode 100644
index d6d301b..0000000
--- a/iree/test/e2e/structural/slice_add.mlir
+++ /dev/null
@@ -1,84 +0,0 @@
-func @slice_whole_buffer() attributes { iree.module.export } {
-  %input0 = iree.unfoldable_constant dense<[
-    [01, 02, 03, 04],
-    [05, 06, 07, 08],
-    [09, 10, 11, 12]]> : tensor<3x4xi32>
-  %input1 = iree.unfoldable_constant dense<10> : tensor<3x4xi32>
-  %workload = constant 12 : index
-  %result = flow.dispatch.region[%workload: index](%arg0 = %input0 : tensor<3x4xi32>, %arg1 = %input1 : tensor<3x4xi32>) -> tensor<3x4xi32> {
-    %0 = "mhlo.slice"(%arg0) {
-      start_indices = dense<[0, 0]> : tensor<2xi64>,
-      limit_indices = dense<[3, 4]> : tensor<2xi64>,
-      strides = dense<1> : tensor<2xi64>
-    } : (tensor<3x4xi32>) -> tensor<3x4xi32>
-    %1 = mhlo.add %0, %arg1 : tensor<3x4xi32>
-    flow.return %1 : tensor<3x4xi32>
-  }
-  check.expect_eq_const(%result, dense<[
-      [11, 12, 13, 14],
-      [15, 16, 17, 18],
-      [19, 20, 21, 22]]> : tensor<3x4xi32>) : tensor<3x4xi32>
-  return
-}
-
-func @slice_whole_stride() attributes { iree.module.export } {
-  %input0 = iree.unfoldable_constant dense<[
-    [01, 02, 03, 04],
-    [05, 06, 07, 08],
-    [09, 10, 11, 12]]> : tensor<3x4xi32>
-  %input1 = iree.unfoldable_constant dense<10> : tensor<1x4xi32>
-  %workload = constant 4 : index
-  %result = flow.dispatch.region[%workload: index](%arg0 = %input0 : tensor<3x4xi32>, %arg1 = %input1 : tensor<1x4xi32>) -> tensor<1x4xi32> {
-    %0 = "mhlo.slice"(%arg0) {
-      start_indices = dense<[1, 0]> : tensor<2xi64>,
-      limit_indices = dense<[2, 4]> : tensor<2xi64>,
-      strides = dense<1> : tensor<2xi64>
-    } : (tensor<3x4xi32>) -> tensor<1x4xi32>
-    %1 = mhlo.add %0, %arg1 : tensor<1x4xi32>
-    flow.return %1 : tensor<1x4xi32>
-  }
-  check.expect_eq_const(%result, dense<[[15, 16, 17, 18]]> : tensor<1x4xi32>) : tensor<1x4xi32>
-  return
-}
-
-func @slice_stride_part() attributes { iree.module.export } {
-  %input0 = iree.unfoldable_constant dense<[
-    [01, 02, 03, 04],
-    [05, 06, 07, 08],
-    [09, 10, 11, 12]]> : tensor<3x4xi32>
-  %input1 = iree.unfoldable_constant dense<10> : tensor<1x2xi32>
-  %workload = constant 2 : index
-  %result = flow.dispatch.region[%workload: index](%arg0 = %input0 : tensor<3x4xi32>, %arg1 = %input1 : tensor<1x2xi32>) -> tensor<1x2xi32> {
-    %0 = "mhlo.slice"(%arg0) {
-      start_indices = dense<[1, 1]> : tensor<2xi64>,
-      limit_indices = dense<[2, 3]> : tensor<2xi64>,
-      strides = dense<1> : tensor<2xi64>
-    } : (tensor<3x4xi32>) -> tensor<1x2xi32>
-    %1 = mhlo.add %0, %arg1 : tensor<1x2xi32>
-    flow.return %1 : tensor<1x2xi32>
-  }
-  check.expect_eq_const(%result, dense<[[16, 17]]> : tensor<1x2xi32>) : tensor<1x2xi32>
-  return
-}
-
-func @slice_multi_stride() attributes { iree.module.export } {
-  %input0 = iree.unfoldable_constant dense<[
-    [01, 02, 03, 04],
-    [05, 06, 07, 08],
-    [09, 10, 11, 12]]> : tensor<3x4xi32>
-  %input1 = iree.unfoldable_constant dense<10> : tensor<2x4xi32>
-  %workload = constant 8 : index
-  %result = flow.dispatch.region[%workload: index](%arg0 = %input0 : tensor<3x4xi32>, %arg1 = %input1 : tensor<2x4xi32>) -> tensor<2x4xi32> {
-    %0 = "mhlo.slice"(%arg0) {
-      start_indices = dense<[1, 0]> : tensor<2xi64>,
-      limit_indices = dense<[3, 4]> : tensor<2xi64>,
-      strides = dense<1> : tensor<2xi64>
-    } : (tensor<3x4xi32>) -> tensor<2x4xi32>
-    %1 = mhlo.add %0, %arg1 : tensor<2x4xi32>
-    flow.return %1 : tensor<2x4xi32>
-  }
-  check.expect_eq_const(%result, dense<[
-      [15, 16, 17, 18],
-      [19, 20, 21, 22]]> : tensor<2x4xi32>) : tensor<2x4xi32>
-  return
-}