Sandbox integrate (#8581)
* Bring LinalgTransform dialect from the sandbox to iree-dialects.
Temporarily name the dialect "iree_linalg_transform" instead of "linalg_transform" to avoid name conflicts during the transition and thus ease it.
* LinalgTransform python bindings
Temporarily name the dialect "iree_linalg_transform" instead of "linalg_transform" to avoid name conflicts during the transition and thus ease it.
* [NFC] Add the MLIR clang-format and format iree-dialects
* Update to sandbox 77ca66e88d130b195b2eac169f17b95305a98577.
* Move Dialect tests to a location consistent with core MLIR
* Update sandbox to 3738d5792a3da6f03628c4375183cb39e3a82d51
* Format
* Drop spurious dependency
* clang-format
* Build fixes
* Move include/Transforms -> include/iree-dialects/Transforms
* Disable pytype on _iree_linalg_transforms_ops_ext.py
* clang-format
* More BUILD fixes
* Fix unit test
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/bufferize.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/bufferize.mlir
new file mode 100644
index 0000000..5ca985e
--- /dev/null
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/bufferize.mlir
@@ -0,0 +1,34 @@
+// RUN: iree-dialects-opt -linalg-interp-transforms %s | FileCheck %s
+
+// CHECK-LABEL: func @matmul_tensors(
+// CHECK-SAME: %[[TA:[0-9a-z]+]]: memref<128x128xf32
+// CHECK-SAME: %[[TB:[0-9a-z]+]]: memref<128x128xf32
+// CHECK-SAME: %[[TC:[0-9a-z]+]]: memref<128x128xf32
+// CHECK-NOT: -> tensor
+func @matmul_tensors(
+ %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32> { linalg.inplaceable = true})
+ -> tensor<128x128xf32> {
+ // CHECK: linalg.matmul ins(%[[TA]], %[[TB]] : memref{{.*}}, memref{{.*}} outs(%[[TC]] : memref{{.*}})
+ %0 = linalg.matmul ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>)
+ outs(%arg2: tensor<128x128xf32>)
+ -> tensor<128x128xf32>
+
+ // CHECK: return
+ // CHECK-NOT: %{{.*}}
+ return %0 : tensor<128x128xf32>
+// CHECK: }
+}
+
+
+pdl.pattern @pdl_target : benefit(1) {
+ %args = operands
+ %results = types
+ %0 = operation "linalg.matmul"(%args : !pdl.range<value>) -> (%results : !pdl.range<type>)
+ apply_native_constraint "nestedInFunc"[@matmul_tensors](%0 : !pdl.operation)
+ // TODO: we don't want this, but it is the required terminator for pdl.pattern
+ rewrite %0 with "iree_linalg_transform.apply"
+}
+
+iree_linalg_transform.sequence {
+ bufferize
+}
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/double-tiling.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/double-tiling.mlir
new file mode 100644
index 0000000..74d6cff
--- /dev/null
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/double-tiling.mlir
@@ -0,0 +1,43 @@
+// RUN: iree-dialects-opt -linalg-interp-transforms %s | FileCheck %s
+
+// This test is verifying that a non-trivial 2*tiling+padding+vectorization transformation completes successfully
+
+// CHECK-LABEL: func @matmul_tensors(
+func @matmul_tensors(
+ %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32> { linalg.inplaceable = true})
+ -> tensor<128x128xf32> {
+ // Pack transposed padding of 1st operand.
+ // CHECK: tensor.pad
+ // CHECK: linalg.generic
+
+ // Pack padding of 2nd operand.
+ // CHECK: tensor.pad
+
+ // CHECK: scf.for
+ // CHECK: scf.for
+ // CHECK: scf.for
+ // CHECK: scf.for
+ // CHECK: scf.for
+ // CHECK: linalg.generic
+ // CHECK: vector.contract
+ %0 = linalg.matmul ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>)
+ outs(%arg2: tensor<128x128xf32>)
+ -> tensor<128x128xf32>
+
+ return %0 : tensor<128x128xf32>
+}
+
+pdl.pattern @pdl_target : benefit(1) {
+ %args = operands
+  %results = types
+ %0 = operation "linalg.matmul"(%args : !pdl.range<value>) -> (%results : !pdl.range<type>)
+ apply_native_constraint "nestedInFunc"[@matmul_tensors](%0 : !pdl.operation)
+ rewrite %0 with "iree_linalg_transform.apply"
+}
+iree_linalg_transform.sequence {
+ %0 = match @pdl_target
+ %1 = tile %0 {interchange = [0, 2, 1], peel = [], scalarize_dyn_dims = false, sizes = [32, 32, 32]}
+ %2 = tile %1 {interchange = [0, 1, 2], peel = [], scalarize_dyn_dims = false, sizes = [4, 4, 1]}
+ %3 = pad %2 {pack_paddings = [1, 1, 1], hoist_paddings = [6, 6, 0], transpose_paddings = [[1, 0], [0, 1]]}
+ %4 = vectorize %3 {vectorize_padding = true}
+}
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/drop-schedule.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/drop-schedule.mlir
new file mode 100644
index 0000000..c82252b
--- /dev/null
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/drop-schedule.mlir
@@ -0,0 +1,26 @@
+// RUN: iree-dialects-opt -linalg-drop-schedule %s | FileCheck %s
+
+func @matmul_tensors(
+ %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32> { linalg.inplaceable = true})
+ -> tensor<128x128xf32> {
+ %0 = linalg.matmul ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>)
+ outs(%arg2: tensor<128x128xf32>)
+ -> tensor<128x128xf32>
+ return %0 : tensor<128x128xf32>
+}
+
+// CHECK-NOT: pdl.pattern
+pdl.pattern @pdl_target : benefit(1) {
+ %args = operands
+ %results = types
+ %0 = operation "linalg.matmul"(%args : !pdl.range<value>) -> (%results : !pdl.range<type>)
+ apply_native_constraint "nestedInFunc"[@matmul_tensors](%0 : !pdl.operation)
+ // TODO: we don't want this, but it is the required terminator for pdl.pattern
+ rewrite %0 with "iree_linalg_transform.apply"
+}
+
+// CHECK-NOT: iree_linalg_transform.sequence
+iree_linalg_transform.sequence {
+ %0 = match @pdl_target
+ tile %0 {sizes = [4, 4, 4], pad = false}
+}
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/expert.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/expert.mlir
new file mode 100644
index 0000000..b5825ee
--- /dev/null
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/expert.mlir
@@ -0,0 +1,164 @@
+// RUN: iree-dialects-opt -linalg-transform-expert-expansion -split-input-file %s | FileCheck %s --check-prefix=EXPAND
+// RUN: iree-dialects-opt -linalg-transform-expert-expansion -linalg-interp-transforms -split-input-file %s | FileCheck %s
+
+// CHECK-LABEL: func @matmul_tensors
+// CHECK-NOT: linalg
+// CHECK: llvm
+func @matmul_tensors(
+ %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32> { linalg.inplaceable = true})
+ -> tensor<128x128xf32> {
+ %0 = linalg.matmul ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>)
+ outs(%arg2: tensor<128x128xf32>)
+ -> tensor<128x128xf32>
+
+ return %0 : tensor<128x128xf32>
+}
+
+pdl.pattern @pdl_target : benefit(1) {
+ %args = operands
+ %results = types
+ %0 = operation "linalg.matmul"(%args : !pdl.range<value>) -> (%results : !pdl.range<type>)
+ apply_native_constraint "nestedInFunc"[@matmul_tensors](%0 : !pdl.operation)
+ // TODO: we don't want this, but it is the required terminator for pdl.pattern
+ rewrite %0 with "iree_linalg_transform.apply"
+}
+
+iree_linalg_transform.sequence {
+ // This should match the strategy below.
+ // EXPAND-NOT: expert apply
+ // EXPAND: %[[OP:.*]] = match @pdl_target
+ // EXPAND: %[[HANDLE:.*]] = tile %[[OP]] {sizes = [4, 4, 4]}
+ // EXPAND: %[[HANDLE2:.*]] = vectorize %[[HANDLE]] {vectorize_padding = true}
+ // EXPAND: bufferize
+ // EXPAND: lower_vectors {multireduction_lowering = "innerreduce"}
+ // EXPAND: lower_to_llvm
+ %0 = match @pdl_target
+ expert apply "single_tiling" to %0
+ {
+ tile_sizes = [4, 4, 4],
+ vectorize_padding = true,
+ multireduction_lowering = "innerreduce"
+ }
+}
+
+// CHECK-NOT: @strategies
+// EXPAND-NOT: @strategies
+module @strategies {
+ pdl.pattern @single_tiling_matcher : benefit(1) {
+ %tile_sizes = attribute
+ %vectorize_padding = attribute
+ %multireduction_lowering = attribute
+ %name = attribute : "single_tiling"
+ %type = type : !pdl.operation
+ %target = operand : %type
+ %transformed = type
+ %root = operation "iree_linalg_transform.expert"(%target : !pdl.value) {
+ "expertName" = %name,
+ "tile_sizes" = %tile_sizes,
+ "vectorize_padding" = %vectorize_padding,
+ "multireduction_lowering" = %multireduction_lowering
+ } -> (%transformed : !pdl.type)
+
+ rewrite %root {
+ %tile = operation "iree_linalg_transform.tile"(%target : !pdl.value) {
+ "sizes" = %tile_sizes
+ } -> (%transformed : !pdl.type)
+ %handle = result 0 of %tile
+
+ %vectorize = operation "iree_linalg_transform.vectorize"(%handle : !pdl.value) {
+ "vectorize_padding" = %vectorize_padding
+ } -> (%transformed : !pdl.type)
+ %handle2 = result 0 of %vectorize
+
+ %bufferize = operation "iree_linalg_transform.bufferize"
+ %lower_vectors = operation "iree_linalg_transform.lower_vectors" {
+ "multireduction_lowering" = %multireduction_lowering
+ }
+ %lower_to_llvm = operation "iree_linalg_transform.lower_to_llvm"
+
+ replace %root with (%handle2 : !pdl.value)
+ }
+ }
+}
+
+// -----
+
+// CHECK-LABEL: func @matmul_tensors2
+// CHECK-NOT: linalg
+// CHECK: llvm
+func @matmul_tensors2(
+ %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32> { linalg.inplaceable = true})
+ -> tensor<128x128xf32> {
+ %0 = linalg.matmul ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>)
+ outs(%arg2: tensor<128x128xf32>)
+ -> tensor<128x128xf32>
+
+ return %0 : tensor<128x128xf32>
+}
+
+pdl.pattern @pdl_target2 : benefit(1) {
+ %args = pdl.operands
+ %results = pdl.types
+ %0 = pdl.operation "linalg.matmul"(%args : !pdl.range<value>) -> (%results : !pdl.range<type>)
+ pdl.apply_native_constraint "nestedInFunc"[@matmul_tensors2](%0 : !pdl.operation)
+ // TODO: we don't want this, but it is the required terminator for pdl.pattern
+ pdl.rewrite %0 with "iree_linalg_transform.apply"
+}
+
+iree_linalg_transform.sequence {
+ // This should match the strategy below.
+ // EXPAND-NOT: expert apply
+ // EXPAND: %[[OP:.*]] = match @pdl_target2
+ // EXPAND: %[[HANDLE:.*]] = tile %[[OP]] {sizes = [32, 8, 8]}
+ // EXPAND: %[[HANDLE2:.*]] = tile %[[HANDLE]] {sizes = [4, 4, 4]}
+ // EXPAND: %[[HANDLE3:.*]] = vectorize %[[HANDLE2]] {vectorize_padding = false}
+ // EXPAND: bufferize
+ // EXPAND: lower_vectors {multireduction_lowering = "innerparallel"}
+ // EXPAND: lower_to_llvm
+ %0 = match @pdl_target2
+ %1 = tile %0 {sizes = [32, 8, 8]}
+ expert apply "single_tiling" to %1
+ {
+ tile_sizes = [4, 4, 4],
+ vectorize_padding = false,
+ multireduction_lowering = "innerparallel"
+ }
+}
+
+module @strategies {
+ pdl.pattern @single_tiling_operand : benefit(1) {
+ %tile_sizes = attribute
+ %vectorize_padding = attribute
+ %multireduction_lowering = attribute
+ %name = attribute : "single_tiling"
+ %type = type : !pdl.operation
+ %target = operand : %type
+ %transformed = type
+ %root = operation "iree_linalg_transform.expert"(%target : !pdl.value) {
+ "expertName" = %name,
+ "tile_sizes" = %tile_sizes,
+ "vectorize_padding" = %vectorize_padding,
+ "multireduction_lowering" = %multireduction_lowering
+ } -> (%transformed : !pdl.type)
+
+ rewrite %root {
+ %tile = operation "iree_linalg_transform.tile"(%target : !pdl.value) {
+ "sizes" = %tile_sizes
+ } -> (%transformed : !pdl.type)
+ %handle = result 0 of %tile
+
+ %vectorize = operation "iree_linalg_transform.vectorize"(%handle : !pdl.value) {
+ "vectorize_padding" = %vectorize_padding
+ } -> (%transformed : !pdl.type)
+ %handle2 = result 0 of %vectorize
+
+ %bufferize = operation "iree_linalg_transform.bufferize"
+ %lower_vectors = operation "iree_linalg_transform.lower_vectors" {
+ "multireduction_lowering" = %multireduction_lowering
+ }
+ %lower_to_llvm = operation "iree_linalg_transform.lower_to_llvm"
+
+ replace %root with (%handle2 : !pdl.value)
+ }
+ }
+}
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/failure.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/failure.mlir
new file mode 100644
index 0000000..f0ecf7c
--- /dev/null
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/failure.mlir
@@ -0,0 +1,176 @@
+// RUN: iree-dialects-opt -linalg-interp-transforms -split-input-file -verify-diagnostics -allow-unregistered-dialect %s
+
+// This cannot be vectorized because of dynamic tensor shapes. We expect the
+// pass fail and report an error at the vectorization operation below.
+func public @non_vectorizable(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
+ %0 = linalg.generic {
+ indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>],
+ iterator_types = ["parallel"]}
+ ins(%arg0: tensor<?xf32>) outs(%arg1: tensor<?xf32>) {
+ ^bb0(%arg2: f32, %arg3: f32):
+ %1 = arith.mulf %arg2, %arg2 : f32
+ linalg.yield %1 : f32
+ } -> tensor<?xf32>
+ return %0 : tensor<?xf32>
+}
+
+pdl.pattern @target_pattern : benefit(1) {
+ %0 = operands
+ %1 = types
+ %2 = operation "linalg.generic"(%0 : !pdl.range<value>) -> (%1 : !pdl.range<type>)
+ rewrite %2 with "iree_linalg_transform.apply"
+}
+
+iree_linalg_transform.sequence {
+ %0 = match @target_pattern
+ // expected-error@below {{failed to apply}}
+ vectorize %0
+}
+
+// -----
+
+func public @no_loop(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
+ %0 = linalg.generic {
+ indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>],
+ iterator_types = ["parallel"]}
+ ins(%arg0: tensor<?xf32>) outs(%arg1: tensor<?xf32>) {
+ ^bb0(%arg2: f32, %arg3: f32):
+ %1 = arith.mulf %arg2, %arg2 : f32
+ linalg.yield %1 : f32
+ } -> tensor<?xf32>
+ return %0 : tensor<?xf32>
+}
+
+pdl.pattern @target_pattern : benefit(1) {
+ %0 = operands
+ %1 = types
+ %2 = operation "linalg.generic"(%0 : !pdl.range<value>) -> (%1 : !pdl.range<type>)
+ rewrite %2 with "iree_linalg_transform.apply"
+}
+
+iree_linalg_transform.sequence {
+ %0 = match @target_pattern
+ // expected-error@below {{the transformed op is enclosed by 0 loops, but 1 expected}}
+ // expected-error@below {{failed to apply}}
+ get_parent_loop %0
+}
+
+// -----
+
+func private @prevent_dce()
+
+pdl.pattern @something : benefit(1) {
+ %0 = operands
+ %2 = operation "scf.for"(%0 : !pdl.range<value>)
+ rewrite %2 with "iree_linalg_transform.apply"
+}
+
+func public @loop(%lb: index, %ub: index, %step: index) {
+ scf.for %i = %lb to %ub step %step {
+ call @prevent_dce() : () -> ()
+ }
+ return
+}
+
+iree_linalg_transform.sequence {
+ %0 = match @something
+ // expected-error@below {{NYI: cannot target the result of pipelining}}
+ // expected-error@below {{failed to apply}}
+ %1 = pipeline_loop %0
+ // expected-note@below {{use here}}
+ get_parent_loop %1
+}
+
+// -----
+
+func public @no_outlining() {
+ "some.operation"() ({}, {}) : () -> ()
+ return
+}
+
+pdl.pattern @some_operation : benefit(1) {
+ %0 = operation "some.operation"
+ rewrite %0 with "iree_linalg_transform.apply"
+}
+
+iree_linalg_transform.sequence {
+ %0 = match @some_operation
+ // Make sure we don't crash on wrong operation type.
+ // expected-error@below {{failed to apply}}
+ outline_loop %0 {func_name = "outlined"}
+}
+
+// -----
+
+func @no_replacement(
+ %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>,
+ %arg2: tensor<128x128xf32> {linalg.inplaceable = true})
+ -> tensor<128x128xf32> {
+ // expected-error @below {{could not find replacement for tracked op}}
+ %0 = linalg.matmul {test.attrA}
+ ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>)
+ outs(%arg2: tensor<128x128xf32>)
+ -> tensor<128x128xf32>
+ return %0 : tensor<128x128xf32>
+}
+
+pdl.pattern @pdl_target : benefit(1) {
+ %args = operands
+ %results = types
+ %0 = operation "linalg.matmul"(%args : !pdl.range<value>) -> (%results : !pdl.range<type>)
+ apply_native_constraint "nestedInFunc"[@no_replacement](%0 : !pdl.operation)
+ // TODO: we don't want this, but it is the required terminator for pdl.pattern
+ rewrite %0 with "iree_linalg_transform.apply"
+}
+
+iree_linalg_transform.sequence {
+ %0 = match @pdl_target
+ // expected-error @below {{failed to apply}}
+ vectorize
+ tile %0
+}
+
+// -----
+
+func @repeated_match(
+ %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>,
+ %arg2: tensor<128x128xf32> {linalg.inplaceable = true})
+ -> tensor<128x128xf32> {
+ // expected-error @below {{operation tracked by two handles}}
+ %0 = linalg.matmul {test.attrA}
+ ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>)
+ outs(%arg2: tensor<128x128xf32>)
+ -> tensor<128x128xf32>
+ return %0 : tensor<128x128xf32>
+}
+
+pdl.pattern @pdl_target1 : benefit(1) {
+ %args = operands
+ %results = types
+ %0 = operation "linalg.matmul"(%args : !pdl.range<value>) -> (%results : !pdl.range<type>)
+ apply_native_constraint "nestedInFunc"[@repeated_match](%0 : !pdl.operation)
+ // TODO: we don't want this, but it is the required terminator for pdl.pattern
+ rewrite %0 with "iree_linalg_transform.apply"
+}
+
+// An exact copy of the above, but with a different name.
+pdl.pattern @pdl_target2 : benefit(1) {
+ %args = operands
+ %results = types
+ %0 = operation "linalg.matmul"(%args : !pdl.range<value>) -> (%results : !pdl.range<type>)
+ apply_native_constraint "nestedInFunc"[@repeated_match](%0 : !pdl.operation)
+ // TODO: we don't want this, but it is the required terminator for pdl.pattern
+ rewrite %0 with "iree_linalg_transform.apply"
+}
+
+iree_linalg_transform.sequence {
+ // expected-note @below {{handle}}
+ %0 = match @pdl_target1
+ // expected-error @below {{failed to apply}}
+ // expected-note @below {{handle}}
+ %1 = match @pdl_target2
+
+ // Add references to handles produced by match so that they are not DCE'd.
+ tile %0
+ tile %1
+}
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/fuse.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/fuse.mlir
new file mode 100644
index 0000000..6a78eb3
--- /dev/null
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/fuse.mlir
@@ -0,0 +1,31 @@
+// RUN: iree-dialects-opt -linalg-interp-transforms %s | FileCheck %s
+
+
+// CHECK-LABEL: func @fuse_unary
+func @fuse_unary(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
+
+ // CHECK: scf.for
+ // CHECK: scf.for
+ // CHECK: linalg.elemwise_unary
+ // CHECK: linalg.elemwise_binary
+ %0 = linalg.elemwise_unary ins(%arg0 : tensor<?x?xf32>)
+ outs(%arg1: tensor<?x?xf32>) -> tensor<?x?xf32>
+ %1 = linalg.elemwise_binary ins(%0, %arg0 : tensor<?x?xf32>, tensor<?x?xf32>)
+ outs(%arg1: tensor<?x?xf32>) -> tensor<?x?xf32>
+ return %1 : tensor<?x?xf32>
+}
+
+
+pdl.pattern @pdl_target : benefit(1) {
+ %args = operands
+ %results = types
+ %0 = pdl.operation "linalg.elemwise_binary"(%args : !pdl.range<value>) -> (%results : !pdl.range<type>)
+ apply_native_constraint "nestedInFunc"[@fuse_unary](%0 : !pdl.operation)
+ // TODO: we don't want this, but it is the required terminator for pdl.pattern
+ rewrite %0 with "iree_linalg_transform.apply"
+}
+
+iree_linalg_transform.sequence {
+ %0 = match @pdl_target
+ %1 = fuse %0 {tile_sizes = [32, 32], tile_interchange = [0, 1]}
+}
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/generalize.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/generalize.mlir
new file mode 100644
index 0000000..ea12b9a
--- /dev/null
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/generalize.mlir
@@ -0,0 +1,27 @@
+// RUN: iree-dialects-opt -linalg-interp-transforms %s | FileCheck %s
+
+
+// CHECK-LABEL: func @generalize_unary
+func @generalize_unary(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
+
+ // CHECK-NOT: linalg.elemwise_unary
+ // CHECK: linalg.generic
+ %0 = linalg.elemwise_unary ins(%arg0 : tensor<?x?xf32>)
+ outs(%arg1: tensor<?x?xf32>) -> tensor<?x?xf32>
+ return %0 : tensor<?x?xf32>
+}
+
+
+pdl.pattern @pdl_target : benefit(1) {
+ %args = operands
+ %results = types
+ %0 = pdl.operation "linalg.elemwise_unary"(%args : !pdl.range<value>) -> (%results : !pdl.range<type>)
+ apply_native_constraint "nestedInFunc"[@generalize_unary](%0 : !pdl.operation)
+ // TODO: we don't want this, but it is the required terminator for pdl.pattern
+ rewrite %0 with "iree_linalg_transform.apply"
+}
+
+iree_linalg_transform.sequence {
+ %0 = match @pdl_target
+ generalize %0
+}
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/interchange.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/interchange.mlir
new file mode 100644
index 0000000..e988133
--- /dev/null
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/interchange.mlir
@@ -0,0 +1,34 @@
+// RUN: iree-dialects-opt -linalg-interp-transforms %s | FileCheck %s
+
+// CHECK: #[[$MAP:.*]] = affine_map<(d0, d1) -> (d1, d0)>
+
+// CHECK-LABEL: func @interchange_generic
+func @interchange_generic(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
+
+ // CHECK: linalg.generic
+ // CHECK-SAME: indexing_maps = [#[[$MAP]], #[[$MAP]]
+ %0 = linalg.generic {
+ indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>],
+ iterator_types = ["parallel", "parallel"]
+ } ins(%arg0 : tensor<?x?xf32>) outs(%arg1 : tensor<?x?xf32>) {
+ ^bb0(%arg2: f32, %arg3: f32):
+ %1 = math.exp %arg2 : f32
+ linalg.yield %1 : f32
+ } -> tensor<?x?xf32>
+ return %0 : tensor<?x?xf32>
+}
+
+
+pdl.pattern @pdl_target : benefit(1) {
+ %args = operands
+ %results = types
+ %0 = pdl.operation "linalg.generic"(%args : !pdl.range<value>) -> (%results : !pdl.range<type>)
+ apply_native_constraint "nestedInFunc"[@interchange_generic](%0 : !pdl.operation)
+ // TODO: we don't want this, but it is the required terminator for pdl.pattern
+ rewrite %0 with "iree_linalg_transform.apply"
+}
+
+iree_linalg_transform.sequence {
+ %0 = match @pdl_target
+ interchange %0 {iterator_interchange = [1, 0]}
+}
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/invalid.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/invalid.mlir
new file mode 100644
index 0000000..d9c7e28
--- /dev/null
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/invalid.mlir
@@ -0,0 +1,59 @@
+// RUN: iree-dialects-opt %s -split-input-file -verify-diagnostics
+
+iree_linalg_transform.sequence {
+ %0 = match @match
+ // expected-error@below {{result #0 has more than one use}}
+ %1 = tile %0
+ // expected-note@below {{used here as operand #0}}
+ tile %1
+ // expected-note@below {{used here as operand #0}}
+ vectorize %1
+}
+
+// -----
+
+iree_linalg_transform.sequence {
+ %0 = match @match
+ // expected-error@below {{"sizes" and "scalarize_dyn_dims" attributes are mutually exclusive}}
+ tile %0 {sizes = [1,2,3], scalarize_dyn_dims = true}
+}
+
+// -----
+
+iree_linalg_transform.sequence {
+ %0 = match @match
+ // expected-error@below {{expects iterator_interchange to be a permutation, found [1, 1]}}
+ interchange %0 {iterator_interchange = [1, 1]}
+}
+
+// -----
+
+iree_linalg_transform.sequence {
+ %0 = match @match
+ // expected-error@below {{expects interchange to be a permutation, found [1, 1]}}
+ fuse %0 {tile_sizes=[0, 1], tile_interchange = [1, 1]}
+}
+
+// -----
+
+iree_linalg_transform.sequence {
+ %0 = match @match
+ // expected-error@below {{expects pack_paddings to contain booleans (0/1), found [1, 7]}}
+ pad %0 {pack_paddings=[1, 7]}
+}
+
+// -----
+
+iree_linalg_transform.sequence {
+ %0 = match @match
+ // expected-error@below {{expects hoist_paddings to contain positive integers, found [1, -7]}}
+ pad %0 {hoist_paddings=[1, -7]}
+}
+
+// -----
+
+iree_linalg_transform.sequence {
+ %0 = match @match
+ // expected-error@below {{expects transpose_paddings to be a permutation, found [1, 1]}}
+ pad %0 {transpose_paddings=[[1, 1]]}
+}
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/pad.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/pad.mlir
new file mode 100644
index 0000000..d6d627b
--- /dev/null
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/pad.mlir
@@ -0,0 +1,42 @@
+// RUN: iree-dialects-opt -linalg-interp-transforms %s | FileCheck %s
+
+
+// CHECK-LABEL: func @pad_unary
+func @pad_unary(%arg0: tensor<24x12xf32>,
+ %arg1: tensor<24x12xf32>) -> tensor<24x12xf32> {
+ %c0 = arith.constant 0 : index
+ %c12 = arith.constant 12 : index
+ %c4 = arith.constant 4 : index
+
+ // CHECK: scf.for
+ // CHECK: tensor.pad
+ // CHECK: linalg.generic
+ // CHECK: scf.for
+ %0 = scf.for %arg3 = %c0 to %c12 step %c4 iter_args(%arg2 = %arg1) -> (tensor<24x12xf32>) {
+ %1 = tensor.extract_slice %arg0[0, %arg3] [24, 4] [1, 1] : tensor<24x12xf32> to tensor<24x4xf32>
+ %2 = tensor.extract_slice %arg2[0, %arg3] [24, 4] [1, 1] : tensor<24x12xf32> to tensor<24x4xf32>
+
+ // CHECK: linalg.generic
+ // CHECK: tensor.pad
+ // CHECK: linalg.elemwise_unary
+ %3 = linalg.elemwise_unary ins(%1 : tensor<24x4xf32>)
+ outs(%2: tensor<24x4xf32>) -> tensor<24x4xf32>
+ %4 = tensor.insert_slice %3 into %arg2[0, %arg3] [24, 4] [1, 1] : tensor<24x4xf32> into tensor<24x12xf32>
+ scf.yield %4 : tensor<24x12xf32>
+ }
+ return %0 : tensor<24x12xf32>
+}
+
+pdl.pattern @pdl_target : benefit(1) {
+ %args = operands
+ %results = types
+ %0 = pdl.operation "linalg.elemwise_unary"(%args : !pdl.range<value>) -> (%results : !pdl.range<type>)
+ apply_native_constraint "nestedInFunc"[@pad_unary](%0 : !pdl.operation)
+ // TODO: we don't want this, but it is the required terminator for pdl.pattern
+ rewrite %0 with "iree_linalg_transform.apply"
+}
+
+iree_linalg_transform.sequence {
+ %0 = match @pdl_target
+ %1 = pad %0 {pack_paddings=[1, 1], hoist_paddings=[1, 0], transpose_paddings=[[1, 0], [0, 1]]}
+}
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/roundtrip.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/roundtrip.mlir
new file mode 100644
index 0000000..7ff0112
--- /dev/null
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/roundtrip.mlir
@@ -0,0 +1,33 @@
+// RUN: iree-dialects-opt %s | FileCheck %s
+
+// CHECK: iree_linalg_transform.sequence
+iree_linalg_transform.sequence {
+ // CHECK: %[[OPS:.*]] = match @{{.*}}
+ %0 = match @match1
+ // CHECK: %[[TILED:.*]] = tile %[[OPS]] {
+ // CHECK-DAG: sizes = [4, 4, 4]
+ // CHECK: }
+ %1 = tile %0 {sizes = [4, 4, 4]}
+ // CHECK: %[[TILED2:.*]] = tile %[[TILED]]
+ %2 = tile %1 {sizes = [2, 2, 2]}
+ // CHECK: %[[PADDED:.*]] = pad %[[TILED2]] {pack_paddings = [1, 1, 0]}
+ %3 = pad %2 {pack_paddings = [1, 1, 0]}
+ // CHECK: decompose
+ decompose
+ // CHECK: %{{.*}} = vectorize %[[PADDED]] {vectorize_padding = true}
+ %4 = vectorize %3 {vectorize_padding = true}
+ // CHECK: %[[OPS2:.*]] = match @{{.*}}
+ %5 = match @match2
+ // CHECK: %{{.*}} = vectorize %[[OPS2]]
+ vectorize %5
+ // CHECK-NOT: %
+ // CHECK: vectorize
+ // CHECK-NOT: %
+ vectorize
+ // CHECK: bufferize
+ bufferize
+ // CHECK: lower_vectors {multireduction_lowering = "innerreduce"}
+  lower_vectors {multireduction_lowering = "innerreduce"}
+ // CHECK: lower_to_llvm
+ lower_to_llvm
+}
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/scoped.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/scoped.mlir
new file mode 100644
index 0000000..6964ef1
--- /dev/null
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/scoped.mlir
@@ -0,0 +1,30 @@
+// RUN: iree-dialects-opt -test-wrap-scope='opname=arith.addi' %s | FileCheck %s --check-prefix WRAP
+// RUN: iree-dialects-opt -test-unwrap-scope %s | FileCheck %s --check-prefix UNWRAP
+
+// WRAP-LABEL: @test_wrap
+// WRAP-SAME: (%[[ARG0:.*]]: i32) -> i32
+func @test_wrap(%arg0: i32) -> i32 {
+ // WRAP: %[[V:.*]] = iree_linalg_transform.util.scope(%[[ARG0]], %[[ARG0]]) {
+ // WRAP-NEXT: ^[[B:.*]](%[[ARG1:.*]]: i32, %[[ARG2:.*]]: i32):
+ // WRAP-NEXT: %[[ADD:.*]] = arith.addi %[[ARG2]], %[[ARG2]]
+ // WRAP-NEXT: iree_linalg_transform.util.forward %[[ADD]]
+ // WRAP-NEXT: } : (i32, i32) -> i32
+ %0 = arith.addi %arg0, %arg0 : i32
+ // WRAP: return %[[V]]
+ return %0 : i32
+}
+
+// UNWRAP-LABEL: @test_unwrap
+// UNWRAP-SAME: (%[[ARG0:.*]]: i32) -> (i32, i32)
+func @test_unwrap(%arg0: i32) -> (i32, i32) {
+ // UNWRAP: %[[V0:.*]] = arith.addi %[[ARG0]], %[[ARG0]]
+ // UNWRAP-NEXT: %[[V1:.*]] = arith.addi %[[V0]], %[[ARG0]]
+ %0:2 = iree_linalg_transform.util.scope(%arg0) {
+ ^bb0(%arg1: i32):
+ %1 = arith.addi %arg1, %arg1 : i32
+ %2 = arith.addi %1, %arg1 : i32
+ iree_linalg_transform.util.forward %1, %2 : i32, i32
+ } : (i32) -> (i32, i32)
+ // UNWRAP-NEXT: return %[[V0]], %[[V1]]
+ return %0#0, %0#1 : i32, i32
+}
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/selective-targeting.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/selective-targeting.mlir
new file mode 100644
index 0000000..fdcd2f9
--- /dev/null
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/selective-targeting.mlir
@@ -0,0 +1,134 @@
+// RUN: iree-dialects-opt %s -linalg-interp-transforms -split-input-file | FileCheck %s
+
+// CHECK-LABEL: func @matmul_tensors(
+func @matmul_tensors(
+ %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32>,
+ %arg3: tensor<128x128xf32>, %arg4: tensor<128x128xf32>, %arg5: tensor<128x128xf32>,
+ %arg6: tensor<128x128xf32> {linalg.inplaceable = true})
+ -> tensor<128x128xf32> {
+ // This operation is marked for tiling only.
+ // CHECK-COUNT-3: scf.for
+ // CHECK-COUNT-3: tensor.extract_slice
+ // CHECK: linalg.matmul
+ // CHECK-SAME: -> tensor<4x4xf32>
+  %0 = linalg.matmul {test.attrA}
+ ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>)
+ outs(%arg2: tensor<128x128xf32>)
+ -> tensor<128x128xf32>
+
+ // This operation is marked for tiling and vectorization.
+ // Note that the loop-invariant read is hoisted out of the innermost loop.
+ // CHECK: scf.for
+ // CHECK: scf.for
+ // CHECK: vector.transfer_read
+ // CHECK: scf.for
+ // CHECK: vector.transfer_read
+ // CHECK: vector.transfer_read
+ // CHECK: vector.contract
+ // CHECK-NOT: linalg.matmul
+ // CHECK: vector.transfer_write
+  %1 = linalg.matmul {test.attrA, test.attrC}
+ ins(%arg3, %arg4: tensor<128x128xf32>, tensor<128x128xf32>)
+ outs(%arg5: tensor<128x128xf32>)
+ -> tensor<128x128xf32>
+
+ // This operation is marked for vectorization only.
+ // CHECK-NOT: scf.for
+ // CHECK-COUNT-3: vector.transfer_read
+ // CHECK: vector.contract
+ // CHECK-SAME: into vector<128x128xf32>
+ // CHECK: vector.transfer_write
+  %2 = linalg.matmul {test.attrC}
+ ins(%0, %1: tensor<128x128xf32>, tensor<128x128xf32>)
+ outs(%arg6: tensor<128x128xf32>)
+ -> tensor<128x128xf32>
+
+ return %2 : tensor<128x128xf32>
+}
+
+// Match matmul operations inside @matmul_tensors with test.attrA set.
+pdl.pattern @pdl_target_attrA : benefit(1) {
+ %args = operands
+ %results = types
+ %attr = attribute
+  %0 = operation "linalg.matmul"(%args : !pdl.range<value>) {"test.attrA" = %attr} -> (%results : !pdl.range<type>)
+ apply_native_constraint "nestedInFunc"[@matmul_tensors](%0 : !pdl.operation)
+ // TODO: we don't want this, but it is the required terminator for pdl.pattern
+ rewrite %0 with "iree_linalg_transform.apply"
+}
+
+// Match matmul operations inside @matmul_tensors with test.attrC set.
+pdl.pattern @pdl_target_attrC : benefit(1) {
+ %args = operands
+ %results = types
+ %attr = attribute
+  %0 = operation "linalg.matmul"(%args : !pdl.range<value>) {"test.attrC" = %attr} -> (%results : !pdl.range<type>)
+ apply_native_constraint "nestedInFunc"[@matmul_tensors](%0 : !pdl.operation)
+ // TODO: we don't want this, but it is the required terminator for pdl.pattern
+ rewrite %0 with "iree_linalg_transform.apply"
+}
+
+iree_linalg_transform.sequence {
+ %0 = match @pdl_target_attrA
+ tile %0 {sizes = [4, 4, 4]}
+ %1 = match @pdl_target_attrC
+ vectorize %1
+}
+
+// -----
+
+// CHECK-LABEL: @vectorize_one
+func @vectorize_one(
+ %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32>,
+ %arg3: tensor<128x128xf32> {linalg.inplaceable = true})
+ -> tensor<128x128xf32> {
+ // CHECK: vector.contract
+ %0 = linalg.matmul {test.attrA}
+ ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>)
+ outs(%arg2: tensor<128x128xf32>)
+ -> tensor<128x128xf32>
+ // CHECK: linalg.matmul
+ %1 = linalg.matmul ins(%arg0, %0: tensor<128x128xf32>, tensor<128x128xf32>)
+ outs(%arg3: tensor<128x128xf32>)
+ -> tensor<128x128xf32>
+ return %1 : tensor<128x128xf32>
+}
+
+pdl.pattern @pdl_target : benefit(1) {
+ %args = operands
+ %results = types
+ %attr = attribute
+  %0 = operation "linalg.matmul"(%args : !pdl.range<value>) {"test.attrA" = %attr} -> (%results : !pdl.range<type>)
+ apply_native_constraint "nestedInFunc"[@vectorize_one](%0 : !pdl.operation)
+ // TODO: we don't want this, but it is the required terminator for pdl.pattern
+ rewrite %0 with "iree_linalg_transform.apply"
+}
+
+iree_linalg_transform.sequence {
+ %0 = match @pdl_target
+ vectorize %0
+}
+
+
+// -----
+
+// CHECK-LABEL: @vectorize_all
+func @vectorize_all(
+ %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32>,
+ %arg3: tensor<128x128xf32> {linalg.inplaceable = true})
+ -> tensor<128x128xf32> {
+ // CHECK: vector.contract
+ %0 = linalg.matmul {test.attrA}
+ ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>)
+ outs(%arg2: tensor<128x128xf32>)
+ -> tensor<128x128xf32>
+ // CHECK: vector.contract
+ %1 = linalg.matmul ins(%arg0, %0: tensor<128x128xf32>, tensor<128x128xf32>)
+ outs(%arg3: tensor<128x128xf32>)
+ -> tensor<128x128xf32>
+ return %1 : tensor<128x128xf32>
+}
+
+iree_linalg_transform.sequence {
+ vectorize
+}
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/single-tiling-full-script.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/single-tiling-full-script.mlir
new file mode 100644
index 0000000..adffa86
--- /dev/null
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/single-tiling-full-script.mlir
@@ -0,0 +1,33 @@
+// RUN: iree-dialects-opt -linalg-interp-transforms %s | FileCheck %s
+
+// CHECK-LABEL: func @matmul_tensors
+// CHECK-NOT: linalg
+// CHECK: llvm
+func @matmul_tensors(
+ %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32> { linalg.inplaceable = true})
+ -> tensor<128x128xf32> {
+ %0 = linalg.matmul ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>)
+ outs(%arg2: tensor<128x128xf32>)
+ -> tensor<128x128xf32>
+
+ return %0 : tensor<128x128xf32>
+}
+
+
+pdl.pattern @pdl_target : benefit(1) {
+ %args = operands
+ %results = types
+ %0 = pdl.operation "linalg.matmul"(%args : !pdl.range<value>) -> (%results : !pdl.range<type>)
+ apply_native_constraint "nestedInFunc"[@matmul_tensors](%0 : !pdl.operation)
+ // TODO: we don't want this, but it is the required terminator for pdl.pattern
+ rewrite %0 with "iree_linalg_transform.apply"
+}
+
+iree_linalg_transform.sequence {
+ %0 = match @pdl_target
+ %1 = tile %0 {sizes = [4, 4, 4]}
+ %2 = vectorize %1 {vectorize_padding = true}
+ bufferize
+ lower_vectors { multireduction_lowering = "innerreduce"}
+ lower_to_llvm
+}
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/tile-interchange.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/tile-interchange.mlir
new file mode 100644
index 0000000..88286aa
--- /dev/null
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/tile-interchange.mlir
@@ -0,0 +1,72 @@
+// RUN: iree-dialects-opt -linalg-interp-transforms -split-input-file %s | FileCheck %s
+
+#map0 = affine_map<(d0, d1, d2) -> (d0, d2)>
+#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
+#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
+
+// Check that vectorization applies after interchange+tiling.
+
+// CHECK-LABEL: @matmul_021
+// CHECK-NOT: linalg.generic
+// CHECK: vector.contract
+func public @matmul_021(%arg0: tensor<39x154xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg1: tensor<154x5xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg2: tensor<39x5xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true}) -> tensor<39x5xf32> attributes {passthrough = ["noinline", ["target-cpu", "skylake-avx512"], ["prefer-vector-width", "512"]]} {
+ %0 = linalg.generic {indexing_maps = [#map0, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%arg0, %arg1 : tensor<39x154xf32>, tensor<154x5xf32>) outs(%arg2 : tensor<39x5xf32>) {
+ ^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
+ %1 = arith.mulf %arg3, %arg4 : f32
+ %2 = arith.addf %arg5, %1 : f32
+ linalg.yield %2 : f32
+ } -> tensor<39x5xf32>
+ return %0 : tensor<39x5xf32>
+}
+
+pdl.pattern @target_pattern : benefit(1) {
+ %0 = operands
+ %1 = types
+ %2 = operation "linalg.generic"(%0 : !pdl.range<value>) -> (%1 : !pdl.range<type>)
+ apply_native_constraint "nestedInFunc" [@matmul_021](%2 : !pdl.operation)
+ rewrite %2 with "iree_linalg_transform.apply"
+}
+
+iree_linalg_transform.sequence {
+ %0 = match @target_pattern
+ %1 = tile %0 {interchange = [0, 2, 1], sizes = [3, 5, 14]}
+ %2 = tile %1 {sizes = [3, 5, 2]}
+ %3 = vectorize %2 {vectorize_padding = true}
+}
+
+
+// -----
+
+#map0 = affine_map<(d0, d1, d2) -> (d0, d2)>
+#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
+#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
+
+// Check that vectorization applies after interchange+tiling.
+
+// CHECK-LABEL: @matmul_210
+// CHECK-NOT: linalg.generic
+// CHECK: vector.contract
+func public @matmul_210(%arg0: tensor<39x154xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg1: tensor<154x5xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg2: tensor<39x5xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true}) -> tensor<39x5xf32> attributes {passthrough = ["noinline", ["target-cpu", "skylake-avx512"], ["prefer-vector-width", "512"]]} {
+ %0 = linalg.generic {indexing_maps = [#map0, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%arg0, %arg1 : tensor<39x154xf32>, tensor<154x5xf32>) outs(%arg2 : tensor<39x5xf32>) {
+ ^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
+ %1 = arith.mulf %arg3, %arg4 : f32
+ %2 = arith.addf %arg5, %1 : f32
+ linalg.yield %2 : f32
+ } -> tensor<39x5xf32>
+ return %0 : tensor<39x5xf32>
+}
+
+pdl.pattern @target_pattern : benefit(1) {
+ %0 = operands
+ %1 = types
+ %2 = operation "linalg.generic"(%0 : !pdl.range<value>) -> (%1 : !pdl.range<type>)
+ apply_native_constraint "nestedInFunc" [@matmul_210](%2 : !pdl.operation)
+ rewrite %2 with "iree_linalg_transform.apply"
+}
+
+iree_linalg_transform.sequence {
+ %0 = match @target_pattern
+ %1 = tile %0 {interchange = [2, 1, 0], sizes = [3, 5, 14]}
+ %2 = tile %1 {sizes = [3, 5, 2]}
+ %3 = vectorize %2 {vectorize_padding = true}
+}
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/tile.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/tile.mlir
new file mode 100644
index 0000000..ba94d44
--- /dev/null
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/tile.mlir
@@ -0,0 +1,44 @@
+// RUN: iree-dialects-opt -linalg-interp-transforms %s | FileCheck %s
+
+// CHECK-LABEL: func @matmul_tensors(
+// CHECK-SAME: %[[TA:[0-9a-z]+]]: tensor<128x128xf32>
+// CHECK-SAME: %[[TB:[0-9a-z]+]]: tensor<128x128xf32>
+// CHECK-SAME: %[[TC:[0-9a-z]+]]: tensor<128x128xf32>
+// CHECK-SAME: -> tensor<128x128xf32> {
+func @matmul_tensors(
+ %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32> { linalg.inplaceable = true})
+ -> tensor<128x128xf32> {
+// CHECK: %[[TD0:.*]] = scf.for {{.*}} to {{.*}} step {{.*}} iter_args(%[[TC0:.*]] = %[[TC]]) -> (tensor<128x128xf32>) {
+// CHECK: %[[TD1:.*]] = scf.for {{.*}} to {{.*}} step {{.*}} iter_args(%[[TC1:.*]] = %[[TC0]]) -> (tensor<128x128xf32>) {
+// CHECK: %[[TD2:.*]] = scf.for {{.*}} to {{.*}} step {{.*}} iter_args(%[[TC2:.*]] = %[[TC1]]) -> (tensor<128x128xf32>) {
+// CHECK: %[[sTA:.*]] = tensor.extract_slice %[[TA]][{{.*}}] : tensor<128x128xf32> to tensor<4x4xf32>
+// CHECK: %[[sTB:.*]] = tensor.extract_slice %[[TB]][{{.*}}] : tensor<128x128xf32> to tensor<4x4xf32>
+// CHECK: %[[sTC:.*]] = tensor.extract_slice %[[TC2]][{{.*}}] : tensor<128x128xf32> to tensor<4x4xf32>
+// CHECK: %[[sTD:.*]] = linalg.matmul {{.*}} ins(%[[sTA]], %[[sTB]] : tensor<4x4xf32>, tensor<4x4xf32>)
+// CHECK-SAME: outs(%[[sTC]] : tensor<4x4xf32>) -> tensor<4x4xf32>
+// CHECK: %[[TD:.*]] = tensor.insert_slice %[[sTD]] into %[[TC2]][{{.*}}] : tensor<4x4xf32> into tensor<128x128xf32>
+// CHECK: scf.yield %[[TD]] : tensor<128x128xf32>
+// CHECK: scf.yield %[[TD2]] : tensor<128x128xf32>
+// CHECK: scf.yield %[[TD1]] : tensor<128x128xf32>
+ %0 = linalg.matmul ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>)
+ outs(%arg2: tensor<128x128xf32>)
+ -> tensor<128x128xf32>
+
+// CHECK: return %[[TD0]] : tensor<128x128xf32>
+ return %0 : tensor<128x128xf32>
+}
+
+
+pdl.pattern @pdl_target : benefit(1) {
+ %args = operands
+ %results = types
+ %0 = operation "linalg.matmul"(%args : !pdl.range<value>) -> (%results : !pdl.range<type>)
+ apply_native_constraint "nestedInFunc"[@matmul_tensors](%0 : !pdl.operation)
+ // TODO: we don't want this, but it is the required terminator for pdl.pattern
+ rewrite %0 with "iree_linalg_transform.apply"
+}
+
+iree_linalg_transform.sequence {
+ %0 = match @pdl_target
+ tile %0 {sizes = [4, 4, 4]}
+}
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/vectorize-transforms.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/vectorize-transforms.mlir
new file mode 100644
index 0000000..60864ee
--- /dev/null
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/vectorize-transforms.mlir
@@ -0,0 +1,16 @@
+// This test only checks that the content of the file parses.
+// RUN: iree-dialects-opt %s
+
+pdl.pattern @pdl_target : benefit(1) {
+ %args = operands
+ %results = types
+ %0 = operation "linalg.matmul"(%args : !pdl.range<value>) -> (%results : !pdl.range<type>)
+ apply_native_constraint "nestedInFunc"[@matmul_tensors](%0 : !pdl.operation)
+ // TODO: we don't want this, but it is the required terminator for pdl.pattern
+ rewrite %0 with "iree_linalg_transform.apply"
+}
+
+iree_linalg_transform.sequence {
+ %0 = match @pdl_target
+ vectorize %0 {vectorize_padding = true}
+}
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/vectorize.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/vectorize.mlir
new file mode 100644
index 0000000..303ff83
--- /dev/null
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/vectorize.mlir
@@ -0,0 +1,21 @@
+// RUN: iree-dialects-opt -linalg-interp-transforms -linalg-transform-file-name=%p/vectorize-transforms.mlir %s | FileCheck %s
+
+// CHECK-LABEL: func @matmul_tensors(
+// CHECK-SAME: %[[TA:[0-9a-z]+]]: tensor<128x128xf32>
+// CHECK-SAME: %[[TB:[0-9a-z]+]]: tensor<128x128xf32>
+// CHECK-SAME: %[[TC:[0-9a-z]+]]: tensor<128x128xf32>
+// CHECK-SAME: -> tensor<128x128xf32> {
+func @matmul_tensors(
+ %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32> { linalg.inplaceable = true})
+ -> tensor<128x128xf32> {
+ // CHECK: %[[VA:.*]] = vector.transfer_read %[[TA]]
+ // CHECK: %[[VB:.*]] = vector.transfer_read %[[TB]]
+ // CHECK: %[[VC:.*]] = vector.transfer_read %[[TC]]
+ // CHECK: %[[VCU:.*]] = vector.contract {{.*}} %[[VA]], %[[VB]], %[[VC]]
+ // CHECK: vector.transfer_write %[[VCU]], %[[TC]]
+ %0 = linalg.matmul ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>)
+ outs(%arg2: tensor<128x128xf32>)
+ -> tensor<128x128xf32>
+
+ return %0 : tensor<128x128xf32>
+}