Remove linalg.softmax-related tests from LinalgExt. (#15897)
The op has been upstreamed, and all of these tests are covered upstream
(through the structured op interface). This revision deletes them to
reduce the maintenance burden.
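
For reference, the basic form of the op, taken from the deleted
roundtrip test below, is unchanged upstream, where its verification,
roundtripping, and tiling are now exercised:

  func.func @softmax(%arg0: tensor<2x16x32xf32>) -> tensor<2x16x32xf32> {
    %0 = tensor.empty() : tensor<2x16x32xf32>
    // Softmax is computed along the reduction dimension given by
    // dimension(2), the innermost axis of the 2x16x32 input.
    %1 = linalg.softmax dimension(2)
           ins(%arg0 : tensor<2x16x32xf32>)
           outs(%0 : tensor<2x16x32xf32>) -> tensor<2x16x32xf32>
    return %1 : tensor<2x16x32xf32>
  }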
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/invalid.mlir b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/invalid.mlir
index 4653251..5c0e0a5 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/invalid.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/invalid.mlir
@@ -723,15 +723,6 @@
// -----
-func.func @illegal_softmax_output_shape(%arg0: tensor<2x16x32xf32>) -> tensor<2x16xf32> {
- %0 = tensor.empty() : tensor<2x16xf32>
- // expected-error @+1 {{incompatible output shape}}
- %1 = linalg.softmax dimension(2) ins(%arg0 : tensor<2x16x32xf32>) outs(%0: tensor<2x16xf32>) -> tensor<2x16xf32>
- return %1 : tensor<2x16xf32>
-}
-
-// -----
-
func.func @illegal_attention_inputs(%query: tensor<6x12x20x8xf32>, %key: tensor<6x12x20x8xf32>, %value: tensor<6x12x20x8xf32>) {
%0 = tensor.empty() : tensor<6x12x20x8xf32>
// expected-error @+1 {{'iree_linalg_ext.attention' op expected query to have rank 3 but found 4}}
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/roundtrip.mlir b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/roundtrip.mlir
index d9e627b..7a3309e 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/roundtrip.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/roundtrip.mlir
@@ -1081,20 +1081,6 @@
// -----
-func.func @softmax(%arg0: tensor<2x16x32xf32>) -> tensor<2x16x32xf32> {
- %0 = tensor.empty() : tensor<2x16x32xf32>
- %1 = linalg.softmax dimension(2) ins(%arg0 : tensor<2x16x32xf32>) outs(%0: tensor<2x16x32xf32>) -> tensor<2x16x32xf32>
- return %1 : tensor<2x16x32xf32>
-}
-// CHECK: func.func @softmax(%[[ARG0:[a-zA-Z0-9_]+]]: tensor<2x16x32xf32>) -> tensor<2x16x32xf32> {
-// CHECK: %[[D0:.+]] = tensor.empty() : tensor<2x16x32xf32>
-// CHECK: %[[D1:.+]] = linalg.softmax dimension(2) ins(%[[ARG0]] : tensor<2x16x32xf32>) outs(%[[D0]] :
-// CHECK-SAME: tensor<2x16x32xf32>) -> tensor<2x16x32xf32>
-// CHECK: return %[[D1]] : tensor<2x16x32xf32>
-// CHECK: }
-
-// -----
-
func.func @attention(%query: tensor<192x1024x64xf32>, %key: tensor<192x1024x64xf32>, %value: tensor<192x1024x64xf32>) -> tensor<192x1024x64xf32> {
%0 = tensor.empty() : tensor<192x1024x64xf32>
%1 = iree_linalg_ext.attention ins(%query, %key, %value : tensor<192x1024x64xf32>, tensor<192x1024x64xf32>, tensor<192x1024x64xf32>) outs(%0 : tensor<192x1024x64xf32>) -> tensor<192x1024x64xf32>
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/tiling.mlir b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/tiling.mlir
index 1aa7269..f86e5a3 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/tiling.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/tiling.mlir
@@ -1154,95 +1154,6 @@
// -----
-func.func @softmax(%arg0: tensor<16x64x256xf32>) -> tensor<16x64x256xf32> {
- %0 = tensor.empty() : tensor<16x64x256xf32>
- %1 = linalg.softmax {__internal_linalg_transform__ = "distribute_input"}
- dimension(1) ins(%arg0 : tensor<16x64x256xf32>) outs(%0 : tensor<16x64x256xf32>) -> tensor<16x64x256xf32>
- return %1 : tensor<16x64x256xf32>
-}
-// CHECK-DAG: #[[MAP:.+]] = affine_map<()[s0] -> (s0 * 10)>
-// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0)[s0, s1] -> (10, -d0 + s1)>
-// CHECK-DAG: #[[MAP2:.+]] = affine_map<()[s0] -> (s0 * 30)>
-// CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0)[s0, s1] -> (30, -d0 + s1)>
-// CHECK: func.func @softmax(%[[ARG0:[a-zA-Z0-9_]+]]: tensor<16x64x256xf32>) -> tensor<16x64x256xf32> {
-// CHECK-DAG: %[[C30:.+]] = arith.constant 30 : index
-// CHECK-DAG: %[[C256:.+]] = arith.constant 256 : index
-// CHECK-DAG: %[[C10:.+]] = arith.constant 10 : index
-// CHECK-DAG: %[[C16:.+]] = arith.constant 16 : index
-// CHECK-DAG: %[[D0:.+]] = tensor.empty() : tensor<16x64x256xf32>
-// CHECK-DAG: %[[D1:.+]] = iree_input.dispatch.workgroup.id[0] : index
-// CHECK-DAG: %[[D2:.+]] = iree_input.dispatch.workgroup.count[0] : index
-// CHECK-DAG: %[[D3:.+]] = iree_input.dispatch.workgroup.id[1] : index
-// CHECK-DAG: %[[D4:.+]] = iree_input.dispatch.workgroup.count[1] : index
-// CHECK-DAG: %[[D5:.+]] = affine.apply #[[MAP]]()[%[[D3]]]
-// CHECK-DAG: %[[D6:.+]] = affine.apply #[[MAP]]()[%[[D4]]]
-// CHECK: %[[D7:.+]] = scf.for %[[ARG1:[a-zA-Z0-9_]+]] = %[[D5]] to %[[C16]] step %[[D6]]
-// CHECK-SAME: iter_args(%[[ARG2:[a-zA-Z0-9_]+]] = %[[D0]]) -> (tensor<16x64x256xf32>) {
-// CHECK-DAG: %[[D8:.+]] = affine.min #[[MAP1]](%[[ARG1]])[%[[C10]], %[[C16]]]
-// CHECK-DAG: %[[D9:.+]] = affine.apply #[[MAP2]]()[%[[D1]]]
-// CHECK-DAG: %[[D10:.+]] = affine.apply #[[MAP2]]()[%[[D2]]]
-// CHECK: %[[D11:.+]] = scf.for %[[ARG3:[a-zA-Z0-9_]+]] = %[[D9]] to %[[C256]] step %[[D10]]
-// CHECK-SAME: iter_args(%[[ARG4:[a-zA-Z0-9_]+]] = %[[ARG2]]) -> (tensor<16x64x256xf32>) {
-// CHECK-DAG: %[[D12:.+]] = affine.min #[[MAP3]](%[[ARG3]])[%[[C30]], %[[C256]]]
-// CHECK: %[[EXTRACTED_SLICE:.+]] = tensor.extract_slice %[[ARG0]][%[[ARG1]], 0, %[[ARG3]]] [%[[D8]],
-// CHECK-SAME: 64, %[[D12]]] [1, 1, 1] : tensor<16x64x256xf32> to tensor<?x64x?xf32>
-// CHECK: %[[EXTRACTED_SLICE_0:.+]] = tensor.extract_slice %[[D0]][%[[ARG1]], 0, %[[ARG3]]] [%[[D8]],
-// CHECK-SAME: 64, %[[D12]]] [1, 1, 1] : tensor<16x64x256xf32> to tensor<?x64x?xf32>
-// CHECK: %[[D13:.+]] = linalg.softmax {__internal_linalg_transform__ = "distribute_output"}
-// CHECK-SAME: dimension(1) ins(%[[EXTRACTED_SLICE]] : tensor<?x64x?xf32>) outs(%[[EXTRACTED_SLICE_0]] :
-// CHECK-SAME: tensor<?x64x?xf32>) -> tensor<?x64x?xf32>
-// CHECK: %[[INSERTED_SLICE:.+]] = tensor.insert_slice %[[D13]] into %[[ARG4]][%[[ARG1]], 0, %[[ARG3]]]
-// CHECK-SAME: [%[[D8]], 64, %[[D12]]] [1, 1, 1] : tensor<?x64x?xf32> into tensor<16x64x256xf32>
-// CHECK: scf.yield %[[INSERTED_SLICE]] : tensor<16x64x256xf32>
-// CHECK: }
-// CHECK: scf.yield %[[D11]] : tensor<16x64x256xf32>
-// CHECK: }
-// CHECK: return %[[D7]] : tensor<16x64x256xf32>
-// CHECK: }
-
-// -----
-
-func.func @softmax_memref(%arg0: memref<16x64x256xf32>, %arg1: memref<16x64x256xf32>) {
- linalg.softmax {__internal_linalg_transform__ = "distribute_input"}
- dimension(1) ins(%arg0 : memref<16x64x256xf32>) outs(%arg1 : memref<16x64x256xf32>)
- return
-}
-// CHECK-DAG: #[[MAP:.+]] = affine_map<()[s0] -> (s0 * 10)>
-// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0)[s0, s1] -> (10, -d0 + s1)>
-// CHECK-DAG: #[[MAP2:.+]] = affine_map<()[s0] -> (s0 * 30)>
-// CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0)[s0, s1] -> (30, -d0 + s1)>
-// CHECK: func.func @softmax_memref(%[[ARG0:[a-zA-Z0-9_]+]]: memref<16x64x256xf32>, %[[ARG1:[a-zA-Z0-9_]+]]:
-// CHECK-SAME: memref<16x64x256xf32>) {
-// CHECK-DAG: %[[C30:.+]] = arith.constant 30 : index
-// CHECK-DAG: %[[C256:.+]] = arith.constant 256 : index
-// CHECK-DAG: %[[C10:.+]] = arith.constant 10 : index
-// CHECK-DAG: %[[C16:.+]] = arith.constant 16 : index
-// CHECK: %[[D0:.+]] = iree_input.dispatch.workgroup.id[0] : index
-// CHECK: %[[D1:.+]] = iree_input.dispatch.workgroup.count[0] : index
-// CHECK: %[[D2:.+]] = iree_input.dispatch.workgroup.id[1] : index
-// CHECK: %[[D3:.+]] = iree_input.dispatch.workgroup.count[1] : index
-// CHECK-DAG: %[[D4:.+]] = affine.apply #[[MAP]]()[%[[D2]]]
-// CHECK-DAG: %[[D5:.+]] = affine.apply #[[MAP]]()[%[[D3]]]
-// CHECK: scf.for %[[ARG2:[a-zA-Z0-9_]+]] = %[[D4]] to %[[C16]] step %[[D5]] {
-// CHECK-DAG: %[[D6:.+]] = affine.min #[[MAP1]](%[[ARG2]])[%[[C10]], %[[C16]]]
-// CHECK-DAG: %[[D7:.+]] = affine.apply #[[MAP2]]()[%[[D0]]]
-// CHECK-DAG: %[[D8:.+]] = affine.apply #[[MAP2]]()[%[[D1]]]
-// CHECK: scf.for %[[ARG3:[a-zA-Z0-9_]+]] = %[[D7]] to %[[C256]] step %[[D8]] {
-// CHECK-DAG: %[[D9:.+]] = affine.min #[[MAP3]](%[[ARG3]])[%[[C30]], %[[C256]]]
-// CHECK: %[[SUBVIEW:.+]] = memref.subview %[[ARG0]][%[[ARG2]], 0, %[[ARG3]]] [%[[D6]], 64, %[[D9]]]
-// CHECK-SAME: [1, 1, 1] : memref<16x64x256xf32> to memref<?x64x?xf32, strided<[16384, 256, 1], offset: ?>>
-// CHECK: %[[SUBVIEW_0:.+]] = memref.subview %[[ARG1]][%[[ARG2]], 0, %[[ARG3]]] [%[[D6]], 64, %[[D9]]]
-// CHECK-SAME: [1, 1, 1] : memref<16x64x256xf32> to memref<?x64x?xf32, strided<[16384, 256, 1], offset: ?>>
-// CHECK: linalg.softmax {__internal_linalg_transform__ = "distribute_output"} dimension(1)
-// CHECK-SAME: ins(%[[SUBVIEW]] : memref<?x64x?xf32, strided<[16384, 256, 1], offset: ?>>) outs(%[[SUBVIEW_0]] :
-// CHECK-SAME: memref<?x64x?xf32, strided<[16384, 256, 1], offset: ?>>)
-// CHECK: }
-// CHECK: }
-// CHECK: return
-// CHECK: }
-
-// -----
-
func.func @attention(%query: tensor<192x1024x64xf32>, %key: tensor<192x1024x64xf32>, %value: tensor<192x1024x64xf32>) -> tensor<192x1024x64xf32> {
%0 = tensor.empty() : tensor<192x1024x64xf32>
%1 = iree_linalg_ext.attention {__internal_linalg_transform__ = "tiling_attention"} ins(%query, %key, %value : tensor<192x1024x64xf32>, tensor<192x1024x64xf32>, tensor<192x1024x64xf32>) outs(%0 : tensor<192x1024x64xf32>) -> tensor<192x1024x64xf32>