Integrate llvm-project to 25cd6fba983a and bump dependencies. (#9094)

* llvm-project: 25cd6fba983a
* mlir-hlo: 900a4cda2b27b5103587d08d3bf8b9cc4a1b8fa6
* tensorflow: cc0f90465bbc60243fb00f7d96b931f2e33000bd
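
This integrate picks up upstream MLIR's move of FuncOp out of the builtin
dialect and into the new `func` dialect, which changed the op's assembly
spelling from `func` to `func.func`. The bulk of the diff below is a
mechanical lit-test update for the new spelling: every `func @...`
definition and the matching `// CHECK:` / `// CHECK-LABEL:` patterns are
rewritten to `func.func @...`. A minimal before/after sketch (the function
name and body here are illustrative, not taken from any one test):

    // Before the integrate: FuncOp printed without a dialect prefix.
    func @example(%arg0: tensor<4xf32>) -> tensor<4xf32> {
      return %arg0 : tensor<4xf32>
    }

    // After the integrate: FuncOp lives in the func dialect and prints as
    // func.func. Ops in the body such as return keep their short form
    // because func.func sets func as the default dialect for its region,
    // which is why the tests below only touch the function declarations.
    func.func @example(%arg0: tensor<4xf32>) -> tensor<4xf32> {
      return %arg0 : tensor<4xf32>
    }
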
diff --git a/compiler/src/iree/compiler/API/python/test/tools/compiler_core_test.py b/compiler/src/iree/compiler/API/python/test/tools/compiler_core_test.py
index 9fff6a9..be89284 100644
--- a/compiler/src/iree/compiler/API/python/test/tools/compiler_core_test.py
+++ b/compiler/src/iree/compiler/API/python/test/tools/compiler_core_test.py
@@ -14,7 +14,7 @@
import iree.compiler.tools
SIMPLE_MUL_ASM = """
-func @simple_mul(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32> {
+func.func @simple_mul(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32> {
%0 = "mhlo.multiply"(%arg0, %arg1) {name = "mul.1"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
return %0 : tensor<4xf32>
}
diff --git a/compiler/src/iree/compiler/API/test/compile-mhlo-test-main.c b/compiler/src/iree/compiler/API/test/compile-mhlo-test-main.c
index dd44804..55f8ffb 100644
--- a/compiler/src/iree/compiler/API/test/compile-mhlo-test-main.c
+++ b/compiler/src/iree/compiler/API/test/compile-mhlo-test-main.c
@@ -112,7 +112,7 @@
int main(int argc, char** argv) {
// MLIR code that we will compile
iree_string_view_t mlir_code = iree_make_cstring_view(
- "func @simple_mul(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> "
+ "func.func @simple_mul(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> "
"tensor<4xf32>\n"
" {\n"
" %0 = \"mhlo.multiply\"(%arg0, %arg1) : "
diff --git a/compiler/src/iree/compiler/Bindings/Native/Transforms/test/wrap_entry_points.mlir b/compiler/src/iree/compiler/Bindings/Native/Transforms/test/wrap_entry_points.mlir
index a7777cc..b4ae7d0 100644
--- a/compiler/src/iree/compiler/Bindings/Native/Transforms/test/wrap_entry_points.mlir
+++ b/compiler/src/iree/compiler/Bindings/Native/Transforms/test/wrap_entry_points.mlir
@@ -1,6 +1,6 @@
// RUN: iree-opt --iree-abi-wrap-entry-points --split-input-file %s | FileCheck %s
-// CHECK-LABEL: func @dynamicEntry(
+// CHECK-LABEL: func.func @dynamicEntry(
// CHECK-SAME: %[[ARG0:.+]]: !hal.buffer_view, %[[ARG1:.+]]: !hal.buffer_view
// CHECK-SAME: -> (
// CHECK-SAME: !hal.buffer_view, !hal.buffer_view
@@ -19,7 +19,7 @@
// CHECK-NEXT: return %[[RET0_VIEW]], %[[RET1_VIEW]] : !hal.buffer_view, !hal.buffer_view
// CHECK-NEXT: }
-// CHECK-LABEL: func private @_dynamicEntry(
+// CHECK-LABEL: func.func private @_dynamicEntry(
func.func @dynamicEntry(%arg0: tensor<?x8x8x3xf32>, %arg1: tensor<?x8x8x3xf32>) ->
(tensor<?x8x8x3xf32>, tensor<?x8x8x3xf32>) {
%0 = "mhlo.add"(%arg0, %arg1) : (tensor<?x8x8x3xf32>, tensor<?x8x8x3xf32>) -> tensor<?x8x8x3xf32>
@@ -29,7 +29,7 @@
// -----
-// CHECK-LABEL: func @outputStorage(
+// CHECK-LABEL: func.func @outputStorage(
// CHECK-SAME: %[[ARG0:.+]]: !hal.buffer_view,
// CHECK-SAME: %[[RET1_STORAGE:.+]]: !hal.buffer
// CHECK-SAME: -> (
@@ -47,7 +47,7 @@
// CHECK-NEXT: return %[[RET0_VIEW]], %[[RET1_VIEW]] : !hal.buffer_view, !hal.buffer_view
// CHECK-NEXT: }
-// CHECK-LABEL: func private @_outputStorage(
+// CHECK-LABEL: func.func private @_outputStorage(
func.func @outputStorage(%arg0: tensor<?x8x8x3xf32>, %ret1: !hal.buffer {iree.abi.output = 1 : index}) ->
(tensor<?x8x8x3xf32>, tensor<?x8x8x3xf32>) {
%0 = "mhlo.add"(%arg0, %arg0) : (tensor<?x8x8x3xf32>, tensor<?x8x8x3xf32>) -> tensor<?x8x8x3xf32>
@@ -57,30 +57,30 @@
// -----
-// CHECK-LABEL: func @wrappedAlready
+// CHECK-LABEL: func.func @wrappedAlready
// CHECK-SAME: (%arg0: !hal.buffer_view) -> !hal.buffer_view
// CHECK-SAME: attributes {iree.abi.stub}
func.func @wrappedAlready(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
return %arg0 : !hal.buffer_view
}
-// CHECK-NOT: func @_wrappedAlready
+// CHECK-NOT: func.func @_wrappedAlready
// -----
// Tests that a function calling an exported function is redirected to the
// original unwrapped call.
-// CHECK: func @exportA(%arg0: !hal.buffer_view) -> !hal.buffer_view
+// CHECK: func.func @exportA(%arg0: !hal.buffer_view) -> !hal.buffer_view
// CHECK: call @_exportA
-// CHECK: func private @_exportA(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32>
+// CHECK: func.func private @_exportA(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32>
// CHECK: return %arg0
func.func @exportA(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32> {
return %arg0 : tensor<?x?xi32>
}
-// CHECK: func @exportB(%arg0: !hal.buffer_view) -> !hal.buffer_view
+// CHECK: func.func @exportB(%arg0: !hal.buffer_view) -> !hal.buffer_view
// CHECK: call @_exportB
-// CHECK: func private @_exportB(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32>
+// CHECK: func.func private @_exportB(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32>
// CHECK: call @_exportA
func.func @exportB(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32> {
%0 = call @exportA(%arg0) : (tensor<?x?xi32>) -> tensor<?x?xi32>
diff --git a/compiler/src/iree/compiler/Bindings/TFLite/Transforms/test/wrap_entry_points.mlir b/compiler/src/iree/compiler/Bindings/TFLite/Transforms/test/wrap_entry_points.mlir
index e5464f8..fa33748 100644
--- a/compiler/src/iree/compiler/Bindings/TFLite/Transforms/test/wrap_entry_points.mlir
+++ b/compiler/src/iree/compiler/Bindings/TFLite/Transforms/test/wrap_entry_points.mlir
@@ -14,7 +14,7 @@
-// CHECK-LABEL: func private @_tflite_dynamicEntry_calculate_shapes() {
+// CHECK-LABEL: func.func private @_tflite_dynamicEntry_calculate_shapes() {
// Only recalculate shapes if the shapes are dirty.
// CHECK: %[[IS_DIRTY:.+]] = util.global.load @_tflite_dynamicEntry_shapes_dirty : i1
@@ -52,7 +52,7 @@
-// CHECK-LABEL: func @_tflite_dynamicEntry_query_input_shape
+// CHECK-LABEL: func.func @_tflite_dynamicEntry_query_input_shape
// CHECK-SAME: (%[[INDEX:.+]]: index, %[[LIST:.+]]: !util.list<index>)
// Query input0 shape:
@@ -87,7 +87,7 @@
-// CHECK-LABEL: func @_tflite_dynamicEntry_resize_input_shape
+// CHECK-LABEL: func.func @_tflite_dynamicEntry_resize_input_shape
// CHECK-SAME: (%[[INDEX:.+]]: index, %[[LIST:.+]]: !util.list<index>)
// CHECK: %[[IS_0:.+]] = arith.cmpi eq, %[[INDEX]], %c0 : index
@@ -113,7 +113,7 @@
-// CHECK-LABEL: func @_tflite_dynamicEntry_query_output_shape
+// CHECK-LABEL: func.func @_tflite_dynamicEntry_query_output_shape
// CHECK-SAME: (%[[INDEX:.+]]: index, %[[LIST:.+]]: !util.list<index>)
// Recalculate shapes, if needed.
@@ -150,7 +150,7 @@
-// CHECK-LABEL: func @_tflite_main(
+// CHECK-LABEL: func.func @_tflite_main(
// CHECK-SAME: %[[IN0_BUFFER:.+]]: !hal.buffer {iree.identifier = "input0"},
// CHECK-SAME: %[[IN1_BUFFER:.+]]: !hal.buffer {iree.identifier = "input1"})
// CHECK-SAME: -> (
@@ -192,7 +192,7 @@
-// CHECK-LABEL: func private @dynamicEntry(
+// CHECK-LABEL: func.func private @dynamicEntry(
func.func @dynamicEntry(
%arg0: tensor<?x8x8x3xf32> {iree.identifier = "input0"},
%arg1: tensor<?x8x8x3xf32> {iree.identifier = "input1"}
@@ -210,7 +210,7 @@
// -----
-// CHECK-LABEL: func @_tflite_main(
+// CHECK-LABEL: func.func @_tflite_main(
// CHECK-SAME: %[[IN0_BUFFER:.+]]: !hal.buffer,
// CHECK-SAME: %[[IN1_BUFFER:.+]]: !hal.buffer)
// CHECK-SAME: -> (
diff --git a/compiler/src/iree/compiler/Codegen/Common/test/bufferize_copy_only_dispatches.mlir b/compiler/src/iree/compiler/Codegen/Common/test/bufferize_copy_only_dispatches.mlir
index 49d683b..c59185e 100644
--- a/compiler/src/iree/compiler/Codegen/Common/test/bufferize_copy_only_dispatches.mlir
+++ b/compiler/src/iree/compiler/Codegen/Common/test/bufferize_copy_only_dispatches.mlir
@@ -28,7 +28,7 @@
return
}
}
-// CHECK: func @tensor_insert_slice()
+// CHECK: func.func @tensor_insert_slice()
// CHECK-DAG: %[[SLICE_SIZE:.+]] = hal.interface.constant.load[0]
// CHECK-DAG: %[[DEST_OFFSET_Y:.+]] = hal.interface.constant.load[1]
// CHECK-DAG: %[[DEST_OFFSET_X:.+]] = hal.interface.constant.load[2]
@@ -58,7 +58,7 @@
return
}
}
-// CHECK-LABEL: func @UpSampling1D()
+// CHECK-LABEL: func.func @UpSampling1D()
// CHECK-DAG: %[[DEST:.+]] = hal.interface.binding.subspan set(0) binding(0)
// CHECK-DAG: %[[SOURCE:.+]] = hal.interface.binding.subspan set(0) binding(1)
// CHECK-DAG: %[[SOURCE_SUBVIEW:.+]] = memref.subview %[[SOURCE]][0, 0, 0] [2, 1, 3]
@@ -78,7 +78,7 @@
return
}
}
-// CHECK-LABEL: func @concatenate_cst()
+// CHECK-LABEL: func.func @concatenate_cst()
// CHECK-DAG: %[[CST:.+]] = arith.constant dense<0> : tensor<2x3xi32>
// CHECK-DAG: %[[ZERO:.+]] = bufferization.to_memref %[[CST]] : memref<2x3xi32>
// CHECK-DAG: %[[DEST_BINDING:.+]] = hal.interface.binding.subspan
diff --git a/compiler/src/iree/compiler/Codegen/Common/test/canonicalize_interface_load_store.mlir b/compiler/src/iree/compiler/Codegen/Common/test/canonicalize_interface_load_store.mlir
index ab32721..a8a2512 100644
--- a/compiler/src/iree/compiler/Codegen/Common/test/canonicalize_interface_load_store.mlir
+++ b/compiler/src/iree/compiler/Codegen/Common/test/canonicalize_interface_load_store.mlir
@@ -1,6 +1,6 @@
// RUN: iree-opt --split-input-file --iree-codegen-cleanup-buffer-alloc-view %s | FileCheck %s
-// CHECK-LABEL: func @fold_reshape()
+// CHECK-LABEL: func.func @fold_reshape()
func.func @fold_reshape() {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
@@ -18,7 +18,7 @@
// -----
-// CHECK-LABEL: func @dont_fold_reshape_with_not_full_load()
+// CHECK-LABEL: func.func @dont_fold_reshape_with_not_full_load()
func.func @dont_fold_reshape_with_not_full_load() {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
@@ -37,7 +37,7 @@
// -----
-// CHECK-LABEL: func @dont_fold_dynamic_reshape()
+// CHECK-LABEL: func.func @dont_fold_dynamic_reshape()
func.func @dont_fold_dynamic_reshape() {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
diff --git a/compiler/src/iree/compiler/Codegen/Common/test/convert_to_destination_passing_style.mlir b/compiler/src/iree/compiler/Codegen/Common/test/convert_to_destination_passing_style.mlir
index 7250857..629b35b 100644
--- a/compiler/src/iree/compiler/Codegen/Common/test/convert_to_destination_passing_style.mlir
+++ b/compiler/src/iree/compiler/Codegen/Common/test/convert_to_destination_passing_style.mlir
@@ -31,7 +31,7 @@
}
return
}
-// CHECK: func @matmul()
+// CHECK: func.func @matmul()
// CHECK-DAG: %[[LHS:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[RHS:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK-DAG: %[[INIT:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer)
@@ -81,7 +81,7 @@
}
return
}
-// CHECK: func @matmul_fill()
+// CHECK: func.func @matmul_fill()
// CHECK-DAG: %[[LHS:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[RHS:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK-DAG: %[[RESULT:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer)
@@ -130,7 +130,7 @@
}
return
}
-// CHECK: func @matmul_inplace()
+// CHECK: func.func @matmul_inplace()
// CHECK-DAG: %[[LHS:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[RHS:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK-DAG: %[[RESULT:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer)
@@ -159,7 +159,7 @@
flow.dispatch.tensor.store %3, %1, offsets = [0, 0], sizes = [3, 4], strides = [1, 1] : tensor<3x4xi32> -> !flow.dispatch.tensor<writeonly:3x4xi32>
return
}
-// CHECK: func @reshape_simple()
+// CHECK: func.func @reshape_simple()
// CHECK-DAG: %[[ARG0:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[RET0:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK: %[[SOURCE:.+]] = flow.dispatch.tensor.load %[[ARG0]]
@@ -190,7 +190,7 @@
flow.dispatch.tensor.store %5, %1, offsets = [0, 0], sizes = [3, 4], strides = [1, 1] : tensor<3x4xi32> -> !flow.dispatch.tensor<writeonly:3x4xi32>
return
}
-// CHECK: func @reshape_fused_source()
+// CHECK: func.func @reshape_fused_source()
// CHECK-DAG: %[[ARG0:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[RET0:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK: %[[TARGET:.+]] = flow.dispatch.tensor.load %[[RET0]]
@@ -227,7 +227,7 @@
flow.dispatch.tensor.store %4, %2, offsets = [0, 0], sizes = [3, 4], strides = [1, 1] : tensor<3x4xi32> -> !flow.dispatch.tensor<writeonly:3x4xi32>
return
}
-// CHECK: func @reshape_fused_source_and_copyout()
+// CHECK: func.func @reshape_fused_source_and_copyout()
// CHECK-DAG: %[[ARG0:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[RET0:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK-DAG: %[[RET1:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer)
@@ -264,7 +264,7 @@
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes = [12], strides = [1] : tensor<12xi32> -> !flow.dispatch.tensor<writeonly:12xi32>
return
}
-// CHECK: func @reshape_fused_target()
+// CHECK: func.func @reshape_fused_target()
// CHECK-DAG: %[[ARG0:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[RET0:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK-DAG: %[[SOURCE:.+]] = flow.dispatch.tensor.load %[[ARG0]]
@@ -313,7 +313,7 @@
}
return
}
-// CHECK: func @cast_followed_by_store()
+// CHECK: func.func @cast_followed_by_store()
// CHECK-DAG: %[[LHS:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[RHS:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK-DAG: %[[RESULT:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer)
@@ -385,7 +385,7 @@
}
return
}
-// CHECK: func @multi_result()
+// CHECK: func.func @multi_result()
// CHECK-DAG: %[[LHS:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[RHS:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK-DAG: %[[RESULT0:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer)
@@ -461,7 +461,7 @@
}
return
}
-// CHECK-LABEL: func @unused_ins_operand()
+// CHECK-LABEL: func.func @unused_ins_operand()
// CHECK-DAG: %[[IN:.+]] = hal.interface.binding.subspan set(0) binding(0)
// CHECK-DAG: %[[OUT:.+]] = hal.interface.binding.subspan set(0) binding(2)
// CHECK-DAG: %[[IN_VIEW:.+]] = flow.dispatch.tensor.load %[[IN]]
@@ -518,7 +518,7 @@
}
return
}
-// CHECK-LABEL: func @three_init_tensor_uses()
+// CHECK-LABEL: func.func @three_init_tensor_uses()
// CHECK: %[[OUTPUT:.+]] = hal.interface.binding.subspan set(0) binding(1)
// CHECK-NOT: linalg.init_tensor
// CHECK: %[[LOAD:.+]] = flow.dispatch.tensor.load %[[OUTPUT]]
@@ -571,14 +571,14 @@
}
return
}
-// CHECK-LABEL: func @fill_matmul_exp()
+// CHECK-LABEL: func.func @fill_matmul_exp()
// CHECK: %[[MATMUL:.+]] = linalg.matmul
// CHECK: linalg.generic
// CHECK-SAME: outs(%[[MATMUL]]
// -----
-func @cumsum__2x2x2x2x2x2x2() {
+func.func @cumsum__2x2x2x2x2x2x2() {
%cst = arith.constant dense<0.000000e+00> : tensor<2x2x2x2x2x2x2xf32>
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) offset(%c0) alignment(64) : !flow.dispatch.tensor<readonly:3x2x2x2x2x2x2xf32>
@@ -599,7 +599,7 @@
return
}
-// CHECK-LABEL: func @cumsum__2x2x2x2x2x2x2()
+// CHECK-LABEL: func.func @cumsum__2x2x2x2x2x2x2()
// CHECK-DAG: %[[CST:.+]] = arith.constant 0.000000e+00 : f32
// CHECK: %[[DST:.+]] = flow.dispatch.tensor.load {{.+}} !flow.dispatch.tensor<writeonly:2x2x2x2x2x2x2xf32> -> tensor<2x2x2x2x2x2x2xf32>
// CHECK: %[[FILL:.+]] = linalg.fill ins(%[[CST]] : f32) outs(%[[DST]]
@@ -608,7 +608,7 @@
// -----
-func @reduce_window_max_4x6xf32() {
+func.func @reduce_window_max_4x6xf32() {
%cst = arith.constant dense<0xFF800000> : tensor<2x2xf32>
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) offset(%c0) alignment(64) : !flow.dispatch.tensor<readonly:2x4x6xf32>
@@ -623,7 +623,7 @@
flow.dispatch.tensor.store %4, %1, offsets = [0, 0], sizes = [2, 2], strides = [1, 1] : tensor<2x2xf32> -> !flow.dispatch.tensor<writeonly:2x2xf32>
return
}
-// CHECK-LABEL: func @reduce_window_max_4x6xf32()
+// CHECK-LABEL: func.func @reduce_window_max_4x6xf32()
// CHECK-DAG: %[[CST:.+]] = arith.constant 0xFF800000 : f32
// CHECK: %[[DST:.+]] = flow.dispatch.tensor.load {{.+}} !flow.dispatch.tensor<writeonly:2x2xf32> -> tensor<2x2xf32>
// CHECK: %[[FILL:.+]] = linalg.fill ins(%[[CST]] : f32) outs(%[[DST]]
@@ -658,7 +658,7 @@
}
return
}
-// CHECK: func @linalg_ext_reverse_dim0()
+// CHECK: func.func @linalg_ext_reverse_dim0()
// CHECK-DAG: %[[IN:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[OUT:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK: scf.for %[[IV0:.+]] =
@@ -684,7 +684,7 @@
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [4], strides = [1] : tensor<4xi32> -> !flow.dispatch.tensor<readwrite:4xi32>
return
}
-// CHECK: func @sort1D()
+// CHECK: func.func @sort1D()
// CHECK-DAG: %[[BUF:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[IN:.+]] = flow.dispatch.tensor.load %[[BUF]]
// CHECK: %[[SORT:.+]] = iree_linalg_ext.sort
diff --git a/compiler/src/iree/compiler/Codegen/Common/test/dead_alloc.mlir b/compiler/src/iree/compiler/Codegen/Common/test/dead_alloc.mlir
index c289015..6a1448a 100644
--- a/compiler/src/iree/compiler/Codegen/Common/test/dead_alloc.mlir
+++ b/compiler/src/iree/compiler/Codegen/Common/test/dead_alloc.mlir
@@ -13,7 +13,7 @@
}
}
-// CHECK-LABEL: func @dead_alloc
+// CHECK-LABEL: func.func @dead_alloc
// CHECK-NOT: alloc
// CHECK-NOT: memref.subview
// CHECK-NOT: vector.transfer_write
diff --git a/compiler/src/iree/compiler/Codegen/Common/test/flatten_memref_subspan.mlir b/compiler/src/iree/compiler/Codegen/Common/test/flatten_memref_subspan.mlir
index 249682c..7e5bba7 100644
--- a/compiler/src/iree/compiler/Codegen/Common/test/flatten_memref_subspan.mlir
+++ b/compiler/src/iree/compiler/Codegen/Common/test/flatten_memref_subspan.mlir
@@ -7,7 +7,7 @@
}
// CHECK: #[[MAP:.+]] = affine_map<()[s0, s1, s2, s3] -> (s0 * 56 + s1 * 8 + s2 + s3 floordiv 4)>
-// CHECK: func @load_subspan_with_offset
+// CHECK: func.func @load_subspan_with_offset
// CHECK-SAME: (%[[OFFSET:.+]]: index, %[[I0:.+]]: index, %[[I1:.+]]: index, %[[I2:.+]]: index)
// CHECK-DAG: %[[ZERO:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[C336:.+]] = arith.constant 336 : index
@@ -25,7 +25,7 @@
}
// CHECK: #[[MAP:.+]] = affine_map<()[s0, s1, s2, s3] -> (s0 * 12 + s1 * 4 + s2 + s3 floordiv 4)>
-// CHECK: func @store_subspan_with_offset
+// CHECK: func.func @store_subspan_with_offset
// CHECK-SAME: (%[[VALUE:.+]]: f32, %[[OFFSET:.+]]: index, %[[I0:.+]]: index, %[[I1:.+]]: index, %[[I2:.+]]: index)
// CHECK-DAG: %[[ZERO:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[C24:.+]] = arith.constant 24 : index
@@ -42,7 +42,7 @@
}
// CHECK: #[[MAP:.+]] = affine_map<()[s0, s1, s2, s3] -> (s0 * 56 + s1 * 8 + s2 + s3 floordiv 16)>
-// CHECK: func @load_subspan_with_vector_element
+// CHECK: func.func @load_subspan_with_vector_element
// CHECK: affine.apply #[[MAP]]()
// -----
@@ -54,7 +54,7 @@
}
// CHECK: #[[MAP:.+]] = affine_map<()[s0, s1, s2, s3] -> (s0 * 56 + s1 * 8 + s2 + s3 floordiv 2)>
-// CHECK: func @load_subspan_with_16bit_element
+// CHECK: func.func @load_subspan_with_16bit_element
// CHECK: affine.apply #[[MAP]]()
// -----
@@ -68,7 +68,7 @@
// CHECK: #[[SIZE_MAP:.+]] = affine_map<()[s0] -> (s0 * 12)
// CHECK: #[[OFFSET_MAP:.+]] = affine_map<()[s0, s1, s2, s3] -> (s0 * 12 + s1 * 4 + s2 + s3 floordiv 4)>
-// CHECK: func @store_subspan_with_leading_dynamic_dim
+// CHECK: func.func @store_subspan_with_leading_dynamic_dim
// CHECK-SAME: (%[[VALUE:.+]]: f32, %[[OFFSET:.+]]: index, %[[I0:.+]]: index, %[[I1:.+]]: index, %[[I2:.+]]: index)
// CHECK: %[[C0:.+]] = arith.constant 0 : index
// CHECK: %[[DIM:.+]] = hal.interface.constant.load[0] : index
@@ -91,7 +91,7 @@
// CHECK: #[[SIZE_MAP:.+]] = affine_map<()[s0, s1, s2, s3] -> (((s0 * s1) * s2) * s3)>
// CHECK: #[[OFFSET_MAP:.+]] = affine_map<()[s0, s1, s2, s3, s4, s5, s6, s7] -> (s1 + (s3 + (s6 + s4 * s5) * s2) * s0 + s7 floordiv 4)>
-// CHECK: func @store_subspan_with_all_dynamic_dim
+// CHECK: func.func @store_subspan_with_all_dynamic_dim
// CHECK-SAME: (%[[VALUE:.+]]: f32, %[[OFFSET:.+]]: index, %[[I0:.+]]: index, %[[I1:.+]]: index, %[[I2:.+]]: index, %[[I3:.+]]: index)
// CHECK: %[[C0:.+]] = arith.constant 0 : index
// CHECK: %[[DIM0:.+]] = hal.interface.constant.load[0] : index
@@ -115,7 +115,7 @@
// CHECK: #[[SIZE_MAP:.+]] = affine_map<()[s0, s1] -> ((s0 * s1) * 32)>
// CHECK: #[[OFFSET_MAP:.+]] = affine_map<()[s0, s1, s2, s3, s4, s5] -> (s0 + s2 * 8 + ((s3 * 4 + s4) * s1) * 8 + s5 floordiv 4)>
-// CHECK: func @store_subspan_with_mixed_dynamic_dim
+// CHECK: func.func @store_subspan_with_mixed_dynamic_dim
// CHECK-SAME: (%[[VALUE:.+]]: f32, %[[OFFSET:.+]]: index, %[[I0:.+]]: index, %[[I1:.+]]: index, %[[I2:.+]]: index, %[[I3:.+]]: index)
// CHECK: %[[C0:.+]] = arith.constant 0 : index
// CHECK: %[[DIM0:.+]] = hal.interface.constant.load[0] : index
@@ -138,7 +138,7 @@
// CHECK: #[[SIZE_MAP:.+]] = affine_map<()[s0] -> (s0 * 12)
// CHECK: #[[OFFSET_MAP:.+]] = affine_map<()[s0, s1, s2, s3] -> (s0 * 12 + s1 * 4 + s2 + s3 floordiv 4)>
-// CHECK: func @store_subspan_with_flow_control
+// CHECK: func.func @store_subspan_with_flow_control
// CHECK-SAME: (%[[VALUE:.+]]: f32, %[[OFFSET:.+]]: index, %[[I0:.+]]: index, %[[I1:.+]]: index, %[[I2:.+]]: index)
// CHECK: %[[C0:.+]] = arith.constant 0 : index
// CHECK: %[[DIM:.+]] = hal.interface.constant.load[0] : index
@@ -158,7 +158,7 @@
}
// CHECK: #[[MAP:.+]] = affine_map<()[s0, s1, s2] -> (s0 * 12 + s1 * 4 + s2)>
-// CHECK: func @load_store_alloc_static
+// CHECK: func.func @load_store_alloc_static
// CHECK-SAME: (%[[VAL:.+]]: f32, %[[I0:.+]]: index, %[[I1:.+]]: index, %[[I2:.+]]: index)
// CHECK: %[[ALLOC:.+]] = memref.alloc() : memref<24xf32, 3>
// CHECK: %[[INDEX0:.+]] = affine.apply #[[MAP]]()[%[[I0]], %[[I1]], %[[I2]]]
@@ -179,7 +179,7 @@
// CHECK: #[[SIZE_MAP:.+]] = affine_map<()[s0, s1, s2] -> ((s0 * s1) * s2)>
// CHECK: #[[INDEX_MAP:.+]] = affine_map<()[s0, s1, s2, s3, s4] -> (s1 + (s4 + s2 * s3) * s0)>
-// CHECK: func @load_store_alloca_dynamic
+// CHECK: func.func @load_store_alloca_dynamic
// CHECK-SAME: (%[[VAL:.+]]: f32, %[[DIM0:.+]]: index, %[[DIM1:.+]]: index, %[[DIM2:.+]]: index, %[[I0:.+]]: index, %[[I1:.+]]: index, %[[I2:.+]]: index)
// CHECK: %[[SIZE:.+]] = affine.apply #[[SIZE_MAP]]()[%[[DIM0]], %[[DIM1]], %[[DIM2]]]
// CHECK: %[[ALLOC:.+]] = memref.alloca(%[[SIZE]]) : memref<?xf32>
@@ -199,7 +199,7 @@
}
// CHECK: #[[MAP:.+]] = affine_map<()[s0, s1] -> (s0 + s1 floordiv 4)>
-// CHECK: func @use_subspan_with_unrealized_conversion_cast
+// CHECK: func.func @use_subspan_with_unrealized_conversion_cast
// CHECK-SAME: (%[[OFFSET:.+]]: index, %[[I:.+]]: index)
// CHECK: %[[C0:.+]] = arith.constant 0 : index
// CHECK: %[[SUBSPAN:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) offset(%[[C0]]) : memref<?xf32>
@@ -217,7 +217,7 @@
// CHECK: #[[MAP:.+]] = affine_map<()[s0, s1, s2, s3] -> (s0 * 3 + s1 + s2 + s3)>
// CHECK: memref.global "private" constant @constant_3x3x1x1xf32 : memref<9xf32> = dense<[-1.000000e+00, 0.000000e+00, 1.000000e+00, -2.000000e+00, 0.000000e+00, 2.000000e+00, -1.000000e+00, 0.000000e+00, 1.000000e+00]>
-// CHECK: func @load_global_with_offset
+// CHECK: func.func @load_global_with_offset
// CHECK-SAME: (%[[I0:.+]]: index, %[[I1:.+]]: index, %[[I2:.+]]: index, %[[I3:.+]]: index)
// CHECK: %[[GLOBAL:.+]] = memref.get_global @constant_3x3x1x1xf32 : memref<9xf32>
// CHECK: %[[INDEX:.+]] = affine.apply #[[MAP]]()[%[[I0]], %[[I1]], %[[I2]], %[[I3]]]
@@ -235,7 +235,7 @@
}
// CHECK: #[[MAP:.+]] = affine_map<()[s0, s1, s2] -> (s0 * 56 + s1 * 8 + s2)>
-// CHECK: func @transfer_read_subspan_with_offset
+// CHECK: func.func @transfer_read_subspan_with_offset
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: index
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: index
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9_]+]]: index
@@ -255,7 +255,7 @@
}
// CHECK: #[[MAP:.+]] = affine_map<()[s0, s1, s2] -> (s0 * 56 + s1 * 8 + s2)>
-// CHECK: func @transfer_write_subspan_with_offset
+// CHECK: func.func @transfer_write_subspan_with_offset
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: index
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: index
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9_]+]]: index
@@ -276,7 +276,7 @@
}
// CHECK: #[[MAP:.+]] = affine_map<()[s0] -> (s0 floordiv 4)>
-// CHECK: func @load_store_rank_zero_subspan_with_offset
+// CHECK: func.func @load_store_rank_zero_subspan_with_offset
// CHECK-SAME: (%[[OFFSET:.+]]: index)
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
@@ -297,7 +297,7 @@
}
// CHECK: #[[MAP:.+]] = affine_map<()[s0, s1, s2] -> (s0 * 42 + s1 + s2 floordiv 4)>
-// CHECK: func @collapse_shape
+// CHECK: func.func @collapse_shape
// CHECK-SAME: (%[[OFFSET:.+]]: index, %[[I0:.+]]: index, %[[I1:.+]]: index)
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[SIZE:.+]] = arith.constant 840 : index
@@ -315,7 +315,7 @@
}
// CHECK: #[[MAP:.+]] = affine_map<()[s0, s1, s2, s3, s4] -> (s0 * 210 + s1 * 42 + s2 * 7 + s3 + s4 floordiv 4)>
-// CHECK: func @expand_shape
+// CHECK: func.func @expand_shape
// CHECK-SAME: (%[[OFFSET:.+]]: index, %[[I0:.+]]: index, %[[I1:.+]]: index, %[[I2:.+]]: index, %[[I3:.+]]: index)
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[SIZE:.+]] = arith.constant 840 : index
@@ -334,7 +334,7 @@
}
// CHECK: #[[MAP:.+]] = affine_map<()[s0, s1] -> (s0 + s1 floordiv 4)>
-// CHECK: func @load_store_rank_one_static_size_subspan_with_offset
+// CHECK: func.func @load_store_rank_one_static_size_subspan_with_offset
// CHECK-SAME: %[[OFFSET:.+]]: index, %[[I:.+]]: index
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[C32:.+]] = arith.constant 32 : index
diff --git a/compiler/src/iree/compiler/Codegen/Common/test/fold_affine_min_in_distributed_loops.mlir b/compiler/src/iree/compiler/Codegen/Common/test/fold_affine_min_in_distributed_loops.mlir
index faa54d5..c65af48 100644
--- a/compiler/src/iree/compiler/Codegen/Common/test/fold_affine_min_in_distributed_loops.mlir
+++ b/compiler/src/iree/compiler/Codegen/Common/test/fold_affine_min_in_distributed_loops.mlir
@@ -1,6 +1,6 @@
// RUN: iree-opt --split-input-file --iree-codegen-fold-affinemin-in-distributed-loops --mlir-print-local-scope %s | FileCheck %s
-// CHECK-LABEL: func @loop_distributed_to_workgroup_x
+// CHECK-LABEL: func.func @loop_distributed_to_workgroup_x
func.func @loop_distributed_to_workgroup_x() -> index {
%c0 = arith.constant 0 : index
// CHECK: %[[C32:.+]] = arith.constant 32 : index
@@ -22,7 +22,7 @@
// -----
-// CHECK-LABEL: func @loop_distributed_to_workgroup_y
+// CHECK-LABEL: func.func @loop_distributed_to_workgroup_y
func.func @loop_distributed_to_workgroup_y() -> index {
%c0 = arith.constant 0 : index
// CHECK: %[[C32:.+]] = arith.constant 32 : index
@@ -44,7 +44,7 @@
// -----
-// CHECK-LABEL: func @loop_distributed_to_workgroup_z
+// CHECK-LABEL: func.func @loop_distributed_to_workgroup_z
func.func @loop_distributed_to_workgroup_z() -> index {
%c0 = arith.constant 0 : index
// CHECK: %[[C32:.+]] = arith.constant 32 : index
@@ -66,7 +66,7 @@
// -----
-// CHECK-LABEL: func @loop_distributed_to_workitem_x
+// CHECK-LABEL: func.func @loop_distributed_to_workitem_x
func.func @loop_distributed_to_workitem_x() -> index {
%c0 = arith.constant 0 : index
// CHECK: %[[C32:.+]] = arith.constant 32 : index
@@ -88,7 +88,7 @@
// -----
-// CHECK-LABEL: func @loop_distributed_to_workitem_y
+// CHECK-LABEL: func.func @loop_distributed_to_workitem_y
func.func @loop_distributed_to_workitem_y() -> index {
%c0 = arith.constant 0 : index
// CHECK: %[[C32:.+]] = arith.constant 32 : index
@@ -110,7 +110,7 @@
// -----
-// CHECK-LABEL: func @loop_distributed_to_workitem_z
+// CHECK-LABEL: func.func @loop_distributed_to_workitem_z
func.func @loop_distributed_to_workitem_z() -> index {
%c0 = arith.constant 0 : index
// CHECK: %[[C32:.+]] = arith.constant 32 : index
@@ -132,7 +132,7 @@
// -----
-// CHECK-LABEL: func @cst_folded_into_affine_map
+// CHECK-LABEL: func.func @cst_folded_into_affine_map
func.func @cst_folded_into_affine_map() -> index {
%c0 = arith.constant 0 : index
// CHECK: %[[C32:.+]] = arith.constant 32 : index
@@ -154,7 +154,7 @@
// -----
-// CHECK-LABEL: func @affine_apply_folded_into_loop
+// CHECK-LABEL: func.func @affine_apply_folded_into_loop
func.func @affine_apply_folded_into_loop() -> index {
%c0 = arith.constant 0 : index
// CHECK: %[[C1:.+]] = arith.constant 1 : index
@@ -174,7 +174,7 @@
// -----
-// CHECK-LABEL: func @unknown_tile_size
+// CHECK-LABEL: func.func @unknown_tile_size
func.func @unknown_tile_size() -> index {
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
@@ -198,7 +198,7 @@
// -----
-// CHECK-LABEL: func @mismatched_id_count
+// CHECK-LABEL: func.func @mismatched_id_count
func.func @mismatched_id_count() -> index {
%c0 = arith.constant 0 : index
// CHECK: %[[C32:.+]] = arith.constant 32 : index
@@ -221,7 +221,7 @@
// -----
-// CHECK-LABEL: func @min_over_min
+// CHECK-LABEL: func.func @min_over_min
func.func @min_over_min() -> index {
%c0 = arith.constant 0 : index
%c8 = arith.constant 8 : index
@@ -250,7 +250,7 @@
// -----
-// CHECK-LABEL: func @cannot_prove_cst_bound
+// CHECK-LABEL: func.func @cannot_prove_cst_bound
func.func @cannot_prove_cst_bound() -> index {
%c0 = arith.constant 0 : index
%c8 = arith.constant 8 : index
@@ -278,7 +278,7 @@
// -----
-// CHECK-LABEL: func @can_prove_symbolic_bound
+// CHECK-LABEL: func.func @can_prove_symbolic_bound
func.func @can_prove_symbolic_bound() -> index {
%c0 = arith.constant 0 : index
%c8 = arith.constant 8 : index
diff --git a/compiler/src/iree/compiler/Codegen/Common/test/fold_tensor_extract_op.mlir b/compiler/src/iree/compiler/Codegen/Common/test/fold_tensor_extract_op.mlir
index 800bafb..9938b02 100644
--- a/compiler/src/iree/compiler/Codegen/Common/test/fold_tensor_extract_op.mlir
+++ b/compiler/src/iree/compiler/Codegen/Common/test/fold_tensor_extract_op.mlir
@@ -8,7 +8,7 @@
%1 = tensor.extract %0[%c1, %c2] : tensor<2x3xi32>
return %1 : i32
}
-// CHECK: func @fold_tensor_extract
+// CHECK: func.func @fold_tensor_extract
// CHECK-SAME: %[[ARG0:.+]]: memref<2x3xi32>
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
// CHECK-DAG: %[[C2:.+]] = arith.constant 2 : index
diff --git a/compiler/src/iree/compiler/Codegen/Common/test/forop_canonicalization.mlir b/compiler/src/iree/compiler/Codegen/Common/test/forop_canonicalization.mlir
index c8c2dca..e580ff4 100644
--- a/compiler/src/iree/compiler/Codegen/Common/test/forop_canonicalization.mlir
+++ b/compiler/src/iree/compiler/Codegen/Common/test/forop_canonicalization.mlir
@@ -20,7 +20,7 @@
return %21, %22 : vector<4xf32>, vector<4xf32>
}
-// CHECK-LABEL: func @loop_carried_vector_shape_cast
+// CHECK-LABEL: func.func @loop_carried_vector_shape_cast
// CHECK-NOT: vector.shape_cast
// CHECK: scf.for {{.*}} -> (vector<4xf32>, vector<4xf32>) {
// CHECK-NOT: vector.shape_cast
@@ -51,7 +51,7 @@
return %21, %22 : vector<4xf32>, vector<4xf32>
}
-// CHECK-LABEL: func @loop_carried_unrealized_conversion_cast
+// CHECK-LABEL: func.func @loop_carried_unrealized_conversion_cast
// CHECK-NOT: unrealized_conversion_cast
// CHECK: scf.for {{.*}} -> (vector<4xf32>, vector<4xf32>) {
// CHECK-NOT: unrealized_conversion_cast
@@ -77,7 +77,7 @@
return %21 : f32
}
-// CHECK-LABEL: func @loop_carried_extract
+// CHECK-LABEL: func.func @loop_carried_extract
// CHECK-NOT: vector.broadcast
// CHECK: scf.for {{.*}} -> (f32) {
// CHECK-NOT: vector.extract
@@ -105,7 +105,7 @@
return %0#0, %0#1, %0#2 : vector<8xf16>, vector<8xf16>, vector<4xf16>
}
-// CHECK-LABEL: func @loop_pack_v8f16
+// CHECK-LABEL: func.func @loop_pack_v8f16
// CHECK-SAME: (%[[ARG0:.+]]: vector<8xf16>, %[[ARG1:.+]]: vector<8xf16>, %[[ARG2:.+]]: vector<4xf16>)
// CHECK: %[[CAST_ARG0:.+]] = vector.bitcast %[[ARG0]] : vector<8xf16> to vector<4xf32>
// CHECK: %[[CAST_ARG1:.+]] = vector.bitcast %[[ARG1]] : vector<8xf16> to vector<4xf32>
diff --git a/compiler/src/iree/compiler/Codegen/Common/test/insert_distribution_info.mlir b/compiler/src/iree/compiler/Codegen/Common/test/insert_distribution_info.mlir
index eda7699..37b2edd 100644
--- a/compiler/src/iree/compiler/Codegen/Common/test/insert_distribution_info.mlir
+++ b/compiler/src/iree/compiler/Codegen/Common/test/insert_distribution_info.mlir
@@ -58,7 +58,7 @@
// CHECK-DAG: %[[D0:.+]] = affine.apply #[[MAP0]]()[%[[WORKLOAD_X]]]
// CHECK-DAG: %[[D1:.+]] = affine.apply #[[MAP0]]()[%[[WORKLOAD_Y]]]
// CHECK: hal.return %[[D0]], %[[D1]], %[[C1]] : index, index, index
-// CHECK: func @matmul_tensors()
+// CHECK: func.func @matmul_tensors()
// -----
@@ -124,7 +124,7 @@
// CHECK-DAG: %[[D0:.+]] = affine.apply #[[MAP]]()[%[[WORKLOAD_X]]]
// CHECK-DAG: %[[D1:.+]] = affine.apply #[[MAP]]()[%[[WORKLOAD_Y]]]
// CHECK: hal.return %[[D0]], %[[D1]], %[[C1]] : index, index, index
-// CHECK: func @add()
+// CHECK: func.func @add()
// -----
@@ -189,7 +189,7 @@
// CHECK-DAG: %[[D1:.+]] = affine.apply #[[MAP]]()[%[[WORKLOAD_Y]]]
// CHECK-DAG: %[[D2:.+]] = affine.apply #[[MAP]]()[%[[WORKLOAD_Z]]]
// CHECK: hal.return %[[D0]], %[[D1]], %[[D2]] : index, index, index
-// CHECK: func @add4D()
+// CHECK: func.func @add4D()
// -----
@@ -247,7 +247,7 @@
// CHECK-DAG: %[[D0:.+]] = affine.apply #[[MAP0]]()[%[[WORKLOAD_X]]]
// CHECK-DAG: %[[D1:.+]] = affine.apply #[[MAP0]]()[%[[WORKLOAD_Y]]]
// CHECK: hal.return %[[D0]], %[[D1]], %[[WORKLOAD_Z]]
-// CHECK: func @batch_matmul_tensors()
+// CHECK: func.func @batch_matmul_tensors()
// -----
@@ -300,7 +300,7 @@
// CHECK-DAG: %[[D0:.+]] = affine.apply #[[MAP0]]()[%[[WORKLOAD_X]]]
// CHECK-DAG: %[[D1:.+]] = affine.apply #[[MAP1]]()[%[[WORKLOAD_Y]]]
// CHECK: hal.return %[[D0]], %[[D1]], %[[C1]]
-// CHECK: func @preset_config()
+// CHECK: func.func @preset_config()
// -----
@@ -409,7 +409,7 @@
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
// CHECK-DAG: %[[D0:.+]] = affine.apply #[[MAP]]()[%[[WORKLOAD_X]]]
// CHECK: hal.return %[[D0]], %[[C1]], %[[C1]] : index, index, index
-// CHECK: func @static_1d_fft_stage2()
+// CHECK: func.func @static_1d_fft_stage2()
// -----
@@ -454,7 +454,7 @@
// CHECK-DAG: %[[D1:.+]] = affine.apply #[[MAP]]()[%[[WORKLOAD_Y]]]
// CHECK-DAG: %[[D2:.+]] = affine.apply #[[MAP]]()[%[[WORKLOAD_Z]]]
// CHECK: hal.return %[[D0]], %[[D1]], %[[D2]] : index, index, index
-// CHECK: func @static_3d_fft_stage3()
+// CHECK: func.func @static_3d_fft_stage3()
// -----
@@ -524,7 +524,7 @@
// CHECK-DAG: %[[D0:.+]] = affine.apply #[[MAP]]()[%[[WORKLOAD_X]]]
// CHECK-DAG: %[[D1:.+]] = affine.apply #[[MAP]]()[%[[WORKLOAD_Y]]]
// CHECK: hal.return %[[D0]], %[[D1]], %[[C1]] : index, index, index
-// CHECK: func @outs_fusion_fn()
+// CHECK: func.func @outs_fusion_fn()
// -----
@@ -589,7 +589,7 @@
// CHECK-DAG: %[[D1:.+]] = affine.apply #[[MAP]]()[%[[WORKLOAD_Y]]]
// CHECK-DAG: %[[D2:.+]] = affine.apply #[[MAP]]()[%[[WORKLOAD_Z]]]
// CHECK: hal.return %[[D0]], %[[D1]], %[[D2]] : index, index, index
-// CHECK: func @conv()
+// CHECK: func.func @conv()
// -----
@@ -648,7 +648,7 @@
// CHECK-DAG: %[[D1:.+]] = affine.apply #[[MAP1]]()[%[[WORKLOAD_Y]]]
// CHECK-DAG: %[[D2:.+]] = affine.apply #[[MAP2]]()[%[[WORKLOAD_Z]]]
// CHECK: hal.return %[[D0]], %[[D1]], %[[D2]] : index, index, index
-// CHECK: func @conv_static()
+// CHECK: func.func @conv_static()
// -----
@@ -705,7 +705,7 @@
// CHECK-DAG: %[[D0:.+]] = affine.apply #[[MAP0]]()[%[[WORKLOAD_X]]]
// CHECK-DAG: %[[D1:.+]] = affine.apply #[[MAP1]]()[%[[WORKLOAD_Y]]]
// CHECK: hal.return %[[D0]], %[[D1]], %[[C1]] : index, index, index
-// CHECK: func @generic_static()
+// CHECK: func.func @generic_static()
// -----
@@ -883,7 +883,7 @@
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
// CHECK-DAG: %[[D0:.+]] = affine.apply #[[MAP0]]()[%[[WORKLOAD_X]]]
// CHECK: hal.return %[[D0]], %[[C1]], %[[C1]] : index, index, index
-// CHECK: func @reduction
+// CHECK: func.func @reduction
// -----
@@ -941,7 +941,7 @@
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
// CHECK-DAG: %[[D0:.+]] = affine.apply #[[MAP0]]()[%[[WORKLOAD_X]]]
// CHECK: hal.return %[[D0]], %[[C1]], %[[C1]] : index, index, index
-// CHECK: func @gemm_unit_N()
+// CHECK: func.func @gemm_unit_N()
// -----
@@ -996,7 +996,7 @@
// CHECK-SAME: %[[WORKLOAD_Z:[a-zA-Z0-9_]+]]: index)
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
// CHECK: hal.return %[[C1]], %[[C1]], %[[C1]] : index, index, index
-// CHECK: func @gemm_unit_M_unit_N()
+// CHECK: func.func @gemm_unit_M_unit_N()
// -----
@@ -1057,7 +1057,7 @@
// CHECK-DAG: %[[D1:.+]] = affine.apply #[[MAP0]]()[%[[WORKLOAD_Y]]]
// CHECK-DAG: %[[D2:.+]] = affine.apply #[[MAP0]]()[%[[WORKLOAD_Z]]]
// CHECK: hal.return %[[D0]], %[[D1]], %[[D2]] : index, index, index
-// CHECK: func @generic_unit_dims()
+// CHECK: func.func @generic_unit_dims()
// -----
@@ -1114,7 +1114,7 @@
// CHECK-SAME: %[[WORKLOAD_Z:[a-zA-Z0-9_]+]]: index)
// CHECK: %[[C1:.+]] = arith.constant 1 : index
// CHECK: hal.return %[[C1]], %[[C1]], %[[C1]] : index, index, index
-// CHECK: func @reduce_to_scalar()
+// CHECK: func.func @reduce_to_scalar()
// -----
@@ -1168,7 +1168,7 @@
// CHECK-SAME: %[[WORKLOAD_Z:[a-zA-Z0-9_]+]]: index)
// CHECK: %[[C1:.+]] = arith.constant 1 : index
// CHECK: hal.return %[[C1]], %[[C1]], %[[C1]] : index, index, index
-// CHECK: func @scalar()
+// CHECK: func.func @scalar()
// -----
@@ -1190,7 +1190,7 @@
hal.executable.variant public @llvm, target = #executable_target_embedded_elf_x86_64_ {
hal.executable.entry_point public @matmul_interchange layout(#executable_layout) {translation_info = #translation}
builtin.module {
- func @matmul_interchange() {
+ func.func @matmul_interchange() {
%0 = hal.interface.constant.load[0] : index
%1 = hal.interface.constant.load[1] : index
%2 = hal.interface.constant.load[2] : index
@@ -1230,4 +1230,4 @@
// CHECK-DAG: %[[D0:.+]] = affine.apply #[[MAP0]]()[%[[WORKLOAD_X]]]
// CHECK-DAG: %[[D1:.+]] = affine.apply #[[MAP1]]()[%[[WORKLOAD_Y]]]
// CHECK: hal.return %[[D1]], %[[D0]], %[[C1]] : index, index, index
-// CHECK: func @matmul_interchange()
+// CHECK: func.func @matmul_interchange()
diff --git a/compiler/src/iree/compiler/Codegen/Common/test/iree_comprehensive_bufferize.mlir b/compiler/src/iree/compiler/Codegen/Common/test/iree_comprehensive_bufferize.mlir
index 44347d4..1546bcf 100644
--- a/compiler/src/iree/compiler/Codegen/Common/test/iree_comprehensive_bufferize.mlir
+++ b/compiler/src/iree/compiler/Codegen/Common/test/iree_comprehensive_bufferize.mlir
@@ -36,7 +36,7 @@
// CHECK-DAG: #[[MAP0:.+]] = affine_map<()[s0, s1] -> (s0 * s1)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0)[s0, s1] -> (-d0 + s1, s0)>
// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-// CHECK: func @matmul()
+// CHECK: func.func @matmul()
// CHECK-DAG: %[[M:.+]] = hal.interface.constant.load[0]
// CHECK-DAG: %[[N:.+]] = hal.interface.constant.load[1]
// CHECK-DAG: %[[K:.+]] = hal.interface.constant.load[2]
@@ -109,7 +109,7 @@
// CHECK-DAG: #[[MAP0:.+]] = affine_map<()[s0, s1] -> (s0 * s1)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0)[s0, s1] -> (-d0 + s1, s0)>
-// CHECK: func @matmul_fill()
+// CHECK: func.func @matmul_fill()
// CHECK-DAG: %[[CST:.+]] = arith.constant 0.000000e+00 : f32
// CHECK-DAG: %[[M:.+]] = hal.interface.constant.load[0]
// CHECK-DAG: %[[N:.+]] = hal.interface.constant.load[1]
@@ -185,7 +185,7 @@
}
return
}
-// CHECK: func @elementwise()
+// CHECK: func.func @elementwise()
// CHECK-DAG: %[[GLB_CST:.+]] = memref.get_global @__constant_1x10xf32 : memref<1x10xf32>
// CHECK-DAG: %[[IN_BUF:.+]] = hal.interface.binding.subspan set(0) binding(0) {{.+}} : memref<1x10xf32>
// CHECK-DAG: %[[OUT_BUF:.+]] = hal.interface.binding.subspan set(0) binding(1) {{.+}} : memref<1x10xf32>
@@ -224,7 +224,7 @@
}
return
}
-// CHECK: func @rank_reduced_slice()
+// CHECK: func.func @rank_reduced_slice()
// CHECK-DAG: %[[SRC_BINDING:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : memref<1x40xf32>
// CHECK-DAG: %[[DST_BINDING:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : memref<10xf32>
// CHECK: scf.for %[[IV0:.+]] =
@@ -236,7 +236,7 @@
// -----
-// CHECK-LABEL: func @reverse_dim(
+// CHECK-LABEL: func.func @reverse_dim(
// CHECK-DAG: %[[alloc:.*]] = memref.alloc() : memref<2x3xf32>
// CHECK-DAG: %[[global:.*]] = memref.get_global
// CHECK: iree_linalg_ext.reverse dimensions(dense<0> : tensor<1xi64>) ins(%[[global]] : memref<2x3xf32>) outs(%[[alloc]] : memref<2x3xf32>)
@@ -259,7 +259,7 @@
// -----
-// CHECK-LABEL: func @fft_tensor(
+// CHECK-LABEL: func.func @fft_tensor(
// CHECK: memref.alloc
// CHECK: memref.alloc
// CHECK: iree_linalg_ext.fft ins(%{{.*}} : index) outs(%{{.*}}, %{{.*}} : memref<1024xf32>, memref<1024xf32>)
@@ -292,7 +292,7 @@
flow.dispatch.tensor.store %6#1, %1, offsets = [], sizes = [], strides = [] : tensor<f32> -> !flow.dispatch.tensor<readwrite:f32>
return
}
-// CHECK: func @scan_1d_dim0_inclusive_sum
+// CHECK: func.func @scan_1d_dim0_inclusive_sum
// CHECK-NOT: memref.alloca
// CHECK: iree_linalg_ext.scan
// CHECK-SAME: ins(%{{.*}} : memref<6xf32>)
@@ -312,7 +312,7 @@
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [4], strides = [1] : tensor<4xi32> -> !flow.dispatch.tensor<readwrite:4xi32>
return
}
-// CHECK: func @sort1D
+// CHECK: func.func @sort1D
// CHECK: %[[BUF:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) offset(%c0) alignment(64) : memref<4xi32>
// CHECK: iree_linalg_ext.sort
// CHECK-SAME: outs(%[[BUF]] : memref<4xi32>)
@@ -341,7 +341,7 @@
}
return
}
-// CHECK: func @scatter_update_scalar_1D
+// CHECK: func.func @scatter_update_scalar_1D
// CHECK-DAG: %[[UPDATE:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) offset(%c0) alignment(64) : memref<4xi32>
// CHECK-DAG: %[[INDICES:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) offset(%c0) alignment(64) : memref<4x1xi32>
// CHECK-DAG: %[[ORIGINAL:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) offset(%c0) alignment(64) : memref<8xi32>
@@ -375,7 +375,7 @@
// XXX(hanchung): I don't know why there are memref.cast ops, might be a bug?
// Since we don't have e2e top-k tests, I can't figure out how it works today.
-// CHECK: func @topk
+// CHECK: func.func @topk
// CHECK-DAG: %[[INPUT_VALUES:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : memref<200x8xf32>
// CHECK-DAG: %[[XXX_VALUES:.+]] = memref.cast %[[INPUT_VALUES]] : memref<200x8xf32> to memref<200x8xf32,
// CHECK-DAG: %[[INPUT_INDICES:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : memref<200x8xi32>
diff --git a/compiler/src/iree/compiler/Codegen/Common/test/linalg_bufferize.mlir b/compiler/src/iree/compiler/Codegen/Common/test/linalg_bufferize.mlir
index 0a7310d..127c208 100644
--- a/compiler/src/iree/compiler/Codegen/Common/test/linalg_bufferize.mlir
+++ b/compiler/src/iree/compiler/Codegen/Common/test/linalg_bufferize.mlir
@@ -27,7 +27,7 @@
return
}
-// CHECK-LABEL: func @tile_from_tensor_load()
+// CHECK-LABEL: func.func @tile_from_tensor_load()
// CHECK-DAG: %[[TENSOR_LHS:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: memref.assume_alignment %[[TENSOR_LHS]], 32 : memref<?x?xf32>
// CHECK-DAG: %[[TENSOR_RHS:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
@@ -72,7 +72,7 @@
return
}
-// CHECK-LABEL: func @tile_from_tensor_load_inplace()
+// CHECK-LABEL: func.func @tile_from_tensor_load_inplace()
// CHECK-DAG: %[[TENSOR_LHS:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[TENSOR_RHS:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK-DAG: %[[RETURN:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer)
@@ -115,7 +115,7 @@
return
}
-// CHECK-LABEL: func @tile_from_tensor_load_inplace_and_copy()
+// CHECK-LABEL: func.func @tile_from_tensor_load_inplace_and_copy()
// CHECK-DAG: %[[TENSOR_LHS:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[TENSOR_RHS:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK-DAG: %[[RETURN1:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer)
@@ -167,7 +167,7 @@
return
}
-// CHECK-LABEL: func @tile_from_pointwise_lhs()
+// CHECK-LABEL: func.func @tile_from_pointwise_lhs()
// CHECK: %[[ALLOC:.+]] = memref.alloc() : memref<1x3xf32>
// CHECK-DAG: %[[TENSOR_LHS:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[TENSOR_RHS:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
@@ -222,7 +222,7 @@
return
}
-// CHECK-LABEL: func @tile_from_pointwise_lhs_inplace()
+// CHECK-LABEL: func.func @tile_from_pointwise_lhs_inplace()
// CHECK: %[[ALLOC:.+]] = memref.alloc() : memref<1x3xf32>
// CHECK-DAG: %[[TENSOR_LHS:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[TENSOR_RHS:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
@@ -275,7 +275,7 @@
return
}
-// CHECK-LABEL: func @tile_from_pointwise_outs()
+// CHECK-LABEL: func.func @tile_from_pointwise_outs()
// CHECK-DAG: %[[TENSOR_LHS:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[TENSOR_RHS:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK-DAG: %[[TENSOR_INIT:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer)
@@ -330,7 +330,7 @@
return
}
-// CHECK-LABEL: func @tile_from_pointwise_outs_inplace()
+// CHECK-LABEL: func.func @tile_from_pointwise_outs_inplace()
// CHECK-DAG: %[[TENSOR_LHS:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[TENSOR_RHS:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK-DAG: %[[RETURN:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer)
@@ -377,7 +377,7 @@
return
}
-// CHECK-LABEL: func @tile_from_matmul_outs()
+// CHECK-LABEL: func.func @tile_from_matmul_outs()
// CHECK-DAG: %[[TENSOR_LHS:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[TENSOR_RHS:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK-DAG: %[[TENSOR_INIT:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer)
@@ -424,7 +424,7 @@
return
}
-// CHECK-LABEL: func @tile_from_matmul_outs_inplace()
+// CHECK-LABEL: func.func @tile_from_matmul_outs_inplace()
// CHECK-DAG: %[[TENSOR_LHS:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[TENSOR_RHS:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK-DAG: %[[RETURN:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer)
@@ -484,7 +484,7 @@
// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0)[s0, s1] -> (-d0 + s0, s1)>
// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0)[s0, s1] -> (-d0 + s1, s0)>
-// CHECK: func @bufferize_dynamic()
+// CHECK: func.func @bufferize_dynamic()
// CHECK: %[[DIM0:.+]] = hal.interface.constant.load[0] : index
// CHECK: %[[DIM1:.+]] = hal.interface.constant.load[1] : index
// CHECK: %[[DIM2:.+]] = hal.interface.constant.load[2] : index
@@ -556,7 +556,7 @@
// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0)[s0, s1] -> (-d0 + s0, s1)>
// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0)[s0, s1] -> (-d0 + s1, s0)>
-// CHECK: func @bufferize_dynamic_inplace()
+// CHECK: func.func @bufferize_dynamic_inplace()
// CHECK: %[[DIM0:.+]] = hal.interface.constant.load[0] : index
// CHECK: %[[DIM1:.+]] = hal.interface.constant.load[1] : index
// CHECK: %[[DIM2:.+]] = hal.interface.constant.load[2] : index
@@ -597,7 +597,7 @@
return
}
-// CHECK: func @reshape_simple()
+// CHECK: func.func @reshape_simple()
// CHECK-DAG: %[[ARG0:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[RET0:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK: %[[RESHAPE:.+]] = memref.expand_shape %[[ARG0]] {{\[}}[0, 1]]
@@ -628,7 +628,7 @@
return
}
-// CHECK: func @reshape_fused_source()
+// CHECK: func.func @reshape_fused_source()
// CHECK-DAG: %[[ARG0:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : memref<12xi32>
// CHECK-DAG: %[[RET0:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : memref<3x4xi32>
// CHECK: %[[RESHAPE:.+]] = memref.expand_shape %[[ARG0]] {{\[}}[0, 1]]
@@ -663,7 +663,7 @@
return
}
-// CHECK: func @reshape_fused_source_and_copyout()
+// CHECK: func.func @reshape_fused_source_and_copyout()
// CHECK-DAG: %[[ARG0:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : memref<12xi32>
// CHECK-DAG: %[[RET0:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : memref<3x4xi32>
// CHECK-DAG: %[[RET1:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) : memref<3x4xi32>
@@ -698,7 +698,7 @@
return
}
-// CHECK: func @reshape_fused_target()
+// CHECK: func.func @reshape_fused_target()
// CHECK-DAG: %[[ARG0:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : memref<3x4xi32>
// CHECK-DAG: %[[RET0:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : memref<12xi32>
// CHECK: %[[RESHAPE:.+]] = memref.expand_shape %[[RET0]] {{\[}}[0, 1]]
@@ -744,7 +744,7 @@
return
}
-// CHECK-LABEL: func @dot_general_lowering()
+// CHECK-LABEL: func.func @dot_general_lowering()
// CHECK-DAG: %[[LHS:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[RHS:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK-DAG: %[[RESHAPE_LHS:.+]] = memref.collapse_shape %[[LHS]]
@@ -776,7 +776,7 @@
return
}
-// CHECK-LABEL: func @slice()
+// CHECK-LABEL: func.func @slice()
// CHECK-DAG: %[[ARG:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[RETURN:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK: %[[SUBVIEW:.+]] = memref.subview %[[ARG]]
@@ -799,7 +799,7 @@
return
}
-// CHECK-LABEL: func @slice_rank_reducing()
+// CHECK-LABEL: func.func @slice_rank_reducing()
// CHECK-DAG: %[[ARG:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[RETURN:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK: %[[SUBVIEW:.+]] = memref.subview %[[ARG]]
@@ -827,7 +827,7 @@
return
}
-// CHECK-LABEL: func @slice_multiple_copy()
+// CHECK-LABEL: func.func @slice_multiple_copy()
// CHECK-DAG: %[[ARG:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[RETURN1:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK-DAG: %[[RETURN2:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer)
@@ -852,7 +852,7 @@
return
}
-// CHECK-LABEL: func @slice_in_place()
+// CHECK-LABEL: func.func @slice_in_place()
// CHECK-NOT: linalg.generic
@@ -872,7 +872,7 @@
return
}
-// CHECK-LABEL: func @slice_whole_stride_dispatch_0()
+// CHECK-LABEL: func.func @slice_whole_stride_dispatch_0()
// CHECK-DAG: %[[INPUT:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[OUTPUT:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK-DAG: %[[SUBVIEW_INPUT:.+]] = memref.subview %[[INPUT]]
@@ -902,7 +902,7 @@
return
}
-// CHECK-LABEL: func @subtensor_insert()
+// CHECK-LABEL: func.func @subtensor_insert()
// CHECK-DAG: %[[ARG0:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[ARG1:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK-DAG: %[[RET0:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer)
@@ -926,7 +926,7 @@
return
}
-// CHECK-LABEL: func @tensor_extract()
+// CHECK-LABEL: func.func @tensor_extract()
// CHECK-DAG: %[[ARG0:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[RET0:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK: %[[LOAD:.+]] = memref.load %[[ARG0]]
@@ -945,7 +945,7 @@
return
}
-// CHECK-LABEL: func @load_to_store()
+// CHECK-LABEL: func.func @load_to_store()
// CHECK: %[[OUT:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : memref<3x4xi32>
// CHECK: %[[IN:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : memref<3x4xi32>
// CHECK: linalg.generic {{.*}} ins(%[[IN]] {{.*}} outs(%[[OUT]]
@@ -960,7 +960,7 @@
return
}
-// CHECK-LABEL: func @constant()
+// CHECK-LABEL: func.func @constant()
// CHECK: %[[CST:.+]] = arith.constant {{.+}} : tensor<2x2x3xi32>
// CHECK: %[[MEMREF:.+]] = bufferization.to_memref %[[CST]] : memref<2x2x3xi32>
// CHECK: %[[RESULT:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
@@ -1003,7 +1003,7 @@
return
}
-// CHECK-LABEL: func @rhs_non_splat_constant
+// CHECK-LABEL: func.func @rhs_non_splat_constant
// CHECK-DAG: %[[CONSTANT:.+]] = arith.constant {{.+}} : tensor<3x5xf32>
// CHECK-DAG: %[[RHS:.+]] = bufferization.to_memref %[[CONSTANT]]
// CHECK-DAG: %[[LHS_INPUT:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : memref<1x5x3x1xf32>
@@ -1049,7 +1049,7 @@
return
}
-// CHECK-LABEL: func @gather()
+// CHECK-LABEL: func.func @gather()
// CHECK-DAG: %[[ARG0:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[ARG1:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK-DAG: %[[RET0:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer)
@@ -1081,7 +1081,7 @@
return
}
-// CHECK-LABEL: func @pooling_nhwc_sum
+// CHECK-LABEL: func.func @pooling_nhwc_sum
// CHECK: %[[WINDOW:.+]] = memref.alloc() : memref<2x3xf32>
// CHECK-DAG: %[[INIT:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : memref<f32>
// CHECK-DAG: %[[INPUT:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : memref<1x4x6x1xf32>
@@ -1155,7 +1155,7 @@
return
}
-// CHECK-LABEL: func @read_only_subtensor
+// CHECK-LABEL: func.func @read_only_subtensor
// CHECK-DAG: %[[ARG0:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : memref<?x?xf32>
// CHECK-DAG: %[[ARG1:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : memref<?x?xf32>
// CHECK-DAG: %[[RET0:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) : memref<?x?xf32>
@@ -1194,7 +1194,7 @@
return
}
-// CHECK-LABEL: func @reshape_read_only
+// CHECK-LABEL: func.func @reshape_read_only
// CHECK-DAG: %[[INPUT:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[OUTPUT:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK: %[[RESHAPE:.+]] = memref.collapse_shape %[[INPUT]]
@@ -1240,7 +1240,7 @@
return
}
-// CHECK: func @use_buffer_for_operand_when_output_tensor_not_used()
+// CHECK: func.func @use_buffer_for_operand_when_output_tensor_not_used()
// CHECK-NOT: memref.alloc
// CHECK: %[[OUTPUT:.+]] = hal.interface.binding.subspan set(0) binding(3) type(storage_buffer)
@@ -1293,7 +1293,7 @@
return
}
-// CHECK-LABEL: func @dont_use_buffer_for_operand_when_output_tensor_used()
+// CHECK-LABEL: func.func @dont_use_buffer_for_operand_when_output_tensor_used()
// CHECK: %[[ALLOC:.+]] = memref.alloc
// CHECK: %[[OUTPUT:.+]] = hal.interface.binding.subspan set(0) binding(3) type(storage_buffer)
// CHECK: linalg.fill
@@ -1333,7 +1333,7 @@
return
}
-// CHECK-LABEL: func @bufferize_cst_output_tensor()
+// CHECK-LABEL: func.func @bufferize_cst_output_tensor()
// CHECK-DAG: %[[CST1:.+]] = arith.constant dense<-2147483648> : tensor<i32>
// CHECK-DAG: %[[CST5:.+]] = arith.constant dense<[1, 2, 3, 4, 5]> : tensor<5xi32>
@@ -1384,7 +1384,7 @@
return
}
-// CHECK-LABEL: func @cast_follwed_by_store()
+// CHECK-LABEL: func.func @cast_follwed_by_store()
// CHECK-DAG: %[[ZERO:.+]] = arith.constant 0.000000e+00 : f32
// CHECK-DAG: %[[LHS:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : memref<4x32x1024xf32>
// CHECK-DAG: %[[RHS:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : memref<4x1024x64xf32>
@@ -1419,7 +1419,7 @@
return
}
-// CHECK-LABEL: func @rank_reduced_subtensor_insert()
+// CHECK-LABEL: func.func @rank_reduced_subtensor_insert()
// CHECK-DAG: %[[ARG:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[RET:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK: %[[SUBVIEW:.+]] = memref.subview %[[RET]]
@@ -1466,7 +1466,7 @@
return
}
-// CHECK-LABEL: func @bufferize_transfer_op()
+// CHECK-LABEL: func.func @bufferize_transfer_op()
// CHECK-DAG: %[[ARG0:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[ARG1:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK-DAG: %[[ARG2:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer)
@@ -1522,7 +1522,7 @@
return
}
-// CHECK-LABEL: func @bufferize_transfer_op_inplace()
+// CHECK-LABEL: func.func @bufferize_transfer_op_inplace()
// CHECK-DAG: %[[ARG0:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[ARG1:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK-DAG: %[[RET0:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer)
@@ -1593,7 +1593,7 @@
return
}
-// CHECK-LABEL: func @multi_result()
+// CHECK-LABEL: func.func @multi_result()
// CHECK-DAG: %[[ARG0:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[ARG1:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK-DAG: %[[RET0:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer)
@@ -1649,7 +1649,7 @@
return
}
-// CHECK-LABEL: func @padded_matmul()
+// CHECK-LABEL: func.func @padded_matmul()
// CHECK-DAG: %[[LHS_PADDED:.+]] = memref.alloc() : memref<64x32xf32>
// CHECK-DAG: %[[RHS_PADDED:.+]] = memref.alloc() : memref<32x16xf32>
// CHECK-DAG: %[[C0:.+]] = arith.constant 0.000000e+00 : f32
@@ -1722,7 +1722,7 @@
}
// CHECK: #[[MAP1:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 4)>
-// CHECK: func @dot_general_padded
+// CHECK: func.func @dot_general_padded
// CHECK-DAG: %[[ALLOC_RET0:.+]] = memref.alloc
// CHECK-DAG: %[[ALLOC_ARG1:.+]] = memref.alloc
// CHECK-DAG: %[[ALLOC_ARG0:.+]] = memref.alloc
@@ -1795,7 +1795,7 @@
return
}
-// CHECK-LABEL: func @multi_result_reduce
+// CHECK-LABEL: func.func @multi_result_reduce
// CHECK-DAG: %[[ARG0:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[ARG1:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK-DAG: %[[RET0:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer)
@@ -2129,7 +2129,7 @@
return
}
-// CHECK-LABEL: func @sort1D()
+// CHECK-LABEL: func.func @sort1D()
// CHECK-DAG: %[[INPUT:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[OUTPUT:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK: linalg.generic {{.*}} ins(%[[INPUT]] {{.*}} outs(%[[OUTPUT]]
@@ -2172,7 +2172,7 @@
return
}
-// CHECK-LABEL: func @sort1D_inplace()
+// CHECK-LABEL: func.func @sort1D_inplace()
// CHECK-DAG: %[[INOUT:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK: scf.for %[[ARG0:.+]] =
// CHECK: scf.for %[[ARG1:.+]] =
@@ -2198,7 +2198,7 @@
return
}
-// CHECK-LABEL: func @iree_linalg_ext_sort_1d()
+// CHECK-LABEL: func.func @iree_linalg_ext_sort_1d()
// CHECK-DAG: %[[INOUT:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK: iree_linalg_ext.sort
// CHECK-SAME: dimension(0)
@@ -2240,7 +2240,7 @@
}
// CHECK: #[[MAP:.+]] = affine_map<(d0)[s0] -> (d0 + s0)>
-// CHECK: func @tensor_insert_slice()
+// CHECK: func.func @tensor_insert_slice()
// CHECK-DAG: %[[SRC:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : memref<?x?xi32>
// CHECK-DAG: %[[DST:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : memref<?x?xi32>
// CHECK-DAG: %[[OFFSET_Y:.+]] = hal.interface.constant.load[0]
@@ -2285,7 +2285,7 @@
return
}
-// CHECK-LABEL: func @dynamic_update_slice()
+// CHECK-LABEL: func.func @dynamic_update_slice()
// CHECK-DAG: %[[SRC:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : memref<?xi32>
// CHECK-DAG: %[[DST:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) : memref<?x?xi32>
// CHECK-DAG: %[[OFFSET_Y:.+]] = hal.interface.constant.load[0]
@@ -2368,7 +2368,7 @@
return
}
-// CHECK-LABEL: func @multi_level_tile_fuse()
+// CHECK-LABEL: func.func @multi_level_tile_fuse()
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[M:.+]] = hal.interface.constant.load[0]
// CHECK-DAG: %[[N:.+]] = hal.interface.constant.load[1]
@@ -2448,7 +2448,7 @@
return
}
-// CHECK-LABEL: func @operand_fusion()
+// CHECK-LABEL: func.func @operand_fusion()
// CHECK-DAG: %[[M:.+]] = hal.interface.constant.load[0]
// CHECK-DAG: %[[N:.+]] = hal.interface.constant.load[1]
// CHECK-DAG: %[[K:.+]] = hal.interface.constant.load[2]
@@ -2538,7 +2538,7 @@
return
}
-// CHECK-LABEL: func @two_level_tile_and_fuse()
+// CHECK-LABEL: func.func @two_level_tile_and_fuse()
// CHECK-DAG: %[[LHS:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[RHS:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK-DAG: %[[BIAS:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer)
@@ -2613,7 +2613,7 @@
}
return
}
-// CHECK: func @forward_dispatch_3()
+// CHECK: func.func @forward_dispatch_3()
// -----
@@ -2662,7 +2662,7 @@
}
return
}
-// CHECK-LABEL: func @dot_general_nontrivial_batching_mutliple_parallel_dimension()
+// CHECK-LABEL: func.func @dot_general_nontrivial_batching_mutliple_parallel_dimension()
// CHECK-NOT: memref.alloc
// -----
@@ -2695,7 +2695,7 @@
: tensor<?x?xf32> -> !flow.dispatch.tensor<writeonly:?x?xf32>{%d0, %d1}
return
}
-// CHECK-LABEL: func @no_op_subview()
+// CHECK-LABEL: func.func @no_op_subview()
// CHECK-DAG: %[[SRC:.+]] = hal.interface.binding.subspan set(0) binding(0)
// CHECK-DAG: %[[DEST:.+]] = hal.interface.binding.subspan set(0) binding(1)
// CHECK: linalg.generic
@@ -2716,7 +2716,7 @@
: tensor<?xf32> -> !flow.dispatch.tensor<writeonly:?xf32>{%d0}
return
}
-// CHECK-LABEL: func @rank_reducing_no_op_subview()
+// CHECK-LABEL: func.func @rank_reducing_no_op_subview()
// CHECK-DAG: %[[SRC:.+]] = hal.interface.binding.subspan set(0) binding(0)
// CHECK-DAG: %[[DEST:.+]] = hal.interface.binding.subspan set(0) binding(1)
// CHECK: %[[SUBVIEW:.+]] = memref.subview %[[SRC]][0, 0] [1, %{{.+}}]
@@ -2726,7 +2726,7 @@
// -----
-// CHECK-LABEL: func @dispatch_scatter()
+// CHECK-LABEL: func.func @dispatch_scatter()
func.func @dispatch_scatter() {
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
diff --git a/compiler/src/iree/compiler/Codegen/Common/test/remove_dead_allocs.mlir b/compiler/src/iree/compiler/Codegen/Common/test/remove_dead_allocs.mlir
index 67c6c5c..146d3d0 100644
--- a/compiler/src/iree/compiler/Codegen/Common/test/remove_dead_allocs.mlir
+++ b/compiler/src/iree/compiler/Codegen/Common/test/remove_dead_allocs.mlir
@@ -4,7 +4,7 @@
%0 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
return
}
-// CHECK-LABEL: func @alloc_remove
+// CHECK-LABEL: func.func @alloc_remove
// CHECK-NEXT: return
// -----
@@ -13,6 +13,6 @@
%0 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
return %0 : memref<?x?xf32>
}
-// CHECK-LABEL: func @alloc_keep
+// CHECK-LABEL: func.func @alloc_keep
// CHECK-NEXT: %[[ALLOC:.+]] = memref.alloc
// CHECK-NEXT: return %[[ALLOC]]
diff --git a/compiler/src/iree/compiler/Codegen/Common/test/remove_trivial_loops.mlir b/compiler/src/iree/compiler/Codegen/Common/test/remove_trivial_loops.mlir
index d603306..24e9bea 100644
--- a/compiler/src/iree/compiler/Codegen/Common/test/remove_trivial_loops.mlir
+++ b/compiler/src/iree/compiler/Codegen/Common/test/remove_trivial_loops.mlir
@@ -7,7 +7,7 @@
]>
]>
-// CHECK-LABEL: func @dispatch_0()
+// CHECK-LABEL: func.func @dispatch_0()
hal.executable private @dispatch_0 {
hal.executable.variant @cuda, target = #hal.executable.target<"cuda", "cuda-nvptx-fb"> {
hal.executable.entry_point @dispatch_0 layout(#executable_layout) {
@@ -53,7 +53,7 @@
]>
]>
-// CHECK-LABEL: func @workgroup_tile_loop()
+// CHECK-LABEL: func.func @workgroup_tile_loop()
#translation = #iree_codegen.translation_info<LLVMGPUDistribute, workload_per_wg = [32]>
hal.executable private @workgroup_tile_loop {
hal.executable.variant @cuda, target = #hal.executable.target<"cuda", "cuda-nvptx-fb"> {
@@ -87,7 +87,7 @@
]>
]>
-// CHECK-LABEL: func @workgroup_tile_loop_negative()
+// CHECK-LABEL: func.func @workgroup_tile_loop_negative()
#translation = #iree_codegen.translation_info<LLVMGPUDistribute, workload_per_wg = [16]>
hal.executable private @workgroup_tile_loop_negative {
hal.executable.variant @cuda, target = #hal.executable.target<"cuda", "cuda-nvptx-fb"> {
@@ -121,7 +121,7 @@
]>
]>
-// CHECK-LABEL: func @both_workgroup_and_workitem()
+// CHECK-LABEL: func.func @both_workgroup_and_workitem()
// CHECK-NOT: scf.for
// CHECK: gpu.barrier
#translation = #iree_codegen.translation_info<LLVMGPUDistribute, workload_per_wg = [32, 8, 1]>
diff --git a/compiler/src/iree/compiler/Codegen/Common/test/rewrite_linalg_destructive_updates.mlir b/compiler/src/iree/compiler/Codegen/Common/test/rewrite_linalg_destructive_updates.mlir
index 8e43c47..3b55f83 100644
--- a/compiler/src/iree/compiler/Codegen/Common/test/rewrite_linalg_destructive_updates.mlir
+++ b/compiler/src/iree/compiler/Codegen/Common/test/rewrite_linalg_destructive_updates.mlir
@@ -35,7 +35,7 @@
flow.dispatch.tensor.store %10, %2, offsets = [0, 0], sizes = [128, 384], strides = [1, 1] : tensor<128x384xf32> -> !flow.dispatch.tensor<writeonly:128x384xf32>
return
}
-// CHECK-LABEL: func @matmul
+// CHECK-LABEL: func.func @matmul
// CHECK: scf.for
// CHECK: scf.for
// CHECK: %[[MATMUL:.+]] = linalg.matmul
@@ -120,7 +120,7 @@
}
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 64)>
// CHECK-DAG: #[[MAP2:.+]] = affine_map<()[s0, s1, s2] -> (s0 * s1 + s2)>
-// CHECK: func @check_offset_strides()
+// CHECK: func.func @check_offset_strides()
// CHECK-DAG: %[[LHS_OFFSET_Y:.+]] = hal.interface.constant.load[0]
// CHECK-DAG: %[[LHS_OFFSET_X:.+]] = hal.interface.constant.load[1]
// CHECK-DAG: %[[LHS_STRIDE_Y:.+]] = hal.interface.constant.load[2]
@@ -165,7 +165,7 @@
// -----
-func @argmax() {
+func.func @argmax() {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@@ -222,7 +222,7 @@
flow.dispatch.tensor.store %t5#1, %5, offsets = [0, 0], sizes = [%2, %t1], strides = [1, 1] : tensor<?x?xi32> -> !flow.dispatch.tensor<writeonly:?x?xi32>{%2, %t1}
return
}
-// CHECK-LABEL: func @argmax()
+// CHECK-LABEL: func.func @argmax()
// CHECK: scf.for
// CHECK-NOT: iter_args
// CHECK: scf.for
@@ -236,7 +236,7 @@
// -----
-func @reduce() {
+func.func @reduce() {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@@ -295,7 +295,7 @@
flow.dispatch.tensor.store %t5#0, %o1, offsets = [0, 0], sizes = [%2, %t1], strides = [1, 1] : tensor<?x?xf32> -> !flow.dispatch.tensor<writeonly:?x?xf32>{%2, %t1}
return
}
-// CHECK-LABEL: func @reduce()
+// CHECK-LABEL: func.func @reduce()
// CHECK-DAG: %[[OUT1:.+]] = hal.interface.binding.subspan set(0) binding(1)
// CHECK-DAG: %[[OUT2:.+]] = hal.interface.binding.subspan set(0) binding(2)
// CHECK: scf.for
@@ -311,7 +311,7 @@
// -----
-func @scatter() {
+func.func @scatter() {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@@ -355,7 +355,7 @@
flow.dispatch.tensor.store %t8, %t2, offsets = [0, 0], sizes = [%t1, %3], strides = [1, 1] : tensor<?x?xf32> -> !flow.dispatch.tensor<readwrite:?x?xf32>{%t1, %3}
return
}
-// CHECK-LABEL: func @scatter()
+// CHECK-LABEL: func.func @scatter()
// CHECK: %[[OUT:.+]] = hal.interface.binding.subspan set(0) binding(2)
// CHECK: %[[OUT_TENSOR:.+]] = flow.dispatch.tensor.load %[[OUT]]
// CHECK: scf.for
diff --git a/compiler/src/iree/compiler/Codegen/Common/test/swizzle_workgroup.mlir b/compiler/src/iree/compiler/Codegen/Common/test/swizzle_workgroup.mlir
index 73e70bd..441148a 100644
--- a/compiler/src/iree/compiler/Codegen/Common/test/swizzle_workgroup.mlir
+++ b/compiler/src/iree/compiler/Codegen/Common/test/swizzle_workgroup.mlir
@@ -1,6 +1,6 @@
// RUN: iree-opt --iree-workgroup-swizzle='logTile=3' %s | FileCheck %s
-func @matmul() {
+func.func @matmul() {
%c0 = arith.constant 0 : index
%c128 = arith.constant 128 : index
%c96 = arith.constant 96 : index
@@ -28,7 +28,7 @@
return
}
-// CHECK-LABEL: func @matmul
+// CHECK-LABEL: func.func @matmul
// CHECK: %[[WORKGROUPIDX:.*]] = hal.interface.workgroup.id[0] : index
// CHECK: %[[WORKGROUPIDY:.*]] = hal.interface.workgroup.id[1] : index
// CHECK: %[[WORKGROUPCOUNTX:.*]] = hal.interface.workgroup.count[0] : index
diff --git a/compiler/src/iree/compiler/Codegen/Common/test/tile_and_distribute_to_workgroups.mlir b/compiler/src/iree/compiler/Codegen/Common/test/tile_and_distribute_to_workgroups.mlir
index 984937c..969efeb 100644
--- a/compiler/src/iree/compiler/Codegen/Common/test/tile_and_distribute_to_workgroups.mlir
+++ b/compiler/src/iree/compiler/Codegen/Common/test/tile_and_distribute_to_workgroups.mlir
@@ -51,7 +51,7 @@
// CHECK-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation_info<CPUTileFuseAndVectorize>
// CHECK: hal.executable.entry_point public @matmul_tensors
// CHECK-SAME: translation_info = #[[TRANSLATION]]
-// CHECK: func @matmul_tensors()
+// CHECK: func.func @matmul_tensors()
// CHECK-DAG: %[[M:.+]] = hal.interface.constant.load[0]
// CHECK-DAG: %[[N:.+]] = hal.interface.constant.load[1]
// CHECK-DAG: %[[K:.+]] = hal.interface.constant.load[2]
@@ -138,7 +138,7 @@
// CHECK: hal.executable private @add
// CHECK: hal.executable.entry_point public @add
// CHECK-SAME: translation_info = #[[TRANSLATION]]
-// CHECK: func @add()
+// CHECK: func.func @add()
// CHECK: scf.for %[[IV0:.+]] =
// CHECK: scf.for %[[IV1:.+]] =
// CHECK: %[[RESULT:.+]] = linalg.generic
@@ -198,7 +198,7 @@
// CHECK-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation_info<CPUDoubleTilingExpert>
// CHECK: hal.executable.entry_point public @add4D
// CHECK-SAME: translation_info = #[[TRANSLATION]]
-// CHECK: func @add4D()
+// CHECK: func.func @add4D()
// CHECK: %[[C0:.+]] = arith.constant 0 : index
// CHECK: scf.for %[[IV0:.+]] =
// CHECK: scf.for %[[IV1:.+]] =
@@ -255,7 +255,7 @@
}
// CHECK-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation_info<CPUTileFuseAndVectorize>
// CHECK: hal.executable.entry_point public @batch_matmul_tensors
-// CHECK: func @batch_matmul_tensors()
+// CHECK: func.func @batch_matmul_tensors()
// CHECK: scf.for %[[IV0:.+]] =
// CHECK: scf.for %[[IV1:.+]] =
// CHECK: scf.for %[[IV2:.+]] =
@@ -305,7 +305,7 @@
}
// CHECK-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation_info<CPUDoubleTilingExpert>
// CHECK: hal.executable.entry_point public @preset_config
-// CHECK: func @preset_config()
+// CHECK: func.func @preset_config()
// CHECK: scf.for %[[IV0:.+]] =
// CHECK: scf.for %[[IV1:.+]] =
// CHECK-DAG: %[[LHS:.+]] = flow.dispatch.tensor.load %{{.+}}, offsets = [%[[IV0]], 0], sizes = [32, 256]
@@ -364,7 +364,7 @@
}
// CHECK-DAG: #[[MAP1:.+]] = affine_map<()[s0] -> (s0 * 64)>
// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 64)>
-// CHECK: func @copy_op()
+// CHECK: func.func @copy_op()
// CHECK-DAG: %[[SOURCE_SIZE_Y:.+]] = hal.interface.constant.load[0] : index
// CHECK-DAG: %[[SOURCE_SIZE_X:.+]] = hal.interface.constant.load[1] : index
// CHECK-DAG: %[[DEST_SIZE_Y:.+]] = hal.interface.constant.load[2] : index
@@ -439,7 +439,7 @@
// CHECK: hal.executable private @static_1d_fft_stage2
// CHECK: hal.executable.entry_point public @static_1d_fft_stage2
// CHECK-SAME: translation_info = #[[TRANSLATION]]
-// CHECK: func @static_1d_fft_stage2()
+// CHECK: func.func @static_1d_fft_stage2()
// CHECK: scf.for %[[IV0:.+]] =
// CHECK: %[[RESULT:.+]]:2 = iree_linalg_ext.fft
// CHECK-DAG: flow.dispatch.tensor.store %[[RESULT]]#0, %{{.+}}, offsets = [%[[IV0]]]
@@ -479,7 +479,7 @@
// CHECK: hal.executable private @static_3d_fft_stage3
// CHECK: hal.executable.entry_point public @static_3d_fft_stage3
// CHECK-SAME: translation_info = #[[TRANSLATION]]
-// CHECK: func @static_3d_fft_stage3()
+// CHECK: func.func @static_3d_fft_stage3()
// CHECK: scf.for %[[IV0:.+]] =
// CHECK: scf.for %[[IV1:.+]] =
// CHECK: scf.for %[[IV2:.+]] =
@@ -543,7 +543,7 @@
}
}
}
-// CHECK: func @outs_fusion_fn
+// CHECK: func.func @outs_fusion_fn
// CHECK: scf.for %[[IV0:.+]] =
// CHECK: scf.for %[[IV1:.+]] =
// CHECK: %[[INIT:.+]] = linalg.init_tensor
@@ -607,7 +607,7 @@
// CHECK: hal.executable private @conv
// CHECK: hal.executable.entry_point public @conv
// CHECK-SAME: translation_info = #[[TRANSLATION]]
-// CHECK: func @conv()
+// CHECK: func.func @conv()
// CHECK: %[[C0:.+]] = arith.constant 0
// CHECK: scf.for %[[IV0:.+]] =
// CHECK: scf.for %[[IV1:.+]] =
@@ -664,7 +664,7 @@
// CHECK: hal.executable private @conv_static
// CHECK: hal.executable.entry_point public @conv_static
// CHECK-SAME: translation_info = #[[TRANSLATION]]
-// CHECK: func @conv_static()
+// CHECK: func.func @conv_static()
// CHECK: %[[C0:.+]] = arith.constant 0 : index
// CHECK: scf.for %[[IV0:.+]] =
// CHECK: scf.for %[[IV1:.+]] =
@@ -720,7 +720,7 @@
// CHECK-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation_info<CPUDoubleTilingExpert>
// CHECK: hal.executable private @generic_static
// CHECK: hal.executable.entry_point public @generic_static
-// CHECK: func @generic_static()
+// CHECK: func.func @generic_static()
// CHECK: scf.for %[[IV0:.+]] =
// CHECK: scf.for %[[IV1:.+]] =
// CHECK: %[[RESULT:.+]] = linalg.generic
@@ -875,7 +875,7 @@
// CHECK: hal.executable private @reduction
// CHECK: hal.executable.entry_point public @reduction
// CHECK-SAME: translation_info = #[[TRANSLATION]]
-// CHECK: func @reduction
+// CHECK: func.func @reduction
// CHECK: scf.for %[[IV0:.+]] =
// CHECK: %[[INIT:.+]] = linalg.init_tensor
// CHECK: %[[FILL:.+]] = linalg.fill
@@ -934,7 +934,7 @@
// CHECK: hal.executable private @gemm_unit_N
// CHECK: hal.executable.entry_point public @gemm_unit_N
// CHECK-SAME: translation_info = #[[TRANSLATION]]
-// CHECK: func @gemm_unit_N()
+// CHECK: func.func @gemm_unit_N()
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[M:.+]] = hal.interface.constant.load[0]
// CHECK-DAG: %[[WG_ID_X:.+]] = hal.interface.workgroup.id[0]
@@ -993,7 +993,7 @@
// CHECK: hal.executable private @gemm_unit_M_unit_N
// CHECK: hal.executable.entry_point public @gemm_unit_M_unit_N
// CHECK-SAME: translation_info = #[[TRANSLATION]]
-// CHECK: func @gemm_unit_M_unit_N()
+// CHECK: func.func @gemm_unit_M_unit_N()
// CHECK-NOT: scf.for
// CHECK: %[[GEMM:.+]] = linalg.matmul
// CHECK: flow.dispatch.tensor.store %[[GEMM]], %{{.+}}, offsets = [0, 0]
@@ -1048,7 +1048,7 @@
// CHECK: hal.executable private @generic_unit_dims
// CHECK: hal.executable.entry_point public @generic_unit_dims
// CHECK-SAME: translation_info = #[[TRANSLATION]]
-// CHECK: func @generic_unit_dims()
+// CHECK: func.func @generic_unit_dims()
// CHECK: %[[C0:.+]] = arith.constant 0 : index
// CHECK: scf.for %[[IV0:.+]] =
// CHECK: scf.for %[[IV1:.+]] =
@@ -1105,7 +1105,7 @@
// CHECK: hal.executable private @reduce_to_scalar
// CHECK: hal.executable.entry_point public @reduce_to_scalar
// CHECK-SAME: translation_info = #[[TRANSLATION]]
-// CHECK: func @reduce_to_scalar()
+// CHECK: func.func @reduce_to_scalar()
// CHECK-NOT: scf.for
// -----
@@ -1154,7 +1154,7 @@
// CHECK: hal.executable private @scalar
// CHECK: hal.executable.entry_point public @scalar
// CHECK-SAME: translation_info = #[[TRANSLATION]]
-// CHECK: func @scalar()
+// CHECK: func.func @scalar()
// CHECK-NOT: scf.for
// -----
@@ -1200,7 +1200,7 @@
}
}
// CHECK: #[[MAP:.+]] = affine_map<()[s0] -> (s0 + 10)>
-// CHECK: func @rank_reduced_slice()
+// CHECK: func.func @rank_reduced_slice()
// CHECK-DAG: %[[SRC_BINDING:.+]] = hal.interface.binding.subspan set(0) binding(0)
// CHECK-SAME: : !flow.dispatch.tensor<readonly:5x40xf32>
// CHECK-DAG: %[[DST_BINDING:.+]] = hal.interface.binding.subspan set(0) binding(1)
@@ -1262,7 +1262,7 @@
// CHECK-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation_info<CPUDoubleTilingExpert>
// CHECK: hal.executable.entry_point public @matmul_interchange
// CHECK-SAME: translation_info = #[[TRANSLATION]]
-// CHECK: func @matmul_interchange()
+// CHECK: func.func @matmul_interchange()
// CHECK-DAG: %[[D0:.+]] = hal.interface.constant.load[0] : index
// CHECK-DAG: %[[D1:.+]] = hal.interface.constant.load[1] : index
// CHECK: scf.for %{{.+}} = %{{.+}} to %[[D1]] step %{{.+}} {
diff --git a/compiler/src/iree/compiler/Codegen/Common/test/transpose_canonicalization.mlir b/compiler/src/iree/compiler/Codegen/Common/test/transpose_canonicalization.mlir
index 3c81cef..2482f8f 100644
--- a/compiler/src/iree/compiler/Codegen/Common/test/transpose_canonicalization.mlir
+++ b/compiler/src/iree/compiler/Codegen/Common/test/transpose_canonicalization.mlir
@@ -1,6 +1,6 @@
// RUN: iree-opt %s --iree-codegen-optimize-vector-transfer | FileCheck %s
-// CHECK-LABEL: func @transpose
+// CHECK-LABEL: func.func @transpose
// CHECK-NEXT: vector.shape_cast %{{.*}} : vector<1x1x4xf32> to vector<1x4x1xf32>
func.func @transpose(%arg0: vector<1x1x4xf32>) -> vector<1x4x1xf32> {
%0 = vector.transpose %arg0, [0, 2, 1] : vector<1x1x4xf32> to vector<1x4x1xf32>
diff --git a/compiler/src/iree/compiler/Codegen/Common/test/type_propagation.mlir b/compiler/src/iree/compiler/Codegen/Common/test/type_propagation.mlir
index 0659fae..2881896 100644
--- a/compiler/src/iree/compiler/Codegen/Common/test/type_propagation.mlir
+++ b/compiler/src/iree/compiler/Codegen/Common/test/type_propagation.mlir
@@ -18,7 +18,7 @@
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes=[%d], strides=[1] : tensor<?xi8> -> !flow.dispatch.tensor<writeonly:?xi8>{%d}
return
}
-// CHECK-LABEL: func @generic_op_illegal_operand()
+// CHECK-LABEL: func.func @generic_op_illegal_operand()
// CHECK-DAG: %[[IN:.+]] = hal.interface.binding.subspan set(0) binding(0)
// CHECK-DAG: %[[OUT:.+]] = hal.interface.binding.subspan set(0) binding(1)
// CHECK-DAG: %[[INTENSOR:.+]] = flow.dispatch.tensor.load %[[IN]]
@@ -52,7 +52,7 @@
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes=[%d], strides=[1] : tensor<?xi8> -> !flow.dispatch.tensor<writeonly:?xi8>{%d}
return
}
-// CHECK-LABEL: func @generic_op_illegal_operand_i7()
+// CHECK-LABEL: func.func @generic_op_illegal_operand_i7()
// CHECK-DAG: %[[IN:.+]] = hal.interface.binding.subspan set(0) binding(0)
// CHECK-DAG: %[[OUT:.+]] = hal.interface.binding.subspan set(0) binding(1)
// CHECK-DAG: %[[INTENSOR:.+]] = flow.dispatch.tensor.load %[[IN]]
@@ -86,7 +86,7 @@
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes=[%d], strides=[1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:?xi64>{%d}
return
}
-// CHECK-LABEL: func @generic_op_illegal_operand_i33()
+// CHECK-LABEL: func.func @generic_op_illegal_operand_i33()
// CHECK-DAG: %[[IN:.+]] = hal.interface.binding.subspan set(0) binding(0)
// CHECK-DAG: %[[OUT:.+]] = hal.interface.binding.subspan set(0) binding(1)
// CHECK-DAG: %[[INTENSOR:.+]] = flow.dispatch.tensor.load %[[IN]]
@@ -121,7 +121,7 @@
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes=[%d], strides=[1] : tensor<?xi8> -> !flow.dispatch.tensor<writeonly:?xi8>{%d}
return
}
-// CHECK-LABEL: func @generic_op_illegal_result()
+// CHECK-LABEL: func.func @generic_op_illegal_result()
// CHECK-DAG: %[[IN:.+]] = hal.interface.binding.subspan set(0) binding(0)
// CHECK-DAG: %[[OUT:.+]] = hal.interface.binding.subspan set(0) binding(1)
// CHECK-DAG: %[[INTENSOR:.+]] = flow.dispatch.tensor.load %[[IN]]
@@ -150,7 +150,7 @@
flow.dispatch.tensor.store %5, %1, offsets = [%offset], sizes=[%size], strides=[1] : tensor<?xi8> -> !flow.dispatch.tensor<writeonly:?xi8>{%d}
return
}
-// CHECK-LABEL: func @tensor_extract()
+// CHECK-LABEL: func.func @tensor_extract()
// CHECK-DAG: %[[IN:.+]] = hal.interface.binding.subspan set(0) binding(0)
// CHECK-DAG: %[[OUT:.+]] = hal.interface.binding.subspan set(0) binding(1)
// CHECK-DAG: %[[INTENSOR:.+]] = flow.dispatch.tensor.load %[[IN]]
@@ -175,7 +175,7 @@
flow.dispatch.tensor.store %8, %2, offsets = [0], sizes=[%d], strides=[1] : tensor<?xi8> -> !flow.dispatch.tensor<writeonly:?xi8>{%d}
return
}
-// CHECK-LABEL: func @tensor_insert()
+// CHECK-LABEL: func.func @tensor_insert()
// CHECK-DAG: %[[IN1:.+]] = hal.interface.binding.subspan set(0) binding(0)
// CHECK-DAG: %[[IN2:.+]] = hal.interface.binding.subspan set(0) binding(1)
// CHECK-DAG: %[[OUT:.+]] = hal.interface.binding.subspan set(0) binding(2)
@@ -206,7 +206,7 @@
flow.dispatch.tensor.store %8, %1, offsets=[0], sizes=[%d], strides=[1]: tensor<?xi8> -> !flow.dispatch.tensor<writeonly:?xi8>{%d}
return
}
-// CHECK-LABEL: func @for_loop()
+// CHECK-LABEL: func.func @for_loop()
// CHECK-DAG: %[[IN:.+]] = hal.interface.binding.subspan set(0) binding(0)
// CHECK-DAG: %[[OUT:.+]] = hal.interface.binding.subspan set(0) binding(1)
// CHECK-DAG: %[[INTENSOR:.+]] = flow.dispatch.tensor.load %[[IN]]
@@ -230,7 +230,7 @@
flow.dispatch.tensor.store %3, %0, offsets=[0], sizes=[%d], strides=[1] : tensor<?xi8> -> !flow.dispatch.tensor<writeonly:?xi8>{%d}
return
}
-// CHECK-LABEL: func @fill_op()
+// CHECK-LABEL: func.func @fill_op()
// CHECK-DAG: %[[OUT:.+]] = hal.interface.binding.subspan set(0) binding(0)
// CHECK-DAG: %[[INIT:.+]] = linalg.init_tensor
// CHECK-DAG: %[[FALSE:.+]] = arith.constant false
diff --git a/compiler/src/iree/compiler/Codegen/Common/test/vectorize_linalg_conv.mlir b/compiler/src/iree/compiler/Codegen/Common/test/vectorize_linalg_conv.mlir
index 1d526cb..4ec8bd9 100644
--- a/compiler/src/iree/compiler/Codegen/Common/test/vectorize_linalg_conv.mlir
+++ b/compiler/src/iree/compiler/Codegen/Common/test/vectorize_linalg_conv.mlir
@@ -11,7 +11,7 @@
// CHECK: #map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
// CHECK: #map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
-// CHECK: func @vectorize_conv
+// CHECK: func.func @vectorize_conv
// CHECK-SAME: %[[FILTER_SUBVIEW:.+]]: memref<1x1x3x4xf32>,
// CHECK-SAME: %[[INPUT_SUBVIEW:.+]]: memref<1x2x2x3xf32>,
// CHECK-SAME: %[[OUTPUT_SUBVIEW:.+]]: memref<1x2x2x4xf32>
@@ -70,7 +70,7 @@
// -----
-// CHECK-LABEL: func @do_not_vectorize_conv_with_non_1_batch
+// CHECK-LABEL: func.func @do_not_vectorize_conv_with_non_1_batch
func.func @do_not_vectorize_conv_with_non_1_batch(%filter: memref<1x1x4x4xf32>, %input: memref<2x1x7x4xf32>, %output: memref<2x1x4x4xf32>) {
// CHECK: linalg.conv_2d_nhwc_hwcf
linalg.conv_2d_nhwc_hwcf {dilations = dense<1> : vector<2xi64>, strides = dense<2> : vector<2xi64>}
@@ -81,7 +81,7 @@
// -----
-// CHECK-LABEL: func @do_not_vectorize_conv_with_non_1_filter_height
+// CHECK-LABEL: func.func @do_not_vectorize_conv_with_non_1_filter_height
func.func @do_not_vectorize_conv_with_non_1_filter_height(%filter: memref<2x1x4x4xf32>, %input: memref<1x2x7x4xf32>, %output: memref<1x1x4x4xf32>) {
// CHECK: linalg.conv_2d_nhwc_hwcf
linalg.conv_2d_nhwc_hwcf {dilations = dense<1> : vector<2xi64>, strides = dense<2> : vector<2xi64>}
@@ -92,7 +92,7 @@
// -----
-// CHECK-LABEL: func @do_not_vectorize_conv_with_non_1_filter_width
+// CHECK-LABEL: func.func @do_not_vectorize_conv_with_non_1_filter_width
func.func @do_not_vectorize_conv_with_non_1_filter_width(%filter: memref<1x2x4x4xf32>, %input: memref<1x1x8x4xf32>, %output: memref<1x1x4x4xf32>) {
// CHECK: linalg.conv_2d_nhwc_hwcf
linalg.conv_2d_nhwc_hwcf {dilations = dense<1> : vector<2xi64>, strides = dense<2> : vector<2xi64>}
@@ -103,7 +103,7 @@
// -----
-// CHECK-LABEL: func @do_not_vectorize_conv_with_non_1_dilation
+// CHECK-LABEL: func.func @do_not_vectorize_conv_with_non_1_dilation
func.func @do_not_vectorize_conv_with_non_1_dilation(%filter: memref<1x1x4x4xf32>, %input: memref<1x1x7x4xf32>, %output: memref<1x1x4x4xf32>) {
// CHECK: linalg.conv_2d_nhwc_hwcf
linalg.conv_2d_nhwc_hwcf {dilations = dense<[2, 1]> : vector<2xi64>, strides = dense<2> : vector<2xi64>}
@@ -119,7 +119,7 @@
return
}
-// CHECK-LABEL: func @vectorize_depthwise_conv
+// CHECK-LABEL: func.func @vectorize_depthwise_conv
// CHECK-SAME: %[[INPUT_SUBVIEW:.+]]: memref<1x3x3x8xf32>,
// CHECK-SAME: %[[FILTER_SUBVIEW:.+]]: memref<1x1x8xf32>,
// CHECK-SAME: %[[OUTPUT_SUBVIEW:.+]]: memref<1x2x2x8xf32>
@@ -178,7 +178,7 @@
// -----
-// CHECK-LABEL: func @do_not_vectorize_depthwise_conv_with_non_1_filter_height
+// CHECK-LABEL: func.func @do_not_vectorize_depthwise_conv_with_non_1_filter_height
func.func @do_not_vectorize_depthwise_conv_with_non_1_filter_height(%input: memref<1x2x3x4xf32>, %filter: memref<2x1x4xf32>, %output: memref<1x1x2x4xf32>) {
// CHECK: linalg.depthwise_conv_2d_nhwc_hwc
linalg.depthwise_conv_2d_nhwc_hwc {dilations = dense<1> : tensor<2xi64>, strides = dense<2> : tensor<2xi64>}
@@ -189,7 +189,7 @@
// -----
-// CHECK-LABEL: func @do_not_vectorize_depthwise_conv_with_non_1_filter_width
+// CHECK-LABEL: func.func @do_not_vectorize_depthwise_conv_with_non_1_filter_width
func.func @do_not_vectorize_depthwise_conv_with_non_1_filter_width(%input: memref<1x1x4x4xf32>, %filter: memref<1x2x4xf32>, %output: memref<1x1x2x4xf32>) {
// CHECK: linalg.depthwise_conv_2d_nhwc_hwc
linalg.depthwise_conv_2d_nhwc_hwc {dilations = dense<1> : tensor<2xi64>, strides = dense<2> : tensor<2xi64>}
@@ -207,7 +207,7 @@
return %0 : tensor<1x2x2x4xf32>
}
-// CHECK-LABEL: func @vectorize_conv
+// CHECK-LABEL: func.func @vectorize_conv
// CHECK-SAME: %[[FILTER_TENSOR:.+]]: tensor<1x1x3x4xf32>,
// CHECK-SAME: %[[INPUT_TENSOR:.+]]: tensor<1x2x2x3xf32>,
// CHECK-SAME: %[[INIT_TENSOR:.+]]: tensor<1x2x2x4xf32>
@@ -244,7 +244,7 @@
return %0 : tensor<1x2x2x8xf32>
}
-// CHECK-LABEL: func @vectorize_depthwise_conv
+// CHECK-LABEL: func.func @vectorize_depthwise_conv
// CHECK-SAME: %[[INPUT_TENSOR:.+]]: tensor<1x3x3x8xf32>,
// CHECK-SAME: %[[FILTER_TENSOR:.+]]: tensor<1x1x8xf32>,
// CHECK-SAME: %[[INIT_TENSOR:.+]]: tensor<1x2x2x8xf32>
diff --git a/compiler/src/iree/compiler/Codegen/Common/test/vectorize_linalg_mmt4d.mlir b/compiler/src/iree/compiler/Codegen/Common/test/vectorize_linalg_mmt4d.mlir
index 0161a62..f6a335d 100644
--- a/compiler/src/iree/compiler/Codegen/Common/test/vectorize_linalg_mmt4d.mlir
+++ b/compiler/src/iree/compiler/Codegen/Common/test/vectorize_linalg_mmt4d.mlir
@@ -8,7 +8,7 @@
// CHECK: #[[MAP0:.+]] = affine_map<([[D0:.*]], [[D1:.*]], [[D2:.*]]) -> ([[D0]], [[D2]])>
// CHECK: #[[MAP1:.+]] = affine_map<([[D0]], [[D1]], [[D2]]) -> ([[D1]], [[D2]])>
// CHECK: #[[MAP2:.+]] = affine_map<([[D0]], [[D1]], [[D2]]) -> ([[D0]], [[D1]])>
-// CHECK: func @tiled_mmt4d_4x4x4_f32(
+// CHECK: func.func @tiled_mmt4d_4x4x4_f32(
// CHECK-SAME: %[[LHS:[[:alnum:]]+]]: tensor<1x1x4x4xf32>
// CHECK-SAME: %[[RHS:[[:alnum:]]+]]: tensor<1x1x4x4xf32>
// CHECK-SAME: %[[ACC:[[:alnum:]]+]]: tensor<1x1x4x4xf32>
@@ -35,7 +35,7 @@
// CHECK: #[[MAP0:.+]] = affine_map<([[D0:.*]], [[D1:.*]], [[D2:.*]]) -> ([[D0]], [[D2]])>
// CHECK: #[[MAP1:.+]] = affine_map<([[D0]], [[D1]], [[D2]]) -> ([[D1]], [[D2]])>
// CHECK: #[[MAP2:.+]] = affine_map<([[D0]], [[D1]], [[D2]]) -> ([[D0]], [[D1]])>
-// CHECK: func @tiled_mmt4d_8x2x4_i8(
+// CHECK: func.func @tiled_mmt4d_8x2x4_i8(
// CHECK-SAME: %[[LHS:[[:alnum:]]+]]: tensor<1x1x8x2xi8>
// CHECK-SAME: %[[RHS:[[:alnum:]]+]]: tensor<1x1x4x2xi8>
// CHECK-SAME: %[[ACC:[[:alnum:]]+]]: tensor<1x1x8x4xi32>
diff --git a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/check_ir_before_llvm_conversion.mlir b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/check_ir_before_llvm_conversion.mlir
index 7b7af56..e5c51f2 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/check_ir_before_llvm_conversion.mlir
+++ b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/check_ir_before_llvm_conversion.mlir
@@ -23,7 +23,7 @@
#map = affine_map<(d0) -> (-d0, 16384)>
// expected-error @+1 {{expected total size of stack allocation is not greater than 32 KB, but got 65536 bytes}}
module {
- func @dynamic_big_allocas(%arg0: index) {
+ func.func @dynamic_big_allocas(%arg0: index) {
%0 = affine.min #map(%arg0)
%1 = memref.alloca(%0) : memref<?xf32>
return
diff --git a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/illegal_configuration.mlir b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/illegal_configuration.mlir
index fa6b0f2..5c11813 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/illegal_configuration.mlir
+++ b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/illegal_configuration.mlir
@@ -139,7 +139,7 @@
translation_info = #translation
}
builtin.module {
- func @illegal() {
+ func.func @illegal() {
%c0 = arith.constant 0 : index
%lhs = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : memref<4x8xf32>
%rhs = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : memref<8x16xf32>
diff --git a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/materialize_vmvx_launch_configuration.mlir b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/materialize_vmvx_launch_configuration.mlir
index 48945d4..d57eca3 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/materialize_vmvx_launch_configuration.mlir
+++ b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/materialize_vmvx_launch_configuration.mlir
@@ -114,6 +114,6 @@
// CHECK-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation_info<CPUDefault>
// CHECK: hal.executable.entry_point public @static_1d_fft_stage2
// CHECK-SAME: translation_info = #[[TRANSLATION]]
-// CHECK: func @static_1d_fft_stage2()
+// CHECK: func.func @static_1d_fft_stage2()
// CHECK: iree_linalg_ext.fft
// CHECK-SAME: lowering_config = #[[CONFIG]]
diff --git a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/materialize_x86_64_launch_configuration.mlir b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/materialize_x86_64_launch_configuration.mlir
index c79af34..a0be14e 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/materialize_x86_64_launch_configuration.mlir
+++ b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/materialize_x86_64_launch_configuration.mlir
@@ -369,7 +369,7 @@
// CHECK-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation_info<CPUDoubleTilingExpert>
// CHECK: hal.executable.entry_point
// CHECK-SAME: translation_info = #[[TRANSLATION]]
-// CHECK: func @preset_config
+// CHECK: func.func @preset_config
// CHECK: linalg.matmul
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -449,7 +449,7 @@
// CHECK-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation_info<CPUDefault>
// CHECK: hal.executable.entry_point public @static_1d_fft_stage2
// CHECK-SAME: translation_info = #[[TRANSLATION]]
-// CHECK: func @static_1d_fft_stage2()
+// CHECK: func.func @static_1d_fft_stage2()
// CHECK: iree_linalg_ext.fft
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -486,7 +486,7 @@
// CHECK-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation_info<CPUDefault>
// CHECK: hal.executable.entry_point public @static_3d_fft_stage3
// CHECK-SAME: translation_info = #[[TRANSLATION]]
-// CHECK: func @static_3d_fft_stage3()
+// CHECK: func.func @static_3d_fft_stage3()
// CHECK: iree_linalg_ext.fft
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -548,7 +548,7 @@
// CHECK: #[[TRANSLATION:.+]] = #iree_codegen.translation_info<CPUDoubleTilingExpert>
// CHECK: hal.executable.entry_point public @outs_fusion_fn
// CHECK-SAME: translation_info = #[[TRANSLATION]]
-// CHECK: func @outs_fusion_fn()
+// CHECK: func.func @outs_fusion_fn()
// CHECK: linalg.generic
// CHECK-NOT: lowering_config
// CHECK: linalg.generic
@@ -1247,7 +1247,7 @@
hal.executable.variant @embedded_elf_x86_64, target = #executable_target_embedded_elf_x86_64_ {
hal.executable.entry_point @transpose_8x8 layout(#executable_layout)
builtin.module {
- func @transpose_8x8() {
+ func.func @transpose_8x8() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) offset(%c0) alignment(64) : !flow.dispatch.tensor<readonly:512x1024xf32>
diff --git a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/pipeline_tests.mlir b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/pipeline_tests.mlir
index ada40c4..3588736 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/pipeline_tests.mlir
+++ b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/pipeline_tests.mlir
@@ -23,7 +23,7 @@
hal.executable.variant public @embedded_elf_x86_64, target = #executable_target_embedded_elf_x86_64_ {
hal.executable.entry_point public @check_no_cse ordinal(0) layout(#executable_layout5)
builtin.module {
- func @check_no_cse() {
+ func.func @check_no_cse() {
%cst = arith.constant 3.840000e+02 : f32
%cst_0 = arith.constant 0.000000e+00 : f32
%0 = hal.interface.constant.load[0] : i32
@@ -51,7 +51,7 @@
}
}
}
-// CHECK: func @check_no_cse()
+// CHECK: func.func @check_no_cse()
// CHECK-NOT: memref.alloc
// CHECK: %[[FOR:.+]] = scf.for
// CHECK: %[[DIVF:.+]] = arith.divf %[[FOR]]
diff --git a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/test_config_mmt4d.mlir b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/test_config_mmt4d.mlir
index efb9862..fd03197 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/test_config_mmt4d.mlir
+++ b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/test_config_mmt4d.mlir
@@ -40,6 +40,6 @@
}
// CHECK-DAG: #[[CONFIG:.+]] = #iree_codegen.lowering_config<tile_sizes = {{\[}}[48, 32], [1, 1, 1, 4, 4, 1], [1, 1, 1, 4, 4, 1]{{\]}}
-// CHECK: func @mmt4d_384x384x512_4x1x4_dispatch_0()
+// CHECK: func.func @mmt4d_384x384x512_4x1x4_dispatch_0()
// CHECK: linalg.mmt4d
// CHECK-SAME: lowering_config = #[[CONFIG]]
diff --git a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/tile_fuse_and_vectorize.mlir b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/tile_fuse_and_vectorize.mlir
index 3fb9ea5..a8255a9 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/tile_fuse_and_vectorize.mlir
+++ b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/tile_fuse_and_vectorize.mlir
@@ -41,7 +41,7 @@
}
// CHECK: #[[MAP:.+]] = affine_map<()[s0] -> (s0 * 64)>
-// CHECK: func @dot_384x512x128_dispatch_0() {
+// CHECK: func.func @dot_384x512x128_dispatch_0() {
// CHECK-DAG: %[[CST:.+]] = arith.constant 0.000000e+00 : f32
// CHECK-DAG: %[[CST_VECTOR:.+]] = arith.constant dense<0.000000e+00> : vector<16x16xf32>
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
@@ -130,7 +130,7 @@
return
}
-// CHECK: func @matmul_gather() {
+// CHECK: func.func @matmul_gather() {
// Check that matmul is lowered to vector ops.
// CHECK-NOT: linalg.matmul
// CHECK: vector.outerproduct
@@ -201,7 +201,7 @@
return
}
-// CHECK: func @nonvectorizable_matmul_and_vectorizable_generic
+// CHECK: func.func @nonvectorizable_matmul_and_vectorizable_generic
// Verify that both matmul and generic ops are not vectorized.
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[C16:.+]] = arith.constant 16 : index
diff --git a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/transpose_avx2_lowering.mlir b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/transpose_avx2_lowering.mlir
index a09cd09..12a69e5 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/transpose_avx2_lowering.mlir
+++ b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/transpose_avx2_lowering.mlir
@@ -20,7 +20,7 @@
hal.executable.variant @embedded_elf_x86_64, target = #executable_target_embedded_elf_x86_64_ {
hal.executable.entry_point @transpose_10_8x8_pattern layout(#executable_layout)
builtin.module {
- func @transpose_10_8x8_pattern() {
+ func.func @transpose_10_8x8_pattern() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) offset(%c0) alignment(64) : !flow.dispatch.tensor<readonly:512x1024xf32>
@@ -41,7 +41,7 @@
}
}
-// CHECK-LABEL: func @transpose_10_8x8_pattern
+// CHECK-LABEL: func.func @transpose_10_8x8_pattern
// CHECK-COUNT-8: vector.load
// CHECK-NOT: vector.extract
// CHECK-NOT: vector.insert
@@ -74,7 +74,7 @@
hal.executable.variant @embedded_elf_x86_64, target = #executable_target_embedded_elf_x86_64_ {
hal.executable.entry_point @transpose_021_8x8_pattern layout(#executable_layout)
builtin.module {
- func @transpose_021_8x8_pattern() {
+ func.func @transpose_021_8x8_pattern() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) offset(%c0) alignment(64) : !flow.dispatch.tensor<readonly:64x96x128xf32>
@@ -95,7 +95,7 @@
}
}
-// CHECK-LABEL: func @transpose_021_8x8_pattern
+// CHECK-LABEL: func.func @transpose_021_8x8_pattern
// CHECK-COUNT-8: vector.load
// CHECK-NOT: vector.extract
// CHECK-NOT: vector.insert
@@ -128,7 +128,7 @@
hal.executable.variant @embedded_elf_x86_64, target = #executable_target_embedded_elf_x86_64_ {
hal.executable.entry_point @transpose_201_8x8_pattern layout(#executable_layout)
builtin.module {
- func @transpose_201_8x8_pattern() {
+ func.func @transpose_201_8x8_pattern() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) offset(%c0) alignment(64) : !flow.dispatch.tensor<readonly:64x96x128xf32>
@@ -149,7 +149,7 @@
}
}
-// CHECK-LABEL: func @transpose_201_8x8_pattern
+// CHECK-LABEL: func.func @transpose_201_8x8_pattern
// CHECK-COUNT-8: vector.load
// CHECK-NOT: vector.extract
// CHECK-NOT: vector.insert
@@ -182,7 +182,7 @@
hal.executable.variant @embedded_elf_x86_64, target = #executable_target_embedded_elf_x86_64_ {
hal.executable.entry_point @transpose_210_8x8_pattern layout(#executable_layout)
builtin.module {
- func @transpose_210_8x8_pattern() {
+ func.func @transpose_210_8x8_pattern() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) offset(%c0) alignment(64) : !flow.dispatch.tensor<readonly:64x96x128xf32>
@@ -203,7 +203,7 @@
}
}
-// CHECK-LABEL: func @transpose_210_8x8_pattern
+// CHECK-LABEL: func.func @transpose_210_8x8_pattern
// CHECK-COUNT-8: vector.load
// CHECK-NOT: vector.extract
// CHECK-NOT: vector.insert
@@ -236,7 +236,7 @@
hal.executable.variant @embedded_elf_x86_64, target = #executable_target_embedded_elf_x86_64_ {
hal.executable.entry_point @transpose_120_8x8_pattern layout(#executable_layout)
builtin.module {
- func @transpose_120_8x8_pattern() {
+ func.func @transpose_120_8x8_pattern() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) offset(%c0) alignment(64) : !flow.dispatch.tensor<readonly:64x96x128xf32>
@@ -257,7 +257,7 @@
}
}
-// CHECK-LABEL: func @transpose_120_8x8_pattern
+// CHECK-LABEL: func.func @transpose_120_8x8_pattern
// CHECK-COUNT-8: vector.load
// CHECK-NOT: vector.extract
// CHECK-NOT: vector.insert
@@ -290,7 +290,7 @@
hal.executable.variant @embedded_elf_x86_64, target = #executable_target_embedded_elf_x86_64_ {
hal.executable.entry_point @transpose_102 layout(#executable_layout)
builtin.module {
- func @transpose_102() {
+ func.func @transpose_102() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) offset(%c0) alignment(64) : !flow.dispatch.tensor<readonly:64x96x128xf32>
@@ -311,7 +311,7 @@
}
}
-// CHECK-LABEL: func @transpose_102
+// CHECK-LABEL: func.func @transpose_102
// CHECK-NOT: vector.shuffle %{{.*}}, %{{.*}} [0, 8, 1, 9, 4, 12, 5, 13] : vector<8xf32>, vector<8xf32>
// CHECK-NOT: llvm.inline_asm asm_dialect = intel "vblendps $0, $1, $2, 0xcc", "=x,x,x" %{{.*}}, %{{.*}} : (vector<8xf32>, vector<8xf32>) -> vector<8xf32>
// -----
@@ -336,7 +336,7 @@
hal.executable.variant @embedded_elf_x86_64, target = #executable_target_embedded_elf_x86_64_ {
hal.executable.entry_point @test_no_avx2_feature layout(#executable_layout)
builtin.module {
- func @test_no_avx2_feature() {
+ func.func @test_no_avx2_feature() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) offset(%c0) alignment(64) : !flow.dispatch.tensor<readonly:512x1024xf32>
@@ -357,6 +357,6 @@
}
}
-// CHECK-LABEL: func @test_no_avx2_feature
+// CHECK-LABEL: func.func @test_no_avx2_feature
// CHECK-NOT: vector.shuffle %{{.*}}, %{{.*}} [0, 8, 1, 9, 4, 12, 5, 13] : vector<8xf32>, vector<8xf32>
// CHECK-NOT: llvm.inline_asm asm_dialect = intel "vblendps $0, $1, $2, 0xcc", "=x,x,x" %{{.*}}, %{{.*}} : (vector<8xf32>, vector<8xf32>) -> vector<8xf32>
diff --git a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/unfused_fma.mlir b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/unfused_fma.mlir
index ce3f592..2a619bb 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/unfused_fma.mlir
+++ b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/unfused_fma.mlir
@@ -5,7 +5,7 @@
return %0 : f32
}
-// CHECK: func @fma_unfused(%[[A:.+]]: f32, %[[B:.+]]: f32, %[[C:.+]]: f32)
+// CHECK: func.func @fma_unfused(%[[A:.+]]: f32, %[[B:.+]]: f32, %[[C:.+]]: f32)
// CHECK: %[[MUL:.+]] = llvm.fmul %[[A]], %[[B]]
// CHECK: %[[RES:.+]] = llvm.fadd %[[MUL]], %[[C]]
// CHECK: return %[[RES]]
diff --git a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/vector_contract_to_arm_asm.mlir b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/vector_contract_to_arm_asm.mlir
index 3bd60cd..1f50583 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/vector_contract_to_arm_asm.mlir
+++ b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/vector_contract_to_arm_asm.mlir
@@ -24,7 +24,7 @@
} %lhs_wide, %rhs_wide, %acc : vector<8x4xi32>, vector<8x4xi32> into vector<8x8xi32>
return %res : vector<8x8xi32>
}
-// AARCH64-DOTPROD-LABEL: func @check_in_depth_mmt_8x4x8_i8i8i32(
+// AARCH64-DOTPROD-LABEL: func.func @check_in_depth_mmt_8x4x8_i8i8i32(
// AARCH64-DOTPROD-SAME: %[[LHS:[^:[:space:]]+]]
// AARCH64-DOTPROD-SAME: %[[RHS:[^:[:space:]]+]]
// AARCH64-DOTPROD-SAME: %[[ACC:[^:[:space:]]+]]
diff --git a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/verify_linalg_transform_legality.mlir b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/verify_linalg_transform_legality.mlir
index 3690cbe..4683cbd 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMCPU/test/verify_linalg_transform_legality.mlir
+++ b/compiler/src/iree/compiler/Codegen/LLVMCPU/test/verify_linalg_transform_legality.mlir
@@ -1,6 +1,6 @@
// RUN: iree-opt --iree-llvmcpu-verify-linalg-transform-legality %s --verify-diagnostics -split-input-file
-func @matmul_123x456xf32_times_456x789xf32_into_123x789xf32_dispatch_0() {
+func.func @matmul_123x456xf32_times_456x789xf32_into_123x789xf32_dispatch_0() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) offset(%c0) alignment(64) : !flow.dispatch.tensor<readonly:123x4x114xf32>
diff --git a/compiler/src/iree/compiler/Codegen/LLVMGPU/test/distribute_to_thread.mlir b/compiler/src/iree/compiler/Codegen/LLVMGPU/test/distribute_to_thread.mlir
index 1d526ae..e9bfac0 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMGPU/test/distribute_to_thread.mlir
+++ b/compiler/src/iree/compiler/Codegen/LLVMGPU/test/distribute_to_thread.mlir
@@ -350,7 +350,7 @@
}
// Check that the convolution is distributed.
-// CHECK-LABEL: func @conv_dispatch
+// CHECK-LABEL: func.func @conv_dispatch
// CHECK: scf.for
// CHECK: scf.for
// CHECK: scf.for
diff --git a/compiler/src/iree/compiler/Codegen/LLVMGPU/test/gpu_set_num_workgroups.mlir b/compiler/src/iree/compiler/Codegen/LLVMGPU/test/gpu_set_num_workgroups.mlir
index 1aec990..a78716f 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMGPU/test/gpu_set_num_workgroups.mlir
+++ b/compiler/src/iree/compiler/Codegen/LLVMGPU/test/gpu_set_num_workgroups.mlir
@@ -36,7 +36,7 @@
// CHECK: hal.executable.entry_point public @add_dispatch_0
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [64 : index, 1 : index, 1 : index]
-// CHECK: func @add_dispatch_0
+// CHECK: func.func @add_dispatch_0
// CHECK: linalg.generic
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -74,7 +74,7 @@
// CHECK: hal.executable.entry_point public @dot_dispatch_1
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [2 : index, 4 : index, 1 : index]
-// CHECK: func @dot_dispatch_1
+// CHECK: func.func @dot_dispatch_1
// CHECK: linalg.fill
// CHECK-SAME: lowering_config = #[[CONFIG]]
// CHECK: linalg.matmul
diff --git a/compiler/src/iree/compiler/Codegen/LLVMGPU/test/legalize.mlir b/compiler/src/iree/compiler/Codegen/LLVMGPU/test/legalize.mlir
index 64aecd8..d1b5b6c 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMGPU/test/legalize.mlir
+++ b/compiler/src/iree/compiler/Codegen/LLVMGPU/test/legalize.mlir
@@ -1,6 +1,6 @@
// RUN: iree-opt --iree-test-llvmgpu-legalize-ops --split-input-file %s | FileCheck %s
-// CHECK-LABEL: func @scalarize
+// CHECK-LABEL: func.func @scalarize
func.func @scalarize(
%arg0: vector<3x1x2xf32>,%arg1: vector<2xf32>, %arg2: vector<2xf32>)
-> (vector<3x1x2xf32>, vector<2xf32>) {
@@ -38,7 +38,7 @@
// -----
// CHECK: memref.global "private" @__shared_memory__ : memref<16x16xf32, 3>
-// CHECK: func @allocation
+// CHECK: func.func @allocation
// CHECK: %[[A:.*]] = memref.get_global @__shared_memory__ : memref<16x16xf32, 3>
// CHECK: memref.store %{{.*}}, %[[A]][%{{.*}}, %{{.*}}] : memref<16x16xf32, 3>
func.func @allocation(%arg0: f32) {
diff --git a/compiler/src/iree/compiler/Codegen/LLVMGPU/test/reduce_bank_conflicts.mlir b/compiler/src/iree/compiler/Codegen/LLVMGPU/test/reduce_bank_conflicts.mlir
index 195a8dc..6d7bfe7 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMGPU/test/reduce_bank_conflicts.mlir
+++ b/compiler/src/iree/compiler/Codegen/LLVMGPU/test/reduce_bank_conflicts.mlir
@@ -3,7 +3,7 @@
#map = affine_map<(d0, d1, d2) -> (d0 * 2048 + d1 * 64 + d2)>
// CHECK-DAG: #[[$MAP:.*]] = affine_map<(d0, d1, d2) -> (d0 * 2176 + d1 * 68 + d2)>
-// CHECK-LABEL: func @pad_alloc
+// CHECK-LABEL: func.func @pad_alloc
func.func @pad_alloc(%a: memref<1024x1024xf32>) {
// CHECK: %[[A:.*]] = memref.alloc() : memref<4x32x68xf32, 3>
%0 = memref.alloc() : memref<4x32x64xf32, 3>
diff --git a/compiler/src/iree/compiler/Codegen/LLVMGPU/test/tensorcore_vectorization.mlir b/compiler/src/iree/compiler/Codegen/LLVMGPU/test/tensorcore_vectorization.mlir
index 127138a..425e406 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMGPU/test/tensorcore_vectorization.mlir
+++ b/compiler/src/iree/compiler/Codegen/LLVMGPU/test/tensorcore_vectorization.mlir
@@ -33,7 +33,7 @@
return
}
-// CHECK-LABEL: func @dot
+// CHECK-LABEL: func.func @dot
// CHECK-COUNT-4: vector.transfer_write {{.*}} : vector<16x16xf32>, memref<32x32xf32
// CHECK: scf.for
// CHECK-COUNT-4: vector.transfer_read {{.*}} {in_bounds = [true, true]} : memref<32x16xf32, #{{.*}}>, vector<16x8xf32>
diff --git a/compiler/src/iree/compiler/Codegen/LLVMGPU/test/vector_to_gpu.mlir b/compiler/src/iree/compiler/Codegen/LLVMGPU/test/vector_to_gpu.mlir
index 6f006f0..f043399 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMGPU/test/vector_to_gpu.mlir
+++ b/compiler/src/iree/compiler/Codegen/LLVMGPU/test/vector_to_gpu.mlir
@@ -1,6 +1,6 @@
// RUN: iree-opt %s --allow-unregistered-dialect --iree-llvmgpu-vector-to-gpu --canonicalize --split-input-file | FileCheck %s
-// CHECK-LABEL: func @copies_to_asyncs
+// CHECK-LABEL: func.func @copies_to_asyncs
func.func @copies_to_asyncs(%a: memref<1024x1024xf32>) {
%0 = memref.alloc() : memref<4x32x16xf32, 3>
%c0 = arith.constant 0 : index
@@ -30,7 +30,7 @@
return %0 : vector<16x1x8xf32>
}
// CHECK-DAG: #[[$MAP:.*]] = affine_map<(d0, d1) -> (d0 * 4096 + d1 + 8964)>
-// CHECK-LABEL: func @ksplitmatmul_basic
+// CHECK-LABEL: func.func @ksplitmatmul_basic
// CHECK-DAG: %[[ID:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[CST:.*]] = arith.constant 0.000000e+00 : f32
// CHECK: %[[M:.*]] = memref.subview
@@ -52,7 +52,7 @@
%0 = vector.transfer_read %a[%c2, %c3, %c4], %cst {in_bounds = [true, true, true]} : memref<128x16x256xf32>, vector<16x2x8xf32>
return %0 : vector<16x2x8xf32>
}
-// CHECK-LABEL: func @ksplitmatmul_nounitdim
+// CHECK-LABEL: func.func @ksplitmatmul_nounitdim
// CHECK-DAG: %[[ID:.*]] = arith.constant 2 : index
// CHECK-DAG: %[[ID2:.*]] = arith.constant 3 : index
// CHECK-DAG: %[[ID3:.*]] = arith.constant 4 : index
@@ -73,7 +73,7 @@
return %0 : vector<16x1x1x8xf32>
}
// CHECK-DAG: #[[$MAP:.*]] = affine_map<(d0, d1) -> (d0 * 131072 + d1 + 287749)>
-// CHECK-LABEL: func @ksplitmatmul_4D
+// CHECK-LABEL: func.func @ksplitmatmul_4D
// CHECK-DAG: %[[ID:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[CST:.*]] = arith.constant 0.000000e+00 : f32
// CHECK: %[[M:.*]] = memref.subview
@@ -97,7 +97,7 @@
return %0 : vector<16x1x8xf32>
}
// CHECK-DAG: #[[$MAP:.*]] = affine_map<(d0, d1) -> (d0 * 8192 + d1 + 8414213)>
-// CHECK-LABEL: func @ksplitmatmul_4D_lower_rank_read
+// CHECK-LABEL: func.func @ksplitmatmul_4D_lower_rank_read
// CHECK-DAG: %[[ID:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[CST:.*]] = arith.constant 0.000000e+00 : f32
// CHECK: %[[M:.*]] = memref.subview
@@ -121,7 +121,7 @@
return %0 : vector<16x1x8x1xf32>
}
-// CHECK-LABEL: func @ksplitmatmul_4D_negative
+// CHECK-LABEL: func.func @ksplitmatmul_4D_negative
// CHECK-DAG: %[[ID:.*]] = arith.constant 2 : index
// CHECK-DAG: %[[ID2:.*]] = arith.constant 3 : index
// CHECK-DAG: %[[ID3:.*]] = arith.constant 4 : index
@@ -144,7 +144,7 @@
}
// CHECK-DAG: #[[$MAP:.*]] = affine_map<(d0, d1) -> (d0 * 256 + d1 + 287749)>
-// CHECK-LABEL: func @ksplitmatmul_4D_allone
+// CHECK-LABEL: func.func @ksplitmatmul_4D_allone
// CHECK-DAG: %[[ID:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[CST:.*]] = arith.constant 0.000000e+00 : f32
// CHECK: %[[M:.*]] = memref.subview
diff --git a/compiler/src/iree/compiler/Codegen/LLVMGPU/test/vectorization.mlir b/compiler/src/iree/compiler/Codegen/LLVMGPU/test/vectorization.mlir
index 42df78f..37f46e2 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMGPU/test/vectorization.mlir
+++ b/compiler/src/iree/compiler/Codegen/LLVMGPU/test/vectorization.mlir
@@ -14,7 +14,7 @@
}
return
}
-// CHECK-LABEL: func @add_dispatch_0
+// CHECK-LABEL: func.func @add_dispatch_0
// CHECK-COUNT-8: vector.transfer_read {{.*}} : memref<1x8x4xf32>, vector<1x1x4xf32>
// CHECK-COUNT-8: vector.transfer_read {{.*}} : memref<1x4x8xf32>, vector<1x1x4xf32>
// CHECK-COUNT-8: addf %{{.*}}, %{{.*}} : vector<1x1x4xf32>
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/test/config_adreno_conv.mlir b/compiler/src/iree/compiler/Codegen/SPIRV/test/config_adreno_conv.mlir
index bbf5a91..f1c2204 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/test/config_adreno_conv.mlir
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/test/config_adreno_conv.mlir
@@ -48,7 +48,7 @@
// CHECK: hal.executable.entry_point public @conv_112x112x512
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [64 : index, 1 : index, 1 : index]
-// CHECK: func @conv_112x112x512()
+// CHECK: func.func @conv_112x112x512()
// CHECK: linalg.conv_2d_nhwc_hwcf
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -102,7 +102,7 @@
// CHECK: hal.executable.entry_point public @conv_112x112x32
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [8 : index, 8 : index, 1 : index]
-// CHECK: func @conv_112x112x32()
+// CHECK: func.func @conv_112x112x32()
// CHECK: linalg.conv_2d_nhwc_hwcf
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -156,7 +156,7 @@
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [4 : index, 4 : index, 4 : index]
-// CHECK: func @conv_16x16x16()
+// CHECK: func.func @conv_16x16x16()
// CHECK: linalg.conv_2d_nhwc_hwcf
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -210,7 +210,7 @@
// CHECK: hal.executable.entry_point public @dwconv_28x28x144
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [4 : index, 4 : index, 4 : index]
-// CHECK: func @dwconv_28x28x144()
+// CHECK: func.func @dwconv_28x28x144()
// CHECK: linalg.depthwise_conv_2d_nhwc_hwc
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -263,6 +263,6 @@
// CHECK: hal.executable.entry_point public @dwconv_4x4x8
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [2 : index, 4 : index, 4 : index]
-// CHECK: func @dwconv_4x4x8()
+// CHECK: func.func @dwconv_4x4x8()
// CHECK: linalg.depthwise_conv_2d_nhwc_hwc
// CHECK-SAME: lowering_config = #[[CONFIG]]
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/test/config_adreno_matmul.mlir b/compiler/src/iree/compiler/Codegen/SPIRV/test/config_adreno_matmul.mlir
index 32ebbba..a5556d4 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/test/config_adreno_matmul.mlir
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/test/config_adreno_matmul.mlir
@@ -48,7 +48,7 @@
// CHECK: hal.executable.entry_point public @matmul_1024x2048x512
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [32 : index, 2 : index, 1 : index]
-// CHECK: func @matmul_1024x2048x512()
+// CHECK: func.func @matmul_1024x2048x512()
// CHECK: linalg.matmul
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -102,7 +102,7 @@
// CHECK: hal.executable.entry_point public @matmul_3136x24x96
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [2 : index, 32 : index, 1 : index]
-// CHECK: func @matmul_3136x24x96()
+// CHECK: func.func @matmul_3136x24x96()
// CHECK: linalg.matmul
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -156,7 +156,7 @@
// CHECK: hal.executable.entry_point public @matmul_196x64x192
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [16 : index, 4 : index, 1 : index]
-// CHECK: func @matmul_196x64x192()
+// CHECK: func.func @matmul_196x64x192()
// CHECK: linalg.matmul
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -202,7 +202,7 @@
// CHECK: hal.executable.entry_point public @matmul_12544x96x16
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [8 : index, 8 : index, 1 : index]
-// CHECK: func @matmul_12544x96x16()
+// CHECK: func.func @matmul_12544x96x16()
// CHECK: linalg.matmul
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -254,7 +254,7 @@
// CHECK: hal.executable.entry_point public @matmul_49x160x576
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [8 : index, 1 : index, 1 : index]
-// CHECK: func @matmul_49x160x576()
+// CHECK: func.func @matmul_49x160x576()
// CHECK: linalg.matmul
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -308,7 +308,7 @@
// CHECK: hal.executable.entry_point public @batch_matmul_4x384x384
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [32 : index, 2 : index, 1 : index]
-// CHECK: func @batch_matmul_4x384x384()
+// CHECK: func.func @batch_matmul_4x384x384()
// CHECK: linalg.batch_matmul
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -362,6 +362,6 @@
// CHECK: hal.executable.entry_point public @batch_matmul_4x8x8
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [2 : index, 8 : index, 1 : index]
-// CHECK: func @batch_matmul_4x8x8()
+// CHECK: func.func @batch_matmul_4x8x8()
// CHECK: linalg.batch_matmul
// CHECK-SAME: lowering_config = #[[CONFIG]]
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/test/config_default_conv.mlir b/compiler/src/iree/compiler/Codegen/SPIRV/test/config_default_conv.mlir
index d52eb7b..a72b44f 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/test/config_default_conv.mlir
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/test/config_default_conv.mlir
@@ -67,6 +67,6 @@
// CHECK: hal.executable.entry_point public @conv_pointwise_112x112x32
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [8 : index, 2 : index, 2 : index]
-// CHECK: func @conv_pointwise_112x112x32()
+// CHECK: func.func @conv_pointwise_112x112x32()
// CHECK: linalg.conv_2d_nhwc_hwcf
// CHECK-SAME: lowering_config = #[[CONFIG]]
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/test/config_default_linalg_ext_ops.mlir b/compiler/src/iree/compiler/Codegen/SPIRV/test/config_default_linalg_ext_ops.mlir
index 6cb3e52..28dff4b 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/test/config_default_linalg_ext_ops.mlir
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/test/config_default_linalg_ext_ops.mlir
@@ -38,7 +38,7 @@
// CHECK: hal.executable.entry_point public @static_1d_sort
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [1 : index, 1 : index, 1 : index]
-// CHECK: func @static_1d_sort()
+// CHECK: func.func @static_1d_sort()
// CHECK: iree_linalg_ext.sort
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -87,7 +87,7 @@
// CHECK: hal.executable.entry_point public @static_3d_sort
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [16 : index, 1 : index, 1 : index]
-// CHECK: func @static_3d_sort()
+// CHECK: func.func @static_3d_sort()
// CHECK: iree_linalg_ext.sort
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -132,7 +132,7 @@
// CHECK: hal.executable.entry_point public @static_1d_fft_stage2
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [16 : index, 1 : index, 1 : index]
-// CHECK: func @static_1d_fft_stage2()
+// CHECK: func.func @static_1d_fft_stage2()
// CHECK: iree_linalg_ext.fft
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -181,6 +181,6 @@
// CHECK: hal.executable.entry_point public @static_3d_fft_stage3
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [16 : index, 1 : index, 1 : index]
-// CHECK: func @static_3d_fft_stage3()
+// CHECK: func.func @static_3d_fft_stage3()
// CHECK: iree_linalg_ext.fft
// CHECK-SAME: lowering_config = #[[CONFIG]]
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/test/config_default_matmul.mlir b/compiler/src/iree/compiler/Codegen/SPIRV/test/config_default_matmul.mlir
index efe2d1e..c873e10 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/test/config_default_matmul.mlir
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/test/config_default_matmul.mlir
@@ -49,7 +49,7 @@
// CHECK: hal.executable.entry_point public @batch_matmul_1x3x32
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [32 : index, 1 : index, 1 : index]
-// CHECK: func @batch_matmul_1x3x32()
+// CHECK: func.func @batch_matmul_1x3x32()
// CHECK: linalg.batch_matmul
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -103,7 +103,7 @@
// CHECK: hal.executable.entry_point public @matmul_64x16
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [16 : index, 4 : index, 1 : index]
-// CHECK: func @matmul_64x16()
+// CHECK: func.func @matmul_64x16()
// CHECK: linalg.matmul
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -168,7 +168,7 @@
// CHECK: hal.executable.entry_point public @matmul_400x273
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [32 : index, 2 : index, 1 : index]
-// CHECK: func @matmul_400x273()
+// CHECK: func.func @matmul_400x273()
// CHECK: linalg.matmul
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -234,7 +234,7 @@
// CHECK: hal.executable.entry_point public @matmul_25x546
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [2 : index, 32 : index, 1 : index]
-// CHECK: func @matmul_25x546()
+// CHECK: func.func @matmul_25x546()
// CHECK: linalg.matmul
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -310,6 +310,6 @@
// CHECK: hal.executable.entry_point public @matmul_pointwise_256x1024
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [32 : index, 2 : index, 1 : index]
-// CHECK: func @matmul_pointwise_256x1024()
+// CHECK: func.func @matmul_pointwise_256x1024()
// CHECK: linalg.matmul
// CHECK-SAME: lowering_config = #[[CONFIG]]
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/test/config_mali_conv.mlir b/compiler/src/iree/compiler/Codegen/SPIRV/test/config_mali_conv.mlir
index 09328d7..6e11ef5 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/test/config_mali_conv.mlir
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/test/config_mali_conv.mlir
@@ -49,7 +49,7 @@
// CHECK: hal.executable.entry_point public @conv_112x112x512
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [16 : index, 1 : index, 1 : index]
-// CHECK: func @conv_112x112x512()
+// CHECK: func.func @conv_112x112x512()
// CHECK: linalg.conv_2d_nhwc_hwcf
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -103,7 +103,7 @@
// CHECK: hal.executable.entry_point public @conv_112x112x32
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [8 : index, 2 : index, 1 : index]
-// CHECK: func @conv_112x112x32()
+// CHECK: func.func @conv_112x112x32()
// CHECK: linalg.conv_2d_nhwc_hwcf
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -156,7 +156,7 @@
// CHECK: hal.executable.entry_point public @conv_16x16x16
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [4 : index, 2 : index, 2 : index]
-// CHECK: func @conv_16x16x16()
+// CHECK: func.func @conv_16x16x16()
// CHECK: linalg.conv_2d_nhwc_hwcf
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -210,7 +210,7 @@
// CHECK: hal.executable.entry_point public @dwconv_28x28x144
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [4 : index, 2 : index, 2 : index]
-// CHECK: func @dwconv_28x28x144()
+// CHECK: func.func @dwconv_28x28x144()
// CHECK: linalg.depthwise_conv_2d_nhwc_hwc
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -265,6 +265,6 @@
// CHECK: hal.executable.entry_point public @dwconv_1x2x8
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [2 : index, 2 : index, 1 : index]
-// CHECK: func @dwconv_1x2x8()
+// CHECK: func.func @dwconv_1x2x8()
// CHECK: linalg.depthwise_conv_2d_nhwc_hwc
// CHECK-SAME: lowering_config = #[[CONFIG]]
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/test/config_mali_matmul.mlir b/compiler/src/iree/compiler/Codegen/SPIRV/test/config_mali_matmul.mlir
index ba46201..ed62788 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/test/config_mali_matmul.mlir
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/test/config_mali_matmul.mlir
@@ -46,7 +46,7 @@
// CHECK: hal.executable.entry_point public @matmul_1024x2048x512
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [8 : index, 2 : index, 1 : index]
-// CHECK: func @matmul_1024x2048x512()
+// CHECK: func.func @matmul_1024x2048x512()
// CHECK: linalg.matmul
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -99,7 +99,7 @@
// CHECK: hal.executable.entry_point public @matmul_3136x24x96
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [2 : index, 8 : index, 1 : index]
-// CHECK: func @matmul_3136x24x96()
+// CHECK: func.func @matmul_3136x24x96()
// CHECK: linalg.matmul
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -153,7 +153,7 @@
// CHECK: hal.executable.entry_point public @matmul_196x64x192
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [8 : index, 2 : index, 1 : index]
-// CHECK: func @matmul_196x64x192()
+// CHECK: func.func @matmul_196x64x192()
// CHECK: linalg.matmul
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -200,7 +200,7 @@
// CHECK: hal.executable.entry_point public @matmul_12544x96x16
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [8 : index, 2 : index, 1 : index]
-// CHECK: func @matmul_12544x96x16()
+// CHECK: func.func @matmul_12544x96x16()
// CHECK: linalg.matmul
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -254,7 +254,7 @@
// CHECK: hal.executable.entry_point public @matmul_49x160x576
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [8 : index, 1 : index, 1 : index]
-// CHECK: func @matmul_49x160x576()
+// CHECK: func.func @matmul_49x160x576()
// CHECK: linalg.matmul
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -280,7 +280,7 @@
}> {
hal.executable.entry_point @matmul_1x1024x576 layout(#executable_layout)
builtin.module {
- func @matmul_1x1024x576() {
+ func.func @matmul_1x1024x576() {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant 3.000000e+00 : f32
%cst_1 = arith.constant 6.000000e+00 : f32
@@ -311,7 +311,7 @@
// CHECK: hal.executable.entry_point public @matmul_1x1024x576
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [64 : index, 1 : index, 1 : index]
-// CHECK: func @matmul_1x1024x576()
+// CHECK: func.func @matmul_1x1024x576()
// CHECK: linalg.matmul
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -366,7 +366,7 @@
// CHECK: hal.executable.entry_point public @batch_matmul_4x384x384
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [8 : index, 2 : index, 1 : index]
-// CHECK: func @batch_matmul_4x384x384()
+// CHECK: func.func @batch_matmul_4x384x384()
// CHECK: linalg.batch_matmul
// CHECK-SAME: lowering_config = #[[CONFIG]]
@@ -421,6 +421,6 @@
// CHECK: hal.executable.entry_point public @batch_matmul_4x2x8
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [2 : index, 2 : index, 1 : index]
-// CHECK: func @batch_matmul_4x2x8()
+// CHECK: func.func @batch_matmul_4x2x8()
// CHECK: linalg.batch_matmul
// CHECK-SAME: lowering_config = #[[CONFIG]]
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/test/config_nvidia_matmul_cooperative_ops.mlir b/compiler/src/iree/compiler/Codegen/SPIRV/test/config_nvidia_matmul_cooperative_ops.mlir
index d6ddfe5..f40c790 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/test/config_nvidia_matmul_cooperative_ops.mlir
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/test/config_nvidia_matmul_cooperative_ops.mlir
@@ -81,7 +81,7 @@
// CHECK: hal.executable.entry_point public @matmul_256x1024x128_div_sub
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK-SAME: workgroup_size = [32 : index, 1 : index, 1 : index]
-// CHECK: func @matmul_256x1024x128_div_sub()
+// CHECK: func.func @matmul_256x1024x128_div_sub()
// CHECK: linalg.matmul
// CHECK-SAME: lowering_config = #[[CONFIG]]
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/test/create_fast_slow_path.mlir b/compiler/src/iree/compiler/Codegen/SPIRV/test/create_fast_slow_path.mlir
index 8062bfc..8f065df 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/test/create_fast_slow_path.mlir
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/test/create_fast_slow_path.mlir
@@ -52,7 +52,7 @@
return
}
-// CHECK-LABEL: func @padded_conv
+// CHECK-LABEL: func.func @padded_conv
// CHECK: %[[C0:.+]] = arith.constant 0 : index
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/test/distribute_to_invocations.mlir b/compiler/src/iree/compiler/Codegen/SPIRV/test/distribute_to_invocations.mlir
index b8f84a2..f6e10cc 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/test/distribute_to_invocations.mlir
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/test/distribute_to_invocations.mlir
@@ -14,7 +14,7 @@
return
}
-// CHECK-LABEL: func @distribute_to_x
+// CHECK-LABEL: func.func @distribute_to_x
// CHECK-SAME: %[[LB:.+]]: index, %[[UB:.+]]: index, %[[STEP:.+]]: index
// CHECK: %[[ID:.+]] = gpu.thread_id x
// CHECK: %[[DIM:.+]] = gpu.block_dim x
@@ -39,7 +39,7 @@
return
}
-// CHECK-LABEL: func @distribute_to_y
+// CHECK-LABEL: func.func @distribute_to_y
// CHECK-SAME: %[[LB:.+]]: index, %[[UB:.+]]: index, %[[STEP:.+]]: index
// CHECK: %[[ID:.+]] = gpu.thread_id y
// CHECK: %[[DIM:.+]] = gpu.block_dim y
@@ -64,7 +64,7 @@
return
}
-// CHECK-LABEL: func @distribute_to_z
+// CHECK-LABEL: func.func @distribute_to_z
// CHECK-SAME: %[[LB:.+]]: index, %[[UB:.+]]: index, %[[STEP:.+]]: index
// CHECK: %[[ID:.+]] = gpu.thread_id z
// CHECK: %[[DIM:.+]] = gpu.block_dim z
@@ -89,6 +89,6 @@
return
}
-// CHECK-LABEL: func @no_distribute_without_attr
+// CHECK-LABEL: func.func @no_distribute_without_attr
// CHECK-SAME: %[[LB:.+]]: index, %[[UB:.+]]: index, %[[STEP:.+]]: index
// CHECK: scf.for %{{.+}} = %[[LB]] to %[[UB]] step %[[STEP]] {
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_distribute.mlir b/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_distribute.mlir
index cc4eb58..e0f1301 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_distribute.mlir
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_distribute.mlir
@@ -64,7 +64,7 @@
}
}
-// CHECK-LABEL: func @matmul
+// CHECK-LABEL: func.func @matmul
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
// CHECK-DAG: %[[TIDX:.+]] = gpu.thread_id x
@@ -124,7 +124,7 @@
}
}
-// CHECK-LABEL: func @conv_1d
+// CHECK-LABEL: func.func @conv_1d
// CHECK-DAG: %[[RET:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer)
// CHECK-DAG: %[[ARG0:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[ARG1:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
@@ -241,7 +241,7 @@
// CHECK-DAG: #[[MAP0:.+]] = affine_map<()[s0] -> (s0 * 4)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<()[s0] -> (s0 * 32)>
-// CHECK: func @conv_2d
+// CHECK: func.func @conv_2d
// CHECK-DAG: %[[ARG0:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[ARG1:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK-DAG: %[[RET0:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer)
@@ -318,7 +318,7 @@
}
}
-// CHECK-LABEL: func @conv_3d
+// CHECK-LABEL: func.func @conv_3d
// CHECK-DAG: %[[TIDX:.+]] = gpu.thread_id x
// CHECK-DAG: %[[TIDY:.+]] = gpu.thread_id y
// CHECK-DAG: %[[TIDZ:.+]] = gpu.thread_id z
@@ -387,7 +387,7 @@
// CHECK-DAG: #[[MAP0:.+]] = affine_map<()[s0] -> (s0 * 4)>
// CHECK-DAG: #[[MAP2:.+]] = affine_map<()[s0] -> (s0 * 32)>
-// CHECK: func @pooling_nhwc_max
+// CHECK: func.func @pooling_nhwc_max
// CHECK-DAG: %[[ARG0:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK-DAG: %[[ARG1:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK-DAG: %[[RET0:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer)
@@ -455,7 +455,7 @@
}
}
-// CHECK-LABEL: func @matvec()
+// CHECK-LABEL: func.func @matvec()
// CHECK: %[[A:.+]] = hal.interface.binding.subspan {{.+}} : memref<250x1024xf32>
// CHECK: %[[B:.+]] = hal.interface.binding.subspan {{.+}} : memref<1024xf32>
// CHECK: %[[C:.+]] = hal.interface.binding.subspan {{.+}} : memref<250xf32>
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_distribute_scatter.mlir b/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_distribute_scatter.mlir
index 5299bde..37e561a 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_distribute_scatter.mlir
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_distribute_scatter.mlir
@@ -49,7 +49,7 @@
}
}
-// CHECK-LABEL: func @static_scatter_update_slice()
+// CHECK-LABEL: func.func @static_scatter_update_slice()
// CHECK: %[[ARG0:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK: %[[ARG1:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK: %[[ARG2:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer)
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_distribute_sort.mlir b/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_distribute_sort.mlir
index 0f614c7..6bf75f7 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_distribute_sort.mlir
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_distribute_sort.mlir
@@ -52,7 +52,7 @@
}
}
-// CHECK-LABEL: func @static_3d_sort()
+// CHECK-LABEL: func.func @static_3d_sort()
// CHECK: %[[ARG0:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
// CHECK: %[[ARG1:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer)
// CHECK: scf.for
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_promote_matmul.mlir b/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_promote_matmul.mlir
index c2a53ae..90bcab2 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_promote_matmul.mlir
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_promote_matmul.mlir
@@ -68,7 +68,7 @@
}
}
-// CHECK-LABEL: func @matmul_256x1024x128()
+// CHECK-LABEL: func.func @matmul_256x1024x128()
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[C32:.+]] = arith.constant 32 : index
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_batch_matmul.mlir b/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_batch_matmul.mlir
index 7fe56ea..6e14756 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_batch_matmul.mlir
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_batch_matmul.mlir
@@ -63,7 +63,7 @@
}
}
-// CHECK-LABEL: func @fused_fill_batch_matmul
+// CHECK-LABEL: func.func @fused_fill_batch_matmul
// CHECK-NOT: vector.transfer
// CHECK: %{{.+}}:8 = scf.for
// CHECK-COUNT-12: vector.transfer_read
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_conv.mlir b/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_conv.mlir
index 5da56de..74161b4 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_conv.mlir
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_conv.mlir
@@ -69,7 +69,7 @@
}
}
-// CHECK-LABEL: func @conv_static_shape_f32()
+// CHECK-LABEL: func.func @conv_static_shape_f32()
// No vector transfer write ops generated for the linalg.fill op: it's cancelled with read ops.
// CHECK-NOT: vector.transfer
@@ -160,7 +160,7 @@
}
}
-// CHECK-LABEL: func @depthwise_conv_static_shape_f32()
+// CHECK-LABEL: func.func @depthwise_conv_static_shape_f32()
// No vector transfer write ops generated for the linalg.fill op: it's cancelled with read ops.
// CHECK-NOT: vector.transfer
@@ -274,7 +274,7 @@
}
}
-// CHECK-LABEL: func @low_padded_conv()
+// CHECK-LABEL: func.func @low_padded_conv()
// Loop nest for workgroup tiling and distribution
// CHECK-COUNT-3: scf.for
@@ -403,7 +403,7 @@
}
}
-// CHECK-LABEL: func @low_high_padded_depthwise_conv()
+// CHECK-LABEL: func.func @low_high_padded_depthwise_conv()
// Loop nest for workgroup tiling and distribution
// CHECK-COUNT-3: scf.for
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_matmul.mlir b/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_matmul.mlir
index 18c6a87..65f7460 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_matmul.mlir
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_matmul.mlir
@@ -53,7 +53,7 @@
}
}
-// CHECK-LABEL: func @matmul_static_shape_f16
+// CHECK-LABEL: func.func @matmul_static_shape_f16
// CHECK-NOT: vector.transfer
// CHECK: %{{.+}}:8 = scf.for
// CHECK-COUNT-12: vector.transfer_read
@@ -117,7 +117,7 @@
}
}
-// CHECK-LABEL: func @matmul_static_shape_f32
+// CHECK-LABEL: func.func @matmul_static_shape_f32
// CHECK-NOT: vector.transfer
// CHECK: %{{.+}}:8 = scf.for
// CHECK-COUNT-12: vector.transfer_read
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_to_cooperative_ops.mlir b/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_to_cooperative_ops.mlir
index 93b73bf..9f03534 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_to_cooperative_ops.mlir
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_to_cooperative_ops.mlir
@@ -91,7 +91,7 @@
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2) -> (d2, d1)>
// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2) -> (d0, d1)>
-// CHECK: func @matmul_256x1024x128_div_sub
+// CHECK: func.func @matmul_256x1024x128_div_sub
// CHECK-DAG: %[[INIT:.+]] = arith.constant dense<0.000000e+00> : vector<16x16xf16>
// CHECK-DAG: %[[PAD:.+]] = arith.constant 0.000000e+00 : f16
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/test/vector_to_cooperative_matrix.mlir b/compiler/src/iree/compiler/Codegen/SPIRV/test/vector_to_cooperative_matrix.mlir
index 8c7b10e..b991a93 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/test/vector_to_cooperative_matrix.mlir
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/test/vector_to_cooperative_matrix.mlir
@@ -12,7 +12,7 @@
[SPV_KHR_storage_buffer_storage_class, SPV_NV_cooperative_matrix, SPV_KHR_8bit_storage]>,
{max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>}> {
builtin.module {
- // CHECK-LABEL: func @matmul_contract
+ // CHECK-LABEL: func.func @matmul_contract
// CHECK-SAME: %[[ARG0:.+]]: memref<8x32xi8>, %[[ARG1:.+]]: memref<32x8xi8>, %[[ARG2:.+]]: memref<8x8xi32>
func.func @matmul_contract(%arg0: memref<8x32xi8>, %arg1: memref<32x8xi8>, %arg2: memref<8x8xi32>) {
%c0 = arith.constant 0 : index
@@ -57,7 +57,7 @@
[SPV_KHR_storage_buffer_storage_class, SPV_NV_cooperative_matrix, SPV_KHR_8bit_storage, SPV_KHR_16bit_storage]>,
{max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>}> {
builtin.module {
- // CHECK-LABEL: func @matmul_contract_licm
+ // CHECK-LABEL: func.func @matmul_contract_licm
func.func @matmul_contract_licm(%arg0: memref<4096x4096xi8>, %arg1: memref<4096x4096xi8>, %arg2: memref<4096x4096xi32>) {
%c32 = arith.constant 32 : index
%c4096 = arith.constant 4096 : index
@@ -103,7 +103,7 @@
[SPV_KHR_storage_buffer_storage_class, SPV_NV_cooperative_matrix, SPV_KHR_8bit_storage, SPV_KHR_16bit_storage]>,
{max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>}> {
builtin.module {
- // CHECK-LABEL: func @matmul_contract_vector_memref
+ // CHECK-LABEL: func.func @matmul_contract_vector_memref
func.func @matmul_contract_vector_memref(%arg0: memref<4096x256xvector<4xi32>>, %arg1: memref<4096x256xvector<4xi32>>, %arg2: memref<4096x1024xvector<4xi32>>) {
%c32 = arith.constant 32 : index
%c4096 = arith.constant 4096 : index
@@ -138,7 +138,7 @@
[SPV_KHR_storage_buffer_storage_class, SPV_NV_cooperative_matrix, SPV_KHR_8bit_storage]>,
{max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>}> {
builtin.module {
- // CHECK-LABEL: func @const_elementwise_ops
+ // CHECK-LABEL: func.func @const_elementwise_ops
func.func @const_elementwise_ops(%add_val: vector<16x16xf16>, %sub_val: vector<16x16xf16>, %div_val: vector<16x16xf16>) -> vector<16x16xf16> {
// CHECK: %[[SPLAT:.+]] = spv.Constant 8.000000e+00 : f16
// CHECK: %[[CST:.+]] = spv.CompositeConstruct %[[SPLAT]] : !spv.coopmatrix<16x16xf16, Subgroup>
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/test/vectorize_elementwise_ops.mlir b/compiler/src/iree/compiler/Codegen/SPIRV/test/vectorize_elementwise_ops.mlir
index 15fcc33..7bc00d2 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/test/vectorize_elementwise_ops.mlir
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/test/vectorize_elementwise_ops.mlir
@@ -18,7 +18,7 @@
return %0 : tensor<2x8xf32>
}
-// CHECK-LABEL: func @add
+// CHECK-LABEL: func.func @add
// CHECK-COUNT-8: vector.transfer_read %{{.+}} : tensor<2x8xf32>, vector<4xf32>
// CHECK-COUNT-4: arith.addf %{{.*}}, %{{.*}} : vector<4xf32>
// CHECK-COUNT-4: arith.mulf %{{.*}}, %{{.*}} : vector<4xf32>
@@ -43,7 +43,7 @@
return %0 : tensor<2x4xf32>
}
-// CHECK-LABEL: func @transpose_add
+// CHECK-LABEL: func.func @transpose_add
// CHECK-SAME: (%[[LHS:.+]]: tensor<4x2xf32>, %[[RHS:.+]]: tensor<2xf32>)
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/test/vectorize_load_store.mlir b/compiler/src/iree/compiler/Codegen/SPIRV/test/vectorize_load_store.mlir
index ff8b702..b9c00f3 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/test/vectorize_load_store.mlir
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/test/vectorize_load_store.mlir
@@ -1,6 +1,6 @@
// RUN: iree-opt --split-input-file --iree-spirv-vectorize-load-store --canonicalize -cse --mlir-print-local-scope %s | FileCheck %s
-// CHECK-LABEL: func @alloc_copy
+// CHECK-LABEL: func.func @alloc_copy
// CHECK-SAME: (%[[ARG0:.+]]: memref<4096x1024xvector<4xf32>>, %[[X:.+]]: index, %[[Y:.+]]: index)
// CHECK: %[[ALLOC:.+]] = memref.alloc() : memref<128x8xvector<4xf32>, 3>
// CHECK: %[[IDX:.+]] = affine.apply affine_map<()[s0] -> (s0 floordiv 4)>()[%[[Y]]]
@@ -24,7 +24,7 @@
// Test that the memref is not vectorized if used by scalar load or store.
-// CHECK-LABEL: func @alloc_copy
+// CHECK-LABEL: func.func @alloc_copy
// CHECK-SAME: %[[ARG0:.+]]: memref<4096x4096xf32>
func.func @alloc_copy(%arg0: memref<4096x4096xf32>, %x: index, %y: index) {
%cst = arith.constant 0.000000e+00 : f32
@@ -37,7 +37,7 @@
// -----
-// CHECK-LABEL: func @resource_copy
+// CHECK-LABEL: func.func @resource_copy
// CHECK: %[[A:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : memref<4096x1024xvector<4xf32>>
// CHECK: %[[B:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : memref<4096x1024xvector<4xf32>>
// CHECK: %[[V:.+]] = memref.load %[[A]][%{{.*}}, %{{.*}}] : memref<4096x1024xvector<4xf32>>
@@ -58,7 +58,7 @@
// -----
-// CHECK-LABEL: func @resource_copy_f16
+// CHECK-LABEL: func.func @resource_copy_f16
// CHECK: %[[A:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : memref<4096x1024xvector<4xf16>>
// CHECK: %[[B:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : memref<4096x1024xvector<4xf16>>
// CHECK: %[[V:.+]] = memref.load %[[A]][%{{.*}}, %{{.*}}] : memref<4096x1024xvector<4xf16>>
@@ -79,7 +79,7 @@
// -----
-// CHECK-LABEL: func @resource_copy_8xf16
+// CHECK-LABEL: func.func @resource_copy_8xf16
// CHECK: %[[A:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : memref<4096x512xvector<4xf32>>
// CHECK: %[[B:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : memref<4096x512xvector<4xf32>>
// CHECK: %[[V:.+]] = memref.load %[[A]][%{{.*}}, %{{.*}}] : memref<4096x512xvector<4xf32>>
@@ -100,7 +100,7 @@
// -----
-// CHECK-LABEL: func @resource_copy_dynamic_shape()
+// CHECK-LABEL: func.func @resource_copy_dynamic_shape()
func.func @resource_copy_dynamic_shape() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
@@ -128,7 +128,7 @@
// -----
-// CHECK-LABEL: func @resource_copy_dynamic_last_dim()
+// CHECK-LABEL: func.func @resource_copy_dynamic_last_dim()
func.func @resource_copy_dynamic_last_dim() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
@@ -144,7 +144,7 @@
// -----
-// CHECK-LABEL: func @do_not_vectorize_odd_vector_size
+// CHECK-LABEL: func.func @do_not_vectorize_odd_vector_size
func.func @do_not_vectorize_odd_vector_size() {
%cst = arith.constant 0.0 : f32
%c0 = arith.constant 0 : index
@@ -176,7 +176,7 @@
// -----
-// CHECK-LABEL: func @scalarize_vector_transfer_op
+// CHECK-LABEL: func.func @scalarize_vector_transfer_op
func.func @scalarize_vector_transfer_op(%arg: vector<3xf32>) -> (vector<3xf32>) {
%c0 = arith.constant 0: index
%c3 = arith.constant 3: index
@@ -209,7 +209,7 @@
// -----
-// CHECK-LABEL: func @scalarize_non_minor_identity_transfer_write
+// CHECK-LABEL: func.func @scalarize_non_minor_identity_transfer_write
// CHECK-SAME: (%[[VALUE:.+]]: vector<4xf32>, %[[I1:.+]]: index, %[[I2:.+]]: index)
func.func @scalarize_non_minor_identity_transfer_write(%value: vector<4xf32>, %i1: index, %i2: index) {
%c0 = arith.constant 0: index
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/test/vectorize_matmul.mlir b/compiler/src/iree/compiler/Codegen/SPIRV/test/vectorize_matmul.mlir
index a5bafea..4e6571f 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/test/vectorize_matmul.mlir
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/test/vectorize_matmul.mlir
@@ -43,7 +43,7 @@
return
}
-// CHECK-LABEL: func @matmul_2x128x4()
+// CHECK-LABEL: func.func @matmul_2x128x4()
// CHECK-DAG: %[[ZERO:.+]] = arith.constant dense<0.000000e+00> : vector<4xf32>
// CHECK-DAG: %[[PAD:.+]] = arith.constant 0.000000e+00 : f32
@@ -86,7 +86,7 @@
return %0 : tensor<8x8xf32>
}
-// CHECK-LABEL: func @matmul_8x8x2
+// CHECK-LABEL: func.func @matmul_8x8x2
// CHECK-COUNT-8: vector.transfer_read {{.*}} : tensor<8x2xf32>, vector<2xf32>
// CHECK-COUNT-4: vector.transfer_read {{.*}} : tensor<2x8xf32>, vector<4xf32>
@@ -103,7 +103,7 @@
return %0 : tensor<8x8xf32>
}
-// CHECK-LABEL: func @matmul_8x8x1
+// CHECK-LABEL: func.func @matmul_8x8x1
// CHECK-COUNT-8: vector.transfer_read {{.*}} : tensor<8x1xf32>, vector<1xf32>
// CHECK-COUNT-2: vector.transfer_read {{.*}} : tensor<1x8xf32>, vector<4xf32>
@@ -133,7 +133,7 @@
return %bcast_add: tensor<1x8xf32>
}
-// CHECK-LABEL: func @matmul_broadcast_add
+// CHECK-LABEL: func.func @matmul_broadcast_add
// CHECK-SAME: (%[[INIT:[a-z0-9]+]]: tensor<1x8xf32>
// CHECK-SAME: %[[BIAS:[a-z0-9]+]]: tensor<1xf32>)
@@ -182,7 +182,7 @@
return %0: tensor<2x8xf16>
}
-// CHECK-LABEL: func @matmul_2x8x128_fp16
+// CHECK-LABEL: func.func @matmul_2x8x128_fp16
// CHECK-SAME: (%{{.+}}: tensor<2x128xf16>, %{{.+}}: tensor<128x8xf16>, %[[X:.+]]: tensor<2x8xf16>, %[[Y:.+]]: tensor<2x8xf16>)
// CHECK: %[[ZERO:.+]] = arith.constant dense<0.000000e+00> : vector<8xf16>
// CHECK: %[[FOR:.+]]:2 = scf.for %arg4 = %{{.+}} to %{{.+}} step %{{.+}} iter_args(%arg5 = %[[ZERO]], %arg6 = %[[ZERO]])
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/test/vectorize_tensor_pad.mlir b/compiler/src/iree/compiler/Codegen/SPIRV/test/vectorize_tensor_pad.mlir
index 77bd949..72f3d4e 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/test/vectorize_tensor_pad.mlir
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/test/vectorize_tensor_pad.mlir
@@ -13,7 +13,7 @@
return %pad: tensor<1x2x2x3xf32>
}
-// CHECK-LABEL: func @pad_tensor
+// CHECK-LABEL: func.func @pad_tensor
// CHECK-SAME: (%[[SOURCE:.+]]: tensor<1x?x?x3xf32>, %[[LOW1:.+]]: index, %[[LOW2:.+]]: index, %{{.+}}: index, %{{.+}}: index)
// CHECK-DAG: %[[I0:.+]] = arith.constant 0 : index
diff --git a/compiler/src/iree/compiler/Codegen/Sandbox/test/fusion_expert.mlir b/compiler/src/iree/compiler/Codegen/Sandbox/test/fusion_expert.mlir
index d2e7134..1e7f037 100644
--- a/compiler/src/iree/compiler/Codegen/Sandbox/test/fusion_expert.mlir
+++ b/compiler/src/iree/compiler/Codegen/Sandbox/test/fusion_expert.mlir
@@ -22,7 +22,7 @@
} -> tensor<?x?xf32>
return %2 : tensor<?x?xf32>
}
-// CHECK: func @matmul_bias_add(
+// CHECK: func.func @matmul_bias_add(
// CHECK: scf.for
// CHECK-SAME: {
// CHECK: scf.for
@@ -55,7 +55,7 @@
} -> tensor<20x120xf32>
return %2 : tensor<20x120xf32>
}
-// CHECK: func @matmul_bias_add_static(
+// CHECK: func.func @matmul_bias_add_static(
// CHECK-SAME: %[[ARG0:.+]]: tensor<20x60xf32>
// CHECK-SAME: %[[ARG1:.+]]: tensor<60x120xf32>
// CHECK-SAME: %[[ARG2:.+]]: tensor<120xf32>
diff --git a/compiler/src/iree/compiler/Codegen/Sandbox/test/outline_one_parent_loop.mlir b/compiler/src/iree/compiler/Codegen/Sandbox/test/outline_one_parent_loop.mlir
index 739e6f9..d639496 100644
--- a/compiler/src/iree/compiler/Codegen/Sandbox/test/outline_one_parent_loop.mlir
+++ b/compiler/src/iree/compiler/Codegen/Sandbox/test/outline_one_parent_loop.mlir
@@ -1,8 +1,8 @@
// RUN: iree-opt %s --outline-one-parent-loop="anchor-func=test anchor-op=scf.yield parent-loop-num=1 result-func-name=foo" | FileCheck %s
// RUN: iree-opt %s --outline-one-parent-loop="anchor-func=matmul anchor-op=vector.contract parent-loop-num=2 result-func-name=bar" | FileCheck %s --check-prefix=MATMUL
-// CHECK-LABEL: func @foo
-// CHECK-LABEL: func @test
+// CHECK-LABEL: func.func @foo
+// CHECK-LABEL: func.func @test
func.func @test(%ub: index, %it: index) -> index {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
@@ -12,8 +12,8 @@
return %res: index
}
-// MATMUL-LABEL: func @bar
-// MATMUL-LABEL: func @matmul
+// MATMUL-LABEL: func.func @bar
+// MATMUL-LABEL: func.func @matmul
func.func @matmul(%arg0: tensor<24x48xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg1: tensor<48x32xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg2: tensor<24x32xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true}) -> tensor<24x32xf32> attributes {passthrough = ["noinline", ["target-cpu", "skylake-avx512"], ["prefer-vector-width", "512"]]} {
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
@@ -100,9 +100,9 @@
%c0 = arith.constant 0 : index
%0 = memref.dim %arg3, %c0 : memref<?xi64>
%1 = scf.for %arg4 = %c0 to %0 step %c1 iter_args(%arg5 = %arg2) -> (tensor<24x32xf32>) {
- %2 = call @nano_time() : () -> i64
- %3 = call @matmul(%arg0, %arg1, %arg5) : (tensor<24x48xf32>, tensor<48x32xf32>, tensor<24x32xf32>) -> tensor<24x32xf32>
- %4 = call @nano_time() : () -> i64
+ %2 = func.call @nano_time() : () -> i64
+ %3 = func.call @matmul(%arg0, %arg1, %arg5) : (tensor<24x48xf32>, tensor<48x32xf32>, tensor<24x32xf32>) -> tensor<24x32xf32>
+ %4 = func.call @nano_time() : () -> i64
%5 = arith.subi %4, %2 : i64
memref.store %5, %arg3[%arg4] : memref<?xi64>
scf.yield %3 : tensor<24x32xf32>
diff --git a/compiler/src/iree/compiler/Codegen/Sandbox/test/single_tiling_expert.mlir b/compiler/src/iree/compiler/Codegen/Sandbox/test/single_tiling_expert.mlir
index c7fb914..73d88fa 100644
--- a/compiler/src/iree/compiler/Codegen/Sandbox/test/single_tiling_expert.mlir
+++ b/compiler/src/iree/compiler/Codegen/Sandbox/test/single_tiling_expert.mlir
@@ -6,7 +6,7 @@
outs(%arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
return %0 : tensor<?x?xf32>
}
-// CHECK: func @matmul(
+// CHECK: func.func @matmul(
// CHECK: scf.for
// CHECK-SAME: {
// CHECK: scf.for
@@ -26,7 +26,7 @@
outs(%arg2 : tensor<20x80xf32>) -> tensor<20x80xf32>
return %0 : tensor<20x80xf32>
}
-// CHECK: func @matmul_static(
+// CHECK: func.func @matmul_static(
// CHECK: scf.for
// CHECK-SAME: {
// CHECK: scf.for
diff --git a/compiler/src/iree/compiler/Codegen/Sandbox/test/unroll_one_vector_op.mlir b/compiler/src/iree/compiler/Codegen/Sandbox/test/unroll_one_vector_op.mlir
index de2e3d3..be33d15 100644
--- a/compiler/src/iree/compiler/Codegen/Sandbox/test/unroll_one_vector_op.mlir
+++ b/compiler/src/iree/compiler/Codegen/Sandbox/test/unroll_one_vector_op.mlir
@@ -10,7 +10,7 @@
iterator_types = ["parallel", "parallel", "reduction"]
}
-// CHECK-LABEL: func @test
+// CHECK-LABEL: func.func @test
func.func @test(%a: vector<4x3xf32>, %b: vector<3x4xf32>, %c: vector<4x4xf32>) -> vector<4x4xf32> {
// CHECK: vector.contract {{.*}} : vector<2x3xf32>, vector<3x4xf32> into vector<2x4xf32>
// CHECK: vector.contract {{.*}} : vector<2x3xf32>, vector<3x4xf32> into vector<2x4xf32>
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/extract_slice.mlir b/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/extract_slice.mlir
index c0195c9..b30d576 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/extract_slice.mlir
+++ b/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/extract_slice.mlir
@@ -5,7 +5,7 @@
: tensor<5x24x48xf32> to tensor<4xf32>
return %0 : tensor<4xf32>
}
-// CHECK-LABEL: func @extract_slice1(
+// CHECK-LABEL: func.func @extract_slice1(
// CHECK-SAME: %[[ARG0:.+]]: tensor<5x24x48xf32>)
// CHECK-DAG: %[[C2:.+]] = arith.constant 2 : index
// CHECK-DAG: %[[C3:.+]] = arith.constant 3 : index
@@ -22,7 +22,7 @@
: tensor<5x24x48xf32> to tensor<2x48xf32>
return %0 : tensor<2x48xf32>
}
-// CHECK-LABEL: func @extract_slice2
+// CHECK-LABEL: func.func @extract_slice2
// CHECK-SAME: %[[ARG0:.+]]: tensor<5x24x48xf32>)
// CHECK-DAG: %[[C3:.+]] = arith.constant 3 : index
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
@@ -40,7 +40,7 @@
: tensor<5x24x48xf32> to tensor<2x24xf32>
return %0 : tensor<2x24xf32>
}
-// CHECK-LABEL: func @extract_slice3
+// CHECK-LABEL: func.func @extract_slice3
// CHECK: tensor.extract_slice
// -----
@@ -50,7 +50,7 @@
: tensor<5x24x48xf32> to tensor<2x24xf32>
return %0 : tensor<2x24xf32>
}
-// CHECK-LABEL: func @extract_slice4
+// CHECK-LABEL: func.func @extract_slice4
// CHECK: tensor.extract_slice
// -----
@@ -60,7 +60,7 @@
: tensor<5x24x48xf32> to tensor<2x48xf32>
return %0 : tensor<2x48xf32>
}
-// CHECK-LABEL: func @extract_slice5
+// CHECK-LABEL: func.func @extract_slice5
// CHECK: tensor.extract_slice
// -----
@@ -70,7 +70,7 @@
: tensor<5x24x48xf32> to tensor<?x48xf32>
return %0 : tensor<?x48xf32>
}
-// CHECK-LABEL: func @extract_slice6
+// CHECK-LABEL: func.func @extract_slice6
// CHECK: tensor.extract_slice
// -----
@@ -80,7 +80,7 @@
: tensor<5x?x48xf32> to tensor<2x48xf32>
return %0 : tensor<2x48xf32>
}
-// CHECK-LABEL: func @extract_slice7(
+// CHECK-LABEL: func.func @extract_slice7(
// CHECK-SAME: %[[ARG0:.+]]: tensor<5x?x48xf32>
// CHECK-SAME: %[[ARG1:.+]]: index)
// CHECK-DAG: %[[C2:.+]] = arith.constant 2 : index
@@ -99,7 +99,7 @@
%0 = tensor.extract_slice %arg0[4, 0] [1, 513] [1, 1] : tensor<?x513xi32> to tensor<513xi32>
return %0 : tensor<513xi32>
}
-// CHECK-LABEL: func @rank_reducing_extract_slice
+// CHECK-LABEL: func.func @rank_reducing_extract_slice
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
@@ -119,7 +119,7 @@
%0 = tensor.extract_slice %arg0[0, 1, 0, 0] [1, 49, 20, 1] [1, 1, 1, 1] : tensor<1x50x20x1xf32> to tensor<49x20xf32>
return %0 : tensor<49x20xf32>
}
-// CHECK-LABEL: func @rank_reducing_extract_slice_trailing_unit_dims
+// CHECK-LABEL: func.func @rank_reducing_extract_slice_trailing_unit_dims
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
// CHECK-DAG: %[[C49:.+]] = arith.constant 49 : index
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/fill.mlir b/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/fill.mlir
index 04116a0..18b49c8 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/fill.mlir
+++ b/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/fill.mlir
@@ -9,7 +9,7 @@
: tensor<20x?x40xf32> into tensor<5x4x?x4x2x4x5xf32>
return %0, %1 : tensor<?x5x?xf32>, tensor<5x4x?x4x2x4x5xf32>
}
-// CHECK-LABEL: func @tensor_reshape
+// CHECK-LABEL: func.func @tensor_reshape
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x4x?x5x?x6xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<20x?x40xf32>
// CHECK-DAG: %[[R0:.+]] = flow.tensor.reshape %[[ARG0]]
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/from_elements.mlir b/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/from_elements.mlir
index c154d4a..f74dd18 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/from_elements.mlir
+++ b/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/from_elements.mlir
@@ -1,6 +1,6 @@
// RUN: iree-opt --allow-unregistered-dialect --split-input-file --iree-flow-convert-to-flow %s | FileCheck %s
-// CHECK: func @tensor.from_elements__to__flow.tensor.splat(%[[arg0:.*]]: i8)
+// CHECK: func.func @tensor.from_elements__to__flow.tensor.splat(%[[arg0:.*]]: i8)
func.func @tensor.from_elements__to__flow.tensor.splat(%arg0: i8) -> (i8) {
// CHECK: %[[splat_res:.*]] = flow.tensor.splat %[[arg0]] : tensor<1xi8>
%0 = tensor.from_elements %arg0 : tensor<1xi8>
@@ -10,7 +10,7 @@
}
// -----
-// CHECK: func @tensor.from_elements__not_convertible(%[[arg0:.*]]: i8)
+// CHECK: func.func @tensor.from_elements__not_convertible(%[[arg0:.*]]: i8)
func.func @tensor.from_elements__not_convertible(%arg0: i8) -> (i8) {
// CHECK: %[[c0:.*]] = arith.constant 0
%c0 = arith.constant 0 : index
@@ -40,7 +40,7 @@
%0 = tensor.from_elements %arg0 : tensor<f32>
return %0 : tensor<f32>
}
-// CHECK: func @tensor.from_elements_0D
+// CHECK: func.func @tensor.from_elements_0D
// CHECK-SAME: %[[ARG0:.+]]: f32
// CHECK: %[[SPLAT:.+]] = flow.tensor.splat %[[ARG0]] : tensor<f32>
// CHECK: return %[[SPLAT]]
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/insert_slice.mlir b/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/insert_slice.mlir
index be002c7..1d40abb 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/insert_slice.mlir
+++ b/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/insert_slice.mlir
@@ -8,7 +8,7 @@
tensor<1x4x48xf32> into tensor<?x24x48xf32>
return %0 : tensor<?x24x48xf32>
}
-// CHECK-LABEL: func @insert_slice_convert
+// CHECK-LABEL: func.func @insert_slice_convert
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]
// CHECK-DAG: %[[C0:.+]] = arith.constant 0
@@ -28,7 +28,7 @@
tensor<4x48xf32> into tensor<?x24x48xf32>
return %0 : tensor<?x24x48xf32>
}
-// CHECK-LABEL: func @insert_slice_convert_rank_reducing
+// CHECK-LABEL: func.func @insert_slice_convert_rank_reducing
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]
// CHECK-DAG: %[[C0:.+]] = arith.constant 0
@@ -46,7 +46,7 @@
%0 = tensor.insert_slice %arg0 into %arg1[0, 1, 0, 0] [1, 49, 20, 1] [1, 1, 1, 1] : tensor<49x20xf32> into tensor<1x50x20x1xf32>
return %0 : tensor<1x50x20x1xf32>
}
-// CHECK-LABEL: func @rank_reducing_insert_slice_trailing_unit_dims
+// CHECK-LABEL: func.func @rank_reducing_insert_slice_trailing_unit_dims
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
// CHECK: %[[RESHAPE:.+]] = flow.tensor.reshape %{{.+}} : tensor<49x20xf32> -> tensor<1x49x20x1xf32>
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/reshape.mlir b/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/reshape.mlir
index cf353bc..c4992c0 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/reshape.mlir
+++ b/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/test/reshape.mlir
@@ -15,7 +15,7 @@
}
// CHECK: #[[MAP:.+]] = affine_map<(d0)[s0, s1] -> (d0 + s0 + s1)>
-// CHECK: func @turn_fill_into_splat
+// CHECK: func.func @turn_fill_into_splat
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: tensor<f32>
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]+]]: index
diff --git a/compiler/src/iree/compiler/Dialect/Flow/IR/test/dispatch_workgroups_folding.mlir b/compiler/src/iree/compiler/Dialect/Flow/IR/test/dispatch_workgroups_folding.mlir
index 2d19fa8..1268d8e 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/IR/test/dispatch_workgroups_folding.mlir
+++ b/compiler/src/iree/compiler/Dialect/Flow/IR/test/dispatch_workgroups_folding.mlir
@@ -75,7 +75,7 @@
// -----
-// CHECK-LABEL: func @dontInlineReadWrite
+// CHECK-LABEL: func.func @dontInlineReadWrite
// CHECK-SAME: (%[[ARG0:.+]]: tensor<1x4xf32>)
func.func @dontInlineReadWrite(%arg0: tensor<1x4xf32>) -> tensor<4x8xf32> {
// CHECK: %[[CST:.+]] = arith.constant dense<0.000000e+00> : tensor<4x8xf32>
@@ -99,7 +99,7 @@
// -----
-// CHECK-LABEL: func @remove_unused_result
+// CHECK-LABEL: func.func @remove_unused_result
func.func @remove_unused_result(%arg0 : tensor<9xi32>, %arg1 : tensor<9xi32>) -> (tensor<i32>) {
%c1 = arith.constant 1 : index
// CHECK: flow.dispatch.workgroups[%c1, %c1, %c1]() : () -> tensor<i32> =
@@ -124,7 +124,7 @@
// -----
-// CHECK-LABEL: func @remove_unused_dynamic_result
+// CHECK-LABEL: func.func @remove_unused_dynamic_result
func.func @remove_unused_dynamic_result(%dim: index) -> (tensor<i32>) {
%c1 = arith.constant 1 : index
// CHECK: flow.dispatch.workgroups[%c1, %c1, %c1]() : () -> tensor<i32> =
@@ -153,7 +153,7 @@
// -----
-// CHECK-LABEL: func @remove_unused_read_write_result
+// CHECK-LABEL: func.func @remove_unused_read_write_result
func.func @remove_unused_read_write_result(%arg0 : tensor<9xi32>, %arg1 : tensor<9xi32>) -> (tensor<i32>) {
%c1 = arith.constant 1 : index
// CHECK: flow.dispatch.workgroups[%c1, %c1, %c1]() : () -> tensor<i32> =
@@ -178,7 +178,7 @@
// -----
-// CHECK-LABEL: func @keep_used_read_write_result
+// CHECK-LABEL: func.func @keep_used_read_write_result
func.func @keep_used_read_write_result(%arg0 : tensor<9xi32>, %arg1 : tensor<9xi32>) -> (tensor<i32>) {
%c1 = arith.constant 1 : index
// CHECK: flow.dispatch.workgroups[%c1, %c1, %c1]() : () -> (tensor<i32>, tensor<i32>) =
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/capture_dispatch_dynamic_dims.mlir b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/capture_dispatch_dynamic_dims.mlir
index 14c1f2f..3815c71 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/capture_dispatch_dynamic_dims.mlir
+++ b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/capture_dispatch_dynamic_dims.mlir
@@ -25,7 +25,7 @@
// CHECK-LABEL: @capture2DimsForOneTensor
// CHECK-SAME: (%[[ARG0:.+]]: tensor<?x?xf32>, %[[ARG0_DIM0:.+]]: index, %[[ARG0_DIM1:.+]]: index, %[[RET0_DIM0:.+]]: index, %[[RET0_DIM1:.+]]: index)
-func @capture2DimsForOneTensor(%arg0: tensor<?x?xf32>, %arg0_dim0: index, %arg0_dim1: index, %ret0_dim0: index, %ret0_dim1: index) {
+func.func @capture2DimsForOneTensor(%arg0: tensor<?x?xf32>, %arg0_dim0: index, %arg0_dim1: index, %ret0_dim0: index, %ret0_dim1: index) {
%c1 = arith.constant 1 : index
// CHECK: flow.dispatch.workgroups[%c1, %c1, %c1](%[[ARG0]], %[[ARG0_DIM0]], %[[ARG0_DIM1]], %[[RET0_DIM0]], %[[RET0_DIM1]])
%0 = flow.dispatch.workgroups[%c1, %c1, %c1](%arg0) : (tensor<?x?xf32>{%arg0_dim0, %arg0_dim1}) -> tensor<?x?xf32>{%ret0_dim0, %ret0_dim1} =
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/deduplicate_executables.mlir b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/deduplicate_executables.mlir
index 26719a3..eecf5b5 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/deduplicate_executables.mlir
+++ b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/deduplicate_executables.mlir
@@ -10,7 +10,7 @@
}
}
}
-// CHECK-LABEL: func @single_executable
+// CHECK-LABEL: func.func @single_executable
func.func @single_executable(%arg0: tensor<4xf32>) -> tensor<4xf32> {
%c4 = arith.constant 4 : index
// CHECK: %0 = flow.dispatch @single_executable_ex_0::@single_executable_entry_0[%c4](%arg0) : (tensor<4xf32>) -> tensor<4xf32>
@@ -50,7 +50,7 @@
}
}
}
-// CHECK-LABEL: func @duplicate_executables
+// CHECK-LABEL: func.func @duplicate_executables
func.func @duplicate_executables(%arg0: tensor<4xf32>) -> tensor<4xf32> {
%c4 = arith.constant 4 : index
// CHECK: %0 = flow.dispatch @duplicate_executables_ex_0::@duplicate_executables_entry_0[%c4](%arg0) : (tensor<4xf32>) -> tensor<4xf32>
@@ -84,7 +84,7 @@
}
}
}
-// CHECK-LABEL: func @same_ops_diff_operands
+// CHECK-LABEL: func.func @same_ops_diff_operands
func.func @same_ops_diff_operands(%arg0: tensor<2xi32>, %arg1: tensor<2xi32>) -> tensor<2xi32> {
%c4 = arith.constant 4 : index
// CHECK: %0 = flow.dispatch @same_ops_diff_operands_ex_0::@entry_0[%c4](%arg0, %arg1) : (tensor<2xi32>, tensor<2xi32>) -> tensor<2xi32>
@@ -126,7 +126,7 @@
}
}
}
-// CHECK-LABEL: func @multiple_entry_points
+// CHECK-LABEL: func.func @multiple_entry_points
func.func @multiple_entry_points(%arg0: tensor<4xf32>) -> tensor<4xf32> {
// CHECK: %[[C4:.*]] = arith.constant 4
%c4 = arith.constant 4 : index
@@ -177,7 +177,7 @@
}
}
}
-// CHECK-LABEL: func @different_types
+// CHECK-LABEL: func.func @different_types
func.func @different_types(%arg0: tensor<4xf32>) -> tensor<4xi1> {
%c4 = arith.constant 4 : index
// CHECK: %0 = flow.dispatch @different_types_float_ex::@different_types_float_entry[%c4](%arg0) : (tensor<4xf32>) -> tensor<4xi1>
@@ -234,7 +234,7 @@
}
}
}
-// CHECK-LABEL: func @nested_ops
+// CHECK-LABEL: func.func @nested_ops
func.func @nested_ops(%arg0: tensor<1x4xi32>) -> tensor<1xi32> {
%c4 = arith.constant 4 : index
// CHECK: %0 = flow.dispatch @nested_ops_ex_0::@nested_ops_entry_0[%c4](%arg0) : (tensor<1x4xi32>) -> tensor<1xi32>
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/dispatch_linalg_on_tensors.mlir b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/dispatch_linalg_on_tensors.mlir
index e3596d2..dc2db38 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/dispatch_linalg_on_tensors.mlir
+++ b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/dispatch_linalg_on_tensors.mlir
@@ -6,7 +6,7 @@
outs(%arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
return %1 : tensor<?x?xf32>
}
-// CHECK: func @tile_matmul_alone
+// CHECK: func.func @tile_matmul_alone
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
@@ -60,7 +60,7 @@
} -> tensor<?x?xf32>
return %1 : tensor<?x?xf32>
}
-// CHECK: func @generic_op_alone(
+// CHECK: func.func @generic_op_alone(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<?xf32>
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
@@ -95,7 +95,7 @@
outs(%1 : tensor<?x?xf32>) -> tensor<?x?xf32>
return %2 : tensor<?x?xf32>
}
-// CHECK: func @fuse_matmul_with_fill
+// CHECK: func.func @fuse_matmul_with_fill
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
@@ -152,7 +152,7 @@
outs(%1 : tensor<?x?xf32>) -> tensor<?x?xf32>
return %4 : tensor<?x?xf32>
}
-// CHECK: func @keep_separate_dispatches_for_producer
+// CHECK: func.func @keep_separate_dispatches_for_producer
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
@@ -210,7 +210,7 @@
}
// For ops of rank greater than 3 we serialized the higher dimension. When flow
// supports larger ranks this can be changed.
-// CHECK: func @tile_4d_generic_op_alone
+// CHECK: func.func @tile_4d_generic_op_alone
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?x?x?xf32>
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
@@ -248,7 +248,7 @@
return %1, %2 : tensor<?x?xf32>, tensor<?x?xf32>
}
-// CHECK: func @always_fuse_cast(
+// CHECK: func.func @always_fuse_cast(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[RHS1:[a-zA-Z0-9_]+]]: tensor<4x?xf32>
// CHECK-SAME: %[[RHS2:[a-zA-Z0-9_]+]]: tensor<4x?xf32>
@@ -288,7 +288,7 @@
return %7 : tensor<?x?xf32>
}
-// CHECK: func @dont_fuse_tensor_update_with_fill
+// CHECK: func.func @dont_fuse_tensor_update_with_fill
// CHECK: %[[SPLAT:.+]] = flow.tensor.splat
// CHECK: flow.tensor.update %{{.+}}, %[[SPLAT]]
@@ -298,7 +298,7 @@
%cst = arith.constant dense<[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]> : tensor<2x2x3xi32>
return %cst : tensor<2x2x3xi32>
}
-// CHECK-LABEL: func @pass_constant_through()
+// CHECK-LABEL: func.func @pass_constant_through()
// CHECK: %[[CST:.+]] = arith.constant dense<{{.+}}> : tensor<2x2x3xi32>
// CHECK: return %[[CST]]
@@ -320,7 +320,7 @@
outs(%CC: tensor<?x?xf32>) -> tensor<?x?xf32>
return %D: tensor<?x?xf32>
}
-// CHECK-LABEL: func @fuse_matmul_with_generic_op
+// CHECK-LABEL: func.func @fuse_matmul_with_generic_op
// linalg.generic is fused inside the dispatch region and becomes dead.
// CHECK-NOT: generic
// CHECK: flow.dispatch.workgroups
@@ -348,7 +348,7 @@
return %D, %CC: tensor<?x?xf32>, tensor<?x?xf32>
}
-// CHECK: func @keep_original_producer_uses
+// CHECK: func.func @keep_original_producer_uses
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
@@ -383,7 +383,7 @@
return %2 : tensor<1x112x112x32xf32>
}
-// CHECK-LABEL: func @conv2d
+// CHECK-LABEL: func.func @conv2d
// CHECK-DAG: %[[C32:.+]] = arith.constant 32
// CHECK-DAG: %[[C112:.+]] = arith.constant 112
// CHECK: %[[RESULT:.+]] = flow.dispatch.workgroups[%[[C32]], %[[C112]], %[[C112]]]
@@ -401,7 +401,7 @@
return %4 : tensor<1x56x56x96xf32>
}
-// CHECK-LABEL: func @depthwise_conv2d
+// CHECK-LABEL: func.func @depthwise_conv2d
// CHECK-DAG: %[[C56:.+]] = arith.constant 56
// CHECK-DAG: %[[C96:.+]] = arith.constant 96
// CHECK: %[[RESULT:.+]] = flow.dispatch.workgroups[%[[C96]], %[[C56]], %[[C56]]]
@@ -417,7 +417,7 @@
%arg1[%arg2, %arg3] [%arg4, %arg5] [1, 1] : tensor<?x?xf32> into tensor<?x?xf32>
return %0 : tensor<?x?xf32>
}
-// CHECK: func @subtensor_insert
+// CHECK: func.func @subtensor_insert
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9_]+]]: index
@@ -475,7 +475,7 @@
return %reduce : tensor<f32>
}
-// CHECK-LABEL: func @fuse_non_tiled_reduction_fill
+// CHECK-LABEL: func.func @fuse_non_tiled_reduction_fill
// CHECK: %[[C1:.+]] = arith.constant 1 : index
// CHECK: flow.dispatch.workgroups[%[[C1]], %[[C1]], %[[C1]]]({{.+}}) : (tensor<1000xf32>, tensor<1000xf32>, tensor<f32>) -> tensor<f32> =
@@ -524,7 +524,7 @@
} -> tensor<1x?xf32>
return %9 : tensor<1x?xf32>
}
-// CHECK-LABEL: func @inline_dag_1
+// CHECK-LABEL: func.func @inline_dag_1
// CHECK-NOT: linalg.
// CHECK-NOT: tensor.extract_slice
// CHECK: flow.dispatch.workgroups
@@ -584,7 +584,7 @@
} -> tensor<1x?xf32>
return %9 : tensor<1x?xf32>
}
-// CHECK-LABEL: func @inline_dag_2
+// CHECK-LABEL: func.func @inline_dag_2
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<1x?xf32>
// CHECK: flow.dispatch.workgroups
@@ -634,7 +634,7 @@
} -> tensor<9xi1>
return %256 : tensor<9xi1>
}
-// CHECK: func @inline_dag_3
+// CHECK: func.func @inline_dag_3
// CHECK-SAME: %[[ARG0:.+]]: tensor<9xi32>
// CHECK-SAME: %[[ARG1:.+]]: tensor<18xi32>
// CHECK-SAME: %[[ARG2:.+]]: tensor<i32>
@@ -683,7 +683,7 @@
} -> tensor<i16>
return %8 : tensor<i16>
}
-// CHECK-LABEL: func @inline_dag_4
+// CHECK-LABEL: func.func @inline_dag_4
// CHECK-SAME: %[[ARG0:.+]]: tensor<4xi32>
// CHECK-SAME: %[[ARG1:.+]]: tensor<i32>
// CHECK: flow.dispatch.workgroups
@@ -741,7 +741,7 @@
} -> (tensor<?xi32>, tensor<?xi32>)
return %4#0, %4#1 : tensor<?xi32>, tensor<?xi32>
}
-// CHECK-LABEL: func @multi_result
+// CHECK-LABEL: func.func @multi_result
// CHECK: %[[RESULT_OUT:.+]]:2 = flow.dispatch.workgroups
// CHECK-NEXT: %[[ARG5:[a-zA-Z0-9_]+]]: !flow.dispatch.tensor<writeonly:?xi32>
// CHECK-SAME: %[[ARG6:[a-zA-Z0-9_]+]]: !flow.dispatch.tensor<writeonly:?xi32>
@@ -770,7 +770,7 @@
%12 = tensor.extract_slice %arg0[%5, %11] [1, %arg3] [1, 1] : tensor<?x?xi32> to tensor<1x?xi32>
return %12 : tensor<1x?xi32>
}
-// CHECK-LABEL: func @dynamic_slice(
+// CHECK-LABEL: func.func @dynamic_slice(
// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?xi32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<i32>
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9_]+]]: tensor<i32>
@@ -813,7 +813,7 @@
%9 = hal.tensor.export %6 : tensor<?x?xf32>{%7, %8} -> !hal.buffer_view
return %9 : !hal.buffer_view
}
-// CHECK-LABEL: func @dynamic_dot()
+// CHECK-LABEL: func.func @dynamic_dot()
// CHECK-NOT: linalg.fill
// CHECK-NOT: linalg.matmul
// CHECK: flow.dispatch.workgroups
@@ -839,7 +839,7 @@
} -> tensor<?x?xf32>
return %0 : tensor<?x?xf32>
}
-// CHECK: func @scatter(
+// CHECK: func.func @scatter(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<?x1xi32>
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
@@ -877,7 +877,7 @@
} -> tensor<?x?x?xi32>, tensor<?x?x?xf32>
return %0, %1 : tensor<?x?x?xi32>, tensor<?x?x?xf32>
}
-// CHECK: func @sort_3d(
+// CHECK: func.func @sort_3d(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?x?xi32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<?x?x?xf32>
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
@@ -926,7 +926,7 @@
} -> tensor<8xi32>
return %0 : tensor<8xi32>
}
-// CHECK: func @scatter_static
+// CHECK: func.func @scatter_static
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<4xi32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<4x1xi32>
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9_]+]]: tensor<8xi32>
@@ -951,7 +951,7 @@
return %4 : tensor<1x3x3x160xf32>
}
-// CHECK-LABEL: func @pooling_nwhc_sum_static
+// CHECK-LABEL: func.func @pooling_nwhc_sum_static
// CHECK-DAG: %[[C3:.+]] = arith.constant 3 : index
// CHECK-DAG: %[[C160:.+]] = arith.constant 160 : index
// CHECK: flow.dispatch.workgroups[%[[C160]], %[[C3]], %[[C3]]]
@@ -973,7 +973,7 @@
outs(%fill : tensor<?x?xf32>) -> tensor<?x?xf32>
return %matmul : tensor<?x?xf32>
}
-// CHECK-LABEL: func @named_op_outs_fusion
+// CHECK-LABEL: func.func @named_op_outs_fusion
// CHECK: flow.dispatch.workgroups
// CHECK: %[[FILL:.+]] = linalg.fill_rng_2d
// CHECK: linalg.matmul
@@ -1001,7 +1001,7 @@
%arg3[%9, %15] [1, %d0] [1, 1] : tensor<?xi32> into tensor<?x?xi32>
return %17 : tensor<?x?xi32>
}
-// CHECK-LABEL: func @dynamic_slice
+// CHECK-LABEL: func.func @dynamic_slice
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: i32
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: i32
// CHECK-SAME: %[[ARG2:.+]]: tensor<?xi32>
@@ -1025,7 +1025,7 @@
tensor<?x?xf32> to tensor<?x?xf32>
return %0 : tensor<?x?xf32>
}
-// CHECK: func @extract_slice
+// CHECK: func.func @extract_slice
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: index
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]+]]: index
@@ -1081,7 +1081,7 @@
} -> tensor<32xi32>
return %0 : tensor<32xi32>
}
-// CHECK: func @inline_cst(%[[ARG0:.+]]: tensor<4x32xi32>)
+// CHECK: func.func @inline_cst(%[[ARG0:.+]]: tensor<4x32xi32>)
// CHECK: flow.dispatch.workgroups
// CHECK-SAME: (%[[ARG0]])
// CHECK: %[[CST:.+]] = arith.constant dense<0> : tensor<32xi32>
@@ -1100,7 +1100,7 @@
} -> tensor<2xi32>
return %0 : tensor<2xi32>
}
-// CHECK: func @inline_cst2(%[[ARG0:.+]]: tensor<4x2xi32>)
+// CHECK: func.func @inline_cst2(%[[ARG0:.+]]: tensor<4x2xi32>)
// CHECK: flow.dispatch.workgroups
// CHECK-SAME: (%[[ARG0]])
// CHECK: %[[CST:.+]] = arith.constant dense<[21, 42]> : tensor<2xi32>
@@ -1114,7 +1114,7 @@
outs(%arg2 : tensor<?x1xf32>) -> tensor<?x1xf32>
return %0 : tensor<?x1xf32>
}
-// CHECK: func @gemm_unitN(
+// CHECK: func.func @gemm_unitN(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: tensor<?x?xf32>,
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: tensor<?x1xf32>,
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]+]]: tensor<?x1xf32>)
@@ -1132,7 +1132,7 @@
outs(%arg2 : tensor<1x1xf32>) -> tensor<1x1xf32>
return %0 : tensor<1x1xf32>
}
-// CHECK: func @gemm_unitM_unitN(
+// CHECK: func.func @gemm_unitM_unitN(
// CHECK: %[[C1:.+]] = arith.constant 1 : index
// CHECK: flow.dispatch.workgroups[%[[C1]], %[[C1]], %[[C1]]]
// CHECK: linalg.matmul
@@ -1146,7 +1146,7 @@
outs(%arg2 : tensor<1x?xf32>) -> tensor<1x?xf32>
return %0 : tensor<1x?xf32>
}
-// CHECK: func @gemm_unitM(
+// CHECK: func.func @gemm_unitM(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: tensor<1x?xf32>,
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: tensor<?x?xf32>,
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]+]]: tensor<1x?xf32>)
@@ -1171,7 +1171,7 @@
} -> tensor<1x?x1x1x?x?x1x?xf32>
return %0 : tensor<1x?x1x1x?x?x1x?xf32>
}
-// CHECK: func @unit_dim_generic(
+// CHECK: func.func @unit_dim_generic(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: tensor<1x?x1x1x?x?x1x?xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: tensor<1x?x1x1x?x?x1x?xf32>)
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
@@ -1208,7 +1208,7 @@
} -> tensor<?x56x56x64xi8>
return %4 : tensor<?x56x56x64xi8>
}
-// CHECK: func @no_fuse_quantized
+// CHECK: func.func @no_fuse_quantized
// CHECK: flow.dispatch.workgroups
// CHECK: linalg.depthwise_conv_2d_nhwc_hwc_q
// CHECK-NOT: linalg.generic
@@ -1234,7 +1234,7 @@
: tensor<2x2xf32> into tensor<3x3xf32>
return %1 : tensor<3x3xf32>
}
-// CHECK: func @dont_fuse_tensor_insert_dest_producer
+// CHECK: func.func @dont_fuse_tensor_insert_dest_producer
// CHECK-SAME: %[[ARG0:.+]]: tensor<2x2xf32>
// CHECK: %[[CST:.+]] = arith.constant {{.+}} : tensor<3x3xf32>
// CHECK: %[[DISPATCH1:.+]] = flow.dispatch.workgroups
@@ -1252,7 +1252,7 @@
%1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<?x?xf32>) -> tensor<?x?xf32>
return %1 : tensor<?x?xf32>
}
-// CHECK: func @fill_op_alone(
+// CHECK: func.func @fill_op_alone(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: index
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: index
// CHECK: %[[SPLAT:.+]] = flow.tensor.splat %[[CST]] : tensor<?x?xf32>{%arg0, %arg1}
@@ -1283,7 +1283,7 @@
outs(%fill2 : tensor<?x?xf32>) -> tensor<?x?xf32>
return %1, %2 : tensor<?x?xf32>, tensor<?x?xf32>
}
-// CHECK: func @dont_fuse_reshape(
+// CHECK: func.func @dont_fuse_reshape(
// CHECK-SAME: %[[LHS:.+]]: tensor<?xf32>
// CHECK-DAG: %[[RESHAPE:.+]] = flow.tensor.reshape %[[LHS]]
// CHECK: %[[DISPATCH1:.+]] = flow.dispatch.workgroups
@@ -1310,7 +1310,7 @@
: tensor<3x40xf32> into tensor<5x40xf32>
return %1 : tensor<5x40xf32>
}
-// CHECK: func @concat_pattern
+// CHECK: func.func @concat_pattern
// CHECK-SAME: %[[SRC1:.+]]: tensor<2x40xf32>
// CHECK-SAME: %[[SRC2:.+]]: tensor<3x40xf32>
// CHECK-SAME: %[[DEST:.+]]: tensor<5x40xf32>
@@ -1328,7 +1328,7 @@
%1 = tensor.insert_slice %0 into %arg10[%arg6, %arg7] [%arg3, 1] [%arg8, %arg9] : tensor<?xf32> into tensor<?x?xf32>
return %1 : tensor<?x?xf32>
}
-// CHECK: func @generic_tensor_insert(
+// CHECK: func.func @generic_tensor_insert(
// CHECK-SAME: %[[SOURCE:[a-zA-Z0-9]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[SOURCE_OFFSET_Y:[a-zA-Z0-9]+]]: index
// CHECK-SAME: %[[SOURCE_OFFSET_X:[a-zA-Z0-9]+]]: index
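Note: the renames in this file are mechanical. With this llvm-project version the standalone `func` op has moved into the new `func` dialect, so its printed form is now `func.func`, and every FileCheck pattern that matches a function header has to be updated to match the new spelling. A minimal before/after sketch (hypothetical function, not taken from the tests above):

    // Before the bump:
    func @example(%arg0: tensor<4xf32>) -> tensor<4xf32> {
      return %arg0 : tensor<4xf32>
    }
    // After the bump:
    func.func @example(%arg0: tensor<4xf32>) -> tensor<4xf32> {
      return %arg0 : tensor<4xf32>
    }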
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/dispatch_linalg_on_tensors_fusion.mlir b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/dispatch_linalg_on_tensors_fusion.mlir
index bfbf065..288c394 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/dispatch_linalg_on_tensors_fusion.mlir
+++ b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/dispatch_linalg_on_tensors_fusion.mlir
@@ -29,7 +29,7 @@
// * linalg.generic's linalg.fill is pulled into the same group;
// * linalg.conv's linalg.fill is pulled into the same group.
-// CHECK-LABEL: func @fuse_conv2d_elementwise
+// CHECK-LABEL: func.func @fuse_conv2d_elementwise
// CHECK: flow.dispatch.workgroups
// CHECK: %[[INIT:.+]] = linalg.init_tensor
@@ -68,7 +68,7 @@
return %3, %2 : tensor<1x112x112x32xf32>, tensor<1x112x112x32xf32>
}
-// CHECK-LABLE: func @dont_fuse_conv2d_with_multiple_uses
+// CHECK-LABEL: func.func @dont_fuse_conv2d_with_multiple_uses
// CHECK: flow.dispatch.workgroups
// CHECK: linalg.conv_2d_nhwc_hwcf
@@ -102,7 +102,7 @@
return %3 : tensor<1x112x112x32xf32>
}
-// CHECK-LABEL: func @dont_fuse_conv2d_with_non_identity_map
+// CHECK-LABEL: func.func @dont_fuse_conv2d_with_non_identity_map
// CHECK: flow.dispatch.workgroups
// CHECK: linalg.conv_2d_nhwc_hwcf
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/export_benchmark_funcs.mlir b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/export_benchmark_funcs.mlir
index c545876..07f82f0 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/export_benchmark_funcs.mlir
+++ b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/export_benchmark_funcs.mlir
@@ -10,7 +10,7 @@
// CHECK-DAG: util.global private @[[MAIN_IN_0:.+]] {noinline} = dense<{{.*}}> : tensor<5x3xf32>
// CHECK-DAG: util.global private @[[MAIN_IN_1:.+]] {noinline} = dense<{{.*}}> : tensor<3x5xf32>
-// CHECK: func @two_dispatch_benchmark()
+// CHECK: func.func @two_dispatch_benchmark()
// CHECK-DAG: %[[ARG0:.+]] = util.global.load @[[MAIN_IN_0]] : tensor<5x3xf32>
// CHECK-DAG: %[[ARG1:.+]] = util.global.load @[[MAIN_IN_1]] : tensor<3x5xf32>
// CHECK: %[[RET:.+]]:2 = call @two_dispatch(%[[ARG0]], %[[ARG1]])
@@ -34,7 +34,7 @@
// CHECK: util.global private @_benchmark_input_0 {noinline} = dense<0> : tensor<i32>
// CHECK: util.global private @_benchmark_input_1 {noinline} = dense<0> : tensor<i32>
-// CHECK: func @while_benchmark() attributes {iree.abi.stub, iree.reflection = {iree.benchmark = "entry"}} {
+// CHECK: func.func @while_benchmark() attributes {iree.abi.stub, iree.reflection = {iree.benchmark = "entry"}} {
// CHECK-DAG: %[[ARG0:.+]] = util.global.load @_benchmark_input_0 : tensor<i32>
// CHECK-DAG: %[[ARG1:.+]] = util.global.load @_benchmark_input_1 : tensor<i32>
// CHECK: %[[RET0:.+]] = call @while(%[[ARG0]], %[[ARG1]])
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/initialize_empty_tensor.mlir b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/initialize_empty_tensor.mlir
index 6c443f6..8b89f7c 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/initialize_empty_tensor.mlir
+++ b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/initialize_empty_tensor.mlir
@@ -1,11 +1,11 @@
// RUN: iree-opt --iree-flow-initialize-empty-tensors %s | FileCheck %s
-func @return_zero_init(%arg0 : index, %arg1 : index) -> (tensor<?x?x42xi32>, tensor<?x42x?xf32>) {
+func.func @return_zero_init(%arg0 : index, %arg1 : index) -> (tensor<?x?x42xi32>, tensor<?x42x?xf32>) {
%0 = linalg.init_tensor [%arg0, %arg1, 42] : tensor<?x?x42xi32>
%1 = linalg.init_tensor [%arg1, 42, %arg0] : tensor<?x42x?xf32>
return %0, %1 : tensor<?x?x42xi32>, tensor<?x42x?xf32>
}
-// CHECK: func @return_zero_init(
+// CHECK: func.func @return_zero_init(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: index
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: index
// CHECK-DAG: %[[ZERO_INT:.+]] = arith.constant 0 : i32
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/inject_dispatch_tracing.mlir b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/inject_dispatch_tracing.mlir
index 0883a3f..ad8d60a 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/inject_dispatch_tracing.mlir
+++ b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/inject_dispatch_tracing.mlir
@@ -1,6 +1,6 @@
// RUN: iree-opt --split-input-file --pass-pipeline='func.func(iree-flow-inject-dispatch-tracing)' %s | FileCheck %s
-// CHECK-LABEL: func @singleDispatch
+// CHECK-LABEL: func.func @singleDispatch
// CHECK-SAME: (%[[ARG0:.+]]: tensor<4xf32>)
func.func @singleDispatch(%arg0: tensor<4xf32>) -> tensor<4xf32> {
%c4 = arith.constant 4 : index
@@ -14,7 +14,7 @@
// -----
-// CHECK-LABEL: func @multiDispatch
+// CHECK-LABEL: func.func @multiDispatch
// CHECK-SAME: (%[[ARG0:.+]]: tensor<4xf32>)
func.func @multiDispatch(%arg0: tensor<4xf32>) -> tensor<4xf32> {
%c4 = arith.constant 4 : index
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/interchange_generic_ops.mlir b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/interchange_generic_ops.mlir
index aff7e7e..87d20ec 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/interchange_generic_ops.mlir
+++ b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/interchange_generic_ops.mlir
@@ -3,7 +3,7 @@
// CHECK: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>
// CHECK: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d3, d0, d1)>
// CHECK: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d2, d0, d1)>
-// CHECK: func @interchange
+// CHECK: func.func @interchange
// CHECK: linalg.generic {indexing_maps = [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "reduction"]}
func.func @interchange(%arg0: tensor<?x?x?xf32>, %arg1: tensor<?x?x?xf32>, %arg2: tensor<?x?x?xf32>) -> (tensor<?x?x?xf32>) {
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/outline_dispatch_regions.mlir b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/outline_dispatch_regions.mlir
index 2d11ede..032e819 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/outline_dispatch_regions.mlir
+++ b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/outline_dispatch_regions.mlir
@@ -3,7 +3,7 @@
// CHECK: flow.executable private @staticShapeDispatch_dispatch_0
// CHECK-NEXT: flow.dispatch.entry public @staticShapeDispatch_dispatch_0 attributes {
// CHECK-SAME: workgroup_rank = 2 : index}
-// CHECK: func @staticShapeDispatch_dispatch_0(
+// CHECK: func.func @staticShapeDispatch_dispatch_0(
// CHECK-SAME: %[[ARG:.+]]: !flow.dispatch.tensor<readonly:8x4xf32>,
// CHECK-SAME: %[[RET:.+]]: !flow.dispatch.tensor<writeonly:4x8xf32>) {
// CHECK-DAG: %[[ARG_VALUE:.+]] = flow.dispatch.tensor.load %[[ARG]], {{.*}} : !flow.dispatch.tensor<readonly:8x4xf32> -> tensor<8x4xf32>
@@ -12,7 +12,7 @@
// CHECK-NEXT: return
// CHECK-NEXT: }
-// CHECK-LABEL: func @staticShapeDispatch(
+// CHECK-LABEL: func.func @staticShapeDispatch(
// CHECK-SAME: %[[ARG0:.+]]: tensor<8x4xf32>)
func.func @staticShapeDispatch(%arg0 : tensor<8x4xf32>) -> tensor<4x8xf32> {
// CHECK-DAG: %[[X:.+]] = arith.constant 100
@@ -39,14 +39,14 @@
// CHECK: flow.executable private @dispatchFnMuli_dispatch_0
// CHECK-NEXT: flow.dispatch.entry public @dispatchFnMuli_dispatch_0 attributes {
// CHECK-SAME: workgroup_rank = 2 : index}
-// CHECK: func @dispatchFnMuli_dispatch_0(
+// CHECK: func.func @dispatchFnMuli_dispatch_0(
// CHECK: flow.executable private @dispatchFnMuli_dispatch_1
// CHECK-NEXT: flow.dispatch.entry public @dispatchFnMuli_dispatch_1 attributes {
// CHECK-SAME: workgroup_rank = 2 : index}
-// CHECK: func @dispatchFnMuli_dispatch_1(
+// CHECK: func.func @dispatchFnMuli_dispatch_1(
-// CHECK-LABEL: func @dispatchFnMuli(
+// CHECK-LABEL: func.func @dispatchFnMuli(
// CHECK-SAME: %[[ARG0:.+]]: tensor<8x4xf32>)
func.func @dispatchFnMuli(%arg0 : tensor<8x4xf32>) -> tensor<8x4xf32> {
// CHECK-DAG: %[[X:.+]] = arith.constant 100
@@ -83,7 +83,7 @@
// CHECK: flow.executable private @dispatchFn1_dispatch_0
-// CHECK-LABEL: func @dispatchFn1
+// CHECK-LABEL: func.func @dispatchFn1
func.func @dispatchFn1(%arg0 : tensor<8x4xf32>) -> tensor<4x8xf32> {
%x = arith.constant 100 : index
%y = arith.constant 50 : index
@@ -98,7 +98,7 @@
// CHECK: flow.executable private @dispatchFn2_dispatch_0
-// CHECK-LABEL: func @dispatchFn2
+// CHECK-LABEL: func.func @dispatchFn2
func.func @dispatchFn2(%arg0 : tensor<8x4xf32>) -> tensor<4x8xf32> {
%x = arith.constant 100 : index
%y = arith.constant 50 : index
@@ -116,7 +116,7 @@
// CHECK: flow.executable private @dynamicShapeDispatch_dispatch_0
// CHECK-NEXT: flow.dispatch.entry public @dynamicShapeDispatch_dispatch_0 attributes {
// CHECK-SAME: workgroup_rank = 2 : index}
-// CHECK: func @dynamicShapeDispatch_dispatch_0(
+// CHECK: func.func @dynamicShapeDispatch_dispatch_0(
// CHECK-SAME: %[[ARG_TENSOR:.+]]: !flow.dispatch.tensor<readonly:7x?x24x?xf32>,
// CHECK-SAME: %[[DIM1_CAPTURE:.+]]: index, %[[DIM3_CAPTURE:.+]]: index,
// CHECK-SAME: %[[RET_TENSOR:.+]]: !flow.dispatch.tensor<writeonly:?x?x1024xf32>) {
@@ -128,7 +128,7 @@
// CHECK: return
// CHECK-NEXT: }
-// CHECK-LABEL: func @dynamicShapeDispatch(
+// CHECK-LABEL: func.func @dynamicShapeDispatch(
// CHECK-SAME: %[[ARG0:.+]]: tensor<7x?x24x?xf32>
func.func @dynamicShapeDispatch(%arg0 : tensor<7x?x24x?xf32>) -> tensor<?x?x1024xf32> {
%c1 = arith.constant 1 : index
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/pad_tensor_to_tensor.mlir b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/pad_tensor_to_tensor.mlir
index 1304301..0d59859 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/pad_tensor_to_tensor.mlir
+++ b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/pad_tensor_to_tensor.mlir
@@ -15,7 +15,7 @@
}
// CHECK-DAG: #[[MAP0:.+]] = affine_map<()[s0, s1] -> (s0 + s1 + 4)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<()[s0, s1] -> (s0 + s1 + 3)>
-// CHECK: func @pad_tensor
+// CHECK: func.func @pad_tensor
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<f32>
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9_]+]]: index
@@ -50,7 +50,7 @@
return %1 : tensor<18x12xf32>
}
}
-// CHECK-LABEL: func @pad_tensor_static
+// CHECK-LABEL: func.func @pad_tensor_static
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<12x4xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<f32>
// CHECK-DAG: %[[VAL:.+]] = tensor.extract %[[ARG1]]
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/test_partitionable_loops_interface.mlir b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/test_partitionable_loops_interface.mlir
index da209f3..3f25002 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/test_partitionable_loops_interface.mlir
+++ b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/test_partitionable_loops_interface.mlir
@@ -18,7 +18,7 @@
} -> tensor<?x?xf32>
return %0 : tensor<?x?xf32>
}
-// CHECK-LABEL: func @generic_dynamic(
+// CHECK-LABEL: func.func @generic_dynamic(
// CHECK: util.unfoldable_constant dense<[1, 0, 1]> : tensor<3xindex>
// -----
@@ -39,7 +39,7 @@
} -> tensor<1x?xf32>
return %0 : tensor<1x?xf32>
}
-// CHECK-LABEL: func @generic_unit_dim(
+// CHECK-LABEL: func.func @generic_unit_dim(
// CHECK: util.unfoldable_constant dense<[0, 0, 1]> : tensor<3xindex>
// -----
@@ -65,7 +65,7 @@
} -> tensor<?x?x?x?xf32>
return %0 : tensor<?x?x?x?xf32>
}
-// CHECK-LABEL: func @generic_4D(
+// CHECK-LABEL: func.func @generic_4D(
// CHECK: util.unfoldable_constant dense<[0, 1, 1, 1]> : tensor<4xindex>
// -----
@@ -89,7 +89,7 @@
} -> tensor<?x?x1x?xf32>
return %0 : tensor<?x?x1x?xf32>
}
-// CHECK-LABEL: func @generic_4D_unit_dim(
+// CHECK-LABEL: func.func @generic_4D_unit_dim(
// CHECK: util.unfoldable_constant dense<[1, 1, 0, 1]> : tensor<4xindex>
// -----
@@ -101,7 +101,7 @@
outs(%init : tensor<?x?xf32>) -> tensor<?x?xf32>
return %0 : tensor<?x?xf32>
}
-// CHECK-LABEL: func @named_op(
+// CHECK-LABEL: func.func @named_op(
// CHECK: util.unfoldable_constant dense<[1, 1, 0]> : tensor<3xindex>
// -----
@@ -115,7 +115,7 @@
}
-// CHECK-LABEL: func @named_op_unit_dim(
+// CHECK-LABEL: func.func @named_op_unit_dim(
// CHECK: util.unfoldable_constant dense<[0, 1, 0]> : tensor<3xindex>
// -----
@@ -127,7 +127,7 @@
outs(%init : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32>
return %0 : tensor<?x?x?x?xf32>
}
-// CHECK-LABEL: func @mmt4d(
+// CHECK-LABEL: func.func @mmt4d(
// CHECK: util.unfoldable_constant dense<[1, 1, 0, 0, 0, 0]> : tensor<6xindex>
// -----
@@ -139,7 +139,7 @@
outs(%init : tensor<1x?x?x?xf32>) -> tensor<1x?x?x?xf32>
return %0 : tensor<1x?x?x?xf32>
}
-// CHECK-LABEL: func @mmt4d_unit_dim(
+// CHECK-LABEL: func.func @mmt4d_unit_dim(
// CHECK: util.unfoldable_constant dense<[1, 1, 0, 0, 0, 0]> : tensor<6xindex>
// -----
@@ -155,7 +155,7 @@
} -> tensor<?x?xf32>
return %0 : tensor<?x?xf32>
}
-// CHECK-LABEL: func @sort(
+// CHECK-LABEL: func.func @sort(
// CHECK: util.unfoldable_constant dense<[0, 1]> : tensor<2xindex>
// -----
@@ -171,5 +171,5 @@
} -> tensor<?x1xf32>
return %0 : tensor<?x1xf32>
}
-// CHECK-LABEL: func @sort_unit_dim(
+// CHECK-LABEL: func.func @sort_unit_dim(
// CHECK: util.unfoldable_constant dense<[0, 1]> : tensor<2xindex>
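Note: in the tests above the `util.unfoldable_constant` payload appears to encode one entry per loop of the tested op: 1 for a loop that flow can partition across workgroups, 0 otherwise. For example, `linalg.matmul` reports dense<[1, 1, 0]> (two parallel loops, one reduction), while the 4-D `linalg.generic` reports dense<[0, 1, 1, 1]>, consistent with the outermost dimension being serialized for ops of rank greater than 3.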
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/transformation.mlir b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/transformation.mlir
index 3cf3d7e..5b08dca 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/transformation.mlir
+++ b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/transformation.mlir
@@ -18,12 +18,12 @@
// CHECK-LABEL: flow.executable private @hloElementwiseOps_dispatch_0 {
// CHECK-NEXT: flow.dispatch.entry public @hloElementwiseOps_dispatch_0
// CHECK-NEXT: module {
-// CHECK-NEXT: func @hloElementwiseOps_dispatch_0(%arg0: !flow.dispatch.tensor<readonly:4xf32>, %arg1: !flow.dispatch.tensor<writeonly:4xf32>) {
+// CHECK-NEXT: func.func @hloElementwiseOps_dispatch_0(%arg0: !flow.dispatch.tensor<readonly:4xf32>, %arg1: !flow.dispatch.tensor<writeonly:4xf32>) {
// CHECK: %{{.+}} = linalg.generic
// CHECK: %{{.+}} = arith.addf %{{.+}}, %{{.+}} : f32
// CHECK-NEXT: %{{.+}} = arith.subf %{{.+}}, %{{.+}} : f32
// CHECK-NEXT: %{{.+}} = arith.mulf %{{.+}}, %{{.+}} : f32
-// CHECK: func @hloElementwiseOps(%arg0: tensor<4xf32>) -> tensor<4xf32> {
+// CHECK: func.func @hloElementwiseOps(%arg0: tensor<4xf32>) -> tensor<4xf32> {
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
// CHECK-DAG: %[[C4:.+]] = arith.constant 4 : index
// CHECK-NEXT: %0 = flow.dispatch @hloElementwiseOps_dispatch_0::@hloElementwiseOps_dispatch_0[%[[C4]], %[[C1]], %[[C1]]](%arg0) : (tensor<4xf32>) -> tensor<4xf32>
@@ -42,17 +42,17 @@
// CHECK-LABEL: flow.executable private @interleavedDot_dispatch_0 {
// CHECK-NEXT: flow.dispatch.entry public @interleavedDot_dispatch_0
// CHECK-NEXT: module {
-// CHECK-NEXT: func @interleavedDot_dispatch_0
+// CHECK-NEXT: func.func @interleavedDot_dispatch_0
// CHECK: %{{.+}} = linalg.generic
// CHECK: %{{.+}} = arith.addf %{{.+}}, %{{.+}} : f32
// CHECK: flow.executable private @interleavedDot_dispatch_1 {
// CHECK-NEXT: flow.dispatch.entry public @interleavedDot_dispatch_1
// CHECK-NEXT: module {
-// CHECK-NEXT: func @interleavedDot_dispatch_1
+// CHECK-NEXT: func.func @interleavedDot_dispatch_1
// CHECK: %{{.+}} = linalg.matmul
// CHECK: %{{.+}} = linalg.generic
// CHECK: %{{.+}} = arith.mulf %{{.+}}, %{{.+}} : f32
-// CHECK: func @interleavedDot(
+// CHECK: func.func @interleavedDot(
// CHECK-SAME: %[[ARG0:.+]]: tensor<4x4xf32>) -> tensor<4x4xf32> {
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
// CHECK-DAG: %[[C4:.+]] = arith.constant 4 : index
@@ -76,10 +76,10 @@
// CHECK-LABEL: flow.executable private @reduction_dispatch_0 {
// CHECK-NEXT: flow.dispatch.entry public @reduction_dispatch_0
// CHECK-NEXT: module {
-// CHECK-NEXT: func @reduction_dispatch_0
+// CHECK-NEXT: func.func @reduction_dispatch_0
// CHECK: %{{.+}} = linalg.generic
// CHECK: %{{.+}} = arith.addf %{{.+}}, %{{.+}} : f32
-// CHECK: func @reduction(%arg0: tensor<4x8xf32>) -> tensor<4xf32> {
+// CHECK: func.func @reduction(%arg0: tensor<4x8xf32>) -> tensor<4xf32> {
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
// CHECK-DAG: %[[C4:.+]] = arith.constant 4 : index
// CHECK-NEXT: %0 = flow.dispatch @reduction_dispatch_0::@reduction_dispatch_0[%[[C4]], %[[C1]], %[[C1]]](%arg0) : (tensor<4x8xf32>) -> tensor<4xf32>
diff --git a/compiler/src/iree/compiler/Dialect/HAL/IR/test/buffer_view_folding.mlir b/compiler/src/iree/compiler/Dialect/HAL/IR/test/buffer_view_folding.mlir
index 4576453..cec8a31 100644
--- a/compiler/src/iree/compiler/Dialect/HAL/IR/test/buffer_view_folding.mlir
+++ b/compiler/src/iree/compiler/Dialect/HAL/IR/test/buffer_view_folding.mlir
@@ -1,6 +1,6 @@
// RUN: iree-opt --split-input-file --canonicalize -cse %s | iree-opt --allow-unregistered-dialect --split-input-file | FileCheck %s
-// CHECK-LABEL: func @skip_buffer_view_buffer
+// CHECK-LABEL: func.func @skip_buffer_view_buffer
// CHECK-SAME: %[[BUFFER:.+]]: !hal.buffer
func.func @skip_buffer_view_buffer(%buffer : !hal.buffer) -> !hal.buffer {
%c1 = arith.constant 1 : i32
@@ -18,7 +18,7 @@
// -----
-// CHECK-LABEL: func @expand_buffer_view_dims
+// CHECK-LABEL: func.func @expand_buffer_view_dims
// CHECK-SAME: %[[VIEW:.+]]: !hal.buffer_view
func.func @expand_buffer_view_dims(%view : !hal.buffer_view) -> (index, index, index) {
// CHECK-DAG: %[[D0:.+]] = hal.buffer_view.dim<%[[VIEW]] : !hal.buffer_view>[0] : index
diff --git a/compiler/src/iree/compiler/Dialect/HAL/Target/VMVX/test/linking.mlir b/compiler/src/iree/compiler/Dialect/HAL/Target/VMVX/test/linking.mlir
index b8587ca..d5160bd 100644
--- a/compiler/src/iree/compiler/Dialect/HAL/Target/VMVX/test/linking.mlir
+++ b/compiler/src/iree/compiler/Dialect/HAL/Target/VMVX/test/linking.mlir
@@ -95,7 +95,7 @@
// CHECK-NEXT: }
// CHECK-NEXT: }
//
-// CHECK: func @basic_linking() {
+// CHECK: func.func @basic_linking() {
// CHECK: hal.command_buffer.dispatch.symbol<%cmd : !hal.command_buffer> target(@vmvx_linked::@vmvx_bytecode_fb::@dispatch_0) workgroups([%c1, %c1, %c1])
// CHECK-NEXT: hal.command_buffer.dispatch.symbol<%cmd : !hal.command_buffer> target(@vmvx_linked::@vmvx_bytecode_fb::@dispatch_1) workgroups([%c1, %c1, %c1])
// CHECK-NEXT: hal.command_buffer.dispatch.symbol<%cmd : !hal.command_buffer> target(@vmvx_linked::@vmvx_bytecode_fb::@dispatch_2) workgroups([%c1, %c1, %c1])
diff --git a/compiler/src/iree/compiler/Dialect/HAL/Transforms/test/convert_to_hal.mlir b/compiler/src/iree/compiler/Dialect/HAL/Transforms/test/convert_to_hal.mlir
index e33d710..a256ede 100644
--- a/compiler/src/iree/compiler/Dialect/HAL/Transforms/test/convert_to_hal.mlir
+++ b/compiler/src/iree/compiler/Dialect/HAL/Transforms/test/convert_to_hal.mlir
@@ -34,7 +34,7 @@
}
}
- // CHECK-LABEL: func @simpleDispatch
+ // CHECK-LABEL: func.func @simpleDispatch
// CHECK-SAME: (%[[ARG0:.+]]: !hal.buffer_view, %[[ARG1:.+]]: !hal.buffer_view) -> !hal.buffer_view
func.func @simpleDispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%c1 = arith.constant 1 : index
diff --git a/compiler/src/iree/compiler/Dialect/HAL/Transforms/test/dump_executable_benchmarks.mlir b/compiler/src/iree/compiler/Dialect/HAL/Transforms/test/dump_executable_benchmarks.mlir
index 1b1514f..7799588 100644
--- a/compiler/src/iree/compiler/Dialect/HAL/Transforms/test/dump_executable_benchmarks.mlir
+++ b/compiler/src/iree/compiler/Dialect/HAL/Transforms/test/dump_executable_benchmarks.mlir
@@ -66,7 +66,7 @@
// CHECK: %[[BUFFER:.+]] = hal.allocator.allocate<%{{.+}} : !hal.allocator> type("DeviceVisible|DeviceLocal") usage(Dispatch) : !hal.buffer{%c768}
// CHECK-NEXT: util.global.store %[[BUFFER]], @ex0_embedded_elf_x86_64_dispatch0_512x1x1_buffer : !hal.buffer
- // CHECK: func @ex0_embedded_elf_x86_64_dispatch0_512x1x1(%arg0: i32)
+ // CHECK: func.func @ex0_embedded_elf_x86_64_dispatch0_512x1x1(%arg0: i32)
// CHECK-SAME: attributes {iree.abi.stub, iree.reflection = {iree.benchmark = "dispatch"}} {
// CHECK: %[[BATCH_SIZE:.+]] = arith.index_cast %arg0 : i32 to index
@@ -98,14 +98,14 @@
// ===========================================================================
// CHECK: util.global private mutable @ex0_embedded_elf_x86_64_dispatch1_512x1x1_buffer : !hal.buffer
- // CHECK: func @ex0_embedded_elf_x86_64_dispatch1_512x1x1(%arg0: i32)
+ // CHECK: func.func @ex0_embedded_elf_x86_64_dispatch1_512x1x1(%arg0: i32)
// CHECK: hal.command_buffer.dispatch.symbol<%{{.+}} : !hal.command_buffer> target(@ex0::@embedded_elf_x86_64::@dispatch1) workgroups([%c128, %c1, %c1])
// CHECK: util.global private mutable @ex0_embedded_elf_x86_64_dispatch1_128x32x1_buffer : !hal.buffer
- // CHECK: func @ex0_embedded_elf_x86_64_dispatch1_128x32x1(%arg0: i32)
+ // CHECK: func.func @ex0_embedded_elf_x86_64_dispatch1_128x32x1(%arg0: i32)
// CHECK: hal.command_buffer.dispatch.symbol<%{{.+}} : !hal.command_buffer> target(@ex0::@embedded_elf_x86_64::@dispatch1) workgroups([%c32, %c1, %c1])
- func private @main() -> !stream.timepoint {
+ func.func private @main() -> !stream.timepoint {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c32 = arith.constant 32 : index
diff --git a/compiler/src/iree/compiler/Dialect/HAL/Transforms/test/materialize_interfaces.mlir b/compiler/src/iree/compiler/Dialect/HAL/Transforms/test/materialize_interfaces.mlir
index 68d41ce..a73860a 100644
--- a/compiler/src/iree/compiler/Dialect/HAL/Transforms/test/materialize_interfaces.mlir
+++ b/compiler/src/iree/compiler/Dialect/HAL/Transforms/test/materialize_interfaces.mlir
@@ -43,11 +43,11 @@
// CHECK: hal.executable.variant public @embedded_elf_arm_64, target = #executable_target_embedded_elf_arm_64
// CHECK: hal.executable.entry_point public @entry layout(#executable_layout)
// CHECK: builtin.module
-// CHECK-NEXT: func @entry()
+// CHECK-NEXT: func.func @entry()
// CHECK: hal.executable.variant public @embedded_elf_x86_64, target = #executable_target_embedded_elf_x86_64
// CHECK: hal.executable.entry_point public @entry layout(#executable_layout)
// CHECK: builtin.module
-// CHECK-NEXT: func @entry()
+// CHECK-NEXT: func.func @entry()
// TODO(benvanik): test fixup of stream ops when attrs to specify the
// layout bindings are implemented.
diff --git a/compiler/src/iree/compiler/Dialect/HAL/Transforms/test/memoize_device_queries.mlir b/compiler/src/iree/compiler/Dialect/HAL/Transforms/test/memoize_device_queries.mlir
index 3e3bde3..6884811 100644
--- a/compiler/src/iree/compiler/Dialect/HAL/Transforms/test/memoize_device_queries.mlir
+++ b/compiler/src/iree/compiler/Dialect/HAL/Transforms/test/memoize_device_queries.mlir
@@ -18,7 +18,7 @@
// CHECK: util.global private @_device_query_2
-// CHECK-LABEL: func @device_matchers
+// CHECK-LABEL: func.func @device_matchers
func.func @device_matchers(%device : !hal.device) -> (i1, i1, i1, i1, i1, i1) {
// Same queries (same variables):
// CHECK-NEXT: = util.global.load @_device_query_0_ok : i1
diff --git a/compiler/src/iree/compiler/Dialect/HAL/Transforms/test/pack_dispatch_operands.mlir b/compiler/src/iree/compiler/Dialect/HAL/Transforms/test/pack_dispatch_operands.mlir
index f95adae..bf2b9f5 100644
--- a/compiler/src/iree/compiler/Dialect/HAL/Transforms/test/pack_dispatch_operands.mlir
+++ b/compiler/src/iree/compiler/Dialect/HAL/Transforms/test/pack_dispatch_operands.mlir
@@ -3,7 +3,7 @@
stream.executable private @ex0 {
stream.executable.export public @device_i1
builtin.module {
- // CHECK-LABEL: func @device_i1
+ // CHECK-LABEL: func.func @device_i1
// CHECK-SAME: (%arg0: i32, %arg1: !stream.binding)
func.func @device_i1(%arg0: i1 {stream.values = [true, false]}, %arg1: !stream.binding) {
// CHECK-NEXT: %[[DEV_I1:.+]] = arith.trunci %arg0 {stream.values = [true, false]} : i32 to i1
@@ -33,7 +33,7 @@
stream.executable private @ex1 {
stream.executable.export public @device_bf16
builtin.module {
- // CHECK-LABEL: func @device_bf16
+ // CHECK-LABEL: func.func @device_bf16
// CHECK-SAME: (%arg0: i32, %arg1: !stream.binding)
func.func @device_bf16(%arg0: bf16, %arg1: !stream.binding) {
// CHECK-NEXT: %[[DEV_I16:.+]] = arith.trunci %arg0 : i32 to i16
@@ -66,7 +66,7 @@
// CHECK-LABEL: @device_i64
stream.executable.export public @device_i64
builtin.module {
- // CHECK-LABEL: func @device_i64
+ // CHECK-LABEL: func.func @device_i64
// CHECK-SAME: (%arg0: i32, %arg1: i32, %arg2: !stream.binding)
func.func @device_i64(%arg0: i64 {stream.values = [-1 : i64, 0x0000000200000003 : i64]}, %arg1: !stream.binding) {
// CHECK-DAG: %[[DEV_LO64:.+]] = arith.extui %arg0 : i32 to i64
@@ -101,7 +101,7 @@
stream.executable private @ex3 {
stream.executable.export public @device_index
builtin.module {
- // CHECK-LABEL: func @device_index
+ // CHECK-LABEL: func.func @device_index
// CHECK-SAME: (%arg0: i32, %arg1: !stream.binding)
func.func @device_index(%arg0: index {stream.alignment = 16 : index, stream.values = [0 : index, 1234 : index]}, %arg1: !stream.binding) {
// CHECK: %[[DEV_INDEX:.+]] = arith.index_cast %arg0 {
diff --git a/compiler/src/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/test/interface_ops.mlir b/compiler/src/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/test/interface_ops.mlir
index d765a58..b54a1d7 100644
--- a/compiler/src/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/test/interface_ops.mlir
+++ b/compiler/src/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/test/interface_ops.mlir
@@ -3,7 +3,7 @@
// CHECK: memref.global "private" constant @__constant_5xi32 : memref<5xi32> = dense<[1, 2, 3, 4, 5]>
memref.global "private" constant @__constant_5xi32 : memref<5xi32> = dense<[1, 2, 3, 4, 5]>
-// CHECK-LABEL: func @entry(
+// CHECK-LABEL: func.func @entry(
// CHECK-SAME: %[[SCRATCHPAD:.+]]: memref<?xi8>,
// CHECK-SAME: %[[CONSTANTS:.+]]: memref<?xi32>,
// CHECK-SAME: %[[BINDINGS:.+]]: !util.list<memref<?xi8>>,
diff --git a/compiler/src/iree/compiler/Dialect/Stream/Conversion/FlowToStream/test/executable_ops.mlir b/compiler/src/iree/compiler/Dialect/Stream/Conversion/FlowToStream/test/executable_ops.mlir
index 1a3f1b5..40997e9 100644
--- a/compiler/src/iree/compiler/Dialect/Stream/Conversion/FlowToStream/test/executable_ops.mlir
+++ b/compiler/src/iree/compiler/Dialect/Stream/Conversion/FlowToStream/test/executable_ops.mlir
@@ -4,7 +4,7 @@
flow.executable private @rank_0_binding {
flow.dispatch.entry public @dispatch
builtin.module {
- // CHECK: func @dispatch(%[[INPUT:.+]]: !stream.binding)
+ // CHECK: func.func @dispatch(%[[INPUT:.+]]: !stream.binding)
func.func @dispatch(%input: !flow.dispatch.tensor<readonly:i64>) {
// CHECK: %[[SUBSPAN:.+]] = stream.binding.subspan %[[INPUT]][%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:i64>
// CHECK: = flow.dispatch.tensor.load %[[SUBSPAN]]
@@ -21,7 +21,7 @@
flow.executable private @static_bindings {
flow.dispatch.entry public @dispatch
builtin.module {
- // CHECK: func @dispatch(%[[INPUT:.+]]: !stream.binding, %[[OUTPUT:.+]]: !stream.binding)
+ // CHECK: func.func @dispatch(%[[INPUT:.+]]: !stream.binding, %[[OUTPUT:.+]]: !stream.binding)
func.func @dispatch(%input: !flow.dispatch.tensor<readonly:1x4xf32>, %output: !flow.dispatch.tensor<writeonly:4xf32>) {
// CHECK-DAG: %[[TIED_INPUT:.+]] = stream.binding.subspan %[[INPUT]][%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:1x4xf32>
// CHECK-DAG: %[[TIED_OUTPUT:.+]] = stream.binding.subspan %[[OUTPUT]][%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:4xf32>
@@ -43,7 +43,7 @@
flow.executable private @dynamic_bindings {
flow.dispatch.entry public @dispatch
builtin.module {
- // CHECK: func @dispatch(%[[DIM:.+]]: index, %[[INPUT:.+]]: !stream.binding, %[[OUTPUT:.+]]: !stream.binding)
+ // CHECK: func.func @dispatch(%[[DIM:.+]]: index, %[[INPUT:.+]]: !stream.binding, %[[OUTPUT:.+]]: !stream.binding)
func.func @dispatch(%dim: index, %input: !flow.dispatch.tensor<readonly:1x?xf32>, %output: !flow.dispatch.tensor<writeonly:?xf32>) {
// CHECK-DAG: %[[TIED_INPUT:.+]] = stream.binding.subspan %[[INPUT]][%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:1x?xf32>{%[[DIM]]}
// CHECK-DAG: %[[TIED_OUTPUT:.+]] = stream.binding.subspan %[[OUTPUT]][%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:?xf32>{%[[DIM]]}
@@ -65,7 +65,7 @@
flow.executable private @indirect_dynamic_bindings {
flow.dispatch.entry public @dispatch
builtin.module {
- // CHECK: func @dispatch(%[[DIM_TENSOR:.+]]: !stream.binding, %[[INPUT:.+]]: !stream.binding, %[[OUTPUT:.+]]: !stream.binding)
+ // CHECK: func.func @dispatch(%[[DIM_TENSOR:.+]]: !stream.binding, %[[INPUT:.+]]: !stream.binding, %[[OUTPUT:.+]]: !stream.binding)
func.func @dispatch(%dim_tensor: !flow.dispatch.tensor<readonly:i64>, %input: !flow.dispatch.tensor<readonly:1x?xf32>, %output: !flow.dispatch.tensor<writeonly:?xf32>) {
// CHECK: %[[DIM_SUBSPAN:.+]] = stream.binding.subspan %[[DIM_TENSOR]][%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:i64>
// CHECK: %[[DIM_TILE:.+]] = flow.dispatch.tensor.load %[[DIM_SUBSPAN]]
@@ -95,7 +95,7 @@
flow.executable private @nested_bindings {
flow.dispatch.entry public @dispatch
builtin.module {
- // CHECK: func @dispatch(%[[DIM:.+]]: index, %[[INPUT:.+]]: !stream.binding, %[[OUTPUT:.+]]: !stream.binding)
+ // CHECK: func.func @dispatch(%[[DIM:.+]]: index, %[[INPUT:.+]]: !stream.binding, %[[OUTPUT:.+]]: !stream.binding)
func.func @dispatch(%dim: index, %input: !flow.dispatch.tensor<readonly:1x?xf32>, %output: !flow.dispatch.tensor<writeonly:?xf32>) {
// CHECK-DAG: %[[TIED_INPUT:.+]] = stream.binding.subspan %[[INPUT]][%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:1x?xf32>{%[[DIM]]}
// CHECK-DAG: %[[TIED_OUTPUT:.+]] = stream.binding.subspan %[[OUTPUT]][%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:?xf32>{%[[DIM]]}
diff --git a/compiler/src/iree/compiler/Dialect/Stream/Conversion/StandardToStream/test/structural_ops.mlir b/compiler/src/iree/compiler/Dialect/Stream/Conversion/StandardToStream/test/structural_ops.mlir
index 3211d9a..6208c27 100644
--- a/compiler/src/iree/compiler/Dialect/Stream/Conversion/StandardToStream/test/structural_ops.mlir
+++ b/compiler/src/iree/compiler/Dialect/Stream/Conversion/StandardToStream/test/structural_ops.mlir
@@ -14,7 +14,7 @@
return %0#0, %0#1, %0#2 : tensor<4x?xf32>, i1, tensor<i32>
}
-// CHECK: func private @callee
+// CHECK: func.func private @callee
func.func private @callee(%arg0: tensor<4x?xf32>, %arg1: i1, %arg2: tensor<i32>)
-> (tensor<4x?xf32>, i1, tensor<i32>)
diff --git a/compiler/src/iree/compiler/Dialect/Stream/IR/test/executable_ops.mlir b/compiler/src/iree/compiler/Dialect/Stream/IR/test/executable_ops.mlir
index 6e0d13b..5f09c12 100644
--- a/compiler/src/iree/compiler/Dialect/Stream/IR/test/executable_ops.mlir
+++ b/compiler/src/iree/compiler/Dialect/Stream/IR/test/executable_ops.mlir
@@ -6,7 +6,7 @@
stream.executable.export public @dispatch
// CHECK-NEXT: builtin.module
builtin.module {
- // CHECK-NEXT: func @dispatch(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
+ // CHECK-NEXT: func.func @dispatch(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
func.func @dispatch(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
// CHECK-DAG: = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:?x5x64xf32>{%arg2}
diff --git a/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/annotate_dispatch_arguments.mlir b/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/annotate_dispatch_arguments.mlir
index 6989938..47be1c1 100644
--- a/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/annotate_dispatch_arguments.mlir
+++ b/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/annotate_dispatch_arguments.mlir
@@ -10,7 +10,7 @@
stream.executable private @annotatePotentialValuesEx {
stream.executable.export public @dispatch
builtin.module {
- // CHECK: func @dispatch(
+ // CHECK: func.func @dispatch(
// CHECK-SAME: %arg0: i32,
// CHECK-SAME: %arg1: index {stream.alignment = 4 : index, stream.values = [20 : index, 40 : index]},
// CHECK-SAME: %arg2: i1 {stream.values = [false, true]},
@@ -54,7 +54,7 @@
stream.executable private @annotateOperandAlignmentEx {
stream.executable.export public @dispatch
builtin.module {
- // CHECK: func @dispatch(
+ // CHECK: func.func @dispatch(
// CHECK-SAME: %arg0: index,
// CHECK-SAME: %arg1: index {stream.alignment = 16 : index},
// CHECK-SAME: %arg2: index {stream.values = [4096 : index, 4097 : index]},
@@ -103,7 +103,7 @@
stream.executable private @annotateBindingAlignmentEx {
stream.executable.export public @dispatch
builtin.module {
- // CHECK: func @dispatch(
+ // CHECK: func.func @dispatch(
// CHECK-SAME: %arg0: !stream.binding {stream.alignment = 64 : index},
// CHECK-SAME: %arg1: !stream.binding,
// CHECK-SAME: %arg2: !stream.binding {stream.alignment = 8 : index},
diff --git a/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/convert_to_stream.mlir b/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/convert_to_stream.mlir
index ee6d9ee..c762126 100644
--- a/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/convert_to_stream.mlir
+++ b/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/convert_to_stream.mlir
@@ -5,7 +5,7 @@
// CHECK: stream.executable.export public @dispatch
flow.dispatch.entry public @dispatch attributes {workgroup_rank = 3 : index}
builtin.module {
- // CHECK: func @dispatch(%arg0: !stream.binding, %arg1: !stream.binding, %[[ARG0_DIM0:.+]]: index, %[[ARG1_DIM1:.+]]: index)
+ // CHECK: func.func @dispatch(%arg0: !stream.binding, %arg1: !stream.binding, %[[ARG0_DIM0:.+]]: index, %[[ARG1_DIM1:.+]]: index)
func.func @dispatch(%arg0: !flow.dispatch.tensor<readonly:?x4xf32>, %arg1: !flow.dispatch.tensor<writeonly:4x?xf32>,
%arg0_dim0: index, %arg1_dim1: index) {
// CHECK: %[[ARG0_TENSOR:.+]] = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:?x4xf32>{%[[ARG0_DIM0]]}
@@ -77,7 +77,7 @@
flow.dispatch.entry public @dispatch attributes {workgroup_rank = 3 : index}
// CHECK: builtin.module
builtin.module {
- // CHECK: func @dispatch(%[[BINDING0:.+]]: !stream.binding, %[[BINDING1:.+]]: !stream.binding)
+ // CHECK: func.func @dispatch(%[[BINDING0:.+]]: !stream.binding, %[[BINDING1:.+]]: !stream.binding)
func.func @dispatch(%arg0: !flow.dispatch.tensor<readonly:i32>, %arg1: !flow.dispatch.tensor<writeonly:i1>) {
%c3_i32 = arith.constant 3 : i32
// CHECK: %[[ARG0:.+]] = stream.binding.subspan %[[BINDING0]][%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:i32>
@@ -117,7 +117,7 @@
}
}
-// CHECK-LABEL: func @while_test
+// CHECK-LABEL: func.func @while_test
func.func @while_test() {
%c1 = arith.constant 1 : index
diff --git a/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/fold_uniform_operands.mlir b/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/fold_uniform_operands.mlir
index b07adea..3f3dcf1 100644
--- a/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/fold_uniform_operands.mlir
+++ b/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/fold_uniform_operands.mlir
@@ -11,7 +11,7 @@
stream.executable private @deduplicateOperandsEx {
stream.executable.export public @dispatch
builtin.module {
- // CHECK: func @dispatch(%[[BINDING:.+]]: !stream.binding, %[[A01:.+]]: i32, %[[B0:.+]]: index, %[[C:.+]]: i1, %[[B1:.+]]: index)
+ // CHECK: func.func @dispatch(%[[BINDING:.+]]: !stream.binding, %[[A01:.+]]: i32, %[[B0:.+]]: index, %[[C:.+]]: i1, %[[B1:.+]]: index)
func.func @dispatch(%binding: !stream.binding, %a0: i32, %b0: index, %c: i1, %a1: i32, %b1: index) {
// CHECK-NEXT: util.do_not_optimize(%[[BINDING]]) : !stream.binding
util.do_not_optimize(%binding) : !stream.binding
@@ -29,7 +29,7 @@
}
}
}
-// CHECK: func @deduplicateOperands(%[[A:.+]]: i32, %[[B:.+]]: index, %[[C:.+]]: i1)
+// CHECK: func.func @deduplicateOperands(%[[A:.+]]: i32, %[[B:.+]]: index, %[[C:.+]]: i1)
func.func @deduplicateOperands(%a: i32, %b: index, %c: i1) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
@@ -60,7 +60,7 @@
stream.executable private @inlineConstantOperandsEx {
stream.executable.export public @dispatch
builtin.module {
- // CHECK: func @dispatch(%[[BINDING:.+]]: !stream.binding, %[[A:.+]]: i32, %[[C:.+]]: i1)
+ // CHECK: func.func @dispatch(%[[BINDING:.+]]: !stream.binding, %[[A:.+]]: i32, %[[C:.+]]: i1)
func.func @dispatch(%binding: !stream.binding, %a: i32, %b: index, %c: i1) {
// CHECK: %[[B:.+]] = arith.constant 20 : index
// CHECK-NEXT: util.do_not_optimize(%[[BINDING]]) : !stream.binding
@@ -75,7 +75,7 @@
}
}
}
-// CHECK: func @inlineConstantOperands(%[[A:.+]]: i32)
+// CHECK: func.func @inlineConstantOperands(%[[A:.+]]: i32)
func.func @inlineConstantOperands(%a: i32) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
diff --git a/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/fuse_dispatch_bindings.mlir b/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/fuse_dispatch_bindings.mlir
index e3a2bd7..95295d3 100644
--- a/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/fuse_dispatch_bindings.mlir
+++ b/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/fuse_dispatch_bindings.mlir
@@ -12,7 +12,7 @@
stream.executable private @rebaseBindingsEx {
stream.executable.export public @dispatch
builtin.module {
- // CHECK: func @dispatch(%[[BINDING_A:.+]]: !stream.binding, %[[BINDING_B:.+]]: !stream.binding,
+ // CHECK: func.func @dispatch(%[[BINDING_A:.+]]: !stream.binding, %[[BINDING_B:.+]]: !stream.binding,
// CHECK-SAME: %[[OFFSET_A:.+]]: index, %[[OFFSET_B:.+]]: index, %[[OPERAND:.+]]: index)
func.func @dispatch(%binding_a: !stream.binding, %binding_b: !stream.binding, %operand: index) {
%c0 = arith.constant 0 : index
@@ -36,7 +36,7 @@
}
}
}
-// CHECK: func @rebaseBindings(%[[OPERAND:.+]]: index)
+// CHECK: func.func @rebaseBindings(%[[OPERAND:.+]]: index)
func.func @rebaseBindings(%operand: index) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
@@ -90,7 +90,7 @@
stream.executable private @deduplicateBindingsEx {
stream.executable.export public @dispatch
builtin.module {
- // CHECK: func @dispatch(%[[BINDING_A:.+]]: !stream.binding, %[[BINDING_B:.+]]: !stream.binding,
+ // CHECK: func.func @dispatch(%[[BINDING_A:.+]]: !stream.binding, %[[BINDING_B:.+]]: !stream.binding,
// CHECK-SAME: %[[OFFSET_A:.+]]: index, %[[OFFSET_C:.+]]: index, %[[OFFSET_B:.+]]: index, %[[OPERAND:.+]]: index)
func.func @dispatch(%binding_a: !stream.binding, %binding_b: !stream.binding, %binding_c: !stream.binding, %operand: index) {
%c0 = arith.constant 0 : index
@@ -121,7 +121,7 @@
}
}
}
-// CHECK: func @deduplicateBindings(%[[OPERAND:.+]]: index)
+// CHECK: func.func @deduplicateBindings(%[[OPERAND:.+]]: index)
func.func @deduplicateBindings(%operand: index) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
diff --git a/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/fuse_dispatch_bindings_noalias.mlir b/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/fuse_dispatch_bindings_noalias.mlir
index a2903c3..d8d0e9f 100644
--- a/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/fuse_dispatch_bindings_noalias.mlir
+++ b/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/fuse_dispatch_bindings_noalias.mlir
@@ -9,7 +9,7 @@
stream.executable private @deduplicateBindingsEx {
stream.executable.export public @dispatch
builtin.module {
- // CHECK: func @dispatch(%[[BINDING_A:.+]]: !stream.binding, %[[BINDING_C:.+]]: !stream.binding,
+ // CHECK: func.func @dispatch(%[[BINDING_A:.+]]: !stream.binding, %[[BINDING_C:.+]]: !stream.binding,
// CHECK-SAME: %[[OFFSET_A:.+]]: index, %[[OFFSET_B:.+]]: index, %[[OFFSET_C:.+]]: index, %[[OPERAND:.+]]: index)
func.func @dispatch(%binding_a: !stream.binding, %binding_b: !stream.binding, %binding_c: !stream.binding, %operand: index) {
%c0 = arith.constant 0 : index
@@ -40,7 +40,7 @@
}
}
}
-// CHECK: func @deduplicateBindings(%[[OPERAND:.+]]: index)
+// CHECK: func.func @deduplicateBindings(%[[OPERAND:.+]]: index)
func.func @deduplicateBindings(%operand: index) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
diff --git a/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/materialize_builtins.mlir b/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/materialize_builtins.mlir
index b8a83fd..9dbe18e 100644
--- a/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/materialize_builtins.mlir
+++ b/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/materialize_builtins.mlir
@@ -45,7 +45,7 @@
util.initializer.return
}
-// CHECK: func @otherUser
+// CHECK: func.func @otherUser
func.func @otherUser() -> !stream.resource<*> {
%c128 = arith.constant 128 : index
%c1_i64 = arith.constant 1 : i64
diff --git a/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/specialize_dispatches.mlir b/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/specialize_dispatches.mlir
index bba2ab3..ac940e1 100644
--- a/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/specialize_dispatches.mlir
+++ b/compiler/src/iree/compiler/Dialect/Stream/Transforms/test/specialize_dispatches.mlir
@@ -8,7 +8,7 @@
stream.executable private @specializeEx {
stream.executable.export public @dispatch
builtin.module {
- // CHECK: func @dispatch(%[[BINDING:.+]]: !stream.binding, %[[A:.+]]: i32, %[[SITE:.+]]: index)
+ // CHECK: func.func @dispatch(%[[BINDING:.+]]: !stream.binding, %[[A:.+]]: i32, %[[SITE:.+]]: index)
func.func @dispatch(%binding: !stream.binding, %a: i32, %b: index, %c: i1, %d: i1) {
// CHECK-NEXT: %[[LUT_I32:.+]] = arith.constant dense<[
// CHECK-SAME: [20],
@@ -39,7 +39,7 @@
}
}
}
-// CHECK: func @specialize(%[[A:.+]]: i32)
+// CHECK: func.func @specialize(%[[A:.+]]: i32)
func.func @specialize(%a: i32) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
diff --git a/compiler/src/iree/compiler/Dialect/Util/IR/test/global_ops.mlir b/compiler/src/iree/compiler/Dialect/Util/IR/test/global_ops.mlir
index b8acd77..551f4fe 100644
--- a/compiler/src/iree/compiler/Dialect/Util/IR/test/global_ops.mlir
+++ b/compiler/src/iree/compiler/Dialect/Util/IR/test/global_ops.mlir
@@ -31,8 +31,8 @@
util.global private @v_initialized : tensor<4xi32>
// CHECK-NEXT: util.initializer {
util.initializer {
- // CHECK-NEXT: %[[VALUE:.+]] = call @initializer() : () -> tensor<4xi32>
- %0 = call @initializer() : () -> tensor<4xi32>
+ // CHECK-NEXT: %[[VALUE:.+]] = func.call @initializer() : () -> tensor<4xi32>
+ %0 = func.call @initializer() : () -> tensor<4xi32>
// CHECK-NEXT: util.global.store %[[VALUE]], @v_initialized : tensor<4xi32>
util.global.store %0, @v_initialized : tensor<4xi32>
util.initializer.return
diff --git a/compiler/src/iree/compiler/Dialect/Util/Transforms/test/combine_initializers.mlir b/compiler/src/iree/compiler/Dialect/Util/Transforms/test/combine_initializers.mlir
index 35ed74e..da526d1 100644
--- a/compiler/src/iree/compiler/Dialect/Util/Transforms/test/combine_initializers.mlir
+++ b/compiler/src/iree/compiler/Dialect/Util/Transforms/test/combine_initializers.mlir
@@ -7,7 +7,7 @@
// CHECK: util.global private mutable @global0 : index
util.global private mutable @global0 : index
util.initializer {
- %value0 = call @extern() : () -> index
+ %value0 = func.call @extern() : () -> index
util.global.store %value0, @global0 : index
util.initializer.return
}
@@ -16,18 +16,18 @@
// CHECK-NEXT: util.global private @global2 : index
util.global private @global2 : index
util.initializer {
- %value1 = call @extern() : () -> index
+ %value1 = func.call @extern() : () -> index
util.global.store %value1, @global1 : index
- %value2 = call @extern() : () -> index
+ %value2 = func.call @extern() : () -> index
util.global.store %value2, @global2 : index
util.initializer.return
}
// CHECK-NEXT: util.initializer {
-// CHECK-NEXT: %[[VALUE0:.+]] = call @extern()
+// CHECK-NEXT: %[[VALUE0:.+]] = func.call @extern()
// CHECK-NEXT: util.global.store %[[VALUE0]], @global0
-// CHECK-NEXT: %[[VALUE1:.+]] = call @extern()
+// CHECK-NEXT: %[[VALUE1:.+]] = func.call @extern()
// CHECK-NEXT: util.global.store %[[VALUE1]], @global1
-// CHECK-NEXT: %[[VALUE2:.+]] = call @extern()
+// CHECK-NEXT: %[[VALUE2:.+]] = func.call @extern()
// CHECK-NEXT: util.global.store %[[VALUE2]], @global2
// CHECK-NEXT: util.initializer.return
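Note: `call` moved into the `func` dialect along with `func`. Inside a `func.func` body the dialect prefix can still be elided (the enclosing func op supplies `func` as the default dialect, which is why CHECK lines such as `call @two_dispatch` above stay unchanged), but inside other regions such as `util.initializer` the op must be spelled fully qualified, hence `func.call` in these initializers. A minimal sketch mirroring the global_ops.mlir pattern above:

    util.initializer {
      %0 = func.call @initializer() : () -> tensor<4xi32>
      util.global.store %0, @v_initialized : tensor<4xi32>
      util.initializer.return
    }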
diff --git a/compiler/src/iree/compiler/Dialect/Util/Transforms/test/demote_f32_to_f16.mlir b/compiler/src/iree/compiler/Dialect/Util/Transforms/test/demote_f32_to_f16.mlir
index 01f3476..2e0f9f7 100644
--- a/compiler/src/iree/compiler/Dialect/Util/Transforms/test/demote_f32_to_f16.mlir
+++ b/compiler/src/iree/compiler/Dialect/Util/Transforms/test/demote_f32_to_f16.mlir
@@ -3,7 +3,7 @@
// NOTE: for more comprehensive tests see demote_i64_to_i32.mlir.
// CHECK: util.global {{.*}} : tensor<4xf16>
-// CHECK-LABEL: func @simple_f32() -> tensor<4xf16>
+// CHECK-LABEL: func.func @simple_f32() -> tensor<4xf16>
// CHECK-NEXT: %{{.*}} = util.global.address @__global : !util.ptr<tensor<4xf16>>
// CHECK-NEXT: %{{.*}} = util.global.load.indirect %{{.*}} : !util.ptr<tensor<4xf16>> -> tensor<4xf16>
// CHECK-NEXT: return %{{.*}} : tensor<4xf16>
@@ -18,7 +18,7 @@
// CHECK: util.global
// CHECK-NOT: f32
-// CHECK-LABEL: func @nested_region_f32()
+// CHECK-LABEL: func.func @nested_region_f32()
// CHECK-NOT: f32
// CHECK: return %{{.*}} : tensor<4xf16>
util.global private @"__global" = dense<[1.000000e+01, 5.000000e+00, 1.000000e+01, 5.000000e+00]> : tensor<4xf32>
diff --git a/compiler/src/iree/compiler/Dialect/Util/Transforms/test/demote_f64_to_f32.mlir b/compiler/src/iree/compiler/Dialect/Util/Transforms/test/demote_f64_to_f32.mlir
index c349e48..051c224 100644
--- a/compiler/src/iree/compiler/Dialect/Util/Transforms/test/demote_f64_to_f32.mlir
+++ b/compiler/src/iree/compiler/Dialect/Util/Transforms/test/demote_f64_to_f32.mlir
@@ -2,7 +2,7 @@
// NOTE: for more comprehensive tests see demote_i64_to_i32.mlir.
-// CHECK-LABEL: func @constantF64
+// CHECK-LABEL: func.func @constantF64
// CHECK-SAME: () -> f32
func.func @constantF64() -> f64 {
// CHECK-NEXT: constant 123.{{.+}} : f32
@@ -12,7 +12,7 @@
// -----
-// CHECK-LABEL: func @tensorTypesF64
+// CHECK-LABEL: func.func @tensorTypesF64
// CHECK-SAME: (%arg0: tensor<4x4xf32>) -> tensor<4x4xf32>
func.func @tensorTypesF64(%arg0 : tensor<4x4xf64>) -> tensor<4x4xf64> {
// CHECK-NEXT: return %arg0 : tensor<4x4xf32>
@@ -22,7 +22,7 @@
// -----
// CHECK: util.global {{.*}} : tensor<4xf32>
-// CHECK-LABEL: func @simple_f64() -> tensor<4xf32>
+// CHECK-LABEL: func.func @simple_f64() -> tensor<4xf32>
// CHECK-NEXT: %{{.*}} = util.global.address @__global : !util.ptr<tensor<4xf32>>
// CHECK-NEXT: %{{.*}} = util.global.load.indirect %{{.*}} : !util.ptr<tensor<4xf32>> -> tensor<4xf32>
// CHECK-NEXT: return %{{.*}} : tensor<4xf32>
@@ -37,7 +37,7 @@
// CHECK: util.global
// CHECK-NOT: f64
-// CHECK-LABEL: func @nested_region_f64()
+// CHECK-LABEL: func.func @nested_region_f64()
// CHECK-NOT: f64
// CHECK: return %{{.*}} : tensor<4xf32>
util.global private @"__global" = dense<[1.000000e+01, 5.000000e+00, 1.000000e+01, 5.000000e+00]> : tensor<4xf64>
@@ -58,18 +58,18 @@
// Check handling of width-sensitive arith casts.
-// CHECK-LABEL: func @arith.truncf(
+// CHECK-LABEL: func.func @arith.truncf(
// CHECK-SAME: %[[ARG0:.*]]: f32) -> f32 {
// CHECK: return %[[ARG0]] : f32
-func @arith.truncf(%arg0: f64) -> f32 {
+func.func @arith.truncf(%arg0: f64) -> f32 {
%0 = arith.truncf %arg0 : f64 to f32
return %0 : f32
}
-// CHECK-LABEL: func @arith.extf(
+// CHECK-LABEL: func.func @arith.extf(
// CHECK-SAME: %[[ARG0:.*]]: f32) -> f32 {
// CHECK: return %[[ARG0]] : f32
-func @arith.extf(%arg0: f32) -> f64 {
+func.func @arith.extf(%arg0: f32) -> f64 {
%0 = arith.extf %arg0 : f32 to f64
return %0 : f64
}
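Note: the last two hunks change the test IR itself, not just CHECK lines. At module scope the bare `func @...` spelling no longer parses with this llvm-project version, so the two functions still written in the old form had to be updated as well. The required spelling, using one of the test's own functions:

    func.func @arith.truncf(%arg0: f64) -> f32 {
      %0 = arith.truncf %arg0 : f64 to f32
      return %0 : f32
    }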
diff --git a/compiler/src/iree/compiler/Dialect/Util/Transforms/test/demote_i64_to_i32.mlir b/compiler/src/iree/compiler/Dialect/Util/Transforms/test/demote_i64_to_i32.mlir
index cbec9e3..9d782c9 100644
--- a/compiler/src/iree/compiler/Dialect/Util/Transforms/test/demote_i64_to_i32.mlir
+++ b/compiler/src/iree/compiler/Dialect/Util/Transforms/test/demote_i64_to_i32.mlir
@@ -1,6 +1,6 @@
// RUN: iree-opt --split-input-file -iree-util-demote-i64-to-i32 %s | FileCheck %s
-// CHECK-LABEL: func @constant_i64
+// CHECK-LABEL: func.func @constant_i64
// CHECK-SAME: () -> i32
func.func @constant_i64() -> i64 {
// CHECK-NEXT: constant 123 : i32
@@ -10,7 +10,7 @@
// -----
-// CHECK-LABEL: func @constant_splat_i64
+// CHECK-LABEL: func.func @constant_splat_i64
// CHECK-SAME: () -> tensor<4xi32>
func.func @constant_splat_i64() -> tensor<4xi64> {
// CHECK-NEXT: constant dense<123> : tensor<4xi32>
@@ -20,7 +20,7 @@
// -----
-// CHECK-LABEL: func @constant_dense_i64
+// CHECK-LABEL: func.func @constant_dense_i64
// CHECK-SAME: () -> tensor<4xi32>
func.func @constant_dense_i64() -> tensor<4xi64> {
// CHECK-NEXT: constant dense<[0, 1, 2, 3]> : tensor<4xi32>
@@ -30,7 +30,7 @@
// -----
-// CHECK-LABEL: func @args_i64
+// CHECK-LABEL: func.func @args_i64
// CHECK-SAME: (%arg0: i32) -> i32
func.func @args_i64(%arg0: i64) -> i64 {
// CHECK-NEXT: return %arg0 : i32
@@ -39,7 +39,7 @@
// -----
-// CHECK-LABEL: func @args_ui64
+// CHECK-LABEL: func.func @args_ui64
// CHECK-SAME: (%arg0: ui32) -> ui32
func.func @args_ui64(%arg0: ui64) -> ui64 {
// CHECK-NEXT: return %arg0 : ui32
@@ -48,7 +48,7 @@
// -----
-// CHECK-LABEL: func @args_tensor_i64
+// CHECK-LABEL: func.func @args_tensor_i64
// CHECK-SAME: (%arg0: tensor<4x4xi32>) -> tensor<4x4xi32>
func.func @args_tensor_i64(%arg0: tensor<4x4xi64>) -> tensor<4x4xi64> {
// CHECK-NEXT: return %arg0 : tensor<4x4xi32>
@@ -57,7 +57,7 @@
// -----
-// CHECK-LABEL: func @mhlo_constant_i64
+// CHECK-LABEL: func.func @mhlo_constant_i64
// CHECK-SAME: () -> tensor<1xi32>
func.func @mhlo_constant_i64() -> tensor<1xi64> {
// CHECK-NEXT: mhlo.constant dense<123> : tensor<1xi32>
@@ -67,7 +67,7 @@
// -----
-// CHECK-LABEL: func @mhlo_constant_ui64
+// CHECK-LABEL: func.func @mhlo_constant_ui64
// CHECK-SAME: () -> tensor<1xui32>
func.func @mhlo_constant_ui64() -> tensor<1xui64> {
// CHECK-NEXT: mhlo.constant dense<123> : tensor<1xui32>
@@ -77,7 +77,7 @@
// -----
-// CHECK-LABEL: func @mhlo_compare_i64
+// CHECK-LABEL: func.func @mhlo_compare_i64
// CHECK-SAME: (%arg0: tensor<i32>, %arg1: tensor<i32>) -> (i1, tensor<i32>)
func.func @mhlo_compare_i64(%arg0 : tensor<i64>, %arg1 : tensor<i64>) -> (i1, tensor<i64>) {
// CHECK-NEXT: %0 = "mhlo.compare"(%arg0, %arg1) {comparison_direction = #mhlo<"comparison_direction LT">} : (tensor<i32>, tensor<i32>) -> tensor<i1>
@@ -98,7 +98,7 @@
// -----
-// CHECK-LABEL: func @linalg_matmul_i64
+// CHECK-LABEL: func.func @linalg_matmul_i64
func.func @linalg_matmul_i64(%arg0: tensor<2x3xi64>, %arg1: tensor<3x4xi64>, %arg2: tensor<2x4xi64>) -> tensor<2x4xi64> {
// CHECK: %[[T:.+]] = linalg.matmul ins(%arg0, %arg1 : tensor<2x3xi32>, tensor<3x4xi32>)
// CHECK-SAME: outs(%arg2 : tensor<2x4xi32>) -> tensor<2x4xi32>
@@ -110,7 +110,7 @@
// -----
-// CHECK-LABEL: func @linalg_generic_i64
+// CHECK-LABEL: func.func @linalg_generic_i64
// CHECK-SAME: (%[[ARG:.+]]: tensor<2xi32>) -> tensor<2xi32>
func.func @linalg_generic_i64(%arg: tensor<2xi64>) -> tensor<2xi64> {
// CHECK: %[[INIT:.+]] = linalg.init_tensor [2] : tensor<2xi32>
@@ -128,7 +128,7 @@
// -----
-// CHECK-LABEL: func @linalg_non_structured_op
+// CHECK-LABEL: func.func @linalg_non_structured_op
// CHECK-SAME: (%arg0: tensor<9xi32>) -> tensor<1x9xi32>
func.func @linalg_non_structured_op(%arg0: tensor<9xi64>) -> tensor<1x9xi64> {
// CHECK: %[[RES:.+]] = tensor.expand_shape %arg0 {{\[}}[0, 1]] : tensor<9xi32> into tensor<1x9xi32>
@@ -155,19 +155,19 @@
// CHECK: util.global private @{{.+}} : tensor<4xi32>
util.global private @v_initializer : tensor<4xi64>
util.initializer {
- // CHECK: %[[VALUE:.+]] = call @initializer() : () -> tensor<4xi32>
- %0 = call @initializer() : () -> tensor<4xi64>
+ // CHECK: %[[VALUE:.+]] = func.call @initializer() : () -> tensor<4xi32>
+ %0 = func.call @initializer() : () -> tensor<4xi64>
// CHECK: util.global.store %[[VALUE]], @v_initializer : tensor<4xi32>
util.global.store %0, @v_initializer : tensor<4xi64>
util.initializer.return
}
-// CHECK: func private @initializer() -> tensor<4xi32>
+// CHECK: func.func private @initializer() -> tensor<4xi32>
func.func private @initializer() -> tensor<4xi64>
// -----
// CHECK: util.global {{.*}} : tensor<4xi32>
-// CHECK-LABEL: func @simple_i64() -> tensor<4xi32>
+// CHECK-LABEL: func.func @simple_i64() -> tensor<4xi32>
// CHECK-NEXT: %{{.*}} = util.global.address @__global : !util.ptr<tensor<4xi32>>
// CHECK-NEXT: %{{.*}} = util.global.load.indirect %{{.*}} : !util.ptr<tensor<4xi32>> -> tensor<4xi32>
// CHECK-NEXT: return %{{.*}} : tensor<4xi32>
@@ -182,7 +182,7 @@
// CHECK: util.global {{.+}} : tensor<4xi32>
util.global private @"__global" = dense<[1, 2, 3, 4]> : tensor<4xi64>
-// CHECK-LABEL: func @nested_region_i64()
+// CHECK-LABEL: func.func @nested_region_i64()
func.func @nested_region_i64() -> (tensor<4xi64>) {
// CHECK-NEXT: util.global.address {{.+}} : !util.ptr<tensor<4xi32>>
%0 = util.global.address @"__global" : !util.ptr<tensor<4xi64>>
@@ -209,26 +209,26 @@
// Check handling of width-sensitive arith casts.
-// CHECK-LABEL: func @arith.trunci(
+// CHECK-LABEL: func.func @arith.trunci(
// CHECK-SAME: %[[ARG0:.*]]: i32) -> i32 {
// CHECK: return %[[ARG0]] : i32
-func @arith.trunci(%arg0: i64) -> i32 {
+func.func @arith.trunci(%arg0: i64) -> i32 {
%0 = arith.trunci %arg0 : i64 to i32
return %0 : i32
}
-// CHECK-LABEL: func @arith.extui(
+// CHECK-LABEL: func.func @arith.extui(
// CHECK-SAME: %[[ARG0:.*]]: i32) -> i32 {
// CHECK: return %[[ARG0]] : i32
-func @arith.extui(%arg0: i32) -> i64 {
+func.func @arith.extui(%arg0: i32) -> i64 {
%0 = arith.extui %arg0 : i32 to i64
return %0 : i64
}
-// CHECK-LABEL: func @arith.extsi(
+// CHECK-LABEL: func.func @arith.extsi(
// CHECK-SAME: %[[ARG0:.*]]: i32) -> i32 {
// CHECK: return %[[ARG0]] : i32
-func @arith.extsi(%arg0: i32) -> i64 {
+func.func @arith.extsi(%arg0: i32) -> i64 {
%0 = arith.extsi %arg0 : i32 to i64
return %0 : i64
}
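
Note: the `call` -> `func.call` updates follow from the same dialect split. Inside a func.func body the func dialect is the default, so bare `call` and `return` still parse and print, but a `util.initializer` region has no such default, so the op presumably needs its fully qualified `func.call` spelling there. The updated idiom, sketched with a hypothetical @compute callee:

util.global private @value : tensor<4xi32>
util.initializer {
  %0 = func.call @compute() : () -> tensor<4xi32>
  util.global.store %0, @value : tensor<4xi32>
  util.initializer.return
}
func.func private @compute() -> tensor<4xi32>
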
diff --git a/compiler/src/iree/compiler/Dialect/Util/Transforms/test/hoist_into_globals.mlir b/compiler/src/iree/compiler/Dialect/Util/Transforms/test/hoist_into_globals.mlir
index de50c59..fbe4dd6 100644
--- a/compiler/src/iree/compiler/Dialect/Util/Transforms/test/hoist_into_globals.mlir
+++ b/compiler/src/iree/compiler/Dialect/Util/Transforms/test/hoist_into_globals.mlir
@@ -3,7 +3,7 @@
// CHECK-LABEL: @hoist_simple_const_expr
module @hoist_simple_const_expr {
// CHECK: util.global private @[[HOISTED_SYM:.*]] : i32
- // CHECK: func @main
+ // CHECK: func.func @main
func.func @main() -> (i32) {
%0 = arith.constant 0 : i32
%1 = arith.constant 1 : i32
@@ -28,7 +28,7 @@
// checks.
// CHECK-LABEL: @do_not_hoist_variable_op
// CHECK-NOT: util.global
-// CHECK: func @main
+// CHECK: func.func @main
// CHECK: %[[VAL:.*]] = "iree_unregistered.var_expr"
// CHECK: return %[[VAL]]
// CHECK-NOT: util.initializer
@@ -112,7 +112,7 @@
// CHECK: util.global private @latent_global : i32
util.global private @latent_global : i32
- // CHECK: func @main
+ // CHECK: func.func @main
func.func @main() -> (i32, i32, i32) {
// CHECK-DAG: %[[LOAD_HOISTED_0:.*]] = util.global.load @[[HOISTED_0]] : i32
// CHECK-DAG: %[[LOAD_HOISTED_1:.*]] = util.global.load @[[HOISTED_1]] : i32
@@ -149,7 +149,7 @@
// CHECK-LABEL: @hoist_non_leaf_const_expr
module @hoist_non_leaf_const_expr {
// CHECK: util.global private @[[HOISTED:.*]] : i32
- // CHECK: func @main
+ // CHECK: func.func @main
func.func @main() -> (i32) {
// CHECK: %[[LOAD_HOISTED:.*]] = util.global.load @[[HOISTED]] : i32
// CHECK: %[[RESULT:.*]] = "iree_unregistered.non_leaf_const_expr"(%hoisted)
@@ -175,7 +175,7 @@
// CHECK-LABEL: @hoist_implicit_capture
module @hoist_implicit_capture {
// CHECK: util.global private @[[HOISTED_SYM:.*]] : i32
- // CHECK: func @main
+ // CHECK: func.func @main
func.func @main() -> (i32) {
%0 = arith.constant 0 : i32
%1 = arith.constant 1 : i32
diff --git a/compiler/src/iree/compiler/Dialect/Util/Transforms/test/hoist_into_globals_linalg.mlir b/compiler/src/iree/compiler/Dialect/Util/Transforms/test/hoist_into_globals_linalg.mlir
index db983b6..57d1d0d 100644
--- a/compiler/src/iree/compiler/Dialect/Util/Transforms/test/hoist_into_globals_linalg.mlir
+++ b/compiler/src/iree/compiler/Dialect/Util/Transforms/test/hoist_into_globals_linalg.mlir
@@ -6,7 +6,7 @@
#map1 = affine_map<(d0, d1) -> (d0, d1)>
module @compute_hoisted {
// CHECK: util.global private @[[HOISTED:.*]] : tensor<5x6xf32>
- // CHECK: func @main
+ // CHECK: func.func @main
func.func @main() -> (tensor<5x6xf32>) {
%cst_0 = arith.constant dense<1.270000e+02> : tensor<f32>
@@ -41,7 +41,7 @@
#map1 = affine_map<(d0, d1) -> (d0, d1)>
module @broadcast_treated_as_leaf {
// CHECK-NOT: util.global
- // CHECK: func @main
+ // CHECK: func.func @main
func.func @main() -> (tensor<5x6xf32>) {
%cst_0 = arith.constant dense<1.270000e+02> : tensor<f32>
// CHECK: linalg.init_tensor
diff --git a/compiler/src/iree/compiler/Dialect/Util/Transforms/test/promote_f16_to_f32.mlir b/compiler/src/iree/compiler/Dialect/Util/Transforms/test/promote_f16_to_f32.mlir
index 93e80da..50d9a92 100644
--- a/compiler/src/iree/compiler/Dialect/Util/Transforms/test/promote_f16_to_f32.mlir
+++ b/compiler/src/iree/compiler/Dialect/Util/Transforms/test/promote_f16_to_f32.mlir
@@ -3,7 +3,7 @@
// NOTE: for more comprehensive tests see demote_i64_to_i32.mlir.
// CHECK: util.global {{.*}} : tensor<4xf32>
-// CHECK-LABEL: func @simple_f16() -> tensor<4xf32>
+// CHECK-LABEL: func.func @simple_f16() -> tensor<4xf32>
// CHECK-NEXT: %{{.*}} = util.global.address @__global : !util.ptr<tensor<4xf32>>
// CHECK-NEXT: %{{.*}} = util.global.load.indirect %{{.*}} : !util.ptr<tensor<4xf32>> -> tensor<4xf32>
// CHECK-NEXT: return %{{.*}} : tensor<4xf32>
@@ -18,7 +18,7 @@
// CHECK: util.global
// CHECK-NOT: f16
-// CHECK-LABEL: func @nested_region_f16()
+// CHECK-LABEL: func.func @nested_region_f16()
// CHECK-NOT: f16
// CHECK: return %{{.*}} : tensor<4xf32>
util.global private @"__global" = dense<[1.000000e+01, 5.000000e+00, 1.000000e+01, 5.000000e+00]> : tensor<4xf16>
diff --git a/compiler/src/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/control_flow_ops.mlir b/compiler/src/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/control_flow_ops.mlir
index c8830ea..abe3391 100644
--- a/compiler/src/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/control_flow_ops.mlir
+++ b/compiler/src/iree/compiler/Dialect/VM/Conversion/StandardToVM/test/control_flow_ops.mlir
@@ -78,7 +78,7 @@
module @t005_call {
module {
- func private @import_fn(%arg0 : i32) -> i32
+ func.func private @import_fn(%arg0 : i32) -> i32
// CHECK: vm.func private @my_fn
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]]
func.func @my_fn(%arg0 : i32) -> (i32) {
@@ -95,7 +95,7 @@
module @t005_call_int_promotion {
module {
- func private @import_fn(%arg0 : i1) -> i1
+ func.func private @import_fn(%arg0 : i1) -> i1
// CHECK: vm.func private @my_fn
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]]
func.func @my_fn(%arg0 : i1) -> (i1) {
diff --git a/compiler/src/iree/compiler/Dialect/VM/Conversion/UtilToVM/test/global_ops.mlir b/compiler/src/iree/compiler/Dialect/VM/Conversion/UtilToVM/test/global_ops.mlir
index ef1d5a8..a885114 100644
--- a/compiler/src/iree/compiler/Dialect/VM/Conversion/UtilToVM/test/global_ops.mlir
+++ b/compiler/src/iree/compiler/Dialect/VM/Conversion/UtilToVM/test/global_ops.mlir
@@ -16,12 +16,12 @@
// CHECK-NEXT: vm.return
// CHECK-NEXT: }
util.initializer {
- %0 = call @initializer() : () -> !hal.buffer
+ %0 = func.call @initializer() : () -> !hal.buffer
util.global.store %0, @v_initialized : !hal.buffer
util.initializer.return
}
// CHECK-NEXT: vm.func private @initializer() -> !vm.ref<!hal.buffer>
-func private @initializer() -> !hal.buffer
+func.func private @initializer() -> !hal.buffer
// -----
diff --git a/compiler/src/iree/compiler/Dialect/VM/Conversion/VMToEmitC/test/control_flow_ops.mlir b/compiler/src/iree/compiler/Dialect/VM/Conversion/VMToEmitC/test/control_flow_ops.mlir
index 6fc7d6a..db80f71 100644
--- a/compiler/src/iree/compiler/Dialect/VM/Conversion/VMToEmitC/test/control_flow_ops.mlir
+++ b/compiler/src/iree/compiler/Dialect/VM/Conversion/VMToEmitC/test/control_flow_ops.mlir
@@ -60,10 +60,10 @@
// Test vm.call conversion on an imported function.
vm.module @my_module {
- // CHECK: func @my_module_call_[[IMPORTFN:[^\(]+]]
+ // CHECK: func.func @my_module_call_[[IMPORTFN:[^\(]+]]
vm.import @imported_fn(%arg0 : i32) -> i32
- // CHECK: func @my_module_call_imported_fn
+ // CHECK: func.func @my_module_call_imported_fn
vm.func @call_imported_fn(%arg0 : i32) -> i32 {
// Lookup import from module struct.
@@ -114,10 +114,10 @@
// Test vm.call.variadic conversion on an imported function.
vm.module @my_module {
- // CHECK: func @my_module_call_[[VARIADICFN:[^\(]+]]
+ // CHECK: func.func @my_module_call_[[VARIADICFN:[^\(]+]]
vm.import @variadic_fn(%arg0 : i32 ...) -> i32
- // CHECK: func @my_module_call_variadic
+ // CHECK: func.func @my_module_call_variadic
vm.func @call_variadic(%arg0 : i32, %arg1 : i32) -> i32 {
// Lookup import from module struct.
@@ -237,7 +237,7 @@
// Test vm.import conversion on a void function.
vm.module @my_module {
- // CHECK-LABEL: func @my_module_call_0v_v_import_shim(%arg0: !emitc.ptr<!emitc.opaque<"iree_vm_stack_t">>, %arg1: !emitc.ptr<!emitc.opaque<"iree_vm_function_t">>)
+ // CHECK-LABEL: func.func @my_module_call_0v_v_import_shim(%arg0: !emitc.ptr<!emitc.opaque<"iree_vm_stack_t">>, %arg1: !emitc.ptr<!emitc.opaque<"iree_vm_function_t">>)
// CHECK-SAME: -> !emitc.opaque<"iree_status_t"> attributes {emitc.static} {
// Calculate the size of the arguments. To avoid empty structs we insert a dummy value.
@@ -316,7 +316,7 @@
// Test vm.import conversion on a variadic function.
vm.module @my_module {
- // CHECK-LABEL: func @my_module_call_0iCiD_i_2_import_shim(%arg0: !emitc.ptr<!emitc.opaque<"iree_vm_stack_t">>, %arg1: !emitc.ptr<!emitc.opaque<"iree_vm_function_t">>,
+ // CHECK-LABEL: func.func @my_module_call_0iCiD_i_2_import_shim(%arg0: !emitc.ptr<!emitc.opaque<"iree_vm_stack_t">>, %arg1: !emitc.ptr<!emitc.opaque<"iree_vm_function_t">>,
// CHECK-SAME: %arg2: i32, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: !emitc.ptr<!emitc.opaque<"int32_t">>)
// CHECK-SAME: -> !emitc.opaque<"iree_status_t"> attributes {emitc.static} {
@@ -431,7 +431,7 @@
// Test vm.import conversion on a function with vm.ref arguments.
vm.module @my_module {
- // CHECK-LABEL: func @my_module_call_0r_r_import_shim(%arg0: !emitc.ptr<!emitc.opaque<"iree_vm_stack_t">>, %arg1: !emitc.ptr<!emitc.opaque<"iree_vm_function_t">>,
+ // CHECK-LABEL: func.func @my_module_call_0r_r_import_shim(%arg0: !emitc.ptr<!emitc.opaque<"iree_vm_stack_t">>, %arg1: !emitc.ptr<!emitc.opaque<"iree_vm_function_t">>,
// CHECK-SAME: %arg2: !emitc.ptr<!emitc.opaque<"iree_vm_ref_t">>, %arg3: !emitc.ptr<!emitc.opaque<"iree_vm_ref_t">>)
// CHECK-SAME: -> !emitc.opaque<"iree_status_t"> attributes {emitc.static} {
@@ -525,7 +525,7 @@
// CHECK: emitc.call "EMITC_TYPEDEF_STRUCT"() {args = [#emitc.opaque<"my_module_fn_result_t">, #emitc.opaque<"int32_t res0;">]} : () -> ()
// Create a new function to export with the adapted signature.
- // CHECK: func @my_module_fn_export_shim(%arg0: !emitc.ptr<!emitc.opaque<"iree_vm_stack_t">>, %arg1: !emitc.ptr<!emitc.opaque<"iree_vm_function_call_t">>,
+ // CHECK: func.func @my_module_fn_export_shim(%arg0: !emitc.ptr<!emitc.opaque<"iree_vm_stack_t">>, %arg1: !emitc.ptr<!emitc.opaque<"iree_vm_function_call_t">>,
// CHECK-SAME: %arg2: !emitc.ptr<!emitc.opaque<"void">>, %arg3: !emitc.ptr<!emitc.opaque<"void">>, %arg4: !emitc.ptr<!emitc.opaque<"iree_vm_execution_result_t">>)
// CHECK-SAME: -> !emitc.opaque<"iree_status_t"> attributes {emitc.static, vm.calling_convention = "0i_i"}
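
Note: the shim names these CHECK lines now match with a `func.func` prefix encode IREE's VM calling convention. Reading the names (unverified against the implementation), the leading digit is a version, the characters before `_` describe the arguments (`v` void, `i` i32, `r` ref, with `C`...`D` bracketing a variadic span) and the characters after it the results, so `0iCiD_i` would mean "an i32 plus a variadic list of i32s, returning an i32". Under that reading, a hypothetical import such as

vm.module @m {
  vm.import @inc(%arg0 : i32) -> i32
}

should produce a shim named along the lines of @m_call_0i_i_import_shim.
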
diff --git a/compiler/src/iree/compiler/Dialect/VM/Conversion/VMToEmitC/test/func_op.mlir b/compiler/src/iree/compiler/Dialect/VM/Conversion/VMToEmitC/test/func_op.mlir
index 739ea8e..5cc4829 100644
--- a/compiler/src/iree/compiler/Dialect/VM/Conversion/VMToEmitC/test/func_op.mlir
+++ b/compiler/src/iree/compiler/Dialect/VM/Conversion/VMToEmitC/test/func_op.mlir
@@ -4,7 +4,7 @@
// some arguments are getting added. For more details see comments on the
// `ConvertVMToEmitCPass` class in ConvertVMToEmitC.cpp.
vm.module @my_module {
- // CHECK: func @my_module_fn(%arg0: !emitc.ptr<!emitc.opaque<"iree_vm_stack_t">>
+ // CHECK: func.func @my_module_fn(%arg0: !emitc.ptr<!emitc.opaque<"iree_vm_stack_t">>
// CHECK-SAME: %arg1: !emitc.ptr<!emitc.opaque<"my_module_t">>,
// CHECK-SAME: %arg2: !emitc.ptr<!emitc.opaque<"my_module_state_t">>,
// CHECK-SAME: %arg3: !emitc.ptr<!emitc.opaque<"iree_vm_ref_t">>,
diff --git a/compiler/src/iree/compiler/Dialect/VM/Conversion/VMToEmitC/test/type_conversion.mlir b/compiler/src/iree/compiler/Dialect/VM/Conversion/VMToEmitC/test/type_conversion.mlir
index 7685c2b..e65c40d 100644
--- a/compiler/src/iree/compiler/Dialect/VM/Conversion/VMToEmitC/test/type_conversion.mlir
+++ b/compiler/src/iree/compiler/Dialect/VM/Conversion/VMToEmitC/test/type_conversion.mlir
@@ -46,7 +46,7 @@
// vm.func conversion. All references in the signature should be converted to
// emitc pointers.
vm.module @my_module {
- // CHECK: func @fn(%arg0: !emitc.ptr<!emitc.opaque<"iree_vm_ref_t">>, %arg1: i32)
+ // CHECK: func.func @fn(%arg0: !emitc.ptr<!emitc.opaque<"iree_vm_ref_t">>, %arg1: i32)
func.func @fn(%arg0 : !vm.ref<?>, %arg1 : i32) -> () {
return
}
diff --git a/compiler/src/iree/compiler/InputConversion/Common/test/iree_import_public.mlir b/compiler/src/iree/compiler/InputConversion/Common/test/iree_import_public.mlir
index d9051c9..41a1ae0 100644
--- a/compiler/src/iree/compiler/InputConversion/Common/test/iree_import_public.mlir
+++ b/compiler/src/iree/compiler/InputConversion/Common/test/iree_import_public.mlir
@@ -1,6 +1,6 @@
// RUN: iree-opt --split-input-file --iree-import-public %s | FileCheck %s
-// CHECK-LABEL: func @bv_func
+// CHECK-LABEL: func.func @bv_func
// CHECK-SAME: (%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> (!hal.buffer_view, !hal.buffer_view)
// CHECK: return %arg0, %arg1 : !hal.buffer_view, !hal.buffer_view
func.func @bv_func(%arg0 : !iree_input.buffer_view, %arg1 : !iree_input.buffer_view) -> (!iree_input.buffer_view, !iree_input.buffer_view) {
@@ -8,14 +8,14 @@
}
// -----
-// CHECK-LABEL: func @list_func
+// CHECK-LABEL: func.func @list_func
// CHECK-SAME: (%arg0: !util.list<?>) -> !util.list<?>
func.func @list_func(%arg0 : !iree_input.list<!iree_input.variant>) -> !iree_input.list<!iree_input.variant> {
return %arg0 : !iree_input.list<!iree_input.variant>
}
// -----
-// CHECK-LABEL: func @list_func_retains_iree_abi
+// CHECK-LABEL: func.func @list_func_retains_iree_abi
// CHECK-SAME: (%arg0: !util.list<?>) -> !util.list<?>
// CHECK-SAME: iree.abi = "FOOBAR"
func.func @list_func_retains_iree_abi(%arg0 : !iree_input.list<!iree_input.variant>) -> !iree_input.list<!iree_input.variant>
@@ -24,7 +24,7 @@
}
// -----
-// CHECK-LABEL: func @list_func_call
+// CHECK-LABEL: func.func @list_func_call
// CHECK: call @list_func_call(%arg0) : (!util.list<?>) -> !util.list<?>
func.func @list_func_call(%arg0 : !iree_input.list<!iree_input.variant>) -> !iree_input.list<!iree_input.variant> {
call @list_func_call(%arg0) : (!iree_input.list<!iree_input.variant>) -> !iree_input.list<!iree_input.variant>
@@ -32,14 +32,14 @@
}
// -----
-// CHECK-LABEL: func @ptr_func
+// CHECK-LABEL: func.func @ptr_func
// CHECK-SAME: (%arg0: !util.ptr<!hal.buffer_view>) -> !util.ptr<!hal.buffer_view>
func.func @ptr_func(%arg0 : !iree_input.ptr<!iree_input.buffer_view>) -> !iree_input.ptr<!iree_input.buffer_view> {
return %arg0 : !iree_input.ptr<!iree_input.buffer_view>
}
// -----
-// CHECK-LABEL: func @null_op
+// CHECK-LABEL: func.func @null_op
// CHECK: util.null : !util.variant
func.func @null_op() -> !iree_input.variant {
%0 = iree_input.null : !iree_input.variant
@@ -47,7 +47,7 @@
}
// -----
-// CHECK-LABEL: func @tensor_to_buffer_view
+// CHECK-LABEL: func.func @tensor_to_buffer_view
// CHECK: hal.tensor.export %arg0 : tensor<?x?x3xf32>{%arg1, %arg2} -> !hal.buffer_view
func.func @tensor_to_buffer_view(%arg0 : tensor<?x?x3xf32>, %arg1 : index, %arg2 : index) -> !iree_input.buffer_view {
%0 = iree_input.cast.tensor_to_buffer_view %arg0 : tensor<?x?x3xf32>{%arg1, %arg2} -> !iree_input.buffer_view
@@ -55,7 +55,7 @@
}
// -----
-// CHECK-LABEL: func @tensor_to_buffer_view_static
+// CHECK-LABEL: func.func @tensor_to_buffer_view_static
// CHECK: hal.tensor.export %arg0 : tensor<3xf32> -> !hal.buffer_view
func.func @tensor_to_buffer_view_static(%arg0 : tensor<3xf32>) -> !iree_input.buffer_view {
%0 = iree_input.cast.tensor_to_buffer_view %arg0 : tensor<3xf32> -> !iree_input.buffer_view
@@ -63,7 +63,7 @@
}
// -----
-// CHECK-LABEL: func @tensor_to_buffer_view_implicit_dims
+// CHECK-LABEL: func.func @tensor_to_buffer_view_implicit_dims
// CHECK: %[[ZERO:.*]] = arith.constant 0
// CHECK: %[[D0:.*]] = tensor.dim %arg0, %[[ZERO]]
// CHECK: %[[ONE:.*]] = arith.constant 1
@@ -75,7 +75,7 @@
}
// -----
-// CHECK-LABEL: func @buffer_view_to_tensor
+// CHECK-LABEL: func.func @buffer_view_to_tensor
// CHECK: hal.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3xf32>{%arg1, %arg2}
func.func @buffer_view_to_tensor(%arg0 : !iree_input.buffer_view, %arg1 : index, %arg2 : index) -> tensor<?x?x3xf32> {
%0 = iree_input.cast.buffer_view_to_tensor %arg0 : !iree_input.buffer_view -> tensor<?x?x3xf32>{%arg1, %arg2}
@@ -83,7 +83,7 @@
}
// -----
-// CHECK-LABEL: func @buffer_view_to_tensor_static
+// CHECK-LABEL: func.func @buffer_view_to_tensor_static
// CHECK: hal.tensor.import %arg0 : !hal.buffer_view -> tensor<3xf32>
func.func @buffer_view_to_tensor_static(%arg0 : !iree_input.buffer_view) -> tensor<3xf32> {
%0 = iree_input.cast.buffer_view_to_tensor %arg0 : !iree_input.buffer_view -> tensor<3xf32>
@@ -91,7 +91,7 @@
}
// -----
-// CHECK-LABEL: func @buffer_view_to_tensor_implicit_dims
+// CHECK-LABEL: func.func @buffer_view_to_tensor_implicit_dims
// CHECK: %[[D0:.*]] = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
// CHECK: %[[D1:.*]] = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
// CHECK: hal.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3xf32>{%[[D0]], %[[D1]]}
@@ -101,7 +101,7 @@
}
// -----
-// CHECK-LABEL: func @buffer_view_rank
+// CHECK-LABEL: func.func @buffer_view_rank
// CHECK: hal.buffer_view.rank<%arg0 : !hal.buffer_view> : index
func.func @buffer_view_rank(%arg0 : !iree_input.buffer_view) -> index {
%0 = iree_input.buffer_view.rank %arg0 : index
@@ -109,7 +109,7 @@
}
// -----
-// CHECK-LABEL: func @buffer_view_dim
+// CHECK-LABEL: func.func @buffer_view_dim
// CHECK: hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
func.func @buffer_view_dim(%arg0 : !iree_input.buffer_view) -> index {
%0 = iree_input.buffer_view.dim %arg0, 0 : index
@@ -117,7 +117,7 @@
}
// -----
-// CHECK-LABEL: func @list_create
+// CHECK-LABEL: func.func @list_create
// CHECK: util.list.create %arg0 : !util.list<?>
func.func @list_create(%arg0 : index) -> !iree_input.list<!iree_input.variant> {
%0 = iree_input.list.create %arg0 : !iree_input.list<!iree_input.variant>
@@ -125,7 +125,7 @@
}
// -----
-// CHECK-LABEL: func @list_size
+// CHECK-LABEL: func.func @list_size
// CHECK: util.list.size %arg0 : !util.list<?>
func.func @list_size(%arg0 : !iree_input.list<!iree_input.variant>) -> index {
%0 = iree_input.list.size %arg0 : !iree_input.list<!iree_input.variant>
@@ -133,7 +133,7 @@
}
// -----
-// CHECK-LABEL: func @list_resize
+// CHECK-LABEL: func.func @list_resize
// CHECK: util.list.resize %arg0, %arg1 : !util.list<?>
func.func @list_resize(%arg0 : !iree_input.list<!iree_input.variant>, %arg1 : index) {
iree_input.list.resize %arg0, %arg1 : !iree_input.list<!iree_input.variant>
@@ -141,7 +141,7 @@
}
// -----
-// CHECK-LABEL: func @list_get
+// CHECK-LABEL: func.func @list_get
// CHECK: util.list.get %arg0[%arg1] : !util.list<?>
func.func @list_get(%arg0 : !iree_input.list<!iree_input.variant>, %arg1 : index) -> !iree_input.variant {
%0 = iree_input.list.get %arg0[%arg1] : !iree_input.list<!iree_input.variant> -> !iree_input.variant
@@ -149,7 +149,7 @@
}
// -----
-// CHECK-LABEL: func @list_set
+// CHECK-LABEL: func.func @list_set
// CHECK: util.list.set %arg0[%arg1], %arg2 : !util.list<?>
func.func @list_set(%arg0 : !iree_input.list<!iree_input.variant>, %arg1 : index, %arg2 : !iree_input.variant) {
iree_input.list.set %arg0[%arg1], %arg2 : !iree_input.list<!iree_input.variant>, !iree_input.variant
@@ -157,7 +157,7 @@
}
// -----
-// CHECK-LABEL: func @tensor_reshape
+// CHECK-LABEL: func.func @tensor_reshape
// CHECK: flow.tensor.reshape %arg0 : tensor<?x?xf32>{%arg1, %arg2} -> tensor<?x?xf32>{%arg2, %arg1}
func.func @tensor_reshape(%arg0 : tensor<?x?xf32>, %arg1 : index, %arg2 : index) -> tensor<?x?xf32> {
%0 = iree_input.tensor.reshape %arg0 : tensor<?x?xf32>{%arg1, %arg2} -> tensor<?x?xf32>{%arg2, %arg1}
@@ -165,7 +165,7 @@
}
// -----
-// CHECK-LABEL: func @tensor_load
+// CHECK-LABEL: func.func @tensor_load
// CHECK: flow.tensor.load %arg0[%arg2, %arg3] : tensor<?x3xf32>{%arg1}
func.func @tensor_load(%arg0 : tensor<?x3xf32>, %arg1 : index, %arg2 : index, %arg3 : index) -> f32 {
%0 = iree_input.tensor.load %arg0[%arg2, %arg3] : tensor<?x3xf32>{%arg1}
@@ -173,7 +173,7 @@
}
// -----
-// CHECK-LABEL: func @tensor_store
+// CHECK-LABEL: func.func @tensor_store
// CHECK: flow.tensor.store %arg4, %arg0[%arg2, %arg3] : tensor<?x3xf32>{%arg1}
func.func @tensor_store(%arg0 : tensor<?x3xf32>, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : f32) {
iree_input.tensor.store %arg4, %arg0[%arg2, %arg3] : tensor<?x3xf32>{%arg1}
@@ -181,7 +181,7 @@
}
// -----
-// CHECK-LABEL: func @tensor_splat
+// CHECK-LABEL: func.func @tensor_splat
// CHECK: flow.tensor.splat %arg0 : tensor<?x?xf32>{%arg1, %arg2}
func.func @tensor_splat(%arg0 : f32, %arg1 : index, %arg2 : index) -> tensor<?x?xf32> {
%0 = iree_input.tensor.splat %arg0 : tensor<?x?xf32>{%arg1, %arg2}
@@ -189,7 +189,7 @@
}
// -----
-// CHECK-LABEL: func @tensor_clone
+// CHECK-LABEL: func.func @tensor_clone
// CHECK: flow.tensor.clone %arg0 : tensor<?x?xf32>{%arg1, %arg2}
func.func @tensor_clone(%arg0 : tensor<?x?xf32>, %arg1 : index, %arg2 : index) -> tensor<?x?xf32> {
%0 = iree_input.tensor.clone %arg0 : tensor<?x?xf32>{%arg1, %arg2}
@@ -197,7 +197,7 @@
}
// -----
-// CHECK-LABEL: func @tensor_slice
+// CHECK-LABEL: func.func @tensor_slice
// CHECK: flow.tensor.slice %arg0[%arg1 for %arg2] : tensor<?xf32>{%arg3} -> tensor<?xf32>{%arg4}
func.func @tensor_slice(%arg0 : tensor<?xf32>, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : index) -> tensor<?xf32> {
%0 = iree_input.tensor.slice %arg0[%arg1 for %arg2] : tensor<?xf32>{%arg3} -> tensor<?xf32>{%arg4}
@@ -205,7 +205,7 @@
}
// -----
-// CHECK-LABEL: func @tensor_update
+// CHECK-LABEL: func.func @tensor_update
// CHECK: flow.tensor.update %arg3, %arg0[%arg1] : tensor<?xf32>{%arg2} -> %arg0 as tensor<?xf32>{%arg4}
func.func @tensor_update(%arg0 : tensor<?xf32>, %arg1 : index, %arg2 : index, %arg3 : tensor<?xf32>, %arg4 : index) -> tensor<?xf32> {
%0 = iree_input.tensor.update %arg3, %arg0[%arg1] : tensor<?xf32>{%arg2} -> tensor<?xf32>{%arg4}
@@ -213,7 +213,7 @@
}
// -----
-// CHECK-LABEL: func @tensor_trace
+// CHECK-LABEL: func.func @tensor_trace
// CHECK: flow.tensor.trace {key = "FOOBAR"} %arg0, %arg1 : tensor<5xf32>, tensor<3xf32>
func.func @tensor_trace(%arg0 : tensor<5xf32>, %arg1 : tensor<3xf32>) {
iree_input.tensor.trace "FOOBAR" %arg0, %arg1 : tensor<5xf32>, tensor<3xf32>
@@ -235,11 +235,11 @@
// CHECK: util.global public @global5 : tensor<4xi32>
iree_input.global @global5 initializer(@initializer) : tensor<4xi32>
// CHECK-NEXT: util.initializer {
- // CHECK-NEXT: %[[VALUE:.+]] = call @initializer() : () -> tensor<4xi32>
+ // CHECK-NEXT: %[[VALUE:.+]] = func.call @initializer() : () -> tensor<4xi32>
// CHECK-NEXT: util.global.store %[[VALUE]], @global5 : tensor<4xi32>
// CHECK-NEXT: util.initializer.return
// CHECK-NEXT: }
- // CHECK: func private @initializer() -> tensor<4xi32>
+ // CHECK: func.func private @initializer() -> tensor<4xi32>
func.func private @initializer() -> tensor<4xi32>
}
diff --git a/compiler/src/iree/compiler/InputConversion/Common/test/linalg_quantized_matmul_to_matmul.mlir b/compiler/src/iree/compiler/InputConversion/Common/test/linalg_quantized_matmul_to_matmul.mlir
index 42738cc..d2a987a 100644
--- a/compiler/src/iree/compiler/InputConversion/Common/test/linalg_quantized_matmul_to_matmul.mlir
+++ b/compiler/src/iree/compiler/InputConversion/Common/test/linalg_quantized_matmul_to_matmul.mlir
@@ -10,7 +10,7 @@
%1 = linalg.quantized_matmul ins(%lhs, %rhs, %lhs_zp, %rhs_zp : tensor<?x?xi8>, tensor<?x?xi8>, i32, i32) outs(%acc : tensor<?x?xi32>) -> tensor<?x?xi32>
return %1 : tensor<?x?xi32>
}
-// CHECK-LABEL: func @quantized_matmul_both_zp_0_dynamic
+// CHECK-LABEL: func.func @quantized_matmul_both_zp_0_dynamic
// CHECK-SAME: %[[LHS:.+]]: tensor<?x?xi8>, %[[RHS:.+]]: tensor<?x?xi8>
// CHECK-SAME: %[[ACC:.+]]: tensor<?x?xi32>
// CHECK: %[[MATMUL:.+]] = linalg.matmul ins(%[[LHS]], %[[RHS]] : tensor<?x?xi8>, tensor<?x?xi8>) outs(%[[ACC]] : tensor<?x?xi32>)
@@ -22,7 +22,7 @@
%1 = linalg.quantized_matmul ins(%lhs, %rhs, %lhs_zp, %rhs_zp : tensor<?x?xi8>, tensor<?x?xi8>, i32, i32) outs(%acc : tensor<?x?xi32>) -> tensor<?x?xi32>
return %1 : tensor<?x?xi32>
}
-// CHECK-LABEL: func @quantized_matmul_lhs_zp_0_dynamic
+// CHECK-LABEL: func.func @quantized_matmul_lhs_zp_0_dynamic
// CHECK-SAME: %[[LHS:.+]]: tensor<?x?xi8>, %[[RHS:.+]]: tensor<?x?xi8>
// CHECK-SAME: %[[RHS_ZP:.+]]: i32
// CHECK-SAME: %[[ACC:.+]]: tensor<?x?xi32>
@@ -49,7 +49,7 @@
%1 = linalg.quantized_matmul ins(%lhs, %rhs, %lhs_zp, %rhs_zp : tensor<?x?xi8>, tensor<?x?xi8>, i32, i32) outs(%acc : tensor<?x?xi32>) -> tensor<?x?xi32>
return %1 : tensor<?x?xi32>
}
-// CHECK-LABEL: func @quantized_matmul_rhs_zp_0_dynamic
+// CHECK-LABEL: func.func @quantized_matmul_rhs_zp_0_dynamic
// CHECK-SAME: %[[LHS:.+]]: tensor<?x?xi8>, %[[RHS:.+]]: tensor<?x?xi8>
// CHECK-SAME: %[[LHS_ZP:.+]]: i32
// CHECK-SAME: %[[ACC:.+]]: tensor<?x?xi32>
@@ -75,7 +75,7 @@
%1 = linalg.quantized_matmul ins(%lhs, %rhs, %lhs_zp, %rhs_zp : tensor<?x?xi8>, tensor<?x?xi8>, i32, i32) outs(%acc : tensor<?x?xi32>) -> tensor<?x?xi32>
return %1 : tensor<?x?xi32>
}
-// CHECK-LABEL: func @quantized_matmul_neither_zp_0_dynamic
+// CHECK-LABEL: func.func @quantized_matmul_neither_zp_0_dynamic
// CHECK-SAME: %[[LHS:.+]]: tensor<?x?xi8>, %[[RHS:.+]]: tensor<?x?xi8>
// CHECK-SAME: %[[LHS_ZP:.+]]: i32, %[[RHS_ZP:.+]]: i32
// CHECK-SAME: %[[ACC:.+]]: tensor<?x?xi32>
@@ -114,7 +114,7 @@
%1 = linalg.quantized_matmul ins(%lhs, %rhs, %lhs_zp, %rhs_zp : tensor<3x4xi8>, tensor<4x5xi8>, i32, i32) outs(%acc : tensor<3x5xi32>) -> tensor<3x5xi32>
return %1 : tensor<3x5xi32>
}
-// CHECK-LABEL: func @quantized_matmul_neither_zp_0_3x4x5
+// CHECK-LABEL: func.func @quantized_matmul_neither_zp_0_3x4x5
// CHECK-SAME: %[[LHS:.+]]: tensor<3x4xi8>, %[[RHS:.+]]: tensor<4x5xi8>
// CHECK-SAME: %[[LHS_ZP:.+]]: i32, %[[RHS_ZP:.+]]: i32
// CHECK-SAME: %[[ACC:.+]]: tensor<3x5xi32>
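
Note: the shape of these expected lowerings follows directly from expanding the zero-point-adjusted product. With lhs zero point z_l, rhs zero point z_r, and reduction size K:

  sum_k (a[i,k] - z_l) * (b[k,j] - z_r)
    = sum_k a[i,k] * b[k,j]
      - z_r * (sum_k a[i,k])
      - z_l * (sum_k b[k,j])
      + K * z_l * z_r

so `both_zp_0` reduces to a plain linalg.matmul, the `lhs_zp_0` / `rhs_zp_0` cases each need a single row- or column-sum correction, and the general cases need both sums plus the constant K * z_l * z_r term.
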
diff --git a/compiler/src/iree/compiler/InputConversion/MHLO/test/broadcasting.mlir b/compiler/src/iree/compiler/InputConversion/MHLO/test/broadcasting.mlir
index ab8582a..b4b3721 100644
--- a/compiler/src/iree/compiler/InputConversion/MHLO/test/broadcasting.mlir
+++ b/compiler/src/iree/compiler/InputConversion/MHLO/test/broadcasting.mlir
@@ -84,7 +84,7 @@
}
// -----
-// CHECK-LABEL: func @selectv2
+// CHECK-LABEL: func.func @selectv2
func.func @selectv2(%arg0: tensor<2xi1>, %arg1: tensor<2xi32>, %arg2: tensor<2xi32>) -> tensor<2xi32> {
// All same type: should just short-circuit to one mhlo.select / one generic.
// CHECK: linalg.generic
@@ -97,7 +97,7 @@
// -----
// CHECK: #map0 = affine_map<(d0) -> ()>
// CHECK: #map1 = affine_map<(d0) -> (d0)>
-// CHECK-LABEL: func @selectv2_pred_scalar
+// CHECK-LABEL: func.func @selectv2_pred_scalar
func.func @selectv2_pred_scalar(%arg0: tensor<i1>, %arg1: tensor<2xi32>, %arg2: tensor<2xi32>) -> tensor<2xi32> {
// CHECK: %[[INIT_0:.*]] = linalg.init_tensor [2] : tensor<2xi1>
// CHECK: %[[BCAST_PRED:.*]] = linalg.generic {indexing_maps = [#map0, #map1], iterator_types = ["parallel"]} ins(%arg0 : tensor<i1>) outs(%[[INIT_0]] : tensor<2xi1>)
@@ -112,7 +112,7 @@
// CHECK: #map0 = affine_map<(d0, d1, d2) -> ()>
// CHECK: #map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
// CHECK: #map2 = affine_map<(d0, d1, d2) -> (d1, 0)>
-// CHECK-LABEL: func @selectv2_broadcast_then
+// CHECK-LABEL: func.func @selectv2_broadcast_then
func.func @selectv2_broadcast_then(%arg0: tensor<i1>, %arg1: tensor<8x1xi32>, %arg2: tensor<2x8x8xi32>) -> tensor<2x8x8xi32> {
// CHECK: %[[BCAST_PRED:.*]] = linalg.generic {indexing_maps = [#map0, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg0 : tensor<i1>)
// CHECK: %[[BCAST_THEN:.*]] = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8x1xi32>)
@@ -127,7 +127,7 @@
// CHECK: #map0 = affine_map<(d0, d1, d2) -> ()>
// CHECK: #map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
// CHECK: #map2 = affine_map<(d0, d1, d2) -> (d1, 0)>
-// CHECK-LABEL: func @selectv2_broadcast_else
+// CHECK-LABEL: func.func @selectv2_broadcast_else
func.func @selectv2_broadcast_else(%arg0: tensor<i1>, %arg1: tensor<2x8x8xi32>, %arg2: tensor<8x1xi32>) -> tensor<2x8x8xi32> {
// CHECK: %[[BCAST_PRED:.*]] = linalg.generic {indexing_maps = [#map0, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg0 : tensor<i1>)
// CHECK: %[[BCAST_ELSE:.*]] = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg2 : tensor<8x1xi32>)
@@ -141,7 +141,7 @@
// -----
// CHECK: #map0 = affine_map<(d0, d1, d2) -> (0)>
// CHECK: #map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-// CHECK-LABEL: func @selectv2_broadcast_pred
+// CHECK-LABEL: func.func @selectv2_broadcast_pred
func.func @selectv2_broadcast_pred(%arg0: tensor<1xi1>, %arg1: tensor<2x8x8xi32>, %arg2: tensor<2x8x8xi32>) -> tensor<2x8x8xi32> {
// CHECK: %[[BCAST_PRED:.*]] = linalg.generic {indexing_maps = [#map0, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg0 : tensor<1xi1>)
// CHECK: linalg.generic
@@ -156,7 +156,7 @@
// CHECK: #map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
// CHECK: #map2 = affine_map<(d0, d1, d2) -> (0, d1, 0)>
// CHECK: #map3 = affine_map<(d0, d1, d2) -> (0, 0, d2)>
-// CHECK-LABEL: func @selectv2_broadcast_all
+// CHECK-LABEL: func.func @selectv2_broadcast_all
func.func @selectv2_broadcast_all(%arg0: tensor<8x1x1xi1>, %arg1: tensor<1x8x1xi32>, %arg2: tensor<1x1x8xi32>) -> tensor<8x8x8xi32> {
// CHECK: %[[BCAST_PRED:.*]] = linalg.generic {indexing_maps = [#map0, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg0 : tensor<8x1x1xi1>)
// CHECK: %[[BCAST_THEN:.*]] = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<1x8x1xi32>)
@@ -172,7 +172,7 @@
// CHECK: #map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
// CHECK: #map2 = affine_map<(d0, d1, d2) -> (0, d1, 0)>
// CHECK: #map3 = affine_map<(d0, d1, d2) -> (0, 0, d2)>
-// CHECK-LABEL: func @selectv2_broadcast_dyn_pred
+// CHECK-LABEL: func.func @selectv2_broadcast_dyn_pred
func.func @selectv2_broadcast_dyn_pred(%arg0: tensor<?x1x1xi1>, %arg1: tensor<1x8x1xi32>, %arg2: tensor<1x1x8xi32>) -> tensor<?x8x8xi32> {
// CHECK: %[[C0_0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM_PRED_0:.*]] = tensor.dim %arg0, %[[C0_0]]
@@ -199,7 +199,7 @@
}
// -----
-// CHECK-LABEL: func @selectv2_broadcast_dyn_then
+// CHECK-LABEL: func.func @selectv2_broadcast_dyn_then
func.func @selectv2_broadcast_dyn_then(%arg0: tensor<8x1x1xi1>, %arg1: tensor<1x?x1xi32>, %arg2: tensor<1x1x8xi32>) -> tensor<8x?x8xi32> {
// CHECK: %[[C1_0:.*]] = arith.constant 1 : index
// CHECK: %[[DIM_THEN_1:.*]] = tensor.dim %arg1, %[[C1_0]]
@@ -226,7 +226,7 @@
}
// -----
-// CHECK-LABEL: func @selectv2_broadcast_dyn_else
+// CHECK-LABEL: func.func @selectv2_broadcast_dyn_else
func.func @selectv2_broadcast_dyn_else(%arg0: tensor<8x1x1xi1>, %arg1: tensor<1x8x1xi32>, %arg2: tensor<1x1x?xi32>) -> tensor<8x8x?xi32> {
// CHECK: %[[C2_0:.*]] = arith.constant 2 : index
// CHECK: %[[DIM_ELSE_2:.*]] = tensor.dim %arg2, %[[C2_0]]
@@ -254,7 +254,7 @@
}
// -----
-// CHECK-LABEL: func @selectv2_broadcast_dyn_all
+// CHECK-LABEL: func.func @selectv2_broadcast_dyn_all
func.func @selectv2_broadcast_dyn_all(%arg0: tensor<?x1x1xi1>, %arg1: tensor<?x8x1xi32>, %arg2: tensor<?x1x?xi32>) -> tensor<?x8x?xi32> {
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[PRED_D0:.*]] = tensor.dim %arg0, %[[C0]] : tensor<?x1x1xi1>
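
Note: in these selectv2 lowerings broadcasting is carried entirely by the linalg.generic indexing maps: a unit dimension of an operand is read with its index pinned to 0, so a map like `(d0, d1, d2) -> (0, d1, 0)` stretches a tensor<1x8x1> operand across a tensor<8x8x8> iteration space. A self-contained sketch of the pattern (hypothetical example, not taken from the tests):

#bcast = affine_map<(d0, d1, d2) -> (0, d1, 0)>
#ident = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
func.func @broadcast(%arg0: tensor<1x8x1xi32>) -> tensor<8x8x8xi32> {
  %init = linalg.init_tensor [8, 8, 8] : tensor<8x8x8xi32>
  %0 = linalg.generic {indexing_maps = [#bcast, #ident],
                       iterator_types = ["parallel", "parallel", "parallel"]}
       ins(%arg0 : tensor<1x8x1xi32>) outs(%init : tensor<8x8x8xi32>) {
  ^bb0(%in: i32, %out: i32):
    linalg.yield %in : i32
  } -> tensor<8x8x8xi32>
  return %0 : tensor<8x8x8xi32>
}
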
diff --git a/compiler/src/iree/compiler/InputConversion/MHLO/test/convert_mhlo_to_linalg_ext.mlir b/compiler/src/iree/compiler/InputConversion/MHLO/test/convert_mhlo_to_linalg_ext.mlir
index 11cf5aa..aef4a81 100644
--- a/compiler/src/iree/compiler/InputConversion/MHLO/test/convert_mhlo_to_linalg_ext.mlir
+++ b/compiler/src/iree/compiler/InputConversion/MHLO/test/convert_mhlo_to_linalg_ext.mlir
@@ -10,7 +10,7 @@
}) {dimension = 0 : i64, is_stable = false} : (tensor<128xi32>) -> (tensor<128xi32>)
return %0 : tensor<128xi32>
}
-// CHECK-LABEL: func @sort_1d(
+// CHECK-LABEL: func.func @sort_1d(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
// CHECK-SAME: )
// CHECK: %[[SORT:.+]] = iree_linalg_ext.sort
@@ -31,7 +31,7 @@
}) {dimension = 0 : i64, is_stable = false} : (tensor<128xui32>) -> (tensor<128xui32>)
return %0 : tensor<128xui32>
}
-// CHECK-LABEL: func @sort_1d_ui(
+// CHECK-LABEL: func.func @sort_1d_ui(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
// CHECK-SAME: )
// CHECK: %[[CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG0]] : tensor<128xui32> to tensor<128xi32>
@@ -56,7 +56,7 @@
return %1 : tensor<1x10xi32>
}
-// CHECK-LABEL: func @sort_cst_capture(
+// CHECK-LABEL: func.func @sort_cst_capture(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
// CHECK-SAME: )
// CHECK: %[[SCALAR:.+]] = arith.constant 0 : i32
@@ -78,7 +78,7 @@
return %1 : tensor<1x10xi32>
}
-// CHECK-LABEL: func @sort_argument_capture(
+// CHECK-LABEL: func.func @sort_argument_capture(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]
// CHECK-SAME: )
@@ -100,7 +100,7 @@
}) {dimension = 0 : i64, is_stable = false} : (tensor<16x32xi32>) -> (tensor<16x32xi32>)
return %0 : tensor<16x32xi32>
}
-// CHECK-LABEL: func @sort_2d(
+// CHECK-LABEL: func.func @sort_2d(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
// CHECK-SAME: )
// CHECK: %[[SORT:.+]] = iree_linalg_ext.sort
@@ -124,7 +124,7 @@
return %1 : tensor<1x5xf32>
}
-// CHECK-LABEL: func @sort_unsigned(
+// CHECK-LABEL: func.func @sort_unsigned(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
// CHECK-SAME: )
// CHECK: %[[SORT:.+]] = iree_linalg_ext.sort
@@ -150,7 +150,7 @@
return %1 : tensor<1x5xf32>
}
-// CHECK-LABEL: func @sort_unsigned_cst_capture(
+// CHECK-LABEL: func.func @sort_unsigned_cst_capture(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
// CHECK-SAME: )
// CHECK: %[[UI32:.+]] = mhlo.constant dense<2> : tensor<ui32>
@@ -182,7 +182,7 @@
return %1 : tensor<1x5xf32>
}
-// CHECK-LABEL: func @sort_complex(
+// CHECK-LABEL: func.func @sort_complex(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]
// CHECK-SAME: )
@@ -205,7 +205,7 @@
}) {dimension = 0 : i64, is_stable = false} : (tensor<128xi32>, tensor<128xi32>) -> (tensor<128xi32>, tensor<128xi32>)
return %0#0 : tensor<128xi32>
}
-// CHECK-LABEL: func @topk
+// CHECK-LABEL: func.func @topk
// CHECK: %[[ARG0:[a-zA-Z0-9]+]]
// CHECK: %[[ARG1:[a-zA-Z0-9]+]]
// CHECK: %[[SORT:.+]]:2 = iree_linalg_ext.sort
@@ -234,7 +234,7 @@
} : (tensor<8xi32>, tensor<4x1xi32>, tensor<4xi32>) -> tensor<8xi32>
return %0 : tensor<8xi32>
}
-// CHECK-LABEL: func @scatter_update_scalar_1D
+// CHECK-LABEL: func.func @scatter_update_scalar_1D
// CHECK: %[[ARG0:[a-zA-Z0-9]+]]
// CHECK: %[[ARG1:[a-zA-Z0-9]+]]
// CHECK: %[[ARG2:[a-zA-Z0-9]+]]
@@ -263,7 +263,7 @@
} : (tensor<4x3xi32>, tensor<3x2xi32>, tensor<3xi32>) -> tensor<4x3xi32>
return %0 : tensor<4x3xi32>
}
-// CHECK-LABEL: func @scatter_update_scalar_2D
+// CHECK-LABEL: func.func @scatter_update_scalar_2D
// CHECK: %[[ARG0:[a-zA-Z0-9]+]]
// CHECK: %[[ARG1:[a-zA-Z0-9]+]]
// CHECK: %[[ARG2:[a-zA-Z0-9]+]]
@@ -294,7 +294,7 @@
} : (tensor<6x3xi32>, tensor<2x1xi32>, tensor<2x3xi32>) -> tensor<6x3xi32>
return %0 : tensor<6x3xi32>
}
-// CHECK-LABEL: func @scatter_update_slice_2D
+// CHECK-LABEL: func.func @scatter_update_slice_2D
// CHECK: %[[ARG0:[a-zA-Z0-9]+]]
// CHECK: %[[ARG1:[a-zA-Z0-9]+]]
// CHECK: %[[ARG2:[a-zA-Z0-9]+]]
@@ -326,7 +326,7 @@
} : (tensor<6x3xi32>, tensor<2x1xi32>, tensor<2x3xi32>) -> tensor<6x3xi32>
return %0 : tensor<6x3xi32>
}
-// CHECK-LABEL: func @scatter_add_slice_2D
+// CHECK-LABEL: func.func @scatter_add_slice_2D
// CHECK: %[[ARG0:[a-zA-Z0-9]+]]
// CHECK: %[[ARG1:[a-zA-Z0-9]+]]
// CHECK: %[[ARG2:[a-zA-Z0-9]+]]
@@ -359,7 +359,7 @@
} : (tensor<8xi32>, tensor<3x4x1xi32>, tensor<3x4xi32>) -> tensor<8xi32>
return %0 : tensor<8xi32>
}
-// CHECK-LABEL: func @scatter_update_batch_scalar_1D
+// CHECK-LABEL: func.func @scatter_update_batch_scalar_1D
// CHECK: %[[ARG0:[a-zA-Z0-9]+]]
// CHECK: %[[ARG1:[a-zA-Z0-9]+]]
// CHECK: %[[ARG2:[a-zA-Z0-9]+]]
@@ -393,7 +393,7 @@
} : (tensor<1x24x512xi32>, tensor<?x3x2xi32>, tensor<?x3x512xi32>) -> tensor<1x24x512xi32>
return %0 : tensor<1x24x512xi32>
}
-// CHECK-LABEL: func @scatter_update_batch_slice_3D_dynamic
+// CHECK-LABEL: func.func @scatter_update_batch_slice_3D_dynamic
// CHECK: %[[ARG0:[a-zA-Z0-9]+]]
// CHECK: %[[ARG1:[a-zA-Z0-9]+]]
// CHECK: %[[ARG2:[a-zA-Z0-9]+]]
@@ -420,7 +420,7 @@
return %1, %2 : tensor<5xf32>, tensor<5xf32>
}
// CHECK-DAG: #[[MAP:.+]] = affine_map<(d0) -> (d0)>
-// CHECK: func @rfft_1d
+// CHECK: func.func @rfft_1d
// CHECK-SAME: %[[REAL:[a-zA-Z0-9]+]]
// CHECK-DAG: %[[INDICES:.+]] = arith.constant dense<[0, 4, 2, 6, 1, 5, 3, 7]> : tensor<8xi32>
// CHECK-DAG: %[[INIT_TENSOR:.+]] = linalg.init_tensor [8] : tensor<8xf32>
@@ -468,7 +468,7 @@
}
// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1) -> (d1)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK: func @rfft_2d
+// CHECK: func.func @rfft_2d
// CHECK-SAME: %[[REAL:[a-zA-Z0-9]+]]
// CHECK-DAG: %[[INDICES:.+]] = arith.constant dense<[0, 4, 2, 6, 1, 5, 3, 7]> : tensor<8xi32>
// CHECK-DAG: %[[INIT_TENSOR:.+]] = linalg.init_tensor [4, 8] : tensor<4x8xf32>
@@ -513,7 +513,7 @@
} : (tensor<3x5xi32>) -> tensor<3x5xi32>
return %0 : tensor<3x5xi32>
}
-// CHECK-LABEL: func @reverse_dim1
+// CHECK-LABEL: func.func @reverse_dim1
// CHECK-SAME: %[[IN:[a-zA-Z0-9]+]]
// CHECK: %[[INIT:.+]] = linalg.init_tensor [3, 5] : tensor<3x5xi32>
// CHECK: %[[REV:.+]] = iree_linalg_ext.reverse
@@ -530,7 +530,7 @@
} : (tensor<?x?xi32>) -> tensor<?x?xi32>
return %0 : tensor<?x?xi32>
}
-// CHECK-LABEL: func @reverse_multi_dim
+// CHECK-LABEL: func.func @reverse_multi_dim
// CHECK-SAME: %[[IN:[a-zA-Z0-9]+]]
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
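
Note: the `dense<[0, 4, 2, 6, 1, 5, 3, 7]>` constant that both rfft lowerings gather through is the bit-reversal permutation for length 8 (reverse each 3-bit index: 1 = 0b001 -> 0b100 = 4, 3 = 0b011 -> 0b110 = 6), the standard input reordering for an in-place radix-2 FFT; the iree_linalg_ext.fft butterfly stages then operate on the reordered data.
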
diff --git a/compiler/src/iree/compiler/InputConversion/MHLO/test/convert_structural_types.mlir b/compiler/src/iree/compiler/InputConversion/MHLO/test/convert_structural_types.mlir
index 3aa23ac..964b5a1 100644
--- a/compiler/src/iree/compiler/InputConversion/MHLO/test/convert_structural_types.mlir
+++ b/compiler/src/iree/compiler/InputConversion/MHLO/test/convert_structural_types.mlir
@@ -2,7 +2,7 @@
// CHECK-LABEL: @func_cfg_conversion
module @func_cfg_conversion {
- // CHECK: func @caller(%arg0: tensor<2xi32>, %arg1: i1) -> tensor<2xi32>
+ // CHECK: func.func @caller(%arg0: tensor<2xi32>, %arg1: i1) -> tensor<2xi32>
func.func @caller(%arg0: tensor<2xui32>, %arg1 : i1) -> tensor<2xui32> {
// CHECK: %[[RESULT:.*]] = call @callee(%arg0, %arg1) : (tensor<2xi32>, i1) -> tensor<2xi32>
%1 = call @callee(%arg0, %arg1) : (tensor<2xui32>, i1) -> tensor<2xui32>
@@ -10,7 +10,7 @@
return %1 : tensor<2xui32>
}
- // CHECK: func @callee(%arg0: tensor<2xi32>, %arg1: i1) -> tensor<2xi32>
+ // CHECK: func.func @callee(%arg0: tensor<2xi32>, %arg1: i1) -> tensor<2xi32>
func.func @callee(%arg0: tensor<2xui32>, %arg1: i1) -> tensor<2xui32> {
// CHECK: cf.cond_br %arg1, ^bb1(%arg0 : tensor<2xi32>), ^bb2(%arg0 : tensor<2xi32>)
cf.cond_br %arg1, ^bb1(%arg0 : tensor<2xui32>), ^bb2(%arg0 : tensor<2xui32>)
diff --git a/compiler/src/iree/compiler/InputConversion/MHLO/test/dynamic_shape.mlir b/compiler/src/iree/compiler/InputConversion/MHLO/test/dynamic_shape.mlir
index ebb02b9..2e05d0e 100644
--- a/compiler/src/iree/compiler/InputConversion/MHLO/test/dynamic_shape.mlir
+++ b/compiler/src/iree/compiler/InputConversion/MHLO/test/dynamic_shape.mlir
@@ -7,7 +7,7 @@
}
// CHECK: #[[MAP0:.+]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK: func @dynamic_shape
+// CHECK: func.func @dynamic_shape
// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?xf32>
// CHECK: %[[SHAPE:.+]] = shape.shape_of %[[ARG0]]
// CHECK: %[[C0:.+]] = arith.constant 0 : index
diff --git a/compiler/src/iree/compiler/InputConversion/MHLO/test/fft.mlir b/compiler/src/iree/compiler/InputConversion/MHLO/test/fft.mlir
index 39722f7..f39475b 100644
--- a/compiler/src/iree/compiler/InputConversion/MHLO/test/fft.mlir
+++ b/compiler/src/iree/compiler/InputConversion/MHLO/test/fft.mlir
@@ -8,7 +8,7 @@
%2 = "mhlo.imag"(%0) : (tensor<17xcomplex<f32>>) -> tensor<17xf32>
return %1, %2 : tensor<17xf32>, tensor<17xf32>
}
-// CHECK: func @rfft_1d
+// CHECK: func.func @rfft_1d
// CHECK-SAME: %[[Arg0:[a-zA-Z0-9_]*]]
// CHECK-DAG: %[[RealMatrix:.+]] = arith.constant dense<"0x0000803F{{.*}}"> : tensor<32x17xf32>
// CHECK-DAG: %[[ImagMatrix:.+]] = arith.constant dense<"0x00000080{{.*}}"> : tensor<32x17xf32>
@@ -39,7 +39,7 @@
%2 = "mhlo.imag"(%0) : (tensor<1x17xcomplex<f32>>) -> tensor<1x17xf32>
return %1, %2 : tensor<1x17xf32>, tensor<1x17xf32>
}
-// CHECK: func @rfft_2d
+// CHECK: func.func @rfft_2d
// CHECK-SAME: %[[Arg0:[a-zA-Z0-9_]*]]
// CHECK-DAG: %[[RealMatrix:.+]] = arith.constant dense<"0x0000803F{{.*}}"> : tensor<32x17xf32>
// CHECK-DAG: %[[ImagMatrix:.+]] = arith.constant dense<"0x00000080{{.*}}"> : tensor<32x17xf32>
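
Note: the `0x0000803F{{.*}}` and `0x00000080{{.*}}` patterns match raw little-endian f32 payloads: 0x0000803F decodes to 1.0f (bit pattern 0x3F800000) and 0x00000080 to -0.0f (0x80000000), consistent with this lowering materializing dense DFT coefficient matrices whose leading entries are cos(0) = 1 and -sin(0) = -0.
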
diff --git a/compiler/src/iree/compiler/InputConversion/MHLO/test/flatten_tuples_in_cfg.mlir b/compiler/src/iree/compiler/InputConversion/MHLO/test/flatten_tuples_in_cfg.mlir
index e4e7218..107ea14 100644
--- a/compiler/src/iree/compiler/InputConversion/MHLO/test/flatten_tuples_in_cfg.mlir
+++ b/compiler/src/iree/compiler/InputConversion/MHLO/test/flatten_tuples_in_cfg.mlir
@@ -6,7 +6,7 @@
// CHECK-LABEL: @flatten_func
module @flatten_func {
- // CHECK: func @caller(%arg0: i1, %arg1: tensor<f32>) -> tensor<f32>
+ // CHECK: func.func @caller(%arg0: i1, %arg1: tensor<f32>) -> tensor<f32>
func.func @caller(%arg0 : i1, %arg1: tensor<f32>) -> tensor<f32> {
// CHECK: %[[RESULT:.*]]:2 = call @callee(%arg0, %arg1, %arg1, %arg1) : (i1, tensor<f32>, tensor<f32>, tensor<f32>) -> (tensor<f32>, tensor<f32>)
%0 = "mhlo.tuple"(%arg1, %arg1) : (tensor<f32>, tensor<f32>) -> tuple<tensor<f32>, tensor<f32>>
@@ -17,7 +17,7 @@
return %3 : tensor<f32>
}
- // CHECK: func private @callee(%arg0: i1, %arg1: tensor<f32>, %arg2: tensor<f32>, %arg3: tensor<f32>) -> (tensor<f32>, tensor<f32>)
+ // CHECK: func.func private @callee(%arg0: i1, %arg1: tensor<f32>, %arg2: tensor<f32>, %arg3: tensor<f32>) -> (tensor<f32>, tensor<f32>)
func.func private @callee(%arg0: i1, %arg1: tuple<tensor<f32>, tuple<tensor<f32>, tensor<f32>>>) -> tuple<tensor<f32>, tensor<f32>> {
// CHECK-DAG: %[[RESULT0:.*]] = arith.select %arg0, %arg2, %arg1 : tensor<f32>
// CHECK-DAG: %[[RESULT1:.*]] = arith.select %arg0, %arg3, %arg1 : tensor<f32>
diff --git a/compiler/src/iree/compiler/InputConversion/MHLO/test/mhlo_to_linalg.mlir b/compiler/src/iree/compiler/InputConversion/MHLO/test/mhlo_to_linalg.mlir
index 4e71972..1841d00 100644
--- a/compiler/src/iree/compiler/InputConversion/MHLO/test/mhlo_to_linalg.mlir
+++ b/compiler/src/iree/compiler/InputConversion/MHLO/test/mhlo_to_linalg.mlir
@@ -5,7 +5,7 @@
%0 = "mhlo.concatenate"(%arg0, %cst, %arg1) {dimension = 1} : (tensor<2x2xi32>, tensor<2x3xi32>, tensor<2x4xi32>) -> tensor<2x9xi32>
return %0 : tensor<2x9xi32>
}
-// CHECK: func @concatenate
+// CHECK: func.func @concatenate
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]]
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]]
// CHECK: %[[CST:.+]] = arith.constant dense<514> : tensor<2x3xi32>
diff --git a/compiler/src/iree/compiler/InputConversion/MHLO/test/mhlo_to_mhlo_preprocessing.mlir b/compiler/src/iree/compiler/InputConversion/MHLO/test/mhlo_to_mhlo_preprocessing.mlir
index 69f24d4..52b5c80 100644
--- a/compiler/src/iree/compiler/InputConversion/MHLO/test/mhlo_to_mhlo_preprocessing.mlir
+++ b/compiler/src/iree/compiler/InputConversion/MHLO/test/mhlo_to_mhlo_preprocessing.mlir
@@ -242,7 +242,7 @@
%0 = "mhlo.rng_normal"(%arg0, %arg1, %shape) : (tensor<f32>, tensor<f32>, tensor<2xi64>) -> tensor<3x5xf32>
return %0 : tensor<3x5xf32>
}
-// CHECK-LABEL: func @rng_normal
+// CHECK-LABEL: func.func @rng_normal
// CHECK: %[[ARG0:[a-zA-Z0-9]+]]
// CHECK: %[[ARG1:[a-zA-Z0-9]+]]
// CHECK-DAG: %{{.*}} = mhlo.constant dense<{{.*}}> : tensor<8xf32>
@@ -280,7 +280,7 @@
return %0 : tensor<5x5xi32>
}
-// CHECK-LABEL: func @scatter_rank0
+// CHECK-LABEL: func.func @scatter_rank0
// CHECK-DAG: %[[RE_I:.+]] = "mhlo.reshape"(%arg1) : (tensor<2xi32>) -> tensor<1x2xi32>
// CHECK-DAG: %[[RE_U:.+]] = "mhlo.reshape"(%arg2) : (tensor<i32>) -> tensor<1xi32>
// CHECK: %[[SCATTER:.+]] = "mhlo.scatter"(%arg0, %[[RE_I]], %[[RE_U]])
diff --git a/compiler/src/iree/compiler/InputConversion/MHLO/test/mhlo_to_mhlo_preprocessing_canoncalize_dot_general.mlir b/compiler/src/iree/compiler/InputConversion/MHLO/test/mhlo_to_mhlo_preprocessing_canoncalize_dot_general.mlir
index d09cea3..8948289 100644
--- a/compiler/src/iree/compiler/InputConversion/MHLO/test/mhlo_to_mhlo_preprocessing_canoncalize_dot_general.mlir
+++ b/compiler/src/iree/compiler/InputConversion/MHLO/test/mhlo_to_mhlo_preprocessing_canoncalize_dot_general.mlir
@@ -123,7 +123,7 @@
return %0 : tensor<1x8x1x512xf32>
}
-// CHECK-LABEL: func @dot_general_4d_transposed
+// CHECK-LABEL: func.func @dot_general_4d_transposed
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]
// CHECK: %[[ARG0_TRANSPOSED:.+]] = "mhlo.transpose"(%[[ARG0]])
@@ -149,7 +149,7 @@
return %0 : tensor<4x64x155x309xf32>
}
-// CHECK-LABEL: func @dot_general_1d_batching_1d_contracting
+// CHECK-LABEL: func.func @dot_general_1d_batching_1d_contracting
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]
// CHECK: %[[ARG0_RESHAPED_TR:.+]] = "mhlo.transpose"(%[[ARG0]])
diff --git a/compiler/src/iree/compiler/InputConversion/TMTensor/test/convert_tm_tensor_to_linalg_ext.mlir b/compiler/src/iree/compiler/InputConversion/TMTensor/test/convert_tm_tensor_to_linalg_ext.mlir
index 784526a..3692865 100644
--- a/compiler/src/iree/compiler/InputConversion/TMTensor/test/convert_tm_tensor_to_linalg_ext.mlir
+++ b/compiler/src/iree/compiler/InputConversion/TMTensor/test/convert_tm_tensor_to_linalg_ext.mlir
@@ -10,7 +10,7 @@
} -> tensor<128xi32>, tensor<i32>
return %ret_out, %ret_acc: tensor<128xi32>, tensor<i32>
}
-// CHECK-LABEL: func @scan(
+// CHECK-LABEL: func.func @scan(
// CHECK-SAME: %[[IN:.*]]: tensor<128xi32>, %[[OUT:.*]]: tensor<128xi32>,
// CHECK-SAME: %[[ACC:.*]]: tensor<i32>) -> (tensor<128xi32>, tensor<i32>) {
// CHECK: %[[SCAN:.*]]:2 = iree_linalg_ext.scan
@@ -36,7 +36,7 @@
} -> tensor<8xi32>
return %0 : tensor<8xi32>
}
-// CHECK-LABEL: func @scatter_update(
+// CHECK-LABEL: func.func @scatter_update(
// CHECK-SAME: %[[ORIGINAL:.*]]: tensor<8xi32>,
// CHECK-SAME: %[[INDICES:.*]]: tensor<3x1xi32>,
// CHECK-SAME: %[[UPDATES:.*]]: tensor<3xi32>) -> tensor<8xi32> {
@@ -62,7 +62,7 @@
} -> tensor<8xi32>
return %0 : tensor<8xi32>
}
-// CHECK-LABEL: func @scatter_add(
+// CHECK-LABEL: func.func @scatter_add(
// CHECK-SAME: %[[ORIGINAL:.*]]: tensor<8xi32>,
// CHECK-SAME: %[[INDICES:.*]]: tensor<3x1xi32>,
// CHECK-SAME: %[[UPDATES:.*]]: tensor<3xi32>) -> tensor<8xi32> {
diff --git a/integrations/tensorflow/WORKSPACE b/integrations/tensorflow/WORKSPACE
index 3531c01..c0f8740 100644
--- a/integrations/tensorflow/WORKSPACE
+++ b/integrations/tensorflow/WORKSPACE
@@ -7,7 +7,7 @@
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
-TENSORFLOW_COMMIT = "d913bc9cb995a21723719055babc0036be4c9227"
+TENSORFLOW_COMMIT = "cc0f90465bbc60243fb00f7d96b931f2e33000bd"
git_repository(
name = "org_tensorflow",
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/bufferize-in-parallel.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/bufferize-in-parallel.mlir
index d87e752..149c4eb 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/bufferize-in-parallel.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/bufferize-in-parallel.mlir
@@ -4,7 +4,7 @@
// CHECK-SAME: %[[idx:.*]]: index, %[[idx2:.*]]: index,
// CHECK-SAME: %[[arg1:.*]]: memref<?xf32, #{{.*}}>,
// CHECK-SAME: %[[arg2:.*]]: memref<?xf32, #{{.*}}>
-func @parallel_insert_slice_no_conflict(
+func.func @parallel_insert_slice_no_conflict(
%idx: index, %idx2: index,
%arg1: tensor<?xf32> {bufferization.writable = true},
%arg2: tensor<?xf32> {bufferization.writable = true}) -> (tensor<?xf32>, f32)
@@ -43,7 +43,7 @@
// CHECK-SAME: %[[idx:.*]]: index, %[[idx2:.*]]: index,
// CHECK-SAME: %[[arg1:.*]]: memref<?xf32, #{{.*}}>,
// CHECK-SAME: %[[arg2:.*]]: memref<?xf32, #{{.*}}>
-func @parallel_insert_slice_with_conflict(
+func.func @parallel_insert_slice_with_conflict(
%idx: index, %idx2: index,
%arg1: tensor<?xf32> {bufferization.writable = true},
%arg2: tensor<?xf32> {bufferization.writable = true}) -> (f32, f32)
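
Note: in these two tests the `bufferization.writable = true` attributes mark the tensor arguments as buffers that bufferization may update in place; going by the test names, the `no_conflict` variant should write the parallel insert directly into the argument's memref, while the `with_conflict` variant has a conflicting read of the original value and is expected to allocate and copy first.
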
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/canonicalize.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/canonicalize.mlir
index 5a6d816..faca068 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/canonicalize.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/canonicalize.mlir
@@ -1,7 +1,7 @@
// RUN: iree-dialects-opt --canonicalize --split-input-file %s | FileCheck %s
// CHECK-LABEL: func @tensor.cast(
-func @tensor.cast(%arg0: tensor<3x5xi32>) -> tensor<3x5xi32> {
+func.func @tensor.cast(%arg0: tensor<3x5xi32>) -> tensor<3x5xi32> {
%init = linalg.init_tensor [3, 5] : tensor<3x5xi32>
%casted_arg0 = tensor.cast %arg0 : tensor<3x5xi32> to tensor<?x?xi32>
@@ -23,7 +23,7 @@
// CHECK-LABEL: func @canonicalize_insert_slice_indices(
// CHECK-SAME: %[[arg0:.*]]: tensor<?x?xf32>, %[[arg1:.*]]: tensor<?x?xf32>,
// CHECK-SAME: %[[idx:.*]]: index
-func @canonicalize_insert_slice_indices(
+func.func @canonicalize_insert_slice_indices(
%arg0 : tensor<?x?xf32>, %arg1: tensor<?x?xf32>,
%idx : index) -> tensor<?x?xf32>
{
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/convert_to_loops.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/convert_to_loops.mlir
index 3be02b0..06b94e7 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/convert_to_loops.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/convert_to_loops.mlir
@@ -1,6 +1,6 @@
// RUN: iree-dialects-opt --split-input-file --iree-linalg-ext-to-loops %s | FileCheck %s
-func @sort_1d(%arg0: memref<128xi32>) {
+func.func @sort_1d(%arg0: memref<128xi32>) {
iree_linalg_ext.sort dimension(0)
outs(%arg0 : memref<128xi32>) {
^bb0(%arg2: i32, %arg3: i32): // no predecessors
@@ -30,7 +30,7 @@
// -----
-func @sort_2d(%arg0: memref<16x32xi32>) {
+func.func @sort_2d(%arg0: memref<16x32xi32>) {
iree_linalg_ext.sort dimension(0)
outs(%arg0 : memref<16x32xi32>) {
^bb0(%arg2: i32, %arg3: i32): // no predecessors
@@ -62,7 +62,7 @@
// -----
-func @sort_multi(%arg0: memref<128xf32>, %arg1: memref<128xi32>) {
+func.func @sort_multi(%arg0: memref<128xf32>, %arg1: memref<128xi32>) {
iree_linalg_ext.sort
dimension(0)
outs(%arg0, %arg1 : memref<128xf32>, memref<128xi32>) {
@@ -98,7 +98,7 @@
// -----
-func @scatter_update_scalar_1D(
+func.func @scatter_update_scalar_1D(
%original: memref<8xi32>, %indices: memref<3x1xi32>,
%updates: memref<3xi32>) {
iree_linalg_ext.scatter unique_indices(true)
@@ -124,7 +124,7 @@
// -----
-func @scatter_add_scalar_2D(
+func.func @scatter_add_scalar_2D(
%original: memref<4x3xi32>, %indices: memref<3x2xi32>,
%updates: memref<3xi32>) {
iree_linalg_ext.scatter unique_indices(true)
@@ -155,7 +155,7 @@
// -----
-func @scatter_update_slice_2D(
+func.func @scatter_update_slice_2D(
%original: memref<4x3xi32>, %indices: memref<2x1xi32>,
%updates: memref<2x3xi32>) {
iree_linalg_ext.scatter unique_indices(true)
@@ -185,7 +185,7 @@
// -----
-func @scatter_add_scalar_1D(
+func.func @scatter_add_scalar_1D(
%original: memref<8xi32>, %indices: memref<3x1xi32>,
%updates: memref<3xi32>) {
iree_linalg_ext.scatter unique_indices(true)
@@ -214,7 +214,7 @@
// -----
-func @scatter_add_slice_2D(
+func.func @scatter_add_slice_2D(
%original: memref<4x3xi32>, %indices: memref<2x1xi32>,
%updates: memref<2x3xi32>) {
iree_linalg_ext.scatter unique_indices(true)
@@ -244,7 +244,7 @@
// -----
-func @scatter_update_scalar_dynamic_1D(
+func.func @scatter_update_scalar_dynamic_1D(
%original: memref<?xi32>, %indices: memref<?x1xi32>,
%updates: memref<?xi32>) {
iree_linalg_ext.scatter unique_indices(true)
@@ -270,7 +270,7 @@
// -----
-func @scatter_add_scalar_dynamic_2D(
+func.func @scatter_add_scalar_dynamic_2D(
%original: memref<?x?xi32>, %indices: memref<?x2xi32>,
%updates: memref<?xi32>) {
iree_linalg_ext.scatter unique_indices(true)
@@ -301,7 +301,7 @@
// -----
-func @scatter_update_slice_dynamic_2D(
+func.func @scatter_update_slice_dynamic_2D(
%original: memref<?x?xi32>, %indices: memref<?x1xi32>,
%updates: memref<?x?xi32>) {
iree_linalg_ext.scatter unique_indices(true)
@@ -329,7 +329,7 @@
// -----
-func @scatter_partial_slices(%arg0: memref<2x64x12xf32>, %arg1: memref<2x3xi32>, %arg2: memref<2x1x12xf32>) {
+func.func @scatter_partial_slices(%arg0: memref<2x64x12xf32>, %arg1: memref<2x3xi32>, %arg2: memref<2x1x12xf32>) {
iree_linalg_ext.scatter
unique_indices(true)
ins(%arg2, %arg1 : memref<2x1x12xf32>, memref<2x3xi32>)
@@ -364,7 +364,7 @@
// -----
-func @fft_1D(%real: memref<16xf32>, %imag: memref<16xf32>) {
+func.func @fft_1D(%real: memref<16xf32>, %imag: memref<16xf32>) {
%stage = arith.constant 1 : index
iree_linalg_ext.fft
ins(%stage: index)
@@ -422,7 +422,7 @@
// -----
-func @fft_2D(%real: memref<?x16xf32>, %imag: memref<?x16xf32>) {
+func.func @fft_2D(%real: memref<?x16xf32>, %imag: memref<?x16xf32>) {
%stage = arith.constant 2 : index
iree_linalg_ext.fft
ins(%stage: index)
@@ -456,7 +456,7 @@
// -----
-func @fft_2D_coef_buf(%real: memref<?x16xf32>, %imag: memref<?x16xf32>,
+func.func @fft_2D_coef_buf(%real: memref<?x16xf32>, %imag: memref<?x16xf32>,
%coef_real: memref<1xf32>, %coef_imag: memref<1xf32>) {
%stage = arith.constant 1 : index
iree_linalg_ext.fft
@@ -509,7 +509,7 @@
// -----
-func @reverse_dim_0(%arg0: memref<?x?xi32>, %arg1: memref<?x?xi32>) {
+func.func @reverse_dim_0(%arg0: memref<?x?xi32>, %arg1: memref<?x?xi32>) {
iree_linalg_ext.reverse
dimensions(dense<0> : tensor<1xi64>)
ins(%arg0 : memref<?x?xi32>)
@@ -531,7 +531,7 @@
// CHECK: %[[V0:.+]] = memref.load %[[IN]][%[[I]], %[[J]]]
// CHECK: memref.store %[[V0]], %[[OUT]][%[[T2]], %[[J]]] : memref<?x?xi32>
-func @scan_1d_inclusive(%0: memref<128xi32>, %1: memref<128xi32>) {
+func.func @scan_1d_inclusive(%0: memref<128xi32>, %1: memref<128xi32>) {
%c0 = memref.alloc() : memref<i32>
iree_linalg_ext.scan dimension(0) inclusive(true)
ins(%0 : memref<128xi32>) outs(%1, %c0 : memref<128xi32>, memref<i32>) {
@@ -564,7 +564,7 @@
// -----
-func @scan_1d_exclusive(%0: memref<128xi32>, %1: memref<128xi32>) {
+func.func @scan_1d_exclusive(%0: memref<128xi32>, %1: memref<128xi32>) {
%c0 = memref.alloc() : memref<i32>
iree_linalg_ext.scan dimension(0) inclusive(false)
ins(%0 : memref<128xi32>) outs(%1, %c0 : memref<128xi32>, memref<i32>) {
@@ -597,7 +597,7 @@
// -----
-func @scan_2d(%0: memref<16x32xi32>, %1: memref<16x32xi32>) {
+func.func @scan_2d(%0: memref<16x32xi32>, %1: memref<16x32xi32>) {
%t0 = memref.alloc() : memref<32xi32>
iree_linalg_ext.scan dimension(0) inclusive(true)
ins(%0 : memref<16x32xi32>) outs(%1, %t0 : memref<16x32xi32>, memref<32xi32>) {
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/invalid.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/invalid.mlir
index e45ab16..f5652cd 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/invalid.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/invalid.mlir
@@ -1,6 +1,6 @@
// RUN: iree-dialects-opt --split-input-file --verify-diagnostics %s
-func @sort_invalid_dimension(%arg0: tensor<128xi32>) -> tensor<128xi32> {
+func.func @sort_invalid_dimension(%arg0: tensor<128xi32>) -> tensor<128xi32> {
// expected-error @+1 {{dimension must be within (0, 1]}}
%0 = iree_linalg_ext.sort dimension(1)
outs(%arg0 : tensor<128xi32>) {
@@ -13,7 +13,7 @@
// -----
-func @sort_mismatch_rank(%arg0: tensor<?x?xi32>, %arg1: tensor<?xf32>)
+func.func @sort_mismatch_rank(%arg0: tensor<?x?xi32>, %arg1: tensor<?xf32>)
-> (tensor<?x?xi32>, tensor<?xf32>) {
// expected-error @+1 {{expected operand 1 to be rank 2, same as other operands}}
%0:2 = iree_linalg_ext.sort dimension(0)
@@ -27,7 +27,7 @@
// -----
-func @sort_mismatch_shape(%arg0: tensor<?xi32>, %arg1: tensor<42xf32>)
+func.func @sort_mismatch_shape(%arg0: tensor<?xi32>, %arg1: tensor<42xf32>)
-> (tensor<?xi32>, tensor<42xf32>) {
// expected-error @+1 {{expected operand 1 to have same shape as other operands}}
%0:2 = iree_linalg_ext.sort dimension(0)
@@ -41,7 +41,7 @@
// -----
-func @scatter_mixed_tensor_memref(
+func.func @scatter_mixed_tensor_memref(
%update : memref<?x?xf32>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xf32>) -> tensor<?x?xf32> {
// expected-error @+1 {{expected inputs and outputs to be RankedTensorType or scalar}}
@@ -57,7 +57,7 @@
// -----
-func @scatter_mixed_tensor_memref(
+func.func @scatter_mixed_tensor_memref(
%update : tensor<?x?xf32>, %indices : memref<?x1xi32>,
%original : tensor<?x?xf32>) -> tensor<?x?xf32> {
// expected-error @+1 {{expected inputs and outputs to be RankedTensorType or scalar}}
@@ -73,7 +73,7 @@
// -----
-func @scatter_extra_outputs(
+func.func @scatter_extra_outputs(
%update : tensor<?x?xf32>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xf32>) -> (tensor<?x?xf32>, tensor<?x?xf32>) {
// expected-error @+1 {{expected number of outputs to be same as the number of results}}
@@ -89,7 +89,7 @@
// -----
-func @scatter_mixed_tensor_memref(
+func.func @scatter_mixed_tensor_memref(
%update : tensor<?x?xf32>, %indices : tensor<?x1xi32>,
%original : memref<?x?xf32>) -> tensor<?x?xf32> {
// expected-error @+1 {{expected inputs and outputs to be RankedTensorType or scalar}}
@@ -105,7 +105,7 @@
// -----
-func @scatter_output_type_mismatch(
+func.func @scatter_output_type_mismatch(
%update : tensor<?x?xf32>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xf32>) -> tensor<4x?xf32> {
// expected-error @+1 {{expected type of `outs` operand #0 'tensor<?x?xf32>' to be same as result type 'tensor<4x?xf32>'}}
@@ -121,7 +121,7 @@
// -----
-func @scatter_mixed_tensor_memref(
+func.func @scatter_mixed_tensor_memref(
%update : memref<?x?xf32>, %indices : tensor<?x1xi32>,
%original : memref<?x?xf32>) {
// expected-error @+1 {{expected inputs and outputs to be MemRefType or scalar}}
@@ -137,7 +137,7 @@
// -----
-func @scatter_mixed_tensor_memref(
+func.func @scatter_mixed_tensor_memref(
%update : memref<?x?xf32>, %indices : memref<?x1xi32>,
%original : tensor<?x?xf32>) {
// expected-error @+1 {{expected inputs and outputs to be MemRefType or scalar}}
@@ -153,7 +153,7 @@
// -----
-func @scatter_dim_mismatch(
+func.func @scatter_dim_mismatch(
%update : tensor<?x?xf32>, %indices : tensor<48x1xi32>,
%original : tensor<?x?xf32>) -> tensor<?x?xf32> {
// expected-error @+1 {{mismatch in shape of indices and update value at dim#0}}
@@ -169,7 +169,7 @@
// -----
-func @scatter_dim_mismatch(
+func.func @scatter_dim_mismatch(
%update : tensor<64x?xf32>, %indices : tensor<48x1xi32>,
%original : tensor<?x?xf32>) -> tensor<?x?xf32> {
// expected-error @+1 {{mismatch in shape of indices and update value at dim#0}}
@@ -185,7 +185,7 @@
// -----
-func @scatter_dim_mismatch(
+func.func @scatter_dim_mismatch(
%update : tensor<?x?x?x?xf32>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xf32>) -> tensor<?x?xf32> {
// expected-error @+1 {{op update value rank exceeds the rank of the original value}}
@@ -201,7 +201,7 @@
// -----
-func @scatter_dim_mismatch(
+func.func @scatter_dim_mismatch(
%update : tensor<?x4xf32>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xf32>) -> tensor<?x?xf32> {
// expected-error @+1 {{mismatch in shape of update value dim#1 and original value at dim#1}}
@@ -217,7 +217,7 @@
// -----
-func @scatter_region_type_mismatch(
+func.func @scatter_region_type_mismatch(
%update : tensor<?x?xi32>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xi32>) -> tensor<?x?xi32> {
// expected-error @+1 {{expected region to have scalar argument of integer or float types}}
@@ -234,7 +234,7 @@
// -----
-func @scatter_region_type_mismatch(
+func.func @scatter_region_type_mismatch(
%update : tensor<?x?xi32>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xi32>) -> tensor<?x?xi32> {
// expected-error @+1 {{mismatch in argument 0 of region 'i64' and element type of update value 'i32'}}
@@ -251,7 +251,7 @@
// -----
-func @scatter_region_type_mismatch(
+func.func @scatter_region_type_mismatch(
%update : tensor<?x?xi32>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xi32>) -> tensor<?x?xi32> {
// expected-error @+1 {{mismatch in argument 1 of region 'i64' and element type of original value 'i32'}}
@@ -268,7 +268,7 @@
// -----
-func @scatter_region_type_mismatch(
+func.func @scatter_region_type_mismatch(
%update : tensor<?x?xi32>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xi64>) -> tensor<?x?xi64> {
// expected-error @+1 {{mismatch in region argument types 'i32' and 'i64'}}
@@ -285,7 +285,7 @@
// -----
-func @scatter_region_type_mismatch(
+func.func @scatter_region_type_mismatch(
%update : tensor<?x?xi64>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xi64>) -> tensor<?x?xi64> {
// expected-error @+1 {{expected region to have two arguments}}
@@ -302,7 +302,7 @@
// -----
-func @scatter_yield_mismatch(
+func.func @scatter_yield_mismatch(
%update : tensor<?x?xi64>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xi64>) -> tensor<?x?xi64> {
%0 = iree_linalg_ext.scatter unique_indices(true)
@@ -319,7 +319,7 @@
// -----
-func @scatter_yield_mismatch(
+func.func @scatter_yield_mismatch(
%update : tensor<?x?xi64>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xi64>) -> tensor<?x?xi64> {
%0 = iree_linalg_ext.scatter unique_indices(true)
@@ -336,7 +336,7 @@
// -----
-func @scatter_index_depth_dynamic(
+func.func @scatter_index_depth_dynamic(
%update : tensor<?x?xi64>, %indices : tensor<?x?xi32>,
%original : tensor<?x?xi64>) -> tensor<?x?xi64> {
// expected-error @+1 {{expected index depth is static}}
@@ -353,7 +353,7 @@
// -----
-func @scatter_original_rank_mismatch(
+func.func @scatter_original_rank_mismatch(
%update : tensor<?xi64>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xi64>) -> tensor<?x?xi64> {
// expected-error @+1 {{op index depth and update value does not cover rank of original value}}
@@ -370,7 +370,7 @@
// -----
-func @reverse_diff_element_type(%arg0: tensor<3x5xi32>) -> tensor<3x5xf32> {
+func.func @reverse_diff_element_type(%arg0: tensor<3x5xi32>) -> tensor<3x5xf32> {
%init = linalg.init_tensor [3, 5] : tensor<3x5xf32>
// expected-error @+1 {{expected input/output element types to be identical}}
%0 = iree_linalg_ext.reverse
@@ -382,7 +382,7 @@
// -----
-func @reverse_diff_shape(%arg0: tensor<3x5xi32>) -> tensor<3x6xi32> {
+func.func @reverse_diff_shape(%arg0: tensor<3x5xi32>) -> tensor<3x6xi32> {
%init = linalg.init_tensor [3, 6] : tensor<3x6xi32>
// expected-error @+1 {{incompatible input/output shapes}}
%0 = iree_linalg_ext.reverse
@@ -394,7 +394,7 @@
// -----
-func @reverse_dup_dims(%arg0: tensor<3x5xi32>) -> tensor<3x5xi32> {
+func.func @reverse_dup_dims(%arg0: tensor<3x5xi32>) -> tensor<3x5xi32> {
%init = linalg.init_tensor [3, 5] : tensor<3x5xi32>
// expected-error @+1 {{expected dimensions numbers are all unique}}
%0 = iree_linalg_ext.reverse
@@ -406,7 +406,7 @@
// -----
-func @not_enough_results() -> () {
+func.func @not_enough_results() -> () {
%num_threads = arith.constant 100 : index
// expected-error@+1 {{'iree_linalg_ext.in_parallel' op produces 1 results, but its terminator yields 0 values}}
%result = iree_linalg_ext.in_parallel %num_threads -> tensor<100xf32> {
@@ -417,7 +417,7 @@
// -----
-func @too_many_results(%1 : tensor<1xf32>, %out : tensor<100xf32>) -> () {
+func.func @too_many_results(%1 : tensor<1xf32>, %out : tensor<100xf32>) -> () {
%num_threads = arith.constant 100 : index
// expected-error@+1 {{'iree_linalg_ext.in_parallel' op produces 1 results, but its terminator yields 2 values}}
%result = iree_linalg_ext.in_parallel %num_threads -> tensor<100xf32> {
@@ -434,7 +434,7 @@
// -----
-func @type_mismatch(%1 : tensor<1xf32>, %out : tensor<200xf32>) -> () {
+func.func @type_mismatch(%1 : tensor<1xf32>, %out : tensor<200xf32>) -> () {
%num_threads = arith.constant 100 : index
// expected-error@+1 {{'iree_linalg_ext.in_parallel' op type mismatch between 0th result of in_parallel ('tensor<200xf32>') and 0th result yielded by its terminator ('tensor<100xf32>')}}
%result = iree_linalg_ext.in_parallel %num_threads -> tensor<100xf32> {
@@ -449,7 +449,7 @@
// -----
-func @topk_invalid(%input_values: tensor<2x10xf32>, %input_indices: tensor<2x10xi32>, %out_values : tensor<2x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<2x3xf32>, tensor<2x3xi32>) {
+func.func @topk_invalid(%input_values: tensor<2x10xf32>, %input_indices: tensor<2x10xi32>, %out_values : tensor<2x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<2x3xf32>, tensor<2x3xi32>) {
// expected-error@+1 {{expected two input operands}}
%0:2 = iree_linalg_ext.topk
dimension(1)
@@ -464,7 +464,7 @@
// -----
-func @topk_invalid(%input_values: tensor<2x10xi32>, %input_indices: tensor<2x10xi32>, %out_values : tensor<2x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<2x3xf32>, tensor<2x3xi32>) {
+func.func @topk_invalid(%input_values: tensor<2x10xi32>, %input_indices: tensor<2x10xi32>, %out_values : tensor<2x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<2x3xf32>, tensor<2x3xi32>) {
// expected-error@+1 {{expected input/output value types to be identical}}
%0:2 = iree_linalg_ext.topk
dimension(1)
@@ -479,7 +479,7 @@
// -----
-func @topk_invalid(%input_values: tensor<2x10xf32>, %input_indices: tensor<2x10xf32>, %out_values : tensor<2x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<2x3xf32>, tensor<2x3xi32>) {
+func.func @topk_invalid(%input_values: tensor<2x10xf32>, %input_indices: tensor<2x10xf32>, %out_values : tensor<2x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<2x3xf32>, tensor<2x3xi32>) {
// expected-error@+1 {{expected input/output indices types to be int}}
%0:2 = iree_linalg_ext.topk
dimension(1)
@@ -494,7 +494,7 @@
// -----
-func @topk_invalid(%input_values: tensor<10x2x10xf32>, %input_indices: tensor<10x2x10xi32>, %out_values : tensor<2x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<2x3xf32>, tensor<2x3xi32>) {
+func.func @topk_invalid(%input_values: tensor<10x2x10xf32>, %input_indices: tensor<10x2x10xi32>, %out_values : tensor<2x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<2x3xf32>, tensor<2x3xi32>) {
// expected-error@+1 {{expected input/output to have the same rank}}
%0:2 = iree_linalg_ext.topk
dimension(1)
@@ -509,7 +509,7 @@
// -----
-func @topk_invalid(%input_values: tensor<3x10xf32>, %input_indices: tensor<2x10xi32>, %out_values : tensor<2x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<2x3xf32>, tensor<2x3xi32>) {
+func.func @topk_invalid(%input_values: tensor<3x10xf32>, %input_indices: tensor<2x10xi32>, %out_values : tensor<2x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<2x3xf32>, tensor<2x3xi32>) {
// expected-error@+1 {{input indices/values shape must match}}
%0:2 = iree_linalg_ext.topk
dimension(1)
@@ -524,7 +524,7 @@
// -----
-func @topk_invalid(%input_values: tensor<2x10xf32>, %input_indices: tensor<2x10xi32>, %out_values : tensor<3x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<3x3xf32>, tensor<2x3xi32>) {
+func.func @topk_invalid(%input_values: tensor<2x10xf32>, %input_indices: tensor<2x10xi32>, %out_values : tensor<3x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<3x3xf32>, tensor<2x3xi32>) {
// expected-error@+1 {{output indices/values shape must match}}
%0:2 = iree_linalg_ext.topk
dimension(1)
@@ -539,7 +539,7 @@
// -----
-func @topk_invalid(%input_values: tensor<3x10xf32>, %input_indices: tensor<3x10xi32>, %out_values : tensor<2x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<2x3xf32>, tensor<2x3xi32>) {
+func.func @topk_invalid(%input_values: tensor<3x10xf32>, %input_indices: tensor<3x10xi32>, %out_values : tensor<2x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<2x3xf32>, tensor<2x3xi32>) {
// expected-error@+1 {{incompatible input/output shapes}}
%0:2 = iree_linalg_ext.topk
dimension(1)
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/pad_contraction_to_block_size.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/pad_contraction_to_block_size.mlir
index 8cf3fa0..681a7d3 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/pad_contraction_to_block_size.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/pad_contraction_to_block_size.mlir
@@ -20,7 +20,7 @@
// CHECK: %[[VAL_15:.*]] = linalg.matmul ins(%[[VAL_16:.*]], %[[VAL_17:.*]] : tensor<256x512xf32>, tensor<512x1024xf32>) outs(%[[VAL_18:.*]] : tensor<256x1024xf32>) -> tensor<256x1024xf32>
// CHECK: %[[VAL_19:.*]] = tensor.extract_slice %[[VAL_15]][0, 0] [250, 1020] [1, 1] : tensor<256x1024xf32> to tensor<250x1020xf32>
// CHECK: return %[[VAL_19]] : tensor<250x1020xf32>
-func @pad_matmul_static(%arg0 : tensor<250x500xf32>, %arg1 : tensor<500x1020xf32>,
+func.func @pad_matmul_static(%arg0 : tensor<250x500xf32>, %arg1 : tensor<500x1020xf32>,
%arg2 : tensor<250x1020xf32>) -> tensor<250x1020xf32> {
%matmul = linalg.matmul
ins(%arg0, %arg1 : tensor<250x500xf32>, tensor<500x1020xf32>)
@@ -32,7 +32,7 @@
// CHECK-LABEL: @pad_matmul_noop
// CHECK-NOT: pad_tensor
// CHECK-NOT: extract_slice
-func @pad_matmul_noop(%arg0 : tensor<256x512xf32>, %arg1 : tensor<512x1024xf32>,
+func.func @pad_matmul_noop(%arg0 : tensor<256x512xf32>, %arg1 : tensor<512x1024xf32>,
%arg2 : tensor<256x1024xf32>) -> tensor<256x1024xf32> {
%matmul = linalg.matmul
ins(%arg0, %arg1 : tensor<256x512xf32>, tensor<512x1024xf32>)
@@ -60,7 +60,7 @@
// CHECK: %[[ORIG_DIM_VALUE:.*]] = tensor.dim %arg2, %[[DIM0]]
// CHECK: %[[RETURN:.*]] = tensor.extract_slice %[[PADDED_RESULT]][0, 0] {{\[}}%[[ORIG_DIM_VALUE]], 1024] [1, 1] : tensor<?x1024xf32> to tensor<?x1024xf32>
// CHECK: return %[[RETURN]] : tensor<?x1024xf32>
-func @pad_matmul_dynamic_row(%arg0 : tensor<?x512xf32>, %arg1 : tensor<512x1024xf32>,
+func.func @pad_matmul_dynamic_row(%arg0 : tensor<?x512xf32>, %arg1 : tensor<512x1024xf32>,
%arg2 : tensor<?x1024xf32>) -> tensor<?x1024xf32> {
%matmul = linalg.matmul
ins(%arg0, %arg1 : tensor<?x512xf32>, tensor<512x1024xf32>)
@@ -83,7 +83,7 @@
// CHECK: } : tensor<256x?xf32> to tensor<256x?xf32>
// Matmul:
// CHECK: %{{.*}} = linalg.matmul ins(%arg0, %[[RHS_PADDED]] : tensor<256x512xf32>, tensor<512x?xf32>) outs(%[[OUTPUT_PADDED]] : tensor<256x?xf32>) -> tensor<256x?xf32>
-func @pad_matmul_dynamic_col(%arg0 : tensor<256x512xf32>, %arg1 : tensor<512x?xf32>,
+func.func @pad_matmul_dynamic_col(%arg0 : tensor<256x512xf32>, %arg1 : tensor<512x?xf32>,
%arg2 : tensor<256x?xf32>) -> tensor<256x?xf32> {
%matmul = linalg.matmul
ins(%arg0, %arg1 : tensor<256x512xf32>, tensor<512x?xf32>)
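The CHECK lines above make the contract of this pass concrete: operand dimensions that are not multiples of the block shape are padded up (250x500 to 256x512, 1020 to 1024), the matmul runs at the padded sizes, and a final tensor.extract_slice recovers the original 250x1020 result. A sketch of just the padding step, under the assumption of a zero padding value (names are illustrative):

%cst = arith.constant 0.0 : f32
// 250 + 6 = 256 and 500 + 12 = 512: round each dim up to the block shape.
%lhs_padded = tensor.pad %arg0 low[0, 0] high[6, 12] {
^bb0(%i: index, %j: index):
  tensor.yield %cst : f32
} : tensor<250x500xf32> to tensor<256x512xf32>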
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/pad_tiling.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/pad_tiling.mlir
index 5867d33..7380591 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/pad_tiling.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/pad_tiling.mlir
@@ -3,7 +3,7 @@
// TODO: Re-enable when upstream tensor.pad op properly implements the tiling
// interface.
-func @pad_tensor(%arg0 : tensor<?x?xf32>, %arg1 : index, %arg2 : index,
+func.func @pad_tensor(%arg0 : tensor<?x?xf32>, %arg1 : index, %arg2 : index,
%arg3 : index, %arg4 : index, %arg5 : f32) -> tensor<?x?xf32> {
%0 = tensor.pad %arg0 low[%arg1, %arg2] high[%arg3, %arg4] {
^bb0(%arg6 : index, %arg7 : index):
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/roundtrip.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/roundtrip.mlir
index c2bd60a..9929976 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/roundtrip.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/roundtrip.mlir
@@ -5,7 +5,7 @@
// CHECK-SAME: dimension(0)
// CHECK-SAME: outs({{.*}})
// CHECK: iree_linalg_ext.yield
-func @sort_tensor(%arg0: tensor<128xi32>) -> tensor<128xi32> {
+func.func @sort_tensor(%arg0: tensor<128xi32>) -> tensor<128xi32> {
%0 = iree_linalg_ext.sort
dimension(0)
outs(%arg0 : tensor<128xi32>) {
@@ -23,7 +23,7 @@
// CHECK-SAME: dimension(0)
// CHECK-SAME: outs({{.*}})
// CHECK: iree_linalg_ext.yield
-func @sort_memref(%arg0: memref<128xi32>) {
+func.func @sort_memref(%arg0: memref<128xi32>) {
iree_linalg_ext.sort dimension(0)
outs(%arg0 : memref<128xi32>) {
^bb0(%arg1: i32, %arg2: i32): // no predecessors
@@ -35,7 +35,7 @@
// -----
-func @sort_multi_result_tensor(
+func.func @sort_multi_result_tensor(
%arg0: tensor<?x?xi32>, %arg1: tensor<?x?xf32>)
-> (tensor<?x?xi32>, tensor<?x?xf32>) {
%0:2 = iree_linalg_ext.sort dimension(0)
@@ -55,7 +55,7 @@
// -----
-func @sort_multi_result_memref(
+func.func @sort_multi_result_memref(
%arg0: memref<?x?xi32>, %arg1: memref<?x?xf32>) {
iree_linalg_ext.sort dimension(0)
outs(%arg0, %arg1 : memref<?x?xi32>, memref<?x?xf32>) {
@@ -73,7 +73,7 @@
// -----
-func @scatter_tensor_dynamic(
+func.func @scatter_tensor_dynamic(
%original: tensor<?x?xf32>, %indices: tensor<?x1xi32>,
%update: tensor<?x?xf32>) -> tensor<?x?xf32> {
%0 = iree_linalg_ext.scatter
@@ -99,7 +99,7 @@
// -----
-func @scatter_repeated_tensor_dynamic(
+func.func @scatter_repeated_tensor_dynamic(
%original: tensor<?x?xf32>, %indices: tensor<?x1xi32>,
%update: tensor<?x?xf32>) -> tensor<?x?xf32> {
%0 = iree_linalg_ext.scatter
@@ -125,7 +125,7 @@
// -----
-func @scatter_tensor_static(
+func.func @scatter_tensor_static(
%original: tensor<128x3xf32>, %indices: tensor<48x1xi32>,
%update: tensor<48x3xf32>) -> tensor<128x3xf32> {
%0 = iree_linalg_ext.scatter
@@ -151,7 +151,7 @@
// -----
-func @scatter_tensor_multi_index_depth(
+func.func @scatter_tensor_multi_index_depth(
%original: tensor<1x128x3xf32>, %indices: tensor<48x2xi32>,
%update: tensor<48x3xf32>) -> tensor<1x128x3xf32> {
%0 = iree_linalg_ext.scatter
@@ -177,7 +177,7 @@
// -----
-func @scatter_memref_dynamic(
+func.func @scatter_memref_dynamic(
%original: memref<?x?xf32>, %indices: memref<?x1xi32>,
%update: memref<?x?xf32>) {
iree_linalg_ext.scatter
@@ -203,7 +203,7 @@
// -----
-func @scatter_memref_static(
+func.func @scatter_memref_static(
%original: memref<128x3xf32>, %indices: memref<48x1xi32>,
%update: memref<48x3xf32>) {
iree_linalg_ext.scatter
@@ -229,7 +229,7 @@
// -----
-func @scatter_memref_multi_index_depth(
+func.func @scatter_memref_multi_index_depth(
%original: memref<1x128x3xf32>, %indices: memref<48x2xi32>,
%update: memref<48x3xf32>) {
iree_linalg_ext.scatter
@@ -255,7 +255,7 @@
// -----
-func @scatter_update_scalar_1D(
+func.func @scatter_update_scalar_1D(
%original: tensor<8xi32>, %indices: tensor<3x1xi32>,
%updates: tensor<3xi32>) -> tensor<8xi32> {
%0 = iree_linalg_ext.scatter
@@ -280,7 +280,7 @@
// -----
-func @scatter_update_scalar_2D(
+func.func @scatter_update_scalar_2D(
%original: tensor<4x3xi32>, %indices: tensor<3x2xi32>,
%updates: tensor<3xi32>) -> tensor<4x3xi32> {
%0 = iree_linalg_ext.scatter
@@ -305,7 +305,7 @@
// -----
-func @scatter_update_slice_2D(
+func.func @scatter_update_slice_2D(
%original: tensor<4x3xi32>, %indices: tensor<1x1xi32>,
%updates: tensor<1x3xi32>) -> tensor<4x3xi32> {
%0 = iree_linalg_ext.scatter
@@ -330,7 +330,7 @@
// -----
-func @fft_tensor(%arg0: tensor<1024xf32>, %arg1: tensor<1024xf32>)
+func.func @fft_tensor(%arg0: tensor<1024xf32>, %arg1: tensor<1024xf32>)
-> (tensor<1024xf32>, tensor<1024xf32>) {
%cst1 = arith.constant 1 : index
%0:2 = iree_linalg_ext.fft
@@ -351,7 +351,7 @@
// -----
-func @fft_memref(%arg0: memref<1024xf32>, %arg1: memref<1024xf32>) {
+func.func @fft_memref(%arg0: memref<1024xf32>, %arg1: memref<1024xf32>) {
%cst1 = arith.constant 1 : index
iree_linalg_ext.fft
ins(%cst1: index)
@@ -369,7 +369,7 @@
// -----
-func @fft_tensor_coef(%arg0: tensor<1024xf32>, %arg1: tensor<1024xf32>,
+func.func @fft_tensor_coef(%arg0: tensor<1024xf32>, %arg1: tensor<1024xf32>,
%arg2: tensor<1xf32>, %arg3: tensor<1xf32>) -> (tensor<1024xf32>, tensor<1024xf32>) {
%cst1 = arith.constant 1 : index
%0:2 = iree_linalg_ext.fft
@@ -392,7 +392,7 @@
// -----
-func @fft_memref_coef(%arg0: memref<1024xf32>, %arg1: memref<1024xf32>,
+func.func @fft_memref_coef(%arg0: memref<1024xf32>, %arg1: memref<1024xf32>,
%arg2: memref<1xf32>, %arg3: memref<1xf32>) {
%cst1 = arith.constant 1 : index
iree_linalg_ext.fft
@@ -414,7 +414,7 @@
// -----
// The size of coefficient tensor is 2^(stage-1).
-func @fft_tensor_coef_stage_5(%arg0: tensor<1024xf32>, %arg1: tensor<1024xf32>,
+func.func @fft_tensor_coef_stage_5(%arg0: tensor<1024xf32>, %arg1: tensor<1024xf32>,
%arg2: tensor<16xf32>, %arg3: tensor<16xf32>) -> (tensor<1024xf32>, tensor<1024xf32>) {
%cst1 = arith.constant 5 : index
%0:2 = iree_linalg_ext.fft
@@ -437,7 +437,7 @@
// -----
-func @reverse_tensor(%arg0: tensor<3x5xi32>) -> tensor<3x5xi32> {
+func.func @reverse_tensor(%arg0: tensor<3x5xi32>) -> tensor<3x5xi32> {
%init = linalg.init_tensor [3, 5] : tensor<3x5xi32>
%0 = iree_linalg_ext.reverse
dimensions(dense<0> : tensor<1xi64>)
@@ -455,7 +455,7 @@
// -----
-func @reverse_memref(%arg0: memref<3x5xi32>, %arg1: memref<3x5xi32>) {
+func.func @reverse_memref(%arg0: memref<3x5xi32>, %arg1: memref<3x5xi32>) {
iree_linalg_ext.reverse
dimensions(dense<0> : tensor<1xi64>)
ins(%arg0 : memref<3x5xi32>)
@@ -472,7 +472,7 @@
// -----
-func @reverse_dynamic_tensor(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32> {
+func.func @reverse_dynamic_tensor(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32> {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%d0 = tensor.dim %arg0, %c0 : tensor<?x?xi32>
@@ -498,7 +498,7 @@
// -----
-func @reverse_static_dynamic_tensor(%arg0: tensor<3x5xi32>) -> tensor<?x?xi32> {
+func.func @reverse_static_dynamic_tensor(%arg0: tensor<3x5xi32>) -> tensor<?x?xi32> {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%d0 = tensor.dim %arg0, %c0 : tensor<3x5xi32>
@@ -524,7 +524,7 @@
// -----
-func @reverse_multi_dims(%arg0: tensor<3x5xi32>) -> tensor<3x5xi32> {
+func.func @reverse_multi_dims(%arg0: tensor<3x5xi32>) -> tensor<3x5xi32> {
%init = linalg.init_tensor [3, 5] : tensor<3x5xi32>
%0 = iree_linalg_ext.reverse
dimensions(dense<[0, 1]> : tensor<2xi64>)
@@ -542,7 +542,7 @@
// -----
-func @topk_tensor(%input_values: tensor<20x10x8x4xf32>, %input_indices: tensor<20x10x8x4xi32>) -> (tensor<20x10x3x4xf32>, tensor<20x10x3x4xi32>) {
+func.func @topk_tensor(%input_values: tensor<20x10x8x4xf32>, %input_indices: tensor<20x10x8x4xi32>) -> (tensor<20x10x3x4xf32>, tensor<20x10x3x4xi32>) {
%out_values = linalg.init_tensor [20, 10, 3, 4] : tensor<20x10x3x4xf32>
%out_indices = linalg.init_tensor [20, 10, 3, 4] : tensor<20x10x3x4xi32>
%0:2 = iree_linalg_ext.topk
@@ -570,7 +570,7 @@
// -----
-func @topk_memref(%input_values: memref<4x10xf32>, %input_indices: memref<4x10xi32>, %out_values: memref<4x3xf32>, %out_indices: memref<4x3xi32>) {
+func.func @topk_memref(%input_values: memref<4x10xf32>, %input_indices: memref<4x10xi32>, %out_values: memref<4x3xf32>, %out_indices: memref<4x3xi32>) {
iree_linalg_ext.topk
dimension(1)
ins(%input_values, %input_indices : memref<4x10xf32> , memref<4x10xi32>)
@@ -594,7 +594,7 @@
// -----
-func @topk_dynamic_tensor(%input_values: tensor<?x?xf32>, %input_indices: tensor<?x?xi32>, %out_values: tensor<?x?xf32>, %out_indices: tensor<?x?xi32>) -> (tensor<?x?xf32>, tensor<?x?xi32>) {
+func.func @topk_dynamic_tensor(%input_values: tensor<?x?xf32>, %input_indices: tensor<?x?xi32>, %out_values: tensor<?x?xf32>, %out_indices: tensor<?x?xi32>) -> (tensor<?x?xf32>, tensor<?x?xi32>) {
%0:2 = iree_linalg_ext.topk
dimension(1)
ins(%input_values, %input_indices : tensor<?x?xf32> , tensor<?x?xi32>)
@@ -620,7 +620,7 @@
// -----
// CHECK-LABEL: func @static_tile
-func @static_tile(%chunk_size: index, %in: tensor<?xf32>, %out: tensor<?xf32>, %out2: tensor<?xf32>) -> (tensor<?xf32>) {
+func.func @static_tile(%chunk_size: index, %in: tensor<?xf32>, %out: tensor<?xf32>, %out2: tensor<?xf32>) -> (tensor<?xf32>) {
%c0 = arith.constant 0: index
//%d0 = tensor.dim %out, %c0: tensor<?xf32>
@@ -650,7 +650,7 @@
// -----
// CHECK-LABEL: func @simple_example
-func @simple_example(%in: tensor<100xf32>, %out: tensor<100xf32>) -> (tensor<100xf32>) {
+func.func @simple_example(%in: tensor<100xf32>, %out: tensor<100xf32>) -> (tensor<100xf32>) {
%num_threads = arith.constant 100 : index
%result = iree_linalg_ext.in_parallel %num_threads -> tensor<100xf32> {
^bb0(%thread_idx : index):
@@ -664,7 +664,7 @@
return %result : tensor<100xf32>
}
-func @no_terminator() -> () {
+func.func @no_terminator() -> () {
%num_threads = arith.constant 100 : index
iree_linalg_ext.in_parallel %num_threads -> () {
^bb0(%thread_idx : index):
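The comment on @fft_tensor_coef_stage_5 above states the invariant behind these shapes: at stage s the coefficient (twiddle-factor) operands hold 2^(s-1) elements, so the stage-1 cases pair with tensor<1xf32> coefficients and the stage-5 cases with tensor<16xf32> (2^4 = 16). Sketched against the stage-5 case, with the ins/outs layout inferred from the truncated hunks above (treat the exact signature as an assumption):

%stage = arith.constant 5 : index
// 2^(5-1) = 16 coefficient elements per real/imag operand.
%res:2 = iree_linalg_ext.fft
    ins(%stage, %coef_real, %coef_imag : index, tensor<16xf32>, tensor<16xf32>)
    outs(%real, %imag : tensor<1024xf32>, tensor<1024xf32>)
    : tensor<1024xf32>, tensor<1024xf32>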
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/tiling.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/tiling.mlir
index 9a4a8c7..8b6b070 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/tiling.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/iree_linalg_ext/tiling.mlir
@@ -1,6 +1,6 @@
// RUN: iree-dialects-opt --iree-linalg-ext-tile --split-input-file --verify-diagnostics %s | FileCheck %s
-func @scatter_tiling(
+func.func @scatter_tiling(
%original: tensor<?x?xf32>, %indices: tensor<?x1xi32>,
%update : tensor<?x?xf32>) -> tensor<?x?xf32> {
%0 = iree_linalg_ext.scatter
@@ -52,7 +52,7 @@
// -----
-func @scatter_tiling_memref(
+func.func @scatter_tiling_memref(
%original: memref<?x?xf32>, %indices: memref<?x1xi32>,
%update : memref<?x?xf32>) {
iree_linalg_ext.scatter
@@ -97,7 +97,7 @@
// -----
-func @scatter_tiling_distribution(
+func.func @scatter_tiling_distribution(
%original: tensor<?x?xf32>, %indices: tensor<?x1xi32>,
%update : tensor<?x?xf32>) -> tensor<?x?xf32> {
%0 = iree_linalg_ext.scatter
@@ -147,7 +147,7 @@
// -----
-func @scatter_no_tiling(
+func.func @scatter_no_tiling(
%original: tensor<?x?xf32>, %indices: tensor<?x1xi32>,
%update : tensor<?x?xf32>) -> tensor<?x?xf32> {
%0 = iree_linalg_ext.scatter
@@ -174,7 +174,7 @@
// -----
-func @scatter_repeated_indices_tiling(
+func.func @scatter_repeated_indices_tiling(
%original: tensor<?x?xf32>, %indices: tensor<?x1xi32>,
%update : tensor<?x?xf32>) -> tensor<?x?xf32> {
%0 = iree_linalg_ext.scatter
@@ -221,7 +221,7 @@
// -----
-func @scatter_repeated_indices_no_tiling(
+func.func @scatter_repeated_indices_no_tiling(
%original: tensor<?x?xf32>, %indices: tensor<?x1xi32>,
%update : tensor<?x?xf32>) -> tensor<?x?xf32> {
// expected-error @+1 {{unimplemented tiling of non-parallel loop iterator type}}
@@ -239,7 +239,7 @@
// -----
-func @sort_1d(%arg0: tensor<?xi32>) -> tensor<?xi32> {
+func.func @sort_1d(%arg0: tensor<?xi32>) -> tensor<?xi32> {
%0 = iree_linalg_ext.sort
{__internal_linalg_transform__ = "outer_reduce_input"}
dimension(0)
@@ -259,7 +259,7 @@
// -----
-func @sort_2d(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32> {
+func.func @sort_2d(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32> {
%0 = iree_linalg_ext.sort
{__internal_linalg_transform__ = "inner_reduce_input"}
dimension(1)
@@ -293,7 +293,7 @@
// -----
-func @sort_2d_inner_parallel(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32> {
+func.func @sort_2d_inner_parallel(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32> {
%0 = iree_linalg_ext.sort
{__internal_linalg_transform__ = "outer_reduce_input"}
dimension(0)
@@ -327,7 +327,7 @@
// -----
-func @sort_2d_multi_result(
+func.func @sort_2d_multi_result(
%arg0: tensor<?x?xi32>, %arg1: tensor<?x?xf32>)
-> (tensor<?x?xi32>, tensor<?x?xf32>) {
%0:2 = iree_linalg_ext.sort
@@ -368,7 +368,7 @@
// -----
-func @sort_2d_multi_result_memref(
+func.func @sort_2d_multi_result_memref(
%arg0: memref<?x?xi32>, %arg1: memref<?x?xf32>) {
iree_linalg_ext.sort
{__internal_linalg_transform__ = "outer_reduce_input"}
@@ -401,7 +401,7 @@
// -----
-func @sort_3d_multi_result_distribute(
+func.func @sort_3d_multi_result_distribute(
%arg0: tensor<?x?x?xi32>, %arg1 : tensor<?x?x?xf32>)
-> (tensor<?x?x?xi32>, tensor<?x?x?xf32>) {
%0, %1 = iree_linalg_ext.sort
@@ -460,7 +460,7 @@
// -----
-func @sort_3d_multi_result_distribute_memref(
+func.func @sort_3d_multi_result_distribute_memref(
%arg0: memref<?x?x?xi32>, %arg1 : memref<?x?x?xf32>) {
iree_linalg_ext.sort
{__internal_linalg_transform__ = "distribute_input"}
@@ -509,7 +509,7 @@
// -----
-func @fft_1d_stage_5(%arg0: tensor<1024xf32>, %arg1: tensor<1024xf32>,
+func.func @fft_1d_stage_5(%arg0: tensor<1024xf32>, %arg1: tensor<1024xf32>,
%arg2: tensor<16xf32>, %arg3: tensor<16xf32>) -> (tensor<1024xf32>, tensor<1024xf32>) {
%cst1 = arith.constant 5 : index
%0:2 = iree_linalg_ext.fft
@@ -546,7 +546,7 @@
// -----
-func @fft_2d_stage_5(%arg0: tensor<3x1024xf32>, %arg1: tensor<3x1024xf32>,
+func.func @fft_2d_stage_5(%arg0: tensor<3x1024xf32>, %arg1: tensor<3x1024xf32>,
%arg2: tensor<16xf32>, %arg3: tensor<16xf32>) -> (tensor<3x1024xf32>, tensor<3x1024xf32>) {
%cst1 = arith.constant 5 : index
%0:2 = iree_linalg_ext.fft
@@ -588,7 +588,7 @@
// -----
-func @fft_1d_stage_5_memref(%arg0: memref<1024xf32>, %arg1: memref<1024xf32>,
+func.func @fft_1d_stage_5_memref(%arg0: memref<1024xf32>, %arg1: memref<1024xf32>,
%arg2: memref<16xf32>, %arg3: memref<16xf32>) {
%cst1 = arith.constant 5 : index
iree_linalg_ext.fft
@@ -619,7 +619,7 @@
// -----
-func @reverse_memref(%arg0: memref<?xi32>, %arg1: memref<?xi32>) {
+func.func @reverse_memref(%arg0: memref<?xi32>, %arg1: memref<?xi32>) {
iree_linalg_ext.reverse
{__internal_linalg_transform__ = "tiling_input"}
dimensions(dense<0> : tensor<1xi64>)
@@ -650,7 +650,7 @@
// -----
-func @reverse_tensor_multi_dim(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32> {
+func.func @reverse_tensor_multi_dim(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32> {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%d0 = tensor.dim %arg0, %c0 : tensor<?x?xi32>
@@ -704,7 +704,7 @@
// -----
-func @scan_1d(%0: tensor<128xi32>) -> tensor<128xi32> {
+func.func @scan_1d(%0: tensor<128xi32>) -> tensor<128xi32> {
%c0 = linalg.init_tensor [] : tensor<i32>
%1 = linalg.init_tensor [128] : tensor<128xi32>
%2:2 = iree_linalg_ext.scan
@@ -729,7 +729,7 @@
// -----
-func @scan_2d(%0: tensor<16x32xi32>) -> tensor<16x32xi32> {
+func.func @scan_2d(%0: tensor<16x32xi32>) -> tensor<16x32xi32> {
%c0 = linalg.init_tensor [32] : tensor<32xi32>
%1 = linalg.init_tensor [16, 32] : tensor<16x32xi32>
%2:2 = iree_linalg_ext.scan
@@ -771,7 +771,7 @@
// -----
-func @scan_2d_memref(%0: memref<16x32xi32>, %1: memref<16x32xi32>) {
+func.func @scan_2d_memref(%0: memref<16x32xi32>, %1: memref<16x32xi32>) {
%c0 = memref.alloc() : memref<32xi32>
iree_linalg_ext.scan
{__internal_linalg_transform__ = "outer_reduce_input"}
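One recurring detail in this file: `__internal_linalg_transform__` is a test-only filter attribute, not part of the op semantics. The --iree-linalg-ext-tile pass only rewrites ops whose marker string matches the strategy a given pattern expects ("inner_reduce_input", "outer_reduce_input", "distribute_input", ...), which is how a single file exercises several tiling strategies side by side. A sketch of a marked op, with the region body reconstructed along the lines of the sort cases above:

%0 = iree_linalg_ext.sort
    {__internal_linalg_transform__ = "outer_reduce_input"}
    dimension(0)
    outs(%arg0 : tensor<?xi32>) {
^bb0(%lhs: i32, %rhs: i32):
  %cmp = arith.cmpi sgt, %lhs, %rhs : i32
  iree_linalg_ext.yield %cmp : i1
} -> tensor<?xi32>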
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/bufferize.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/bufferize.mlir
index ef68c11..354f435 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/bufferize.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/bufferize.mlir
@@ -5,7 +5,7 @@
// CHECK-SAME: %[[TB:[0-9a-z]+]]: memref<128x128xf32
// CHECK-SAME: %[[TC:[0-9a-z]+]]: memref<128x128xf32
// CHECK-NOT: -> tensor
-func @matmul_tensors(
+func.func @matmul_tensors(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32> { linalg.inplaceable = true})
-> tensor<128x128xf32> {
// CHECK: linalg.matmul ins(%[[TA]], %[[TB]] : memref{{.*}}, memref{{.*}} outs(%[[TC]] : memref{{.*}})
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/double-tiling.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/double-tiling.mlir
index 87a8108..b6d2a90 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/double-tiling.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/double-tiling.mlir
@@ -3,7 +3,7 @@
// This test verifies that a non-trivial double-tiling+padding+vectorization transformation completes successfully.
// CHECK-LABEL: func @matmul_tensors(
-func @matmul_tensors(
+func.func @matmul_tensors(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32> { linalg.inplaceable = true})
-> tensor<128x128xf32> {
// Pack transposed padding of 1st operand.
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/drop-schedule.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/drop-schedule.mlir
index 68aa510..42e0492 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/drop-schedule.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/drop-schedule.mlir
@@ -1,6 +1,6 @@
// RUN: iree-dialects-opt --linalg-drop-schedule %s | FileCheck %s
-func @matmul_tensors(
+func.func @matmul_tensors(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32> { linalg.inplaceable = true})
-> tensor<128x128xf32> {
%0 = linalg.matmul ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>)
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/expert.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/expert.mlir
index 6b58600..143ea42 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/expert.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/expert.mlir
@@ -4,7 +4,7 @@
// CHECK-LABEL: func @matmul_tensors
// CHECK-NOT: linalg
// CHECK: llvm
-func @matmul_tensors(
+func.func @matmul_tensors(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32> { linalg.inplaceable = true})
-> tensor<128x128xf32> {
%0 = linalg.matmul ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>)
@@ -87,7 +87,7 @@
// CHECK-LABEL: func @matmul_tensors2
// CHECK-NOT: linalg
// CHECK: llvm
-func @matmul_tensors2(
+func.func @matmul_tensors2(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32> { linalg.inplaceable = true})
-> tensor<128x128xf32> {
%0 = linalg.matmul ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>)
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/failure.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/failure.mlir
index 9fb43f1..e0ed84f 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/failure.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/failure.mlir
@@ -2,7 +2,7 @@
// This cannot be vectorized because of dynamic tensor shapes. We expect the
// pass to fail and report an error at the vectorization operation below.
-func public @non_vectorizable(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
+func.func public @non_vectorizable(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
%0 = linalg.generic {
indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>],
iterator_types = ["parallel"]}
@@ -29,7 +29,7 @@
// -----
-func public @no_loop(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
+func.func public @no_loop(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
%0 = linalg.generic {
indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>],
iterator_types = ["parallel"]}
@@ -57,7 +57,7 @@
// -----
-func private @prevent_dce()
+func.func private @prevent_dce()
pdl.pattern @something : benefit(1) {
%0 = operands
@@ -65,7 +65,7 @@
rewrite %2 with "iree_linalg_transform.apply"
}
-func public @loop(%lb: index, %ub: index, %step: index) {
+func.func public @loop(%lb: index, %ub: index, %step: index) {
scf.for %i = %lb to %ub step %step {
call @prevent_dce() : () -> ()
}
@@ -83,7 +83,7 @@
// -----
-func public @no_outlining() {
+func.func public @no_outlining() {
"some.operation"() ({}, {}) : () -> ()
return
}
@@ -102,7 +102,7 @@
// -----
-func @no_replacement(
+func.func @no_replacement(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>,
%arg2: tensor<128x128xf32> {linalg.inplaceable = true})
-> tensor<128x128xf32> {
@@ -133,7 +133,7 @@
// -----
-func @repeated_match(
+func.func @repeated_match(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>,
%arg2: tensor<128x128xf32> {linalg.inplaceable = true})
-> tensor<128x128xf32> {
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/fuse-and-peel.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/fuse-and-peel.mlir
index 4df2200..0cae462 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/fuse-and-peel.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/fuse-and-peel.mlir
@@ -1,7 +1,7 @@
// RUN: iree-dialects-opt --linalg-interp-transforms %s | FileCheck %s
// CHECK-LABEL: func @fuse_unary
-func @fuse_unary(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
+func.func @fuse_unary(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
// CHECK: scf.for
// CHECK: scf.for
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/fuse.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/fuse.mlir
index 1ea8d3c..548d5b5 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/fuse.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/fuse.mlir
@@ -2,7 +2,7 @@
// CHECK-LABEL: func @fuse_unary
-func @fuse_unary(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
+func.func @fuse_unary(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
// CHECK: scf.for
// CHECK: scf.for
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/generalize.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/generalize.mlir
index 52740b5..b22c2f3 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/generalize.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/generalize.mlir
@@ -2,7 +2,7 @@
// CHECK-LABEL: func @generalize_unary
-func @generalize_unary(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
+func.func @generalize_unary(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
// CHECK-NOT: linalg.elemwise_unary
// CHECK: linalg.generic
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/interchange.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/interchange.mlir
index ace31a6..1b2c424 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/interchange.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/interchange.mlir
@@ -3,7 +3,7 @@
// CHECK: #[[$MAP:.*]] = affine_map<(d0, d1) -> (d1, d0)>
// CHECK-LABEL: func @interchange_generic
-func @interchange_generic(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
+func.func @interchange_generic(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
// CHECK: linalg.generic
// CHECK-SAME: indexing_maps = [#[[$MAP]], #[[$MAP]]
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/pad.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/pad.mlir
index b6a00e6..a2b0849 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/pad.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/pad.mlir
@@ -3,7 +3,7 @@
#map = affine_map<()[s0] -> (-s0 + 12, 5)>
// CHECK-LABEL: func @pad_unary
-func @pad_unary(%arg0: tensor<24x12xf32>,
+func.func @pad_unary(%arg0: tensor<24x12xf32>,
%arg1: tensor<24x12xf32>) -> tensor<24x12xf32> {
// CHECK: %[[C0:.*]] = arith.constant 0 : index
%c0 = arith.constant 0 : index
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/peel.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/peel.mlir
index d9853ef..638fab0 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/peel.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/peel.mlir
@@ -22,7 +22,7 @@
// CHECK: }
// CHECK: return %[[RESULT]]
#map = affine_map<(d0, d1)[s0] -> (s0, d0 - d1)>
-func @fully_dynamic_bounds(%lb : index, %ub: index, %step: index) -> i32 {
+func.func @fully_dynamic_bounds(%lb : index, %ub: index, %step: index) -> i32 {
%c0 = arith.constant 0 : i32
%r = scf.for %iv = %lb to %ub step %step iter_args(%arg = %c0) -> i32 {
%s = affine.min #map(%ub, %iv)[%step]
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/scalarize.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/scalarize.mlir
index bf607ec..9c039fc 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/scalarize.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/scalarize.mlir
@@ -1,6 +1,6 @@
// RUN: iree-dialects-opt --linalg-interp-transforms %s | FileCheck %s
-func @fun_to_benchmark(%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32>) ->
+func.func @fun_to_benchmark(%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32>) ->
tensor<128x128xf32> attributes {passthrough = ["noinline", ["target-cpu", "skylake-avx512"], ["prefer-vector-width", "512"]]} {
// With scalarization we expect vectorization to still work albeit with a leading
// `1` dimension.
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/scoped.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/scoped.mlir
index f2f837e..7e66f09 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/scoped.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/scoped.mlir
@@ -3,7 +3,7 @@
// WRAP-LABEL: @test_wrap
// WRAP-SAME: (%[[ARG0:.*]]: i32) -> i32
-func @test_wrap(%arg0: i32) -> i32 {
+func.func @test_wrap(%arg0: i32) -> i32 {
// WRAP: %[[V:.*]] = iree_linalg_transform.util.scope(%[[ARG0]], %[[ARG0]]) {
// WRAP-NEXT: ^[[B:.*]](%[[ARG1:.*]]: i32, %[[ARG2:.*]]: i32):
// WRAP-NEXT: %[[ADD:.*]] = arith.addi %[[ARG2]], %[[ARG2]]
@@ -16,7 +16,7 @@
// UNWRAP-LABEL: @test_unwrap
// UNWRAP-SAME: (%[[ARG0:.*]]: i32) -> (i32, i32)
-func @test_unwrap(%arg0: i32) -> (i32, i32) {
+func.func @test_unwrap(%arg0: i32) -> (i32, i32) {
// UNWRAP: %[[V0:.*]] = arith.addi %[[ARG0]], %[[ARG0]]
// UNWRAP-NEXT: %[[V1:.*]] = arith.addi %[[V0]], %[[ARG0]]
%0:2 = iree_linalg_transform.util.scope(%arg0) {
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/selective-targeting.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/selective-targeting.mlir
index 3435b53..531ef88 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/selective-targeting.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/selective-targeting.mlir
@@ -1,7 +1,7 @@
// RUN: iree-dialects-opt %s --linalg-interp-transforms --split-input-file | FileCheck %s
// CHECK-LABEL: func @matmul_tensors(
-func @matmul_tensors(
+func.func @matmul_tensors(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32>,
%arg3: tensor<128x128xf32>, %arg4: tensor<128x128xf32>, %arg5: tensor<128x128xf32>,
%arg6: tensor<128x128xf32> {linalg.inplaceable = true})
@@ -80,7 +80,7 @@
// -----
// CHECK-LABEL: @vectorize_one
-func @vectorize_one(
+func.func @vectorize_one(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32>,
%arg3: tensor<128x128xf32> {linalg.inplaceable = true})
-> tensor<128x128xf32> {
@@ -116,7 +116,7 @@
// -----
// CHECK-LABEL: @vectorize_all
-func @vectorize_all(
+func.func @vectorize_all(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32>,
%arg3: tensor<128x128xf32> {linalg.inplaceable = true})
-> tensor<128x128xf32> {
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/single-tiling-full-script.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/single-tiling-full-script.mlir
index 7c70cd4..157e6b3 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/single-tiling-full-script.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/single-tiling-full-script.mlir
@@ -3,7 +3,7 @@
// CHECK-LABEL: func @matmul_tensors
// CHECK-NOT: linalg
// CHECK: llvm
-func @matmul_tensors(
+func.func @matmul_tensors(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32> { linalg.inplaceable = true})
-> tensor<128x128xf32> {
%0 = linalg.matmul ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>)
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/tile-and-peel.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/tile-and-peel.mlir
index 009c85b..5429c60 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/tile-and-peel.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/tile-and-peel.mlir
@@ -1,7 +1,7 @@
// RUN: iree-dialects-opt --linalg-interp-transforms %s | FileCheck %s
// CHECK-LABEL: func @matmul_tensors(
-func @matmul_tensors(
+func.func @matmul_tensors(
%arg0: tensor<126x127xf32>, %arg1: tensor<127x128xf32>, %arg2: tensor<126x128xf32> { linalg.inplaceable = true})
-> tensor<126x128xf32> {
// CHECK-DAG: %[[c124:.*]] = arith.constant 124 : index
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/tile-interchange.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/tile-interchange.mlir
index 7403487..c5288fd 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/tile-interchange.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/tile-interchange.mlir
@@ -9,7 +9,7 @@
// CHECK-LABEL: @matmul_021
// CHECK-NOT: linalg.generic
// CHECK: vector.contract
-func public @matmul_021(%arg0: tensor<39x154xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg1: tensor<154x5xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg2: tensor<39x5xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true}) -> tensor<39x5xf32> attributes {passthrough = ["noinline", ["target-cpu", "skylake-avx512"], ["prefer-vector-width", "512"]]} {
+func.func public @matmul_021(%arg0: tensor<39x154xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg1: tensor<154x5xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg2: tensor<39x5xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true}) -> tensor<39x5xf32> attributes {passthrough = ["noinline", ["target-cpu", "skylake-avx512"], ["prefer-vector-width", "512"]]} {
%0 = linalg.generic {indexing_maps = [#map0, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%arg0, %arg1 : tensor<39x154xf32>, tensor<154x5xf32>) outs(%arg2 : tensor<39x5xf32>) {
^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
%1 = arith.mulf %arg3, %arg4 : f32
@@ -47,7 +47,7 @@
// CHECK-LABEL: @matmul_210
// CHECK-NOT: linalg.generic
// CHECK: vector.contract
-func public @matmul_210(%arg0: tensor<39x154xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg1: tensor<154x5xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg2: tensor<39x5xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true}) -> tensor<39x5xf32> attributes {passthrough = ["noinline", ["target-cpu", "skylake-avx512"], ["prefer-vector-width", "512"]]} {
+func.func public @matmul_210(%arg0: tensor<39x154xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg1: tensor<154x5xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg2: tensor<39x5xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true}) -> tensor<39x5xf32> attributes {passthrough = ["noinline", ["target-cpu", "skylake-avx512"], ["prefer-vector-width", "512"]]} {
%0 = linalg.generic {indexing_maps = [#map0, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%arg0, %arg1 : tensor<39x154xf32>, tensor<154x5xf32>) outs(%arg2 : tensor<39x5xf32>) {
^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
%1 = arith.mulf %arg3, %arg4 : f32
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/tile.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/tile.mlir
index 41f9db9..9c015df 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/tile.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/tile.mlir
@@ -5,7 +5,7 @@
// CHECK-SAME: %[[TB:[0-9a-z]+]]: tensor<128x128xf32>
// CHECK-SAME: %[[TC:[0-9a-z]+]]: tensor<128x128xf32>
// CHECK-SAME: -> tensor<128x128xf32> {
-func @matmul_tensors(
+func.func @matmul_tensors(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32> { linalg.inplaceable = true})
-> tensor<128x128xf32> {
// CHECK: %[[TD0:.*]] = scf.for {{.*}} to {{.*}} step {{.*}} iter_args(%[[TC0:.*]] = %[[TC]]) -> (tensor<128x128xf32>) {
diff --git a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/vectorize.mlir b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/vectorize.mlir
index f6fdde0..1e3dc22 100644
--- a/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/vectorize.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Dialect/linalg_transform/vectorize.mlir
@@ -5,7 +5,7 @@
// CHECK-SAME: %[[TB:[0-9a-z]+]]: tensor<128x128xf32>
// CHECK-SAME: %[[TC:[0-9a-z]+]]: tensor<128x128xf32>
// CHECK-SAME: -> tensor<128x128xf32> {
-func @matmul_tensors(
+func.func @matmul_tensors(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32> { linalg.inplaceable = true})
-> tensor<128x128xf32> {
// CHECK: %[[VA:.*]] = vector.transfer_read %[[TA]]
diff --git a/integrations/tensorflow/iree-dialects/test/Transforms/test-listener-canonicalize.mlir b/integrations/tensorflow/iree-dialects/test/Transforms/test-listener-canonicalize.mlir
index 9de08af..40d7cda 100644
--- a/integrations/tensorflow/iree-dialects/test/Transforms/test-listener-canonicalize.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Transforms/test-listener-canonicalize.mlir
@@ -6,7 +6,7 @@
// CHECK-LABEL: @select_same_val
// CHECK: return %arg1
-func @select_same_val(%arg0: i1, %arg1: i64) -> i64 {
+func.func @select_same_val(%arg0: i1, %arg1: i64) -> i64 {
%0 = arith.select %arg0, %arg1, %arg1 : i64
return %0 : i64
}
@@ -15,7 +15,7 @@
// CHECK-LABEL: @select_cmp_eq_select
// CHECK: return %arg1
-func @select_cmp_eq_select(%arg0: i64, %arg1: i64) -> i64 {
+func.func @select_cmp_eq_select(%arg0: i64, %arg1: i64) -> i64 {
%0 = arith.cmpi eq, %arg0, %arg1 : i64
%1 = arith.select %0, %arg0, %arg1 : i64
return %1 : i64
@@ -25,7 +25,7 @@
// CHECK-LABEL: @select_cmp_ne_select
// CHECK: return %arg0
-func @select_cmp_ne_select(%arg0: i64, %arg1: i64) -> i64 {
+func.func @select_cmp_ne_select(%arg0: i64, %arg1: i64) -> i64 {
%0 = arith.cmpi ne, %arg0, %arg1 : i64
%1 = arith.select %0, %arg0, %arg1 : i64
return %1 : i64
@@ -36,7 +36,7 @@
// CHECK-LABEL: @select_extui
// CHECK: %[[res:.+]] = arith.extui %arg0 : i1 to i64
// CHECK: return %[[res]]
-func @select_extui(%arg0: i1) -> i64 {
+func.func @select_extui(%arg0: i1) -> i64 {
%c0_i64 = arith.constant 0 : i64
%c1_i64 = arith.constant 1 : i64
%res = arith.select %arg0, %c1_i64, %c0_i64 : i64
@@ -48,7 +48,7 @@
// CHECK-DAG: %[[xor:.+]] = arith.xori %arg0, %true : i1
// CHECK-DAG: %[[res:.+]] = arith.extui %[[xor]] : i1 to i64
// CHECK: return %[[res]]
-func @select_extui2(%arg0: i1) -> i64 {
+func.func @select_extui2(%arg0: i1) -> i64 {
%c0_i64 = arith.constant 0 : i64
%c1_i64 = arith.constant 1 : i64
%res = arith.select %arg0, %c0_i64, %c1_i64 : i64
@@ -59,7 +59,7 @@
// CHECK-LABEL: @select_extui_i1
// CHECK-NEXT: return %arg0
-func @select_extui_i1(%arg0: i1) -> i1 {
+func.func @select_extui_i1(%arg0: i1) -> i1 {
%c0_i1 = arith.constant false
%c1_i1 = arith.constant true
%res = arith.select %arg0, %c1_i1, %c0_i1 : i1
@@ -73,7 +73,7 @@
// CHECK: %[[falseval:.+]] = arith.constant false
// CHECK: "test.consumer1"(%[[trueval]]) : (i1) -> ()
// CHECK: "test.consumer2"(%[[falseval]]) : (i1) -> ()
-func @branchCondProp(%arg0: i1) {
+func.func @branchCondProp(%arg0: i1) {
cf.cond_br %arg0, ^trueB, ^falseB
^trueB:
@@ -93,7 +93,7 @@
// CHECK-LABEL: @selToNot
// CHECK: %[[trueval:.+]] = arith.constant true
// CHECK: %{{.+}} = arith.xori %arg0, %[[trueval]] : i1
-func @selToNot(%arg0: i1) -> i1 {
+func.func @selToNot(%arg0: i1) -> i1 {
%true = arith.constant true
%false = arith.constant false
%res = arith.select %arg0, %false, %true : i1
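
The listener tests above exercise standard arith canonicalizations while checking that the rewrite listener reports each replacement and removal. One representative pattern, sketched on a hypothetical function that is not part of the patch: a select whose two branches are the same value folds to that value.

func.func @fold_select(%cond: i1, %x: i64) -> i64 {
  // --canonicalize replaces this select with plain %x,
  // leaving the body as just: return %x : i64
  %0 = arith.select %cond, %x, %x : i64
  return %0 : i64
}
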
diff --git a/integrations/tensorflow/iree-dialects/test/Transforms/test-listener-cse.mlir b/integrations/tensorflow/iree-dialects/test/Transforms/test-listener-cse.mlir
index 434add5..44b7d90 100644
--- a/integrations/tensorflow/iree-dialects/test/Transforms/test-listener-cse.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Transforms/test-listener-cse.mlir
@@ -8,7 +8,7 @@
#map0 = affine_map<(d0) -> (d0 mod 2)>
// CHECK-LABEL: @simple_constant
-func @simple_constant() -> (i32, i32) {
+func.func @simple_constant() -> (i32, i32) {
// CHECK-NEXT: %c1_i32 = arith.constant 1 : i32
%0 = arith.constant 1 : i32
@@ -18,7 +18,7 @@
}
// CHECK-LABEL: @basic
-func @basic() -> (index, index) {
+func.func @basic() -> (index, index) {
// CHECK: %c0 = arith.constant 0 : index
%c0 = arith.constant 0 : index
%c1 = arith.constant 0 : index
@@ -32,7 +32,7 @@
}
// CHECK-LABEL: @many
-func @many(f32, f32) -> (f32) {
+func.func @many(f32, f32) -> (f32) {
^bb0(%a : f32, %b : f32):
// CHECK-NEXT: %0 = arith.addf %arg0, %arg1 : f32
%c = arith.addf %a, %b : f32
@@ -58,7 +58,7 @@
/// Check that operations are not eliminated if they have different operands.
// CHECK-LABEL: @different_ops
-func @different_ops() -> (i32, i32) {
+func.func @different_ops() -> (i32, i32) {
// CHECK: %c0_i32 = arith.constant 0 : i32
// CHECK: %c1_i32 = arith.constant 1 : i32
%0 = arith.constant 0 : i32
@@ -71,7 +71,7 @@
/// Check that operations are not eliminated if they have different result
/// types.
// CHECK-LABEL: @different_results
-func @different_results(%arg0: tensor<*xf32>) -> (tensor<?x?xf32>, tensor<4x?xf32>) {
+func.func @different_results(%arg0: tensor<*xf32>) -> (tensor<?x?xf32>, tensor<4x?xf32>) {
// CHECK: %0 = tensor.cast %arg0 : tensor<*xf32> to tensor<?x?xf32>
// CHECK-NEXT: %1 = tensor.cast %arg0 : tensor<*xf32> to tensor<4x?xf32>
%0 = tensor.cast %arg0 : tensor<*xf32> to tensor<?x?xf32>
@@ -83,7 +83,7 @@
/// Check that operations are not eliminated if they have different attributes.
// CHECK-LABEL: @different_attributes
-func @different_attributes(index, index) -> (i1, i1, i1) {
+func.func @different_attributes(index, index) -> (i1, i1, i1) {
^bb0(%a : index, %b : index):
// CHECK: %0 = arith.cmpi slt, %arg0, %arg1 : index
%0 = arith.cmpi slt, %a, %b : index
@@ -99,7 +99,7 @@
/// Check that operations with side effects are not eliminated.
// CHECK-LABEL: @side_effect
-func @side_effect() -> (memref<2x1xf32>, memref<2x1xf32>) {
+func.func @side_effect() -> (memref<2x1xf32>, memref<2x1xf32>) {
// CHECK: %0 = memref.alloc() : memref<2x1xf32>
%0 = memref.alloc() : memref<2x1xf32>
@@ -113,7 +113,7 @@
/// Check that operation definitions are properly propagated down the dominance
/// tree.
// CHECK-LABEL: @down_propagate_for
-func @down_propagate_for() {
+func.func @down_propagate_for() {
// CHECK: %c1_i32 = arith.constant 1 : i32
%0 = arith.constant 1 : i32
@@ -127,7 +127,7 @@
}
// CHECK-LABEL: @down_propagate
-func @down_propagate() -> i32 {
+func.func @down_propagate() -> i32 {
// CHECK-NEXT: %c1_i32 = arith.constant 1 : i32
%0 = arith.constant 1 : i32
@@ -148,7 +148,7 @@
/// Check that operation definitions are NOT propagated up the dominance tree.
// CHECK-LABEL: @up_propagate_for
-func @up_propagate_for() -> i32 {
+func.func @up_propagate_for() -> i32 {
// CHECK: affine.for {{.*}} = 0 to 4 {
affine.for %i = 0 to 4 {
// CHECK-NEXT: %c1_i32_0 = arith.constant 1 : i32
@@ -164,7 +164,7 @@
}
// CHECK-LABEL: func @up_propagate
-func @up_propagate() -> i32 {
+func.func @up_propagate() -> i32 {
// CHECK-NEXT: %c0_i32 = arith.constant 0 : i32
%0 = arith.constant 0 : i32
@@ -195,7 +195,7 @@
/// The same test as above except that we are testing on a cfg embedded within
/// an operation region.
// CHECK-LABEL: func @up_propagate_region
-func @up_propagate_region() -> i32 {
+func.func @up_propagate_region() -> i32 {
// CHECK-NEXT: %0 = "foo.region"
%0 = "foo.region"() ({
// CHECK-NEXT: %c0_i32 = arith.constant 0 : i32
@@ -228,7 +228,7 @@
/// This test checks that nested regions that are isolated from above are
/// properly handled.
// CHECK-LABEL: @nested_isolated
-func @nested_isolated() -> i32 {
+func.func @nested_isolated() -> i32 {
// CHECK-NEXT: arith.constant 1
%0 = arith.constant 1 : i32
diff --git a/integrations/tensorflow/iree-dialects/test/Transforms/test-with-listener.mlir b/integrations/tensorflow/iree-dialects/test/Transforms/test-with-listener.mlir
index f8dcc30..16ba1fe 100644
--- a/integrations/tensorflow/iree-dialects/test/Transforms/test-with-listener.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Transforms/test-with-listener.mlir
@@ -1,7 +1,7 @@
// RUN: iree-dialects-opt --test-listener-canonicalize='listener=1' %s | FileCheck %s --check-prefix CANON
// RUN: iree-dialects-opt --test-listener-cse='listener=1' %s | FileCheck %s --check-prefix CSE
-func @test_canonicalize(%arg0: i32) -> (i32, i32) {
+func.func @test_canonicalize(%arg0: i32) -> (i32, i32) {
// CANON: REPLACED arith.addi
// CANON: REMOVED arith.addi
%c5 = arith.constant -5 : i32
@@ -10,7 +10,7 @@
return %0, %1 : i32, i32
}
-func @test_cse(%arg0: i32) -> (i32, i32) {
+func.func @test_cse(%arg0: i32) -> (i32, i32) {
// CSE: REPLACED arith.addi
// CSE: REMOVED arith.addi
%c5 = arith.constant -5 : i32
diff --git a/integrations/tensorflow/iree_tf_compiler/MHLO/test/emit_default_iree_abi.mlir b/integrations/tensorflow/iree_tf_compiler/MHLO/test/emit_default_iree_abi.mlir
index cf4624c..b8a6db8 100644
--- a/integrations/tensorflow/iree_tf_compiler/MHLO/test/emit_default_iree_abi.mlir
+++ b/integrations/tensorflow/iree_tf_compiler/MHLO/test/emit_default_iree_abi.mlir
@@ -1,15 +1,15 @@
// RUN: iree-tf-opt %s --iree-mhlo-emit-default-iree-abi --split-input-file --verify-diagnostics | FileCheck %s
-// CHECK-LABEL: func @valid
+// CHECK-LABEL: func.func @valid
// CHECK-SAME{LITERAL}: iree.abi = "{\22a\22:[[\22ndarray\22,\22f32\22,2,2,3],[\22ndarray\22,\22f32\22,1,3]],\22r\22:[[\22ndarray\22,\22f32\22,1,3],[\22ndarray\22,\22f32\22,2,2,3]],\22v\22:1}"
-func @valid(%arg0: tensor<2x3xf32>, %arg1: tensor<3xf32>) -> (tensor<3xf32>, tensor<2x3xf32>) {
+func.func @valid(%arg0: tensor<2x3xf32>, %arg1: tensor<3xf32>) -> (tensor<3xf32>, tensor<2x3xf32>) {
return %arg1, %arg0 : tensor<3xf32>, tensor<2x3xf32>
}
// -----
-// CHECK-LABEL: func @tupled
+// CHECK-LABEL: func.func @tupled
// CHECK-SAME{LITERAL}: iree.abi = "{\22a\22:[[\22ndarray\22,\22f32\22,1,3],[\22ndarray\22,\22f32\22,2,2,3]],\22r\22:[[\22ndarray\22,\22f32\22,1,3],[\22ndarray\22,\22f32\22,2,2,3]],\22v\22:1}"
-func @tupled(%arg0: tuple<tensor<3xf32>, tensor<2x3xf32>>) -> tuple<tensor<3xf32>, tensor<2x3xf32>> {
+func.func @tupled(%arg0: tuple<tensor<3xf32>, tensor<2x3xf32>>) -> tuple<tensor<3xf32>, tensor<2x3xf32>> {
return %arg0 : tuple<tensor<3xf32>, tensor<2x3xf32>>
}
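
A reading aid for the CHECK-SAME{LITERAL} lines above: \22 is the MLIR string-attribute hex escape for a double quote, so the iree.abi attribute checked for @valid decodes to the following JSON (reconstructed from the escaped form above):

{"a":[["ndarray","f32",2,2,3],["ndarray","f32",1,3]],
 "r":[["ndarray","f32",1,3],["ndarray","f32",2,2,3]],
 "v":1}
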
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/test/convert_to_mhlo.mlir b/integrations/tensorflow/iree_tf_compiler/TF/test/convert_to_mhlo.mlir
index 611007d..14512a5 100644
--- a/integrations/tensorflow/iree_tf_compiler/TF/test/convert_to_mhlo.mlir
+++ b/integrations/tensorflow/iree_tf_compiler/TF/test/convert_to_mhlo.mlir
@@ -1,7 +1,7 @@
// RUN: iree-tf-opt --iree-tf-convert-to-mhlo --split-input-file %s | FileCheck %s
// CHECK-LABEL: @sigmoid
-func @sigmoid(%arg0: tensor<2xf32>) -> tensor<2xf32> {
+func.func @sigmoid(%arg0: tensor<2xf32>) -> tensor<2xf32> {
// CHECK-DAG: [[SCALAR:%.+]] = mhlo.constant dense<5.000000e-01> : tensor<f32>
// CHECK-DAG: [[SHAPE_OF:%.+]] = shape.shape_of %arg0 : tensor<2xf32> -> tensor<1xindex>
// CHECK-DAG: [[SHAPE_VAL:%.+]] = shape.to_extent_tensor [[SHAPE_OF]] : tensor<1xindex> -> tensor<1xindex>
@@ -15,7 +15,7 @@
}
// CHECK-LABEL: @sigmoid_complex
-func @sigmoid_complex(%arg0: tensor<2xcomplex<f32>>) -> tensor<2xcomplex<f32>> {
+func.func @sigmoid_complex(%arg0: tensor<2xcomplex<f32>>) -> tensor<2xcomplex<f32>> {
// CHECK: [[R0:%.+]] = mhlo.constant dense<(5.000000e-01,0.000000e+00)> : tensor<complex<f32>>
// CHECK-NOT: tf.Sigmoid
%0 = "tf.Sigmoid"(%arg0) : (tensor<2xcomplex<f32>>) -> tensor<2xcomplex<f32>>
@@ -23,7 +23,7 @@
}
// CHECK-LABEL: @sigmoid_unranked
-func @sigmoid_unranked(%arg0: tensor<*xf32>) -> tensor<*xf32> {
+func.func @sigmoid_unranked(%arg0: tensor<*xf32>) -> tensor<*xf32> {
// CHECK-DAG: [[SCALAR:%.+]] = mhlo.constant dense<5.000000e-01> : tensor<f32>
// CHECK-DAG: [[SHAPE_OF:%.+]] = shape.shape_of %arg0 : tensor<*xf32> -> tensor<?xindex>
// CHECK-DAG: [[SHAPE_VAL:%.+]] = shape.to_extent_tensor [[SHAPE_OF]] : tensor<?xindex> -> tensor<?xindex>
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/test/direct_lowering.mlir b/integrations/tensorflow/iree_tf_compiler/TF/test/direct_lowering.mlir
index 02d1317..7252984 100644
--- a/integrations/tensorflow/iree_tf_compiler/TF/test/direct_lowering.mlir
+++ b/integrations/tensorflow/iree_tf_compiler/TF/test/direct_lowering.mlir
@@ -1,7 +1,7 @@
// RUN: iree-tf-opt --iree-tf-convert-to-mhlo --split-input-file %s | FileCheck %s
// CHECK-LABEL: @expand_dims
-func @expand_dims(%arg0: tensor<?x?x?xf32>) -> tensor<?x?x?x1xf32> {
+func.func @expand_dims(%arg0: tensor<?x?x?xf32>) -> tensor<?x?x?x1xf32> {
// CHECK: %[[R:.*]] = tensor.expand_shape %arg0 {{\[}}[0], [1], [2, 3]] : tensor<?x?x?xf32> into tensor<?x?x?x1xf32>
// CHECK: return %[[R]]
%axis = "tf.Const"() {value = dense<3> : tensor<i32>} : () -> (tensor<i32>)
@@ -13,7 +13,7 @@
// CHECK-LABEL: @expand_dims_mismatch
// Verifies that the fallback lowering to reshape is used if the static
// information in the shape does not match the requested expansion dim.
-func @expand_dims_mismatch(%arg0: tensor<?x?x?xf32>) -> tensor<?x?x?x?xf32> {
+func.func @expand_dims_mismatch(%arg0: tensor<?x?x?xf32>) -> tensor<?x?x?x?xf32> {
// CHECK: mhlo.dynamic_reshape
%axis = "tf.Const"() {value = dense<3> : tensor<i32>} : () -> (tensor<i32>)
%0 = "tf.ExpandDims"(%arg0, %axis) : (tensor<?x?x?xf32>, tensor<i32>) -> (tensor<?x?x?x?xf32>)
@@ -22,7 +22,7 @@
// -----
// CHECK-LABEL: @squeeze
-func @squeeze(%arg0 : tensor<?x1x1x1001xf32>) -> tensor<?x1001xf32> {
+func.func @squeeze(%arg0 : tensor<?x1x1x1001xf32>) -> tensor<?x1001xf32> {
// CHECK: %[[R:.*]] = tensor.collapse_shape %arg0 {{\[}}[0], [1, 2, 3]] : tensor<?x1x1x1001xf32> into tensor<?x1001xf32>
// CHECK: return %[[R]]
%0 = "tf.Squeeze"(%arg0) {device = "", squeeze_dims = [1, 2]} : (tensor<?x1x1x1001xf32>) -> tensor<?x1001xf32>
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/test/lower_global_tensors.mlir b/integrations/tensorflow/iree_tf_compiler/TF/test/lower_global_tensors.mlir
index ea57818..49e2604 100644
--- a/integrations/tensorflow/iree_tf_compiler/TF/test/lower_global_tensors.mlir
+++ b/integrations/tensorflow/iree_tf_compiler/TF/test/lower_global_tensors.mlir
@@ -7,13 +7,13 @@
// When "type" is a partial type, flow will likely not model it correctly.
// CHECK: iree_input.global private mutable [[V:@.+]] : tensor<?xf32> = dense<1.000000e+00> : tensor<1xf32>
-// CHECK: func @f() -> (tensor<?xf32> {tf_saved_model.index_path = []})
+// CHECK: func.func @f() -> (tensor<?xf32> {tf_saved_model.index_path = []})
// CHECK-NEXT: [[PTR:%.+]] = iree_input.global.address [[V]] : !iree_input.ptr<tensor<?xf32>>
// CHECK-NEXT: [[T:%.+]] = iree_input.global.load.indirect [[PTR]] : !iree_input.ptr<tensor<?xf32>> -> tensor<?xf32>
// CHECK-NEXT: return [[T]] : tensor<?xf32>
"tf_saved_model.global_tensor"() { is_mutable, sym_name = "v", type = tensor<?xf32>, value = dense<1.> : tensor<1xf32> } : () -> ()
- func @f(%arg0: tensor<!tf_type.resource<tensor<?xf32>>> {tf_saved_model.bound_input = @v})
+ func.func @f(%arg0: tensor<!tf_type.resource<tensor<?xf32>>> {tf_saved_model.bound_input = @v})
-> (tensor<?xf32> {tf_saved_model.index_path = []})
attributes {tf_saved_model.exported_names = ["f"]} {
%0 = "tf.ReadVariableOp"(%arg0) : (tensor<!tf_type.resource<tensor<?xf32>>>) -> tensor<?xf32>
@@ -27,13 +27,13 @@
module attributes {tf_saved_model.semantics} {
// CHECK: iree_input.global private mutable [[V:@.+]] : tensor<?xf32> = dense<1.000000e+00> : tensor<1xf32>
-// CHECK: func @f(%arg0: tensor<?xf32> {tf_saved_model.index_path = [0]})
+// CHECK: func.func @f(%arg0: tensor<?xf32> {tf_saved_model.index_path = [0]})
// CHECK-NEXT: [[PTR:%.+]] = iree_input.global.address [[V]] : !iree_input.ptr<tensor<?xf32>>
// CHECK-NEXT: iree_input.global.store.indirect %arg0, [[PTR]] : tensor<?xf32> -> !iree_input.ptr<tensor<?xf32>>
// CHECK-NEXT: return
"tf_saved_model.global_tensor"() { is_mutable, sym_name = "v", type = tensor<?xf32>, value = dense<1.> : tensor<1xf32> } : () -> ()
- func @f(%arg0: tensor<?xf32> {tf_saved_model.index_path = [0]}, %arg1: tensor<!tf_type.resource<tensor<?xf32>>> {tf_saved_model.bound_input = @v})
+ func.func @f(%arg0: tensor<?xf32> {tf_saved_model.index_path = [0]}, %arg1: tensor<!tf_type.resource<tensor<?xf32>>> {tf_saved_model.bound_input = @v})
attributes {tf_saved_model.exported_names = ["f"]} {
"tf.AssignVariableOp"(%arg1, %arg0) : (tensor<!tf_type.resource<tensor<?xf32>>>, tensor<?xf32>) -> ()
return
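
Condensing what the CHECK lines above verify into a plain before/after pair (the pass derives the actual global symbol from the tf_saved_model.global_tensor, so @v is an assumption here):

// Before: the exported function takes the variable as a bound resource argument.
func.func @f(%arg0: tensor<!tf_type.resource<tensor<?xf32>>> {tf_saved_model.bound_input = @v})
    -> (tensor<?xf32> {tf_saved_model.index_path = []})
    attributes {tf_saved_model.exported_names = ["f"]} {
  %0 = "tf.ReadVariableOp"(%arg0) : (tensor<!tf_type.resource<tensor<?xf32>>>) -> tensor<?xf32>
  return %0 : tensor<?xf32>
}

// After: the resource argument is dropped and the body loads through a global address.
func.func @f() -> (tensor<?xf32> {tf_saved_model.index_path = []}) {
  %ptr = iree_input.global.address @v : !iree_input.ptr<tensor<?xf32>>
  %0 = iree_input.global.load.indirect %ptr : !iree_input.ptr<tensor<?xf32>> -> tensor<?xf32>
  return %0 : tensor<?xf32>
}
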
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/test/lower_global_tensors_complex.mlir b/integrations/tensorflow/iree_tf_compiler/TF/test/lower_global_tensors_complex.mlir
index d743cdf..ac204d0 100644
--- a/integrations/tensorflow/iree_tf_compiler/TF/test/lower_global_tensors_complex.mlir
+++ b/integrations/tensorflow/iree_tf_compiler/TF/test/lower_global_tensors_complex.mlir
@@ -6,7 +6,7 @@
module attributes {tf_saved_model.semantics} {
// CHECK: iree_input.global private mutable [[V:@.+]] : tensor<?xf32> = dense<1.000000e+00> : tensor<1xf32>
-// CHECK: func @f(%arg0: tensor<?xf32> {tf_saved_model.index_path = [0]}) attributes {tf_saved_model.exported_names = ["f"]} {
+// CHECK: func.func @f(%arg0: tensor<?xf32> {tf_saved_model.index_path = [0]}) attributes {tf_saved_model.exported_names = ["f"]} {
// CHECK-NEXT: [[PTR:%.+]] = iree_input.global.address [[V]] : !iree_input.ptr<tensor<?xf32>>
// CHECK-NEXT: cf.br ^bb1([[PTR]] : !iree_input.ptr<tensor<?xf32>>)
// CHECK-NEXT: ^bb1([[PTR1:%.+]]: !iree_input.ptr<tensor<?xf32>>): // pred: ^bb0
@@ -15,7 +15,7 @@
"tf_saved_model.global_tensor"() { is_mutable, sym_name = "v", type = tensor<?xf32>, value = dense<1.> : tensor<1xf32> } : () -> ()
- func @f(%arg0: tensor<?xf32> {tf_saved_model.index_path = [0]}, %arg1: tensor<!tf_type.resource<tensor<?xf32>>> {tf_saved_model.bound_input = @v}) attributes {tf_saved_model.exported_names = ["f"]} {
+ func.func @f(%arg0: tensor<?xf32> {tf_saved_model.index_path = [0]}, %arg1: tensor<!tf_type.resource<tensor<?xf32>>> {tf_saved_model.bound_input = @v}) attributes {tf_saved_model.exported_names = ["f"]} {
cf.br ^bb1(%arg1 : tensor<!tf_type.resource<tensor<?xf32>>>)
^bb1(%r: tensor<!tf_type.resource<tensor<?xf32>>>):
"tf.AssignVariableOp"(%r, %arg0) : (tensor<!tf_type.resource<tensor<?xf32>>>, tensor<?xf32>) -> ()
@@ -30,7 +30,7 @@
// CHECK: iree_input.global private mutable [[V:@.+]] : tensor<?xf32> = dense<1.000000e+00> : tensor<1xf32>
// CHECK: iree_input.global private mutable [[V1:@.+]] : tensor<?xf32> = dense<1.000000e+00> : tensor<1xf32>
-// CHECK: func @f(%arg0: tensor<?xf32> {tf_saved_model.index_path = [0]}) -> (tensor<?xf32> {tf_saved_model.index_path = [0]}) attributes {tf_saved_model.exported_names = ["f"]} {
+// CHECK: func.func @f(%arg0: tensor<?xf32> {tf_saved_model.index_path = [0]}) -> (tensor<?xf32> {tf_saved_model.index_path = [0]}) attributes {tf_saved_model.exported_names = ["f"]} {
// CHECK-NEXT: [[PTR0:%.+]] = iree_input.global.address [[V]] : !iree_input.ptr<tensor<?xf32>>
// CHECK-NEXT: [[PTR1:%.+]] = iree_input.global.address [[V1]] : !iree_input.ptr<tensor<?xf32>>
// CHECK-NEXT: %[[FALSE:.+]] = arith.constant false
@@ -41,7 +41,7 @@
"tf_saved_model.global_tensor"() { is_mutable, sym_name = "v", type = tensor<?xf32>, value = dense<1.> : tensor<1xf32> } : () -> ()
"tf_saved_model.global_tensor"() { is_mutable, sym_name = "v1", type = tensor<?xf32>, value = dense<1.> : tensor<1xf32> } : () -> ()
- func @f(%arg0: tensor<?xf32> {tf_saved_model.index_path = [0]}, %v: tensor<!tf_type.resource<tensor<?xf32>>> {tf_saved_model.bound_input = @v}, %v1: tensor<!tf_type.resource<tensor<?xf32>>> {tf_saved_model.bound_input = @v1}) -> (tensor<?xf32> {tf_saved_model.index_path = [0]}) attributes {tf_saved_model.exported_names = ["f"]} {
+ func.func @f(%arg0: tensor<?xf32> {tf_saved_model.index_path = [0]}, %v: tensor<!tf_type.resource<tensor<?xf32>>> {tf_saved_model.bound_input = @v}, %v1: tensor<!tf_type.resource<tensor<?xf32>>> {tf_saved_model.bound_input = @v1}) -> (tensor<?xf32> {tf_saved_model.index_path = [0]}) attributes {tf_saved_model.exported_names = ["f"]} {
%pred = arith.constant false
cf.cond_br %pred, ^bb1(%v : tensor<!tf_type.resource<tensor<?xf32>>>), ^bb1(%v1 : tensor<!tf_type.resource<tensor<?xf32>>>)
^bb1(%either: tensor<!tf_type.resource<tensor<?xf32>>>):
@@ -56,7 +56,7 @@
module attributes {tf_saved_model.semantics} {
// CHECK: iree_input.global private mutable [[V:@.+]] : tensor<?xf32> = dense<1.000000e+00> : tensor<1xf32>
-// CHECK: func @f(%arg0: tensor<?xf32> {tf_saved_model.index_path = [0]}) attributes {tf_saved_model.exported_names = ["f"]} {
+// CHECK: func.func @f(%arg0: tensor<?xf32> {tf_saved_model.index_path = [0]}) attributes {tf_saved_model.exported_names = ["f"]} {
// CHECK-NEXT: [[PTR:%.+]] = iree_input.global.address [[V]] : !iree_input.ptr<tensor<?xf32>>
// CHECK-NEXT: cf.br ^bb1([[PTR]], [[PTR]], [[PTR]] : !iree_input.ptr<tensor<?xf32>>, !iree_input.ptr<tensor<?xf32>>, !iree_input.ptr<tensor<?xf32>>)
// CHECK-NEXT: ^bb1([[PTR0:%.+]]: !iree_input.ptr<tensor<?xf32>>, [[PTR1:%.+]]: !iree_input.ptr<tensor<?xf32>>, [[PTR2:%.+]]: !iree_input.ptr<tensor<?xf32>>): // 2 preds: ^bb0, ^bb1
@@ -64,7 +64,7 @@
// CHECK-NEXT: cf.br ^bb1([[PTR1]], [[PTR2]], [[PTR0]] : !iree_input.ptr<tensor<?xf32>>, !iree_input.ptr<tensor<?xf32>>, !iree_input.ptr<tensor<?xf32>>)
"tf_saved_model.global_tensor"() { is_mutable, sym_name = "v", type = tensor<?xf32>, value = dense<1.> : tensor<1xf32> } : () -> ()
- func @f(%arg0: tensor<?xf32> {tf_saved_model.index_path = [0]}, %arg1: tensor<!tf_type.resource<tensor<?xf32>>> {tf_saved_model.bound_input = @v}) attributes {tf_saved_model.exported_names = ["f"]} {
+ func.func @f(%arg0: tensor<?xf32> {tf_saved_model.index_path = [0]}, %arg1: tensor<!tf_type.resource<tensor<?xf32>>> {tf_saved_model.bound_input = @v}) attributes {tf_saved_model.exported_names = ["f"]} {
cf.br ^bb1(%arg1, %arg1, %arg1 : tensor<!tf_type.resource<tensor<?xf32>>>, tensor<!tf_type.resource<tensor<?xf32>>>, tensor<!tf_type.resource<tensor<?xf32>>>)
^bb1(%0: tensor<!tf_type.resource<tensor<?xf32>>>, %1: tensor<!tf_type.resource<tensor<?xf32>>>, %2: tensor<!tf_type.resource<tensor<?xf32>>>):
"tf.AssignVariableOp"(%0, %arg0) : (tensor<!tf_type.resource<tensor<?xf32>>>, tensor<?xf32>) -> ()
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/test/lower_global_tensors_invalid.mlir b/integrations/tensorflow/iree_tf_compiler/TF/test/lower_global_tensors_invalid.mlir
index a3374be..fbb283c 100644
--- a/integrations/tensorflow/iree_tf_compiler/TF/test/lower_global_tensors_invalid.mlir
+++ b/integrations/tensorflow/iree_tf_compiler/TF/test/lower_global_tensors_invalid.mlir
@@ -2,7 +2,7 @@
module attributes {tf_saved_model.semantics} {
"tf_saved_model.global_tensor"() { is_mutable, sym_name = "v", type = tensor<?xf32>, value = dense<1.> : tensor<1xf32> } : () -> ()
- func @f(%arg0: tensor<!tf_type.resource<tensor<?xf32>>> {tf_saved_model.bound_input = @v})
+ func.func @f(%arg0: tensor<!tf_type.resource<tensor<?xf32>>> {tf_saved_model.bound_input = @v})
attributes {tf_saved_model.exported_names = ["f"]} {
// expected-error@+1 {{could not lower resource op to flow: tf.SomeUnknownVariableOp}}
"tf.SomeUnknownVariableOp"(%arg0) : (tensor<!tf_type.resource<tensor<?xf32>>>) -> ()
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/test/propagate_resource_casts.mlir b/integrations/tensorflow/iree_tf_compiler/TF/test/propagate_resource_casts.mlir
index 6bfe8f5..2f8999f 100644
--- a/integrations/tensorflow/iree_tf_compiler/TF/test/propagate_resource_casts.mlir
+++ b/integrations/tensorflow/iree_tf_compiler/TF/test/propagate_resource_casts.mlir
@@ -1,7 +1,7 @@
// RUN: iree-tf-opt --split-input-file --verify-diagnostics --iree-tf-propagate-resource-casts %s | FileCheck %s
// CHECK-LABEL: @noop_cast
-func @noop_cast(%arg0: tensor<!tf_type.resource>) -> tensor<*xi16> {
+func.func @noop_cast(%arg0: tensor<!tf_type.resource>) -> tensor<*xi16> {
%0 = "tf.Cast"(%arg0) : (tensor<!tf_type.resource>) -> (tensor<!tf_type.resource>)
// CHECK: "tf.ReadVariableOp"(%arg0) : (tensor<!tf_type.resource>)
%1 = "tf.ReadVariableOp"(%0) : (tensor<!tf_type.resource>) -> tensor<*xi16>
@@ -11,7 +11,7 @@
// -----
// CHECK-LABEL: @simple_bypass
-func @simple_bypass(%arg0: tensor<!tf_type.resource<tensor<*xi16>>>) -> tensor<*xi16> {
+func.func @simple_bypass(%arg0: tensor<!tf_type.resource<tensor<*xi16>>>) -> tensor<*xi16> {
%0 = "tf.Cast"(%arg0) : (tensor<!tf_type.resource<tensor<*xi16>>>) -> (tensor<!tf_type.resource>)
// CHECK: "tf.ReadVariableOp"(%arg0) : (tensor<!tf_type.resource<tensor<*xi16>>>)
%1 = "tf.ReadVariableOp"(%0) : (tensor<!tf_type.resource>) -> tensor<*xi16>
@@ -21,7 +21,7 @@
// -----
// CHECK-LABEL: @simple_no_bypass
-func @simple_no_bypass(%arg0: tensor<!tf_type.resource>) -> tensor<*xi16> {
+func.func @simple_no_bypass(%arg0: tensor<!tf_type.resource>) -> tensor<*xi16> {
// CHECK: [[V:%.+]] = "tf.Cast"(%arg0)
%0 = "tf.Cast"(%arg0) : (tensor<!tf_type.resource>) -> (tensor<!tf_type.resource<tensor<*xi16>>>)
// CHECK: "tf.ReadVariableOp"([[V]]) : (tensor<!tf_type.resource<tensor<*xi16>>>)
@@ -32,7 +32,7 @@
// -----
// CHECK-LABEL: @dynamic_bypass
-func @dynamic_bypass(%arg0: tensor<!tf_type.resource<tensor<?xi16>>>) -> tensor<?xi16> {
+func.func @dynamic_bypass(%arg0: tensor<!tf_type.resource<tensor<?xi16>>>) -> tensor<?xi16> {
%0 = "tf.Cast"(%arg0) : (tensor<!tf_type.resource<tensor<?xi16>>>) -> (tensor<!tf_type.resource>)
// CHECK: "tf.ReadVariableOp"(%arg0) : (tensor<!tf_type.resource<tensor<?xi16>>>)
%1 = "tf.ReadVariableOp"(%0) : (tensor<!tf_type.resource>) -> tensor<?xi16>
@@ -42,7 +42,7 @@
// -----
// CHECK-LABEL: @dynamic_no_bypass
-func @dynamic_no_bypass(%arg0: tensor<!tf_type.resource>) -> tensor<?xi16> {
+func.func @dynamic_no_bypass(%arg0: tensor<!tf_type.resource>) -> tensor<?xi16> {
// CHECK: [[V:%.+]] = "tf.Cast"(%arg0)
%0 = "tf.Cast"(%arg0) : (tensor<!tf_type.resource>) -> (tensor<!tf_type.resource<tensor<?xi16>>>)
// CHECK: "tf.ReadVariableOp"([[V]]) : (tensor<!tf_type.resource<tensor<?xi16>>>)
@@ -53,7 +53,7 @@
// -----
// CHECK-LABEL: @static_bypass
-func @static_bypass(%arg0: tensor<!tf_type.resource<tensor<5xi16>>>) -> tensor<5xi16> {
+func.func @static_bypass(%arg0: tensor<!tf_type.resource<tensor<5xi16>>>) -> tensor<5xi16> {
%0 = "tf.Cast"(%arg0) : (tensor<!tf_type.resource<tensor<5xi16>>>) -> (tensor<!tf_type.resource<tensor<?xi16>>>)
// CHECK: "tf.ReadVariableOp"(%arg0) : (tensor<!tf_type.resource<tensor<5xi16>>>)
%1 = "tf.ReadVariableOp"(%0) : (tensor<!tf_type.resource<tensor<?xi16>>>) -> tensor<5xi16>
@@ -63,7 +63,7 @@
// -----
// CHECK-LABEL: @static_bypass_to_unranked
-func @static_bypass_to_unranked(%arg0: tensor<!tf_type.resource<tensor<5xi16>>>) -> tensor<*xi16> {
+func.func @static_bypass_to_unranked(%arg0: tensor<!tf_type.resource<tensor<5xi16>>>) -> tensor<*xi16> {
%0 = "tf.Cast"(%arg0) : (tensor<!tf_type.resource<tensor<5xi16>>>) -> (tensor<!tf_type.resource<tensor<*xi16>>>)
// CHECK: "tf.ReadVariableOp"(%arg0) : (tensor<!tf_type.resource<tensor<5xi16>>>)
%1 = "tf.ReadVariableOp"(%0) : (tensor<!tf_type.resource<tensor<*xi16>>>) -> tensor<*xi16>
@@ -73,7 +73,7 @@
// -----
// CHECK-LABEL: @static_no_bypass
-func @static_no_bypass(%arg0: tensor<!tf_type.resource<tensor<?xi16>>>) -> tensor<5xi16> {
+func.func @static_no_bypass(%arg0: tensor<!tf_type.resource<tensor<?xi16>>>) -> tensor<5xi16> {
// CHECK: [[V:%.+]] = "tf.Cast"(%arg0)
%0 = "tf.Cast"(%arg0) : (tensor<!tf_type.resource<tensor<?xi16>>>) -> (tensor<!tf_type.resource<tensor<5xi16>>>)
// CHECK: "tf.ReadVariableOp"([[V]]) : (tensor<!tf_type.resource<tensor<5xi16>>>)
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/test/saved_model_to_iree_abi.mlir b/integrations/tensorflow/iree_tf_compiler/TF/test/saved_model_to_iree_abi.mlir
index 0b109ea..8a9405f 100644
--- a/integrations/tensorflow/iree_tf_compiler/TF/test/saved_model_to_iree_abi.mlir
+++ b/integrations/tensorflow/iree_tf_compiler/TF/test/saved_model_to_iree_abi.mlir
@@ -2,7 +2,7 @@
// CHECK-LABEL: module @binary_func
// Should just be a pass through.
-// CHECK: func @binary_func
+// CHECK: func.func @binary_func
// CHECK-SAME{LITERAL}: iree.abi = "{\22a\22:[[\22ndarray\22,\22f32\22,1,16],[\22ndarray\22,\22f32\22,1,16]],\22r\22:[[\22stuple\22,[\22ndarray\22,\22f32\22,1,16],[\22ndarray\22,\22f32\22,1,16]]],\22v\22:1}"
// CHECK: %[[ARG0_TENSOR:.*]] = iree_input.cast.buffer_view_to_tensor %arg0 : !iree_input.buffer_view -> tensor<16xf32>
// CHECK: %[[ARG1_TENSOR:.*]] = iree_input.cast.buffer_view_to_tensor %arg1 : !iree_input.buffer_view -> tensor<16xf32>
@@ -10,7 +10,7 @@
// CHECK: %[[R0_BV:.*]] = iree_input.cast.tensor_to_buffer_view %[[R]]#0 : tensor<16xf32> -> !iree_input.buffer_view
// CHECK: %[[R1_BV:.*]] = iree_input.cast.tensor_to_buffer_view %[[R]]#1 : tensor<16xf32> -> !iree_input.buffer_view
// CHECK: return %[[R0_BV]], %[[R1_BV]] : !iree_input.buffer_view, !iree_input.buffer_view
-// CHECK: func private @__inference_binary_func_70
+// CHECK: func.func private @__inference_binary_func_70
// CHECK-NOT: tf_saved_model
builtin.module @binary_func attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 729 : i32}, tf_saved_model.semantics} {
func.func @__inference_binary_func_70(%arg0: tensor<16xf32> {tf._user_specified_name = "a", tf_saved_model.index_path = [0]}, %arg1: tensor<16xf32> {tf._user_specified_name = "b", tf_saved_model.index_path = [1]}) -> (tensor<16xf32> {tf_saved_model.index_path = [0]}, tensor<16xf32> {tf_saved_model.index_path = [1]}) attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<16>, #tf_type.shape<16>], tf_saved_model.exported_names = ["binary_func"]} {
@@ -22,13 +22,13 @@
// -----
// CHECK-LABEL: module @unary_func
-// CHECK: func @unary_func
+// CHECK: func.func @unary_func
// CHECK-SAME{LITERAL}: iree.abi = "{\22a\22:[[\22ndarray\22,\22f32\22,1,16]],\22r\22:[[\22ndarray\22,\22f32\22,1,16]],\22v\22:1}"
// CHECK: %[[ARG0_TENSOR:.*]] = iree_input.cast.buffer_view_to_tensor %arg0 : !iree_input.buffer_view -> tensor<16xf32>
// CHECK: %[[R:.*]] = call @__inference_unary_func_240(%[[ARG0_TENSOR]])
// CHECK: %[[R0_BV:.*]] = iree_input.cast.tensor_to_buffer_view %[[R]] : tensor<16xf32> -> !iree_input.buffer_view
// CHECK: return %[[R0_BV]] : !iree_input.buffer_view
-// CHECK: func private @__inference_unary_func_240
+// CHECK: func.func private @__inference_unary_func_240
// CHECK-NOT: tf_saved_model
builtin.module @unary_func attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 729 : i32}, tf_saved_model.semantics} {
func.func @__inference_unary_func_240(%arg0: tensor<16xf32> {tf._user_specified_name = "a", tf_saved_model.index_path = [0]}) -> (tensor<16xf32> {tf_saved_model.index_path = []}) attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<16>], tf_saved_model.exported_names = ["unary_func"]} {
@@ -39,7 +39,7 @@
// -----
// CHECK-LABEL: module @return_list
-// CHECK: func @return_list
+// CHECK: func.func @return_list
// CHECK-SAME{LITERAL}: iree.abi = "{\22a\22:[[\22ndarray\22,\22f32\22,1,16],[\22ndarray\22,\22f32\22,1,16]],\22r\22:[[\22stuple\22,[\22ndarray\22,\22f32\22,1,16],[\22ndarray\22,\22f32\22,1,16]]],\22v\22:1}"
// CHECK: %[[ARG0_TENSOR:.*]] = iree_input.cast.buffer_view_to_tensor %arg0 : !iree_input.buffer_view -> tensor<16xf32>
// CHECK: %[[ARG1_TENSOR:.*]] = iree_input.cast.buffer_view_to_tensor %arg1 : !iree_input.buffer_view -> tensor<16xf32>
@@ -47,7 +47,7 @@
// CHECK: %[[R0_BV:.*]] = iree_input.cast.tensor_to_buffer_view %[[R]]#0 : tensor<16xf32> -> !iree_input.buffer_view
// CHECK: %[[R1_BV:.*]] = iree_input.cast.tensor_to_buffer_view %[[R]]#1 : tensor<16xf32> -> !iree_input.buffer_view
// CHECK: return %[[R0_BV]], %[[R1_BV]] : !iree_input.buffer_view, !iree_input.buffer_view
-// CHECK: func private @__inference_return_list_260
+// CHECK: func.func private @__inference_return_list_260
// CHECK-NOT: tf_saved_model
builtin.module @return_list attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 729 : i32}, tf_saved_model.semantics} {
func.func @__inference_return_list_260(%arg0: tensor<16xf32> {tf._user_specified_name = "a", tf_saved_model.index_path = [0]}, %arg1: tensor<16xf32> {tf._user_specified_name = "b", tf_saved_model.index_path = [1]}) -> (tensor<16xf32> {tf_saved_model.index_path = [0]}, tensor<16xf32> {tf_saved_model.index_path = [1]}) attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<16>, #tf_type.shape<16>], tf_saved_model.exported_names = ["return_list"]} {
@@ -59,7 +59,7 @@
// -----
// CHECK-LABEL: module @dict_nest
-// CHECK: func @dict_nest(%arg0: !iree_input.list<!iree_input.variant>, %arg1: !iree_input.buffer_view) -> (!iree_input.list<!iree_input.variant>, !iree_input.list<!iree_input.variant>)
+// CHECK: func.func @dict_nest(%arg0: !iree_input.list<!iree_input.variant>, %arg1: !iree_input.buffer_view) -> (!iree_input.list<!iree_input.variant>, !iree_input.list<!iree_input.variant>)
// CHECK-SAME{LITERAL}: iree.abi = "{\22a\22:[[\22sdict\22,[\22dict\22,[\22sdict\22,[\22a\22,[\22ndarray\22,\22f32\22,1,16]],[\22b\22,[\22ndarray\22,\22f32\22,1,16]]]],[\22list\22,[\22slist\22,[\22ndarray\22,\22f32\22,1,16],[\22ndarray\22,\22f32\22,1,16]]]],[\22ndarray\22,\22f32\22,0]],\22r\22:[[\22sdict\22,[\22dict\22,[\22sdict\22,[\22a\22,[\22ndarray\22,\22f32\22,1,16]],[\22b\22,[\22ndarray\22,\22f32\22,1,16]]]],[\22list\22,[\22stuple\22,[\22ndarray\22,\22f32\22,1,16],[\22ndarray\22,\22f32\22,1,16]]]]],\22v\22:1}"
// CHECK: %[[c0:.+]] = arith.constant 0 : index
// CHECK: %[[L0:.+]] = iree_input.list.get %arg0[%[[c0]]] : !iree_input.list<!iree_input.variant> -> !iree_input.list<!iree_input.variant>
@@ -98,7 +98,7 @@
// CHECK: %[[c1_10:.+]] = arith.constant 1 : index
// CHECK: iree_input.list.set %[[R9]][%[[c1_10]]], %[[R3_BV]] : !iree_input.list<!iree_input.variant>, !iree_input.buffer_view
// CHECK: return %[[R7]], %[[R8]] : !iree_input.list<!iree_input.variant>, !iree_input.list<!iree_input.variant>
-// CHECK: func private @__inference_dict_nest_190
+// CHECK: func.func private @__inference_dict_nest_190
// CHECK-NOT: tf_saved_model
builtin.module @dict_nest attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 729 : i32}, tf_saved_model.semantics} {
func.func @__inference_dict_nest_190(
@@ -117,7 +117,7 @@
// -----
// CHECK-LABEL: module @kwargs
-// CHECK: func @dict_nest(%arg0: !iree_input.buffer_view, %arg1: !iree_input.buffer_view, %arg2: !iree_input.buffer_view) -> !iree_input.list<!iree_input.variant>
+// CHECK: func.func @dict_nest(%arg0: !iree_input.buffer_view, %arg1: !iree_input.buffer_view, %arg2: !iree_input.buffer_view) -> !iree_input.list<!iree_input.variant>
// CHECK-SAME{LITERAL}: iree.abi = "{\22a\22:[[\22named\22,\22a\22,[\22ndarray\22,\22f32\22,1,16]],[\22named\22,\22b\22,[\22ndarray\22,\22f32\22,1,16]],[\22named\22,\22scalar\22,[\22ndarray\22,\22f32\22,0]]],\22r\22:[[\22sdict\22,[\22dict\22,[\22sdict\22,[\22a\22,[\22ndarray\22,\22f32\22,1,16]],[\22b\22,[\22ndarray\22,\22f32\22,1,16]],[\22scalar\22,[\22ndarray\22,\22f32\22,0]]]]]],\22v\22:1}"
builtin.module @kwargs attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 729 : i32}, tf_saved_model.semantics} {
func.func @__inference_dict_nest_190(
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/test/strip_asserts.mlir b/integrations/tensorflow/iree_tf_compiler/TF/test/strip_asserts.mlir
index 0d57a49..5bed968 100644
--- a/integrations/tensorflow/iree_tf_compiler/TF/test/strip_asserts.mlir
+++ b/integrations/tensorflow/iree_tf_compiler/TF/test/strip_asserts.mlir
@@ -2,7 +2,7 @@
// CHECK-LABEL: @asserts
// CHECK-NOT: tf.Assert
-func @asserts(%arg0 : tensor<*xi1>, %arg1 : tensor<!tf_type.string>,
+func.func @asserts(%arg0 : tensor<*xi1>, %arg1 : tensor<!tf_type.string>,
%arg2 : tensor<!tf_type.string>, %arg3 : tensor<!tf_type.string>,
%arg4 : tensor<i32>, %arg5 : tensor<!tf_type.string>, %arg6 : tensor<i32>) {
"tf.Assert"(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6)
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/test/strip_metadata.mlir b/integrations/tensorflow/iree_tf_compiler/TF/test/strip_metadata.mlir
index 07c9635..1e294f1 100644
--- a/integrations/tensorflow/iree_tf_compiler/TF/test/strip_metadata.mlir
+++ b/integrations/tensorflow/iree_tf_compiler/TF/test/strip_metadata.mlir
@@ -4,12 +4,12 @@
// CHECK-NOT: attributes
// CHECK-NOT: tf.versions
module @tf_module attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 586 : i32}} {
- // CHECK: func @multiply__2_2__i32__uniform
+ // CHECK: func.func @multiply__2_2__i32__uniform
// CHECK: iree.reflection
// CHECK-NOT: tf._user_specified_name
// CHECK-NOT: tf._user_specified_name
// CHECK-NOT: tf._input_shapes
- func @multiply__2_2__i32__uniform(%arg0: tensor<2xi32> {tf._user_specified_name = "args_0"}, %arg1: tensor<2xi32> {tf._user_specified_name = "args_1"}) -> tensor<2xi32> attributes {iree.reflection = {abi = "sip", abiv = 1 : i32, sip = "I12!S9!k0_0k1_1R3!_0"}, tf._input_shapes = [#tf_type.shape<2>, #tf_type.shape<2>]} {
+ func.func @multiply__2_2__i32__uniform(%arg0: tensor<2xi32> {tf._user_specified_name = "args_0"}, %arg1: tensor<2xi32> {tf._user_specified_name = "args_1"}) -> tensor<2xi32> attributes {iree.reflection = {abi = "sip", abiv = 1 : i32, sip = "I12!S9!k0_0k1_1R3!_0"}, tf._input_shapes = [#tf_type.shape<2>, #tf_type.shape<2>]} {
// CHECK-NEXT: mhlo.multiply
%0 = mhlo.multiply %arg0, %arg1 : tensor<2xi32>
return %0 : tensor<2xi32>
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/test/verify_fully_converted.mlir b/integrations/tensorflow/iree_tf_compiler/TF/test/verify_fully_converted.mlir
index dee7fc7..1025960 100644
--- a/integrations/tensorflow/iree_tf_compiler/TF/test/verify_fully_converted.mlir
+++ b/integrations/tensorflow/iree_tf_compiler/TF/test/verify_fully_converted.mlir
@@ -1,7 +1,7 @@
// RUN: iree-tf-opt %s --iree-tf-verify-fully-converted --split-input-file -verify-diagnostics
-// CHECK-LABEL: func @f
-func @f() -> (tensor<i32>) {
+// CHECK-LABEL: func.func @f
+func.func @f() -> (tensor<i32>) {
// CHECK: [[VAL0:%.+]] = mhlo.constant dense<3>
%0 = mhlo.constant dense<3> : tensor<i32>
return %0 : tensor<i32>
@@ -10,7 +10,7 @@
// -----
// expected-error@below {{The following illegal operations still remain}}
-func @f() -> (tensor<i32>) {
+func.func @f() -> (tensor<i32>) {
// expected-error@+1 {{'tf.Const' op : illegal op still exists}}
%0 = "tf.Const"() {value = dense<-1> : tensor<i32>} : () -> tensor<i32>
return %0 : tensor<i32>
@@ -19,7 +19,7 @@
// -----
// expected-error@below {{The following illegal operations still remain}}
-func @f(%arg0 : tensor<i32>) -> (tensor<i32>) {
+func.func @f(%arg0 : tensor<i32>) -> (tensor<i32>) {
// expected-error@+1 {{'tf.Const' op : illegal op still exists}}
%0 = "tf.Const"() {value = dense<-1> : tensor<i32>} : () -> tensor<i32>
// expected-error@+1 {{'tf.Add' op : illegal op still exists}}
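
These verification tests rely on -verify-diagnostics rather than FileCheck: the run succeeds only if every diagnostic the pass emits is matched by an expected-error annotation on the corresponding line (@+1 points one line down, @below points at the op below the comment). A condensed, self-contained sketch mirroring the case above (hypothetical @g):

// expected-error@below {{The following illegal operations still remain}}
func.func @g() -> tensor<i32> {
  // expected-error@+1 {{'tf.Const' op : illegal op still exists}}
  %0 = "tf.Const"() {value = dense<-1> : tensor<i32>} : () -> tensor<i32>
  return %0 : tensor<i32>
}
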
diff --git a/integrations/tensorflow/iree_tf_compiler/TFL/test/convert_metadata.mlir b/integrations/tensorflow/iree_tf_compiler/TFL/test/convert_metadata.mlir
index 5e78a5b..46d9776 100644
--- a/integrations/tensorflow/iree_tf_compiler/TFL/test/convert_metadata.mlir
+++ b/integrations/tensorflow/iree_tf_compiler/TFL/test/convert_metadata.mlir
@@ -1,13 +1,13 @@
// RUN: iree-opt-tflite --split-input-file --pass-pipeline='iree-tflite-convert-module-metadata,func.func(iree-tflite-convert-function-metadata)' %s | FileCheck %s
module attributes {tfl.schema_version = 3 : i32} {
- // CHECK: func @main(
+ // CHECK: func.func @main(
// CHECK-SAME: %arg0: tensor<?xf32> {iree.identifier = "input0"},
// CHECK-SAME: %arg1: tensor<?xf32> {iree.identifier = "input1"}
// CHECK-SAME: ) -> (
// CHECK-SAME: tensor<?xf32> {iree.identifier = "output0"},
// CHECK-SAME: tensor<?xf32> {iree.identifier = "output1"})
- func @main(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> (tensor<?xf32>, tensor<?xf32>) attributes {
+ func.func @main(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> (tensor<?xf32>, tensor<?xf32>) attributes {
tf.entry_function = {inputs = "input0,input1", outputs = "output0,output1"}
} {
return %arg0, %arg1 : tensor<?xf32>, tensor<?xf32>
diff --git a/integrations/tensorflow/iree_tf_compiler/TFL/test/flex_ops.mlir b/integrations/tensorflow/iree_tf_compiler/TFL/test/flex_ops.mlir
index 7426833..0e69ef5 100644
--- a/integrations/tensorflow/iree_tf_compiler/TFL/test/flex_ops.mlir
+++ b/integrations/tensorflow/iree_tf_compiler/TFL/test/flex_ops.mlir
@@ -5,7 +5,7 @@
// This excerpt was extracted from:
// https://tfhub.dev/tulasiram58827/lite-model/parallel-wavegan/float16/1
// CHECK-LABEL: @test_flex_ops
-func @test_flex_ops(%arg0: tensor<?x2x64xf32>, %arg1: tensor<1x1x64xf32>) -> tensor<*xf32> {
+func.func @test_flex_ops(%arg0: tensor<?x2x64xf32>, %arg1: tensor<1x1x64xf32>) -> tensor<*xf32> {
// CHECK: %[[ADD:.+]] = "tosa.add"(%arg0, %arg1) : (tensor<?x2x64xf32>, tensor<1x1x64xf32>)
// CHECK: %[[CAST:.+]] = tensor.cast %[[ADD]]
// CHECK: return %[[CAST]]
diff --git a/integrations/tensorflow/iree_tf_compiler/TFL/test/import/add.mlir b/integrations/tensorflow/iree_tf_compiler/TFL/test/import/add.mlir
index c204c63..cb99070 100644
--- a/integrations/tensorflow/iree_tf_compiler/TFL/test/import/add.mlir
+++ b/integrations/tensorflow/iree_tf_compiler/TFL/test/import/add.mlir
@@ -1,7 +1,7 @@
// RUN: iree-import-tflite %S/add.tflite | FileCheck %s
// CHECK: module {
-// CHECK-NEXT: func @main(%arg0: tensor<1x8x8x3xf32> {iree.identifier = "input"}) -> (tensor<1x8x8x3xf32> {iree.identifier = "output"}) {
+// CHECK-NEXT: func.func @main(%arg0: tensor<1x8x8x3xf32> {iree.identifier = "input"}) -> (tensor<1x8x8x3xf32> {iree.identifier = "output"}) {
// CHECK-NEXT: %0 = "tosa.add"(%arg0, %arg0) : (tensor<1x8x8x3xf32>, tensor<1x8x8x3xf32>) -> tensor<1x8x8x3xf32>
// CHECK-NEXT: %1 = "tosa.add"(%0, %arg0) : (tensor<1x8x8x3xf32>, tensor<1x8x8x3xf32>) -> tensor<1x8x8x3xf32>
// CHECK-NEXT: return %1 : tensor<1x8x8x3xf32>
diff --git a/integrations/tensorflow/iree_tf_compiler/TFL/test/import/multi_add.mlir b/integrations/tensorflow/iree_tf_compiler/TFL/test/import/multi_add.mlir
index 8aa3845..ba1eb5f 100644
--- a/integrations/tensorflow/iree_tf_compiler/TFL/test/import/multi_add.mlir
+++ b/integrations/tensorflow/iree_tf_compiler/TFL/test/import/multi_add.mlir
@@ -1,7 +1,7 @@
// RUN: iree-import-tflite %S/multi_add.tflite | FileCheck %s
// CHECK: module {
-// CHECK-NEXT: func @main(%arg0: tensor<1x8x8x3xf32> {iree.identifier = "a"}, %arg1: tensor<1x8x8x3xf32> {iree.identifier = "b"}, %arg2: tensor<1x8x8x3xf32> {iree.identifier = "c"}, %arg3: tensor<1x8x8x3xf32> {iree.identifier = "d"}) -> (tensor<1x8x8x3xf32> {iree.identifier = "x"}, tensor<1x8x8x3xf32> {iree.identifier = "y"}) {
+// CHECK-NEXT: func.func @main(%arg0: tensor<1x8x8x3xf32> {iree.identifier = "a"}, %arg1: tensor<1x8x8x3xf32> {iree.identifier = "b"}, %arg2: tensor<1x8x8x3xf32> {iree.identifier = "c"}, %arg3: tensor<1x8x8x3xf32> {iree.identifier = "d"}) -> (tensor<1x8x8x3xf32> {iree.identifier = "x"}, tensor<1x8x8x3xf32> {iree.identifier = "y"}) {
// CHECK-NEXT: %0 = "tosa.add"(%arg1, %arg2) : (tensor<1x8x8x3xf32>, tensor<1x8x8x3xf32>) -> tensor<1x8x8x3xf32>
// CHECK-NEXT: %1 = "tosa.add"(%arg0, %0) : (tensor<1x8x8x3xf32>, tensor<1x8x8x3xf32>) -> tensor<1x8x8x3xf32>
// CHECK-NEXT: %2 = "tosa.add"(%arg3, %0) : (tensor<1x8x8x3xf32>, tensor<1x8x8x3xf32>) -> tensor<1x8x8x3xf32>
diff --git a/integrations/tensorflow/iree_tf_compiler/TFL/test/lower_global_tensors.mlir b/integrations/tensorflow/iree_tf_compiler/TFL/test/lower_global_tensors.mlir
index 8b23894..1b7fb10 100644
--- a/integrations/tensorflow/iree_tf_compiler/TFL/test/lower_global_tensors.mlir
+++ b/integrations/tensorflow/iree_tf_compiler/TFL/test/lower_global_tensors.mlir
@@ -2,13 +2,13 @@
module {
// CHECK: iree_input.global private mutable @__iree_flow_Variable = dense<1.000000e+00> : tensor<16x16xf32>
- // CHECK-LABEL: func @state
- func @state(%arg0: tensor<16x16xf32>) -> () {
+ // CHECK-LABEL: func.func @state
+ func.func @state(%arg0: tensor<16x16xf32>) -> () {
"tfl.call_once"() {session_init_function = "StateInit"} : () -> ()
return
}
- func private @StateInit() {
+ func.func private @StateInit() {
%0 = "tfl.var_handle"() {container = "", shared_name = "Variable"} : () -> tensor<*x!tf_type.resource>
%1 = "tfl.pseudo_const"() {value = dense<1.000000e+00> : tensor<16x16xf32>} : () -> tensor<16x16xf32>
"tfl.assign_variable"(%0, %1) : (tensor<*x!tf_type.resource>, tensor<16x16xf32>) -> ()
@@ -21,8 +21,8 @@
module {
// CHECK: iree_input.global private mutable @__iree_flow_Variable = dense<1.000000e+00> : tensor<16x16xf32>
- // CHECK-LABEL: func @assign
- func @assign(%arg0: tensor<16x16xf32>) -> () {
+ // CHECK-LABEL: func.func @assign
+ func.func @assign(%arg0: tensor<16x16xf32>) -> () {
"tfl.call_once"() {session_init_function = "AssignInit"} : () -> ()
// CHECK: %[[ADDR:.+]] = iree_input.global.address @__iree_flow_Variable : !iree_input.ptr<tensor<16x16xf32>>
%0 = "tfl.var_handle"() {container = "", shared_name = "Variable"} : () -> tensor<*x!tf_type.resource>
@@ -32,7 +32,7 @@
return
}
- func private @AssignInit() {
+ func.func private @AssignInit() {
%0 = "tfl.var_handle"() {container = "", shared_name = "Variable"} : () -> tensor<*x!tf_type.resource>
%1 = "tfl.pseudo_const"() {value = dense<1.000000e+00> : tensor<16x16xf32>} : () -> tensor<16x16xf32>
"tfl.assign_variable"(%0, %1) : (tensor<*x!tf_type.resource>, tensor<16x16xf32>) -> ()
@@ -45,8 +45,8 @@
module {
// CHECK: iree_input.global private mutable @__iree_flow_Variable = dense<1.000000e+00> : tensor<16x16xf32>
- // CHECK-LABEL: func @read
- func @read(%arg0: tensor<16x16xf32>) -> (tensor<16x16xf32>) {
+ // CHECK-LABEL: func.func @read
+ func.func @read(%arg0: tensor<16x16xf32>) -> (tensor<16x16xf32>) {
"tfl.call_once"() {session_init_function = "ReadInit"} : () -> ()
// CHECK: %[[ADDR:.+]] = iree_input.global.address @__iree_flow_Variable : !iree_input.ptr<tensor<16x16xf32>>
@@ -57,7 +57,7 @@
return %1 : tensor<16x16xf32>
}
- func private @ReadInit() {
+ func.func private @ReadInit() {
%0 = "tfl.var_handle"() {container = "", shared_name = "Variable"} : () -> tensor<*x!tf_type.resource>
%1 = "tfl.pseudo_const"() {value = dense<1.000000e+00> : tensor<16x16xf32>} : () -> tensor<16x16xf32>
"tfl.assign_variable"(%0, %1) : (tensor<*x!tf_type.resource>, tensor<16x16xf32>) -> ()
@@ -70,8 +70,8 @@
module {
// CHECK: iree_input.global private mutable @__iree_flow_Variable = dense<2.000000e+00> : tensor<16x16xf32>
- // CHECK-LABEL: func @readAssign
- func @readAssign(%arg0: tensor<16x16xf32>) -> (tensor<16x16xf32>) {
+ // CHECK-LABEL: func.func @readAssign
+ func.func @readAssign(%arg0: tensor<16x16xf32>) -> (tensor<16x16xf32>) {
"tfl.call_once"() {session_init_function = "ReadAssignInit"} : () -> ()
// CHECK: %[[ADDR:.+]] = iree_input.global.address @__iree_flow_Variable : !iree_input.ptr<tensor<16x16xf32>>
%0 = "tfl.var_handle"() {container = "", shared_name = "Variable"} : () -> tensor<*x!tf_type.resource>
@@ -86,7 +86,7 @@
"tfl.assign_variable"(%0, %2) : (tensor<*x!tf_type.resource>, tensor<16x16xf32>) -> ()
return %2 : tensor<16x16xf32>
}
- func private @ReadAssignInit() {
+ func.func private @ReadAssignInit() {
%0 = "tfl.var_handle"() {container = "", shared_name = "Variable"} : () -> tensor<*x!tf_type.resource>
%1 = "tfl.pseudo_const"() {value = dense<2.000000e+00> : tensor<16x16xf32>} : () -> tensor<16x16xf32>
"tfl.assign_variable"(%0, %1) : (tensor<*x!tf_type.resource>, tensor<16x16xf32>) -> ()
@@ -98,8 +98,8 @@
module {
// CHECK: iree_input.global private mutable @__iree_flow_Variable = dense<42> : tensor<2x3xi8>
- // CHECK-LABEL: func @readAssignQuant
- func @readAssignQuant(%arg0: tensor<2x3x!quant.uniform<i8:f32, 0.1:2>>) -> (tensor<2x3x!quant.uniform<i8:f32, 0.1:2>>) {
+ // CHECK-LABEL: func.func @readAssignQuant
+ func.func @readAssignQuant(%arg0: tensor<2x3x!quant.uniform<i8:f32, 0.1:2>>) -> (tensor<2x3x!quant.uniform<i8:f32, 0.1:2>>) {
"tfl.call_once"() {session_init_function = "ReadAssignInit"} : () -> ()
%0 = "tfl.var_handle"() {container = "", shared_name = "Variable"} : () -> tensor<*x!tf_type.resource>
@@ -116,7 +116,7 @@
"tfl.assign_variable"(%0, %2) : (tensor<*x!tf_type.resource>, tensor<2x3x!quant.uniform<i8:f32, 0.1:2>>) -> ()
return %2 : tensor<2x3x!quant.uniform<i8:f32, 0.1:2>>
}
- func private @ReadAssignInit() {
+ func.func private @ReadAssignInit() {
%0 = "tfl.var_handle"() {container = "", shared_name = "Variable"} : () -> tensor<*x!tf_type.resource>
%1 = "tfl.pseudo_const"() {qtype = tensor<2x3x!quant.uniform<i8:f32, 0.1:2>>, value = dense<42> : tensor<2x3xi8>} : () -> tensor<2x3x!quant.uniform<i8:f32, 0.1:2>>
"tfl.assign_variable"(%0, %1) : (tensor<*x!tf_type.resource>, tensor<2x3x!quant.uniform<i8:f32, 0.1:2>>) -> ()
@@ -128,7 +128,7 @@
module {
// CHECK-LABEL: @nostate
- func @nostate(%arg0: tensor<16x16xf32>) -> (tensor<16x16xf32>) {
+ func.func @nostate(%arg0: tensor<16x16xf32>) -> (tensor<16x16xf32>) {
"tfl.call_once"() {session_init_function = "NoStateInit"} : () -> ()
// CHECK: tfl.var_handle
%0 = "tfl.var_handle"() {container = "", shared_name = "Variable"} : () -> tensor<*x!tf_type.resource>
@@ -142,7 +142,7 @@
"tfl.assign_variable"(%0, %2) : (tensor<*x!tf_type.resource>, tensor<16x16xf32>) -> ()
return %2 : tensor<16x16xf32>
}
- func private @NoStateInit() {
+ func.func private @NoStateInit() {
return
}
}
diff --git a/integrations/tensorflow/iree_tf_compiler/TFL/test/retain_call_once_funcs.mlir b/integrations/tensorflow/iree_tf_compiler/TFL/test/retain_call_once_funcs.mlir
index ab354e9..025bbd5 100644
--- a/integrations/tensorflow/iree_tf_compiler/TFL/test/retain_call_once_funcs.mlir
+++ b/integrations/tensorflow/iree_tf_compiler/TFL/test/retain_call_once_funcs.mlir
@@ -3,7 +3,7 @@
// CHECK-LABEL: module {
module {
// CHECK-LABEL: @main
- func @main(%arg0: tensor<16x16xf32>) -> (tensor<16x16xf32>) {
+ func.func @main(%arg0: tensor<16x16xf32>) -> (tensor<16x16xf32>) {
// CHECK: "tfl.call_once"() {session_init_function = "NoOp", session_init_function_symbol = @NoOp} : () -> ()
"tfl.call_once"() {session_init_function = "NoOp"} : () -> ()
%0 = "tfl.var_handle"() {container = "", shared_name = "Variable"} : () -> tensor<*x!tf_type.resource>
@@ -12,7 +12,7 @@
"tfl.assign_variable"(%0, %2) : (tensor<*x!tf_type.resource>, tensor<16x16xf32>) -> ()
return %2 : tensor<16x16xf32>
}
- func private @NoOp() {
+ func.func private @NoOp() {
%0 = "tfl.var_handle"() {container = "", shared_name = "Variable"} : () -> tensor<*x!tf_type.resource>
%1 = "tfl.pseudo_const"() {value = dense<0.000000e+00> : tensor<16x16xf32>} : () -> tensor<16x16xf32>
"tfl.assign_variable"(%0, %1) : (tensor<*x!tf_type.resource>, tensor<16x16xf32>) -> ()
diff --git a/integrations/tensorflow/iree_tf_compiler/TFL/test/strip_metadata.mlir b/integrations/tensorflow/iree_tf_compiler/TFL/test/strip_metadata.mlir
index 3d7a368..24f4018 100644
--- a/integrations/tensorflow/iree_tf_compiler/TFL/test/strip_metadata.mlir
+++ b/integrations/tensorflow/iree_tf_compiler/TFL/test/strip_metadata.mlir
@@ -3,9 +3,9 @@
// CHECK-LABEL: module {
// CHECK-NOT: tfl.schema_version
module attributes {tfl.schema_version = 3 : i32} {
- // CHECK: func @main
+ // CHECK: func.func @main
// CHECK-NOT: tf.entry_function
- func @main(%arg0: tensor<1x8x8x3xf32>) -> tensor<1x8x8x3xf32> attributes {tf.entry_function = {inputs = "input", outputs = "output"}} {
+ func.func @main(%arg0: tensor<1x8x8x3xf32>) -> tensor<1x8x8x3xf32> attributes {tf.entry_function = {inputs = "input", outputs = "output"}} {
// CHECK-NEXT: tfl.add
%0 = tfl.add %arg0, %arg0 {fused_activation_function = "NONE"} : tensor<1x8x8x3xf32>
%1 = tfl.add %0, %arg0 {fused_activation_function = "NONE"} : tensor<1x8x8x3xf32>
diff --git a/integrations/tensorflow/iree_tf_compiler/TFL/test/verify_fully_converted.mlir b/integrations/tensorflow/iree_tf_compiler/TFL/test/verify_fully_converted.mlir
index 04904ca..335513f 100644
--- a/integrations/tensorflow/iree_tf_compiler/TFL/test/verify_fully_converted.mlir
+++ b/integrations/tensorflow/iree_tf_compiler/TFL/test/verify_fully_converted.mlir
@@ -1,7 +1,7 @@
// RUN: iree-opt-tflite %s --iree-tflite-verify-fully-converted --split-input-file -verify-diagnostics
-// CHECK-LABEL: func @main
-func @main(%arg0: tensor<2xf32>) -> (tensor<2xf32>) {
+// CHECK-LABEL: func.func @main
+func.func @main(%arg0: tensor<2xf32>) -> (tensor<2xf32>) {
// CHECK: "tosa.add"
%0 = "tosa.add"(%arg0, %arg0) : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32>
return %0 : tensor<2xf32>
@@ -10,7 +10,7 @@
// -----
// expected-error@below {{The following illegal operations still remain}}
-func @main(%arg0: tensor<1x8x8x3xf32>) -> tensor<1x8x8x3xf32> attributes {tf.entry_function = {inputs = "input", outputs = "output"}} {
+func.func @main(%arg0: tensor<1x8x8x3xf32>) -> tensor<1x8x8x3xf32> attributes {tf.entry_function = {inputs = "input", outputs = "output"}} {
// expected-error@+1 {{'tfl.add' op : illegal op still exists}}
%0 = tfl.add %arg0, %arg0 {fused_activation_function = "NONE"} : tensor<1x8x8x3xf32>
// expected-error@+1 {{'tfl.sub' op : illegal op still exists}}
diff --git a/iree/test/e2e/linalg_transform/linalg_transform.mlir b/iree/test/e2e/linalg_transform/linalg_transform.mlir
index 21dff07..7f7afd3 100644
--- a/iree/test/e2e/linalg_transform/linalg_transform.mlir
+++ b/iree/test/e2e/linalg_transform/linalg_transform.mlir
@@ -1,6 +1,6 @@
// RUN: iree-run-mlir %s --iree-hal-target-backends=dylib-llvm-aot --iree-codegen-use-linalg-transform-interp --linalg-transform-file-name=%p/linalg_transform_spec.mlir
-func @matmul_static() -> tensor<5x5xf32> {
+func.func @matmul_static() -> tensor<5x5xf32> {
%res = flow.tensor.constant dense<[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
diff --git a/iree/test/e2e/matmul/generate_e2e_matmul_tests.py b/iree/test/e2e/matmul/generate_e2e_matmul_tests.py
index 276d513..62c7721 100644
--- a/iree/test/e2e/matmul/generate_e2e_matmul_tests.py
+++ b/iree/test/e2e/matmul/generate_e2e_matmul_tests.py
@@ -362,7 +362,7 @@
generate_function.compilation_index += 1
func_definition = func_definition + (
- f"func @{func_name}(%lhs: {lhs_tensor_type}, %rhs: {rhs_tensor_type}, %acc: {acc_tensor_type}) -> {acc_tensor_type} {{\n"
+ f"func.func @{func_name}(%lhs: {lhs_tensor_type}, %rhs: {rhs_tensor_type}, %acc: {acc_tensor_type}) -> {acc_tensor_type} {{\n"
f" %result = linalg.matmul {compilation_info_attr}ins(%lhs, %rhs: {lhs_tensor_type}, {rhs_tensor_type}) outs(%acc: {acc_tensor_type}) -> {acc_tensor_type}\n"
f" return %result: {acc_tensor_type}\n"
f"}}\n")
diff --git a/iree/test/e2e/regression/strided_slice.mlir b/iree/test/e2e/regression/strided_slice.mlir
index 106d0e0..ec93690 100644
--- a/iree/test/e2e/regression/strided_slice.mlir
+++ b/iree/test/e2e/regression/strided_slice.mlir
@@ -1,4 +1,4 @@
-func @stride_slice() {
+func.func @stride_slice() {
%c15 = arith.constant 15 : i32
%c16 = arith.constant 16 : i32
%0 = linalg.init_tensor [12, 15] : tensor<12x15xi32>
@@ -59,7 +59,7 @@
}
#map = affine_map<(d0) -> (d0)>
-func @issue_8825() {
+func.func @issue_8825() {
%c0 = arith.constant 0 : index
%c0_i64 = arith.constant 0 : i64
%c3_i64 = arith.constant 3 : i64
@@ -89,4 +89,4 @@
%17 = tensor.cast %16 : tensor<?xf32> to tensor<2xf32>
check.expect_almost_eq_const(%17, dense<[6.0, 9.0]> : tensor<2xf32>) : tensor<2xf32>
return
-}
\ No newline at end of file
+}
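
Aside: the final strided_slice.mlir hunk is not part of the rename. The "\ No newline at end of file" marker is unified-diff notation recording that the removed closing brace ended the file without a trailing newline; re-adding the identical line restores the terminating newline.
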
diff --git a/iree/test/microbenchmarks/linalg_transpose.mlir b/iree/test/microbenchmarks/linalg_transpose.mlir
index 71fd316..d3ecae1 100644
--- a/iree/test/microbenchmarks/linalg_transpose.mlir
+++ b/iree/test/microbenchmarks/linalg_transpose.mlir
@@ -10,7 +10,7 @@
util.global private @"__transpose_10_input" {noinline} = dense<1.0> : tensor<512x1024xf32>
-func @transpose_10() -> tensor<1024x512xf32> {
+func.func @transpose_10() -> tensor<1024x512xf32> {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%input_ptr = util.global.address @"__transpose_10_input" : !util.ptr<tensor<512x1024xf32>>
@@ -28,7 +28,7 @@
util.global private @"__transpose_021_input" {noinline} = dense<1.0> : tensor<64x96x128xf32>
-func @transpose_021() -> tensor<64x128x96xf32> {
+func.func @transpose_021() -> tensor<64x128x96xf32> {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%input_ptr = util.global.address @"__transpose_021_input" : !util.ptr<tensor<64x96x128xf32>>
@@ -46,7 +46,7 @@
util.global private @"__transpose_201_input" {noinline} = dense<1.0> : tensor<64x96x128xf32>
-func @transpose_201() -> tensor<128x64x96xf32> {
+func.func @transpose_201() -> tensor<128x64x96xf32> {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%input_ptr = util.global.address @"__transpose_201_input" : !util.ptr<tensor<64x96x128xf32>>
@@ -64,7 +64,7 @@
util.global private @"__transpose_210_input" {noinline} = dense<1.0> : tensor<64x96x128xf32>
-func @transpose_210() -> tensor<128x96x64xf32> {
+func.func @transpose_210() -> tensor<128x96x64xf32> {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%input_ptr = util.global.address @"__transpose_210_input" : !util.ptr<tensor<64x96x128xf32>>
@@ -82,7 +82,7 @@
util.global private @"__transpose_120_input" {noinline} = dense<1.0> : tensor<64x96x128xf32>
-func @transpose_120() -> tensor<96x128x64xf32> {
+func.func @transpose_120() -> tensor<96x128x64xf32> {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%input_ptr = util.global.address @"__transpose_120_input" : !util.ptr<tensor<64x96x128xf32>>
@@ -100,7 +100,7 @@
util.global private @"__transpose_102_input" {noinline} = dense<1.0> : tensor<64x96x128xf32>
-func @transpose_102() -> tensor<96x64x128xf32> {
+func.func @transpose_102() -> tensor<96x64x128xf32> {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%input_ptr = util.global.address @"__transpose_102_input" : !util.ptr<tensor<64x96x128xf32>>
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/bufferize-in-parallel.mlir b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/bufferize-in-parallel.mlir
index b8977fd..d3ff41f 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/bufferize-in-parallel.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/bufferize-in-parallel.mlir
@@ -1,10 +1,10 @@
// RUN: iree-dialects-opt %s --linalg-transform-interp --canonicalize | FileCheck %s
-// CHECK-LABEL: func @parallel_insert_slice_no_conflict(
+// CHECK-LABEL: func.func @parallel_insert_slice_no_conflict(
// CHECK-SAME: %[[idx:.*]]: index, %[[idx2:.*]]: index,
// CHECK-SAME: %[[arg1:.*]]: memref<?xf32, #{{.*}}>,
// CHECK-SAME: %[[arg2:.*]]: memref<?xf32, #{{.*}}>
-func @parallel_insert_slice_no_conflict(
+func.func @parallel_insert_slice_no_conflict(
%idx: index, %idx2: index,
%arg1: tensor<?xf32> {bufferization.writable=true},
%arg2: tensor<?xf32> {bufferization.writable=true}) -> (tensor<?xf32>, f32)
@@ -35,11 +35,11 @@
return %2, %f : tensor<?xf32>, f32
}
-// CHECK-LABEL: func @parallel_insert_slice_with_conflict(
+// CHECK-LABEL: func.func @parallel_insert_slice_with_conflict(
// CHECK-SAME: %[[idx:.*]]: index, %[[idx2:.*]]: index,
// CHECK-SAME: %[[arg1:.*]]: memref<?xf32, #{{.*}}>,
// CHECK-SAME: %[[arg2:.*]]: memref<?xf32, #{{.*}}>
-func @parallel_insert_slice_with_conflict(
+func.func @parallel_insert_slice_with_conflict(
%idx: index, %idx2: index,
%arg1: tensor<?xf32> {bufferization.writable=true},
%arg2: tensor<?xf32> {bufferization.writable=true}) -> (f32, f32)
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/canonicalize.mlir b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/canonicalize.mlir
index 5a6d816..115c5f6 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/canonicalize.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/canonicalize.mlir
@@ -1,7 +1,7 @@
// RUN: iree-dialects-opt --canonicalize --split-input-file %s | FileCheck %s
-// CHECK-LABEL: func @tensor.cast(
-func @tensor.cast(%arg0: tensor<3x5xi32>) -> tensor<3x5xi32> {
+// CHECK-LABEL: func.func @tensor.cast(
+func.func @tensor.cast(%arg0: tensor<3x5xi32>) -> tensor<3x5xi32> {
%init = linalg.init_tensor [3, 5] : tensor<3x5xi32>
%casted_arg0 = tensor.cast %arg0 : tensor<3x5xi32> to tensor<?x?xi32>
@@ -20,10 +20,10 @@
return %1: tensor<3x5xi32>
}
-// CHECK-LABEL: func @canonicalize_insert_slice_indices(
+// CHECK-LABEL: func.func @canonicalize_insert_slice_indices(
// CHECK-SAME: %[[arg0:.*]]: tensor<?x?xf32>, %[[arg1:.*]]: tensor<?x?xf32>,
// CHECK-SAME: %[[idx:.*]]: index
-func @canonicalize_insert_slice_indices(
+func.func @canonicalize_insert_slice_indices(
%arg0 : tensor<?x?xf32>, %arg1: tensor<?x?xf32>,
%idx : index) -> tensor<?x?xf32>
{
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/convert_to_loops.mlir b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/convert_to_loops.mlir
index 1819165..0f36531 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/convert_to_loops.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/convert_to_loops.mlir
@@ -1,6 +1,6 @@
// RUN: iree-dialects-opt --split-input-file --iree-linalg-ext-to-loops %s | FileCheck %s
-func @sort_1d(%arg0: memref<128xi32>) {
+func.func @sort_1d(%arg0: memref<128xi32>) {
iree_linalg_ext.sort dimension(0)
outs(%arg0 : memref<128xi32>) {
^bb0(%arg2: i32, %arg3: i32): // no predecessors
@@ -9,7 +9,7 @@
}
return
}
-// CHECK-LABEL: func @sort_1d
+// CHECK-LABEL: func.func @sort_1d
// CHECK-SAME: %[[BUF:[a-zA-Z0-9]+]]
// CHECK-DAG: %[[C128:.+]] = arith.constant 128 : index
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
@@ -30,7 +30,7 @@
// -----
-func @sort_2d(%arg0: memref<16x32xi32>) {
+func.func @sort_2d(%arg0: memref<16x32xi32>) {
iree_linalg_ext.sort dimension(0)
outs(%arg0 : memref<16x32xi32>) {
^bb0(%arg2: i32, %arg3: i32): // no predecessors
@@ -39,7 +39,7 @@
}
return
}
-// CHECK-LABEL: func @sort_2d
+// CHECK-LABEL: func.func @sort_2d
// CHECK-SAME: %[[BUF:[a-zA-Z0-9]+]]
// CHECK-DAG: %[[C16:.+]] = arith.constant 16 : index
// CHECK-DAG: %[[C32:.+]] = arith.constant 32 : index
@@ -62,7 +62,7 @@
// -----
-func @sort_multi(%arg0: memref<128xf32>, %arg1: memref<128xi32>) {
+func.func @sort_multi(%arg0: memref<128xf32>, %arg1: memref<128xi32>) {
iree_linalg_ext.sort
dimension(0)
outs(%arg0, %arg1 : memref<128xf32>, memref<128xi32>) {
@@ -72,7 +72,7 @@
}
return
}
-// CHECK-LABEL: func @sort_multi
+// CHECK-LABEL: func.func @sort_multi
// CHECK-SAME: %[[BUF1:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[BUF2:[a-zA-Z0-9]+]]
// CHECK-DAG: %[[C128:.+]] = arith.constant 128 : index
@@ -98,7 +98,7 @@
// -----
-func @scatter_update_scalar_1D(
+func.func @scatter_update_scalar_1D(
%original: memref<8xi32>, %indices: memref<3x1xi32>,
%updates: memref<3xi32>) {
iree_linalg_ext.scatter unique_indices(true)
@@ -109,7 +109,7 @@
}
return
}
-// CHECK-LABEL: func @scatter_update_scalar_1D
+// CHECK-LABEL: func.func @scatter_update_scalar_1D
// CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[INDICES:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[UPDATES:[a-zA-Z0-9]+]]
@@ -124,7 +124,7 @@
// -----
-func @scatter_add_scalar_2D(
+func.func @scatter_add_scalar_2D(
%original: memref<4x3xi32>, %indices: memref<3x2xi32>,
%updates: memref<3xi32>) {
iree_linalg_ext.scatter unique_indices(true)
@@ -136,7 +136,7 @@
}
return
}
-// CHECK-LABEL: func @scatter_add_scalar_2D
+// CHECK-LABEL: func.func @scatter_add_scalar_2D
// CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[INDICES:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[UPDATES:[a-zA-Z0-9]+]]
@@ -155,7 +155,7 @@
// -----
-func @scatter_update_slice_2D(
+func.func @scatter_update_slice_2D(
%original: memref<4x3xi32>, %indices: memref<2x1xi32>,
%updates: memref<2x3xi32>) {
iree_linalg_ext.scatter unique_indices(true)
@@ -166,7 +166,7 @@
}
return
}
-// CHECK: func @scatter_update_slice_2D
+// CHECK: func.func @scatter_update_slice_2D
// CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[INDICES:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[UPDATES:[a-zA-Z0-9]+]]
@@ -185,7 +185,7 @@
// -----
-func @scatter_add_scalar_1D(
+func.func @scatter_add_scalar_1D(
%original: memref<8xi32>, %indices: memref<3x1xi32>,
%updates: memref<3xi32>) {
iree_linalg_ext.scatter unique_indices(true)
@@ -197,7 +197,7 @@
}
return
}
-// CHECK-LABEL: func @scatter_add_scalar_1D
+// CHECK-LABEL: func.func @scatter_add_scalar_1D
// CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[INDICES:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[UPDATES:[a-zA-Z0-9]+]]
@@ -214,7 +214,7 @@
// -----
-func @scatter_add_slice_2D(
+func.func @scatter_add_slice_2D(
%original: memref<4x3xi32>, %indices: memref<2x1xi32>,
%updates: memref<2x3xi32>) {
iree_linalg_ext.scatter unique_indices(true)
@@ -226,7 +226,7 @@
}
return
}
-// CHECK: func @scatter_add_slice_2D
+// CHECK: func.func @scatter_add_slice_2D
// CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[INDICES:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[UPDATES:[a-zA-Z0-9]+]]
@@ -244,7 +244,7 @@
// -----
-func @scatter_update_scalar_dynamic_1D(
+func.func @scatter_update_scalar_dynamic_1D(
%original: memref<?xi32>, %indices: memref<?x1xi32>,
%updates: memref<?xi32>) {
iree_linalg_ext.scatter unique_indices(true)
@@ -255,7 +255,7 @@
}
return
}
-// CHECK-LABEL: func @scatter_update_scalar_dynamic_1D
+// CHECK-LABEL: func.func @scatter_update_scalar_dynamic_1D
// CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[INDICES:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[UPDATES:[a-zA-Z0-9]+]]
@@ -270,7 +270,7 @@
// -----
-func @scatter_add_scalar_dynamic_2D(
+func.func @scatter_add_scalar_dynamic_2D(
%original: memref<?x?xi32>, %indices: memref<?x2xi32>,
%updates: memref<?xi32>) {
iree_linalg_ext.scatter unique_indices(true)
@@ -282,7 +282,7 @@
}
return
}
-// CHECK-LABEL: func @scatter_add_scalar_dynamic_2D
+// CHECK-LABEL: func.func @scatter_add_scalar_dynamic_2D
// CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[INDICES:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[UPDATES:[a-zA-Z0-9]+]]
@@ -301,7 +301,7 @@
// -----
-func @scatter_update_slice_dynamic_2D(
+func.func @scatter_update_slice_dynamic_2D(
%original: memref<?x?xi32>, %indices: memref<?x1xi32>,
%updates: memref<?x?xi32>) {
iree_linalg_ext.scatter unique_indices(true)
@@ -312,7 +312,7 @@
}
return
}
-// CHECK: func @scatter_update_slice_dynamic_2D
+// CHECK: func.func @scatter_update_slice_dynamic_2D
// CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[INDICES:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[UPDATES:[a-zA-Z0-9]+]]
@@ -329,7 +329,7 @@
// -----
-func @scatter_partial_slices(%arg0: memref<2x64x12xf32>, %arg1: memref<2x3xi32>, %arg2: memref<2x1x12xf32>) {
+func.func @scatter_partial_slices(%arg0: memref<2x64x12xf32>, %arg1: memref<2x3xi32>, %arg2: memref<2x1x12xf32>) {
iree_linalg_ext.scatter
unique_indices(true)
ins(%arg2, %arg1 : memref<2x1x12xf32>, memref<2x3xi32>)
@@ -364,7 +364,7 @@
// -----
-func @fft_1D(%real: memref<16xf32>, %imag: memref<16xf32>) {
+func.func @fft_1D(%real: memref<16xf32>, %imag: memref<16xf32>) {
%stage = arith.constant 1 : index
iree_linalg_ext.fft
ins(%stage: index)
@@ -373,7 +373,7 @@
}
// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0)[s0] -> (d0 + s0)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0) -> (d0)>
-// CHECK: func @fft_1D
+// CHECK: func.func @fft_1D
// CHECK-SAME: %[[REAL:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[IMAG:[a-zA-Z0-9]+]]
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
@@ -422,7 +422,7 @@
// -----
-func @fft_2D(%real: memref<?x16xf32>, %imag: memref<?x16xf32>) {
+func.func @fft_2D(%real: memref<?x16xf32>, %imag: memref<?x16xf32>) {
%stage = arith.constant 2 : index
iree_linalg_ext.fft
ins(%stage: index)
@@ -431,7 +431,7 @@
}
// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1)[s0] -> (d0 * 16 + s0 + d1)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK: func @fft_2D(
+// CHECK: func.func @fft_2D(
// CHECK-SAME: %[[REAL:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[IMAG:[a-zA-Z0-9]+]]
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
@@ -456,7 +456,7 @@
// -----
-func @fft_2D_coef_buf(%real: memref<?x16xf32>, %imag: memref<?x16xf32>,
+func.func @fft_2D_coef_buf(%real: memref<?x16xf32>, %imag: memref<?x16xf32>,
%coef_real: memref<1xf32>, %coef_imag: memref<1xf32>) {
%stage = arith.constant 1 : index
iree_linalg_ext.fft
@@ -467,7 +467,7 @@
// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1)[s0] -> (d0 * 16 + s0 + d1)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1) -> (d1)>
// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK: func @fft_2D_coef_buf
+// CHECK: func.func @fft_2D_coef_buf
// CHECK-SAME: %[[REAL:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[IMAG:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[COEF_REAL:[a-zA-Z0-9]+]]
@@ -509,14 +509,14 @@
// -----
-func @reverse_dim_0(%arg0: memref<?x?xi32>, %arg1: memref<?x?xi32>) {
+func.func @reverse_dim_0(%arg0: memref<?x?xi32>, %arg1: memref<?x?xi32>) {
iree_linalg_ext.reverse
dimensions(dense<0> : tensor<1xi64>)
ins(%arg0 : memref<?x?xi32>)
outs(%arg1 : memref<?x?xi32>)
return
}
-// CHECK-LABEL: func @reverse_dim_0
+// CHECK-LABEL: func.func @reverse_dim_0
// CHECK-SAME: %[[IN:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[OUT:[a-zA-Z0-9]+]]
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
@@ -531,7 +531,7 @@
// CHECK: %[[V0:.+]] = memref.load %[[IN]][%[[I]], %[[J]]]
// CHECK: memref.store %[[V0]], %[[OUT]][%[[T2]], %[[J]]] : memref<?x?xi32>
-func @scan_1d_inclusive(%0: memref<128xi32>, %1: memref<128xi32>) {
+func.func @scan_1d_inclusive(%0: memref<128xi32>, %1: memref<128xi32>) {
%c0 = memref.alloc() : memref<i32>
iree_linalg_ext.scan dimension(0) inclusive(true)
ins(%0 : memref<128xi32>) outs(%1, %c0 : memref<128xi32>, memref<i32>) {
@@ -541,7 +541,7 @@
}
return
}
-// CHECK-LABEL: func @scan_1d_inclusive
+// CHECK-LABEL: func.func @scan_1d_inclusive
// CHECK-SAME: %[[BUFI:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[BUFO:[a-zA-Z0-9]+]]
// CHECK-DAG: %[[C128:.+]] = arith.constant 128 : index
@@ -564,7 +564,7 @@
// -----
-func @scan_1d_exclusive(%0: memref<128xi32>, %1: memref<128xi32>) {
+func.func @scan_1d_exclusive(%0: memref<128xi32>, %1: memref<128xi32>) {
%c0 = memref.alloc() : memref<i32>
iree_linalg_ext.scan dimension(0) inclusive(false)
ins(%0 : memref<128xi32>) outs(%1, %c0 : memref<128xi32>, memref<i32>) {
@@ -574,7 +574,7 @@
}
return
}
-// CHECK-LABEL: func @scan_1d_exclusive
+// CHECK-LABEL: func.func @scan_1d_exclusive
// CHECK-SAME: %[[BUFI:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[BUFO:[a-zA-Z0-9]+]]
// CHECK-DAG: %[[C128:.+]] = arith.constant 128 : index
@@ -597,7 +597,7 @@
// -----
-func @scan_2d(%0: memref<16x32xi32>, %1: memref<16x32xi32>) {
+func.func @scan_2d(%0: memref<16x32xi32>, %1: memref<16x32xi32>) {
%t0 = memref.alloc() : memref<32xi32>
iree_linalg_ext.scan dimension(0) inclusive(true)
ins(%0 : memref<16x32xi32>) outs(%1, %t0 : memref<16x32xi32>, memref<32xi32>) {
@@ -607,7 +607,7 @@
}
return
}
-// CHECK-LABEL: func @scan_2d
+// CHECK-LABEL: func.func @scan_2d
// CHECK-SAME: %[[BUFI:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[BUFO:[a-zA-Z0-9]+]]
// CHECK-DAG: %[[C16:.+]] = arith.constant 16 : index
@@ -632,7 +632,7 @@
// -----
-func @topk_memref(%input_values: memref<2x10xf32>, %input_indices: memref<2x10xi32>, %out_values: memref<2x3xf32>, %out_indices: memref<2x3xi32>) {
+func.func @topk_memref(%input_values: memref<2x10xf32>, %input_indices: memref<2x10xi32>, %out_values: memref<2x3xf32>, %out_indices: memref<2x3xi32>) {
iree_linalg_ext.topk
dimension(1)
ins(%input_values, %input_indices : memref<2x10xf32> , memref<2x10xi32>)
@@ -644,7 +644,7 @@
return
}
-// CHECK-LABEL: func @topk_memref
+// CHECK-LABEL: func.func @topk_memref
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]+]]
@@ -677,7 +677,7 @@
// -----
-func @topk_memref_dynamic(%input_values: memref<?x?xf32>, %input_indices: memref<?x?xi32>, %out_values: memref<?x3xf32>, %out_indices: memref<?x3xi32>) {
+func.func @topk_memref_dynamic(%input_values: memref<?x?xf32>, %input_indices: memref<?x?xi32>, %out_values: memref<?x3xf32>, %out_indices: memref<?x3xi32>) {
iree_linalg_ext.topk
dimension(1)
ins(%input_values, %input_indices : memref<?x?xf32> , memref<?x?xi32>)
@@ -689,7 +689,7 @@
return
}
-// CHECK-LABEL: func @topk_memref_dynamic
+// CHECK-LABEL: func.func @topk_memref_dynamic
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]+]]
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/fuse-in-containing-op.mlir b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/fuse-in-containing-op.mlir
index 6d76046..478dc46 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/fuse-in-containing-op.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/fuse-in-containing-op.mlir
@@ -5,7 +5,7 @@
#map2 = affine_map<(d0)[s0] -> (-(d0 * s0) + 64, s0)>
module {
- // CHECK-LABEL: func @fuse_static
+ // CHECK-LABEL: func.func @fuse_static
// CHECK-SAME: %[[CHUNK_SIZE:[0-9a-z]+]]: index
// CHECK-SAME: %[[IN:[0-9a-z]+]]: tensor<64xf32>
// CHECK-SAME: %[[OUT:[0-9a-z]+]]: tensor<64xf32>
@@ -65,7 +65,7 @@
#map2 = affine_map<(d0)[s0, s1] -> (-(d0 * s1) + s0, s1)>
module {
- // CHECK-LABEL: func @fuse_dynamic
+ // CHECK-LABEL: func.func @fuse_dynamic
// CHECK-SAME: %[[CHUNK_SIZE:[0-9a-z]+]]: index
// CHECK-SAME: %[[IN:[0-9a-z]+]]: tensor<?xf32>
// CHECK-SAME: %[[OUT:[0-9a-z]+]]: tensor<?xf32>
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/fuse-operands.mlir b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/fuse-operands.mlir
index 3dc2bd9..14144eb 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/fuse-operands.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/fuse-operands.mlir
@@ -5,7 +5,7 @@
#map2 = affine_map<(d0)[s0] -> (-(d0 * s0) + 64, s0)>
module {
- // CHECK-LABEL: func @fuse_static
+ // CHECK-LABEL: func.func @fuse_static
// CHECK-SAME: %[[CHUNK_SIZE:[0-9a-z]+]]: index
// CHECK-SAME: %[[IN:[0-9a-z]+]]: tensor<64xf32>
// CHECK-SAME: %[[OUT:[0-9a-z]+]]: tensor<64xf32>
@@ -68,7 +68,7 @@
#map2 = affine_map<(d0)[s0, s1] -> (-(d0 * s1) + s0, s1)>
module {
- // CHECK-LABEL: func @fuse_dynamic
+ // CHECK-LABEL: func.func @fuse_dynamic
// CHECK-SAME: %[[CHUNK_SIZE:[0-9a-z]+]]: index
// CHECK-SAME: %[[IN:[0-9a-z]+]]: tensor<?xf32>
// CHECK-SAME: %[[OUT:[0-9a-z]+]]: tensor<?xf32>
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/in-parallel-to-async.mlir b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/in-parallel-to-async.mlir
index 436b0ba..9ba25a0 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/in-parallel-to-async.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/in-parallel-to-async.mlir
@@ -10,11 +10,11 @@
#map4 = affine_map<(d0) -> (d0)>
module {
- // CHECK-LABEL: func @static_tile
+ // CHECK-LABEL: func.func @static_tile
// CHECK-SAME: %[[CHUNK_SIZE:[0-9a-z]+]]: index
// CHECK-SAME: %[[IN:[0-9a-z]+]]: memref<?xf32>
// CHECK-SAME: %[[OUT:[0-9a-z]+]]: memref<?xf32>
- func @static_tile(%arg0: index, %arg1: memref<?xf32>, %arg2: memref<?xf32>) {
+ func.func @static_tile(%arg0: index, %arg1: memref<?xf32>, %arg2: memref<?xf32>) {
%cst = arith.constant 4.200000e+01 : f32
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/in-parallel-to-sequential-for.mlir b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/in-parallel-to-sequential-for.mlir
index a0cd121..a2a047e 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/in-parallel-to-sequential-for.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/in-parallel-to-sequential-for.mlir
@@ -11,11 +11,11 @@
module {
- // CHECK-LABEL: func @static_tile_tensors
+ // CHECK-LABEL: func.func @static_tile_tensors
// CHECK-SAME: %[[CHUNK_SIZE:[0-9a-z]+]]: index
// CHECK-SAME: %[[IN:[0-9a-z]+]]: tensor<?xf32>
// CHECK-SAME: %[[OUT:[0-9a-z]+]]: tensor<?xf32>
- func @static_tile_tensors(%arg0: index, %arg1: tensor<?xf32>, %arg2: tensor<?xf32>) -> tensor<?xf32> {
+ func.func @static_tile_tensors(%arg0: index, %arg1: tensor<?xf32>, %arg2: tensor<?xf32>) -> tensor<?xf32> {
%cst = arith.constant 4.200000e+01 : f32
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
@@ -50,11 +50,11 @@
return %2 : tensor<?xf32>
}
- // CHECK-LABEL: func @static_tile_buffers
+ // CHECK-LABEL: func.func @static_tile_buffers
// CHECK-SAME: %[[CHUNK_SIZE:[0-9a-z]+]]: index
// CHECK-SAME: %[[IN:[0-9a-z]+]]: memref<?xf32>
// CHECK-SAME: %[[OUT:[0-9a-z]+]]: memref<?xf32>
- func @static_tile_buffers(%arg0: index, %arg1: memref<?xf32>, %arg2: memref<?xf32>) {
+ func.func @static_tile_buffers(%arg0: index, %arg1: memref<?xf32>, %arg2: memref<?xf32>) {
%cst = arith.constant 4.200000e+01 : f32
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/invalid.mlir b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/invalid.mlir
index e45ab16..f5652cd 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/invalid.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/invalid.mlir
@@ -1,6 +1,6 @@
// RUN: iree-dialects-opt --split-input-file --verify-diagnostics %s
-func @sort_invalid_dimension(%arg0: tensor<128xi32>) -> tensor<128xi32> {
+func.func @sort_invalid_dimension(%arg0: tensor<128xi32>) -> tensor<128xi32> {
// expected-error @+1 {{dimension must be within (0, 1]}}
%0 = iree_linalg_ext.sort dimension(1)
outs(%arg0 : tensor<128xi32>) {
@@ -13,7 +13,7 @@
// -----
-func @sort_mismatch_rank(%arg0: tensor<?x?xi32>, %arg1: tensor<?xf32>)
+func.func @sort_mismatch_rank(%arg0: tensor<?x?xi32>, %arg1: tensor<?xf32>)
-> (tensor<?x?xi32>, tensor<?xf32>) {
// expected-error @+1 {{expected operand 1 to be rank 2, same as other operands}}
%0:2 = iree_linalg_ext.sort dimension(0)
@@ -27,7 +27,7 @@
// -----
-func @sort_mismatch_shape(%arg0: tensor<?xi32>, %arg1: tensor<42xf32>)
+func.func @sort_mismatch_shape(%arg0: tensor<?xi32>, %arg1: tensor<42xf32>)
-> (tensor<?xi32>, tensor<42xf32>) {
// expected-error @+1 {{expected operand 1 to have same shape as other operands}}
%0:2 = iree_linalg_ext.sort dimension(0)
@@ -41,7 +41,7 @@
// -----
-func @scatter_mixed_tensor_memref(
+func.func @scatter_mixed_tensor_memref(
%update : memref<?x?xf32>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xf32>) -> tensor<?x?xf32> {
// expected-error @+1 {{expected inputs and outputs to be RankedTensorType or scalar}}
@@ -57,7 +57,7 @@
// -----
-func @scatter_mixed_tensor_memref(
+func.func @scatter_mixed_tensor_memref(
%update : tensor<?x?xf32>, %indices : memref<?x1xi32>,
%original : tensor<?x?xf32>) -> tensor<?x?xf32> {
// expected-error @+1 {{expected inputs and outputs to be RankedTensorType or scalar}}
@@ -73,7 +73,7 @@
// -----
-func @scatter_extra_outputs(
+func.func @scatter_extra_outputs(
%update : tensor<?x?xf32>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xf32>) -> (tensor<?x?xf32>, tensor<?x?xf32>) {
// expected-error @+1 {{expected number of outputs to be same as the number of results}}
@@ -89,7 +89,7 @@
// -----
-func @scatter_mixed_tensor_memref(
+func.func @scatter_mixed_tensor_memref(
%update : tensor<?x?xf32>, %indices : tensor<?x1xi32>,
%original : memref<?x?xf32>) -> tensor<?x?xf32> {
// expected-error @+1 {{expected inputs and outputs to be RankedTensorType or scalar}}
@@ -105,7 +105,7 @@
// -----
-func @scatter_output_type_mismatch(
+func.func @scatter_output_type_mismatch(
%update : tensor<?x?xf32>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xf32>) -> tensor<4x?xf32> {
// expected-error @+1 {{expected type of `outs` operand #0 'tensor<?x?xf32>' to be same as result type 'tensor<4x?xf32>'}}
@@ -121,7 +121,7 @@
// -----
-func @scatter_mixed_tensor_memref(
+func.func @scatter_mixed_tensor_memref(
%update : memref<?x?xf32>, %indices : tensor<?x1xi32>,
%original : memref<?x?xf32>) {
// expected-error @+1 {{expected inputs and outputs to be MemRefType or scalar}}
@@ -137,7 +137,7 @@
// -----
-func @scatter_mixed_tensor_memref(
+func.func @scatter_mixed_tensor_memref(
%update : memref<?x?xf32>, %indices : memref<?x1xi32>,
%original : tensor<?x?xf32>) {
// expected-error @+1 {{expected inputs and outputs to be MemRefType or scalar}}
@@ -153,7 +153,7 @@
// -----
-func @scatter_dim_mismatch(
+func.func @scatter_dim_mismatch(
%update : tensor<?x?xf32>, %indices : tensor<48x1xi32>,
%original : tensor<?x?xf32>) -> tensor<?x?xf32> {
// expected-error @+1 {{mismatch in shape of indices and update value at dim#0}}
@@ -169,7 +169,7 @@
// -----
-func @scatter_dim_mismatch(
+func.func @scatter_dim_mismatch(
%update : tensor<64x?xf32>, %indices : tensor<48x1xi32>,
%original : tensor<?x?xf32>) -> tensor<?x?xf32> {
// expected-error @+1 {{mismatch in shape of indices and update value at dim#0}}
@@ -185,7 +185,7 @@
// -----
-func @scatter_dim_mismatch(
+func.func @scatter_dim_mismatch(
%update : tensor<?x?x?x?xf32>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xf32>) -> tensor<?x?xf32> {
// expected-error @+1 {{op update value rank exceeds the rank of the original value}}
@@ -201,7 +201,7 @@
// -----
-func @scatter_dim_mismatch(
+func.func @scatter_dim_mismatch(
%update : tensor<?x4xf32>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xf32>) -> tensor<?x?xf32> {
// expected-error @+1 {{mismatch in shape of update value dim#1 and original value at dim#1}}
@@ -217,7 +217,7 @@
// -----
-func @scatter_region_type_mismatch(
+func.func @scatter_region_type_mismatch(
%update : tensor<?x?xi32>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xi32>) -> tensor<?x?xi32> {
// expected-error @+1 {{expected region to have scalar argument of integer or float types}}
@@ -234,7 +234,7 @@
// -----
-func @scatter_region_type_mismatch(
+func.func @scatter_region_type_mismatch(
%update : tensor<?x?xi32>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xi32>) -> tensor<?x?xi32> {
// expected-error @+1 {{mismatch in argument 0 of region 'i64' and element type of update value 'i32'}}
@@ -251,7 +251,7 @@
// -----
-func @scatter_region_type_mismatch(
+func.func @scatter_region_type_mismatch(
%update : tensor<?x?xi32>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xi32>) -> tensor<?x?xi32> {
// expected-error @+1 {{mismatch in argument 1 of region 'i64' and element type of original value 'i32'}}
@@ -268,7 +268,7 @@
// -----
-func @scatter_region_type_mismatch(
+func.func @scatter_region_type_mismatch(
%update : tensor<?x?xi32>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xi64>) -> tensor<?x?xi64> {
// expected-error @+1 {{mismatch in region argument types 'i32' and 'i64'}}
@@ -285,7 +285,7 @@
// -----
-func @scatter_region_type_mismatch(
+func.func @scatter_region_type_mismatch(
%update : tensor<?x?xi64>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xi64>) -> tensor<?x?xi64> {
// expected-error @+1 {{expected region to have two arguments}}
@@ -302,7 +302,7 @@
// -----
-func @scatter_yield_mismatch(
+func.func @scatter_yield_mismatch(
%update : tensor<?x?xi64>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xi64>) -> tensor<?x?xi64> {
%0 = iree_linalg_ext.scatter unique_indices(true)
@@ -319,7 +319,7 @@
// -----
-func @scatter_yield_mismatch(
+func.func @scatter_yield_mismatch(
%update : tensor<?x?xi64>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xi64>) -> tensor<?x?xi64> {
%0 = iree_linalg_ext.scatter unique_indices(true)
@@ -336,7 +336,7 @@
// -----
-func @scatter_index_depth_dynamic(
+func.func @scatter_index_depth_dynamic(
%update : tensor<?x?xi64>, %indices : tensor<?x?xi32>,
%original : tensor<?x?xi64>) -> tensor<?x?xi64> {
// expected-error @+1 {{expected index depth is static}}
@@ -353,7 +353,7 @@
// -----
-func @scatter_original_rank_mismatch(
+func.func @scatter_original_rank_mismatch(
%update : tensor<?xi64>, %indices : tensor<?x1xi32>,
%original : tensor<?x?xi64>) -> tensor<?x?xi64> {
// expected-error @+1 {{op index depth and update value does not cover rank of original value}}
@@ -370,7 +370,7 @@
// -----
-func @reverse_diff_element_type(%arg0: tensor<3x5xi32>) -> tensor<3x5xf32> {
+func.func @reverse_diff_element_type(%arg0: tensor<3x5xi32>) -> tensor<3x5xf32> {
%init = linalg.init_tensor [3, 5] : tensor<3x5xf32>
// expected-error @+1 {{expected input/output element types to be identical}}
%0 = iree_linalg_ext.reverse
@@ -382,7 +382,7 @@
// -----
-func @reverse_diff_shape(%arg0: tensor<3x5xi32>) -> tensor<3x6xi32> {
+func.func @reverse_diff_shape(%arg0: tensor<3x5xi32>) -> tensor<3x6xi32> {
%init = linalg.init_tensor [3, 6] : tensor<3x6xi32>
// expected-error @+1 {{incompatible input/output shapes}}
%0 = iree_linalg_ext.reverse
@@ -394,7 +394,7 @@
// -----
-func @reverse_dup_dims(%arg0: tensor<3x5xi32>) -> tensor<3x5xi32> {
+func.func @reverse_dup_dims(%arg0: tensor<3x5xi32>) -> tensor<3x5xi32> {
%init = linalg.init_tensor [3, 5] : tensor<3x5xi32>
// expected-error @+1 {{expected dimensions numbers are all unique}}
%0 = iree_linalg_ext.reverse
@@ -406,7 +406,7 @@
// -----
-func @not_enough_results() -> () {
+func.func @not_enough_results() -> () {
%num_threads = arith.constant 100 : index
// expected-error@+1 {{'iree_linalg_ext.in_parallel' op produces 1 results, but its terminator yields 0 values}}
%result = iree_linalg_ext.in_parallel %num_threads -> tensor<100xf32> {
@@ -417,7 +417,7 @@
// -----
-func @too_many_results(%1 : tensor<1xf32>, %out : tensor<100xf32>) -> () {
+func.func @too_many_results(%1 : tensor<1xf32>, %out : tensor<100xf32>) -> () {
%num_threads = arith.constant 100 : index
// expected-error@+1 {{'iree_linalg_ext.in_parallel' op produces 1 results, but its terminator yields 2 values}}
%result = iree_linalg_ext.in_parallel %num_threads -> tensor<100xf32> {
@@ -434,7 +434,7 @@
// -----
-func @type_mismatch(%1 : tensor<1xf32>, %out : tensor<200xf32>) -> () {
+func.func @type_mismatch(%1 : tensor<1xf32>, %out : tensor<200xf32>) -> () {
%num_threads = arith.constant 100 : index
// expected-error@+1 {{'iree_linalg_ext.in_parallel' op type mismatch between 0th result of in_parallel ('tensor<200xf32>') and 0th result yielded by its terminator ('tensor<100xf32>')}}
%result = iree_linalg_ext.in_parallel %num_threads -> tensor<100xf32> {
@@ -449,7 +449,7 @@
// -----
-func @topk_invalid(%input_values: tensor<2x10xf32>, %input_indices: tensor<2x10xi32>, %out_values : tensor<2x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<2x3xf32>, tensor<2x3xi32>) {
+func.func @topk_invalid(%input_values: tensor<2x10xf32>, %input_indices: tensor<2x10xi32>, %out_values : tensor<2x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<2x3xf32>, tensor<2x3xi32>) {
// expected-error@+1 {{expected two input operands}}
%0:2 = iree_linalg_ext.topk
dimension(1)
@@ -464,7 +464,7 @@
// -----
-func @topk_invalid(%input_values: tensor<2x10xi32>, %input_indices: tensor<2x10xi32>, %out_values : tensor<2x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<2x3xf32>, tensor<2x3xi32>) {
+func.func @topk_invalid(%input_values: tensor<2x10xi32>, %input_indices: tensor<2x10xi32>, %out_values : tensor<2x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<2x3xf32>, tensor<2x3xi32>) {
// expected-error@+1 {{expected input/output value types to be identical}}
%0:2 = iree_linalg_ext.topk
dimension(1)
@@ -479,7 +479,7 @@
// -----
-func @topk_invalid(%input_values: tensor<2x10xf32>, %input_indices: tensor<2x10xf32>, %out_values : tensor<2x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<2x3xf32>, tensor<2x3xi32>) {
+func.func @topk_invalid(%input_values: tensor<2x10xf32>, %input_indices: tensor<2x10xf32>, %out_values : tensor<2x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<2x3xf32>, tensor<2x3xi32>) {
// expected-error@+1 {{expected input/output indices types to be int}}
%0:2 = iree_linalg_ext.topk
dimension(1)
@@ -494,7 +494,7 @@
// -----
-func @topk_invalid(%input_values: tensor<10x2x10xf32>, %input_indices: tensor<10x2x10xi32>, %out_values : tensor<2x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<2x3xf32>, tensor<2x3xi32>) {
+func.func @topk_invalid(%input_values: tensor<10x2x10xf32>, %input_indices: tensor<10x2x10xi32>, %out_values : tensor<2x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<2x3xf32>, tensor<2x3xi32>) {
// expected-error@+1 {{expected input/output to have the same rank}}
%0:2 = iree_linalg_ext.topk
dimension(1)
@@ -509,7 +509,7 @@
// -----
-func @topk_invalid(%input_values: tensor<3x10xf32>, %input_indices: tensor<2x10xi32>, %out_values : tensor<2x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<2x3xf32>, tensor<2x3xi32>) {
+func.func @topk_invalid(%input_values: tensor<3x10xf32>, %input_indices: tensor<2x10xi32>, %out_values : tensor<2x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<2x3xf32>, tensor<2x3xi32>) {
// expected-error@+1 {{input indices/values shape must match}}
%0:2 = iree_linalg_ext.topk
dimension(1)
@@ -524,7 +524,7 @@
// -----
-func @topk_invalid(%input_values: tensor<2x10xf32>, %input_indices: tensor<2x10xi32>, %out_values : tensor<3x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<3x3xf32>, tensor<2x3xi32>) {
+func.func @topk_invalid(%input_values: tensor<2x10xf32>, %input_indices: tensor<2x10xi32>, %out_values : tensor<3x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<3x3xf32>, tensor<2x3xi32>) {
// expected-error@+1 {{output indices/values shape must match}}
%0:2 = iree_linalg_ext.topk
dimension(1)
@@ -539,7 +539,7 @@
// -----
-func @topk_invalid(%input_values: tensor<3x10xf32>, %input_indices: tensor<3x10xi32>, %out_values : tensor<2x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<2x3xf32>, tensor<2x3xi32>) {
+func.func @topk_invalid(%input_values: tensor<3x10xf32>, %input_indices: tensor<3x10xi32>, %out_values : tensor<2x3xf32>, %out_indices: tensor<2x3xi32>) -> (tensor<2x3xf32>, tensor<2x3xi32>) {
// expected-error@+1 {{incompatible input/output shapes}}
%0:2 = iree_linalg_ext.topk
dimension(1)
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/pad_contraction_to_block_size.mlir b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/pad_contraction_to_block_size.mlir
index 8cf3fa0..681a7d3 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/pad_contraction_to_block_size.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/pad_contraction_to_block_size.mlir
@@ -20,7 +20,7 @@
// CHECK: %[[VAL_15:.*]] = linalg.matmul ins(%[[VAL_16:.*]], %[[VAL_17:.*]] : tensor<256x512xf32>, tensor<512x1024xf32>) outs(%[[VAL_18:.*]] : tensor<256x1024xf32>) -> tensor<256x1024xf32>
// CHECK: %[[VAL_19:.*]] = tensor.extract_slice %[[VAL_15]][0, 0] [250, 1020] [1, 1] : tensor<256x1024xf32> to tensor<250x1020xf32>
// CHECK: return %[[VAL_19]] : tensor<250x1020xf32>
-func @pad_matmul_static(%arg0 : tensor<250x500xf32>, %arg1 : tensor<500x1020xf32>,
+func.func @pad_matmul_static(%arg0 : tensor<250x500xf32>, %arg1 : tensor<500x1020xf32>,
%arg2 : tensor<250x1020xf32>) -> tensor<250x1020xf32> {
%matmul = linalg.matmul
ins(%arg0, %arg1 : tensor<250x500xf32>, tensor<500x1020xf32>)
@@ -32,7 +32,7 @@
// CHECK-LABEL: @pad_matmul_noop
// CHECK-NOT: pad_tensor
// CHECK-NOT: extract_slice
-func @pad_matmul_noop(%arg0 : tensor<256x512xf32>, %arg1 : tensor<512x1024xf32>,
+func.func @pad_matmul_noop(%arg0 : tensor<256x512xf32>, %arg1 : tensor<512x1024xf32>,
%arg2 : tensor<256x1024xf32>) -> tensor<256x1024xf32> {
%matmul = linalg.matmul
ins(%arg0, %arg1 : tensor<256x512xf32>, tensor<512x1024xf32>)
@@ -60,7 +60,7 @@
// CHECK: %[[ORIG_DIM_VALUE:.*]] = tensor.dim %arg2, %[[DIM0]]
// CHECK: %[[RETURN:.*]] = tensor.extract_slice %[[PADDED_RESULT]][0, 0] {{\[}}%[[ORIG_DIM_VALUE]], 1024] [1, 1] : tensor<?x1024xf32> to tensor<?x1024xf32>
// CHECK: return %[[RETURN]] : tensor<?x1024xf32>
-func @pad_matmul_dynamic_row(%arg0 : tensor<?x512xf32>, %arg1 : tensor<512x1024xf32>,
+func.func @pad_matmul_dynamic_row(%arg0 : tensor<?x512xf32>, %arg1 : tensor<512x1024xf32>,
%arg2 : tensor<?x1024xf32>) -> tensor<?x1024xf32> {
%matmul = linalg.matmul
ins(%arg0, %arg1 : tensor<?x512xf32>, tensor<512x1024xf32>)
@@ -83,7 +83,7 @@
// CHECK: } : tensor<256x?xf32> to tensor<256x?xf32>
// Matmul:
// CHECK: %{{.*}} = linalg.matmul ins(%arg0, %[[RHS_PADDED]] : tensor<256x512xf32>, tensor<512x?xf32>) outs(%[[OUTPUT_PADDED]] : tensor<256x?xf32>) -> tensor<256x?xf32>
-func @pad_matmul_dynamic_col(%arg0 : tensor<256x512xf32>, %arg1 : tensor<512x?xf32>,
+func.func @pad_matmul_dynamic_col(%arg0 : tensor<256x512xf32>, %arg1 : tensor<512x?xf32>,
%arg2 : tensor<256x?xf32>) -> tensor<256x?xf32> {
%matmul = linalg.matmul
ins(%arg0, %arg1 : tensor<256x512xf32>, tensor<512x?xf32>)
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/pad_tiling.mlir b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/pad_tiling.mlir
index 5867d33..3aef994 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/pad_tiling.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/pad_tiling.mlir
@@ -3,7 +3,7 @@
// TODO: Re-enable when upstream tensor.pad op properly implements the tiling
// interface.
-func @pad_tensor(%arg0 : tensor<?x?xf32>, %arg1 : index, %arg2 : index,
+func.func @pad_tensor(%arg0 : tensor<?x?xf32>, %arg1 : index, %arg2 : index,
%arg3 : index, %arg4 : index, %arg5 : f32) -> tensor<?x?xf32> {
%0 = tensor.pad %arg0 low[%arg1, %arg2] high[%arg3, %arg4] {
^bb0(%arg6 : index, %arg7 : index):
@@ -13,7 +13,7 @@
return %0 : tensor<?x?xf32>
}
// CHECK-DAG: #[[MAP0:.+]] = affine_map<()[s0, s1, s2] -> (s2 + s0 + s1)>
-// CHECK: func @pad_tensor
+// CHECK: func.func @pad_tensor
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: index
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]+]]: index
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/roundtrip.mlir b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/roundtrip.mlir
index c2bd60a..e1006e4 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/roundtrip.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/roundtrip.mlir
@@ -1,11 +1,11 @@
// RUN: iree-dialects-opt --split-input-file %s | FileCheck %s
-// CHECK-LABEL: func @sort_tensor
+// CHECK-LABEL: func.func @sort_tensor
// CHECK: iree_linalg_ext.sort
// CHECK-SAME: dimension(0)
// CHECK-SAME: outs({{.*}})
// CHECK: iree_linalg_ext.yield
-func @sort_tensor(%arg0: tensor<128xi32>) -> tensor<128xi32> {
+func.func @sort_tensor(%arg0: tensor<128xi32>) -> tensor<128xi32> {
%0 = iree_linalg_ext.sort
dimension(0)
outs(%arg0 : tensor<128xi32>) {
@@ -18,12 +18,12 @@
// -----
-// CHECK-LABEL: func @sort_memref
+// CHECK-LABEL: func.func @sort_memref
// CHECK: iree_linalg_ext.sort
// CHECK-SAME: dimension(0)
// CHECK-SAME: outs({{.*}})
// CHECK: iree_linalg_ext.yield
-func @sort_memref(%arg0: memref<128xi32>) {
+func.func @sort_memref(%arg0: memref<128xi32>) {
iree_linalg_ext.sort dimension(0)
outs(%arg0 : memref<128xi32>) {
^bb0(%arg1: i32, %arg2: i32): // no predecessors
@@ -35,7 +35,7 @@
// -----
-func @sort_multi_result_tensor(
+func.func @sort_multi_result_tensor(
%arg0: tensor<?x?xi32>, %arg1: tensor<?x?xf32>)
-> (tensor<?x?xi32>, tensor<?x?xf32>) {
%0:2 = iree_linalg_ext.sort dimension(0)
@@ -46,7 +46,7 @@
} -> tensor<?x?xi32>, tensor<?x?xf32>
return %0#0, %0#1 : tensor<?x?xi32>, tensor<?x?xf32>
}
-// CHECK-LABEL: func @sort_multi_result_tensor
+// CHECK-LABEL: func.func @sort_multi_result_tensor
// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?xi32>
// CHECK-SAME: %[[ARG1:.+]]: tensor<?x?xf32>
// CHECK: %[[RESULT:.+]]:2 = iree_linalg_ext.sort dimension(0)
@@ -55,7 +55,7 @@
// -----
-func @sort_multi_result_memref(
+func.func @sort_multi_result_memref(
%arg0: memref<?x?xi32>, %arg1: memref<?x?xf32>) {
iree_linalg_ext.sort dimension(0)
outs(%arg0, %arg1 : memref<?x?xi32>, memref<?x?xf32>) {
@@ -65,7 +65,7 @@
}
return
}
-// CHECK-LABEL: func @sort_multi_result_memref
+// CHECK-LABEL: func.func @sort_multi_result_memref
// CHECK-SAME: %[[ARG0:.+]]: memref<?x?xi32>
// CHECK-SAME: %[[ARG1:.+]]: memref<?x?xf32>
// CHECK: iree_linalg_ext.sort dimension(0)
@@ -73,7 +73,7 @@
// -----
-func @scatter_tensor_dynamic(
+func.func @scatter_tensor_dynamic(
%original: tensor<?x?xf32>, %indices: tensor<?x1xi32>,
%update: tensor<?x?xf32>) -> tensor<?x?xf32> {
%0 = iree_linalg_ext.scatter
@@ -86,7 +86,7 @@
} -> tensor<?x?xf32>
return %0 : tensor<?x?xf32>
}
-// CHECK-LABEL: func @scatter_tensor_dynamic(
+// CHECK-LABEL: func.func @scatter_tensor_dynamic(
// CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[INDICES:[a-zA-Z0-9_]+]]: tensor<?x1xi32>
// CHECK-SAME: %[[UPDATE:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
@@ -99,7 +99,7 @@
// -----
-func @scatter_repeated_tensor_dynamic(
+func.func @scatter_repeated_tensor_dynamic(
%original: tensor<?x?xf32>, %indices: tensor<?x1xi32>,
%update: tensor<?x?xf32>) -> tensor<?x?xf32> {
%0 = iree_linalg_ext.scatter
@@ -112,7 +112,7 @@
} -> tensor<?x?xf32>
return %0 : tensor<?x?xf32>
}
-// CHECK-LABEL: func @scatter_repeated_tensor_dynamic(
+// CHECK-LABEL: func.func @scatter_repeated_tensor_dynamic(
// CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[INDICES:[a-zA-Z0-9_]+]]: tensor<?x1xi32>
// CHECK-SAME: %[[UPDATE:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
@@ -125,7 +125,7 @@
// -----
-func @scatter_tensor_static(
+func.func @scatter_tensor_static(
%original: tensor<128x3xf32>, %indices: tensor<48x1xi32>,
%update: tensor<48x3xf32>) -> tensor<128x3xf32> {
%0 = iree_linalg_ext.scatter
@@ -138,7 +138,7 @@
} -> tensor<128x3xf32>
return %0 : tensor<128x3xf32>
}
-// CHECK-LABEL: func @scatter_tensor_static(
+// CHECK-LABEL: func.func @scatter_tensor_static(
// CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9_]+]]: tensor<128x3xf32>
// CHECK-SAME: %[[INDICES:[a-zA-Z0-9_]+]]: tensor<48x1xi32>
// CHECK-SAME: %[[UPDATE:[a-zA-Z0-9_]+]]: tensor<48x3xf32>
@@ -151,7 +151,7 @@
// -----
-func @scatter_tensor_multi_index_depth(
+func.func @scatter_tensor_multi_index_depth(
%original: tensor<1x128x3xf32>, %indices: tensor<48x2xi32>,
%update: tensor<48x3xf32>) -> tensor<1x128x3xf32> {
%0 = iree_linalg_ext.scatter
@@ -164,7 +164,7 @@
} -> tensor<1x128x3xf32>
return %0 : tensor<1x128x3xf32>
}
-// CHECK-LABEL: func @scatter_tensor_multi_index_depth(
+// CHECK-LABEL: func.func @scatter_tensor_multi_index_depth(
// CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9_]+]]: tensor<1x128x3xf32>
// CHECK-SAME: %[[INDICES:[a-zA-Z0-9_]+]]: tensor<48x2xi32>
// CHECK-SAME: %[[UPDATE:[a-zA-Z0-9_]+]]: tensor<48x3xf32>
@@ -177,7 +177,7 @@
// -----
-func @scatter_memref_dynamic(
+func.func @scatter_memref_dynamic(
%original: memref<?x?xf32>, %indices: memref<?x1xi32>,
%update: memref<?x?xf32>) {
iree_linalg_ext.scatter
@@ -190,7 +190,7 @@
}
return
}
-// CHECK-LABEL: func @scatter_memref_dynamic(
+// CHECK-LABEL: func.func @scatter_memref_dynamic(
// CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9_]+]]: memref<?x?xf32>
// CHECK-SAME: %[[INDICES:[a-zA-Z0-9_]+]]: memref<?x1xi32>
// CHECK-SAME: %[[UPDATE:[a-zA-Z0-9_]+]]: memref<?x?xf32>
@@ -203,7 +203,7 @@
// -----
-func @scatter_memref_static(
+func.func @scatter_memref_static(
%original: memref<128x3xf32>, %indices: memref<48x1xi32>,
%update: memref<48x3xf32>) {
iree_linalg_ext.scatter
@@ -216,7 +216,7 @@
}
return
}
-// CHECK-LABEL: func @scatter_memref_static(
+// CHECK-LABEL: func.func @scatter_memref_static(
// CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9_]+]]: memref<128x3xf32>
// CHECK-SAME: %[[INDICES:[a-zA-Z0-9_]+]]: memref<48x1xi32>
// CHECK-SAME: %[[UPDATE:[a-zA-Z0-9_]+]]: memref<48x3xf32>
@@ -229,7 +229,7 @@
// -----
-func @scatter_memref_multi_index_depth(
+func.func @scatter_memref_multi_index_depth(
%original: memref<1x128x3xf32>, %indices: memref<48x2xi32>,
%update: memref<48x3xf32>) {
iree_linalg_ext.scatter
@@ -242,7 +242,7 @@
}
return
}
-// CHECK-LABEL: func @scatter_memref_multi_index_depth(
+// CHECK-LABEL: func.func @scatter_memref_multi_index_depth(
// CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9_]+]]: memref<1x128x3xf32>
// CHECK-SAME: %[[INDICES:[a-zA-Z0-9_]+]]: memref<48x2xi32>
// CHECK-SAME: %[[UPDATE:[a-zA-Z0-9_]+]]: memref<48x3xf32>
@@ -255,7 +255,7 @@
// -----
-func @scatter_update_scalar_1D(
+func.func @scatter_update_scalar_1D(
%original: tensor<8xi32>, %indices: tensor<3x1xi32>,
%updates: tensor<3xi32>) -> tensor<8xi32> {
%0 = iree_linalg_ext.scatter
@@ -267,7 +267,7 @@
} -> tensor<8xi32>
return %0 : tensor<8xi32>
}
-// CHECK-LABEL: func @scatter_update_scalar_1D(
+// CHECK-LABEL: func.func @scatter_update_scalar_1D(
// CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[INDICES:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[UPDATE:[a-zA-Z0-9_]+]]
@@ -280,7 +280,7 @@
// -----
-func @scatter_update_scalar_2D(
+func.func @scatter_update_scalar_2D(
%original: tensor<4x3xi32>, %indices: tensor<3x2xi32>,
%updates: tensor<3xi32>) -> tensor<4x3xi32> {
%0 = iree_linalg_ext.scatter
@@ -292,7 +292,7 @@
} -> tensor<4x3xi32>
return %0 : tensor<4x3xi32>
}
-// CHECK-LABEL: func @scatter_update_scalar_2D(
+// CHECK-LABEL: func.func @scatter_update_scalar_2D(
// CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[INDICES:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[UPDATE:[a-zA-Z0-9_]+]]
@@ -305,7 +305,7 @@
// -----
-func @scatter_update_slice_2D(
+func.func @scatter_update_slice_2D(
%original: tensor<4x3xi32>, %indices: tensor<1x1xi32>,
%updates: tensor<1x3xi32>) -> tensor<4x3xi32> {
%0 = iree_linalg_ext.scatter
@@ -317,7 +317,7 @@
} -> tensor<4x3xi32>
return %0 : tensor<4x3xi32>
}
-// CHECK-LABEL: func @scatter_update_slice_2D(
+// CHECK-LABEL: func.func @scatter_update_slice_2D(
// CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[INDICES:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[UPDATE:[a-zA-Z0-9_]+]]
@@ -330,7 +330,7 @@
// -----
-func @fft_tensor(%arg0: tensor<1024xf32>, %arg1: tensor<1024xf32>)
+func.func @fft_tensor(%arg0: tensor<1024xf32>, %arg1: tensor<1024xf32>)
-> (tensor<1024xf32>, tensor<1024xf32>) {
%cst1 = arith.constant 1 : index
%0:2 = iree_linalg_ext.fft
@@ -339,7 +339,7 @@
: tensor<1024xf32>, tensor<1024xf32>
return %0#0, %0#1 : tensor<1024xf32>, tensor<1024xf32>
}
-// CHECK-LABEL: func @fft_tensor(
+// CHECK-LABEL: func.func @fft_tensor(
// CHECK-SAME: %[[REAL:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[IMAG:[a-zA-Z0-9_]+]]
// CHECK: %[[CST:.+]] = arith.constant 1 : index
@@ -351,14 +351,14 @@
// -----
-func @fft_memref(%arg0: memref<1024xf32>, %arg1: memref<1024xf32>) {
+func.func @fft_memref(%arg0: memref<1024xf32>, %arg1: memref<1024xf32>) {
%cst1 = arith.constant 1 : index
iree_linalg_ext.fft
ins(%cst1: index)
outs(%arg0, %arg1: memref<1024xf32>, memref<1024xf32>)
return
}
-// CHECK-LABEL: func @fft_memref(
+// CHECK-LABEL: func.func @fft_memref(
// CHECK-SAME: %[[REAL:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[IMAG:[a-zA-Z0-9_]+]]
// CHECK: %[[CST:.+]] = arith.constant 1 : index
@@ -369,7 +369,7 @@
// -----
-func @fft_tensor_coef(%arg0: tensor<1024xf32>, %arg1: tensor<1024xf32>,
+func.func @fft_tensor_coef(%arg0: tensor<1024xf32>, %arg1: tensor<1024xf32>,
%arg2: tensor<1xf32>, %arg3: tensor<1xf32>) -> (tensor<1024xf32>, tensor<1024xf32>) {
%cst1 = arith.constant 1 : index
%0:2 = iree_linalg_ext.fft
@@ -378,7 +378,7 @@
: tensor<1024xf32>, tensor<1024xf32>
return %0#0, %0#1 : tensor<1024xf32>, tensor<1024xf32>
}
-// CHECK-LABEL: func @fft_tensor_coef(
+// CHECK-LABEL: func.func @fft_tensor_coef(
// CHECK-SAME: %[[REAL:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[IMAG:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[COEF_REAL:[a-zA-Z0-9_]+]]
@@ -392,7 +392,7 @@
// -----
-func @fft_memref_coef(%arg0: memref<1024xf32>, %arg1: memref<1024xf32>,
+func.func @fft_memref_coef(%arg0: memref<1024xf32>, %arg1: memref<1024xf32>,
%arg2: memref<1xf32>, %arg3: memref<1xf32>) {
%cst1 = arith.constant 1 : index
iree_linalg_ext.fft
@@ -400,7 +400,7 @@
outs(%arg0, %arg1: memref<1024xf32>, memref<1024xf32>)
return
}
-// CHECK-LABEL: func @fft_memref_coef(
+// CHECK-LABEL: func.func @fft_memref_coef(
// CHECK-SAME: %[[REAL:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[IMAG:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[COEF_REAL:[a-zA-Z0-9_]+]]
@@ -414,7 +414,7 @@
// -----
// The size of coefficient tensor is 2^(stage-1).
-func @fft_tensor_coef_stage_5(%arg0: tensor<1024xf32>, %arg1: tensor<1024xf32>,
+func.func @fft_tensor_coef_stage_5(%arg0: tensor<1024xf32>, %arg1: tensor<1024xf32>,
%arg2: tensor<16xf32>, %arg3: tensor<16xf32>) -> (tensor<1024xf32>, tensor<1024xf32>) {
%cst1 = arith.constant 5 : index
%0:2 = iree_linalg_ext.fft
@@ -423,7 +423,7 @@
: tensor<1024xf32>, tensor<1024xf32>
return %0#0, %0#1 : tensor<1024xf32>, tensor<1024xf32>
}
-// CHECK-LABEL: func @fft_tensor_coef_stage_5(
+// CHECK-LABEL: func.func @fft_tensor_coef_stage_5(
// CHECK-SAME: %[[REAL:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[IMAG:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[COEF_REAL:[a-zA-Z0-9_]+]]
@@ -437,7 +437,7 @@
// -----
-func @reverse_tensor(%arg0: tensor<3x5xi32>) -> tensor<3x5xi32> {
+func.func @reverse_tensor(%arg0: tensor<3x5xi32>) -> tensor<3x5xi32> {
%init = linalg.init_tensor [3, 5] : tensor<3x5xi32>
%0 = iree_linalg_ext.reverse
dimensions(dense<0> : tensor<1xi64>)
@@ -445,7 +445,7 @@
outs(%init : tensor<3x5xi32>) : tensor<3x5xi32>
return %0 : tensor<3x5xi32>
}
-// CHECK-LABEL: func @reverse_tensor
+// CHECK-LABEL: func.func @reverse_tensor
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: tensor<3x5xi32>
// CHECK: %[[INIT:.+]] = linalg.init_tensor [3, 5]
// CHECK: %[[RESULT:.+]] = iree_linalg_ext.reverse
@@ -455,14 +455,14 @@
// -----
-func @reverse_memref(%arg0: memref<3x5xi32>, %arg1: memref<3x5xi32>) {
+func.func @reverse_memref(%arg0: memref<3x5xi32>, %arg1: memref<3x5xi32>) {
iree_linalg_ext.reverse
dimensions(dense<0> : tensor<1xi64>)
ins(%arg0 : memref<3x5xi32>)
outs(%arg1 : memref<3x5xi32>)
return
}
-// CHECK-LABEL: func @reverse_memref
+// CHECK-LABEL: func.func @reverse_memref
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: memref<3x5xi32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: memref<3x5xi32>
// CHECK: iree_linalg_ext.reverse
@@ -472,7 +472,7 @@
// -----
-func @reverse_dynamic_tensor(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32> {
+func.func @reverse_dynamic_tensor(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32> {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%d0 = tensor.dim %arg0, %c0 : tensor<?x?xi32>
@@ -484,7 +484,7 @@
outs(%init : tensor<?x?xi32>) : tensor<?x?xi32>
return %0 : tensor<?x?xi32>
}
-// CHECK-LABEL: func @reverse_dynamic_tensor
+// CHECK-LABEL: func.func @reverse_dynamic_tensor
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: tensor<?x?xi32>
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
@@ -498,7 +498,7 @@
// -----
-func @reverse_static_dynamic_tensor(%arg0: tensor<3x5xi32>) -> tensor<?x?xi32> {
+func.func @reverse_static_dynamic_tensor(%arg0: tensor<3x5xi32>) -> tensor<?x?xi32> {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%d0 = tensor.dim %arg0, %c0 : tensor<3x5xi32>
@@ -510,7 +510,7 @@
outs(%init : tensor<?x?xi32>) : tensor<?x?xi32>
return %0 : tensor<?x?xi32>
}
-// CHECK-LABEL: func @reverse_static_dynamic_tensor
+// CHECK-LABEL: func.func @reverse_static_dynamic_tensor
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: tensor<3x5xi32>
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
@@ -524,7 +524,7 @@
// -----
-func @reverse_multi_dims(%arg0: tensor<3x5xi32>) -> tensor<3x5xi32> {
+func.func @reverse_multi_dims(%arg0: tensor<3x5xi32>) -> tensor<3x5xi32> {
%init = linalg.init_tensor [3, 5] : tensor<3x5xi32>
%0 = iree_linalg_ext.reverse
dimensions(dense<[0, 1]> : tensor<2xi64>)
@@ -532,7 +532,7 @@
outs(%init : tensor<3x5xi32>) : tensor<3x5xi32>
return %0 : tensor<3x5xi32>
}
-// CHECK-LABEL: func @reverse_multi_dims
+// CHECK-LABEL: func.func @reverse_multi_dims
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: tensor<3x5xi32>
// CHECK: %[[INIT:.+]] = linalg.init_tensor [3, 5]
// CHECK: %[[RESULT:.+]] = iree_linalg_ext.reverse
@@ -542,7 +542,7 @@
// -----
-func @topk_tensor(%input_values: tensor<20x10x8x4xf32>, %input_indices: tensor<20x10x8x4xi32>) -> (tensor<20x10x3x4xf32>, tensor<20x10x3x4xi32>) {
+func.func @topk_tensor(%input_values: tensor<20x10x8x4xf32>, %input_indices: tensor<20x10x8x4xi32>) -> (tensor<20x10x3x4xf32>, tensor<20x10x3x4xi32>) {
%out_values = linalg.init_tensor [20, 10, 3, 4] : tensor<20x10x3x4xf32>
%out_indices = linalg.init_tensor [20, 10, 3, 4] : tensor<20x10x3x4xi32>
%0:2 = iree_linalg_ext.topk
@@ -556,7 +556,7 @@
return %0#0, %0#1 : tensor<20x10x3x4xf32>, tensor<20x10x3x4xi32>
}
-// CHECK-LABEL: func @topk_tensor
+// CHECK-LABEL: func.func @topk_tensor
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: tensor<20x10x8x4xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: tensor<20x10x8x4xi32>
// CHECK: %[[OUT_VALUES:.+]] = linalg.init_tensor [20, 10, 3, 4]
@@ -570,7 +570,7 @@
// -----
-func @topk_memref(%input_values: memref<4x10xf32>, %input_indices: memref<4x10xi32>, %out_values: memref<4x3xf32>, %out_indices: memref<4x3xi32>) {
+func.func @topk_memref(%input_values: memref<4x10xf32>, %input_indices: memref<4x10xi32>, %out_values: memref<4x3xf32>, %out_indices: memref<4x3xi32>) {
iree_linalg_ext.topk
dimension(1)
ins(%input_values, %input_indices : memref<4x10xf32> , memref<4x10xi32>)
@@ -581,7 +581,7 @@
}
return
}
-// CHECK-LABEL: func @topk_memref
+// CHECK-LABEL: func.func @topk_memref
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: memref<4x10xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: memref<4x10xi32>
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]+]]: memref<4x3xf32>
@@ -594,7 +594,7 @@
// -----
-func @topk_dynamic_tensor(%input_values: tensor<?x?xf32>, %input_indices: tensor<?x?xi32>, %out_values: tensor<?x?xf32>, %out_indices: tensor<?x?xi32>) -> (tensor<?x?xf32>, tensor<?x?xi32>) {
+func.func @topk_dynamic_tensor(%input_values: tensor<?x?xf32>, %input_indices: tensor<?x?xi32>, %out_values: tensor<?x?xf32>, %out_indices: tensor<?x?xi32>) -> (tensor<?x?xf32>, tensor<?x?xi32>) {
%0:2 = iree_linalg_ext.topk
dimension(1)
ins(%input_values, %input_indices : tensor<?x?xf32> , tensor<?x?xi32>)
@@ -605,7 +605,7 @@
} -> tensor<?x?xf32>, tensor<?x?xi32>
return %0#0, %0#1 : tensor<?x?xf32>, tensor<?x?xi32>
}
-// CHECK-LABEL: func @topk_dynamic_tensor
+// CHECK-LABEL: func.func @topk_dynamic_tensor
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: tensor<?x?xi32>
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]+]]: tensor<?x?xf32>
@@ -619,8 +619,8 @@
// -----
-// CHECK-LABEL: func @static_tile
-func @static_tile(%chunk_size: index, %in: tensor<?xf32>, %out: tensor<?xf32>, %out2: tensor<?xf32>) -> (tensor<?xf32>) {
+// CHECK-LABEL: func.func @static_tile
+func.func @static_tile(%chunk_size: index, %in: tensor<?xf32>, %out: tensor<?xf32>, %out2: tensor<?xf32>) -> (tensor<?xf32>) {
%c0 = arith.constant 0: index
//%d0 = tensor.dim %out, %c0: tensor<?xf32>
@@ -649,8 +649,8 @@
// -----
-// CHECK-LABEL: func @simple_example
-func @simple_example(%in: tensor<100xf32>, %out: tensor<100xf32>) -> (tensor<100xf32>) {
+// CHECK-LABEL: func.func @simple_example
+func.func @simple_example(%in: tensor<100xf32>, %out: tensor<100xf32>) -> (tensor<100xf32>) {
%num_threads = arith.constant 100 : index
%result = iree_linalg_ext.in_parallel %num_threads -> tensor<100xf32> {
^bb0(%thread_idx : index):
@@ -664,7 +664,7 @@
return %result : tensor<100xf32>
}
-func @no_terminator() -> () {
+func.func @no_terminator() -> () {
%num_threads = arith.constant 100 : index
iree_linalg_ext.in_parallel %num_threads -> () {
^bb0(%thread_idx : index):
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/tile-to-in-parallel.mlir b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/tile-to-in-parallel.mlir
index 2c0f80e..fe28db0 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/tile-to-in-parallel.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/tile-to-in-parallel.mlir
@@ -6,11 +6,11 @@
// CHECK-DAG: #[[$ID1_MAP:.*]] = affine_map<(d0) -> (d0)>
module {
- // CHECK-LABEL: func @static_tile
+ // CHECK-LABEL: func.func @static_tile
// CHECK-SAME: %[[CHUNK_SIZE:[0-9a-z]+]]: index
// CHECK-SAME: %[[IN:[0-9a-z]+]]: tensor<?xf32>
// CHECK-SAME: %[[OUT:[0-9a-z]+]]: tensor<?xf32>
- func @static_tile(%chunk_size: index, %in: tensor<?xf32>, %out: tensor<?xf32>) -> (tensor<?xf32>) {
+ func.func @static_tile(%chunk_size: index, %in: tensor<?xf32>, %out: tensor<?xf32>) -> (tensor<?xf32>) {
%c0 = arith.constant 0: index
// CHECK: %[[M:.*]] = tensor.dim %{{.*}}, %{{.*}} : tensor<?xf32>
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/tile-to-sequential-for.mlir b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/tile-to-sequential-for.mlir
index adb3cea..6a90914 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/tile-to-sequential-for.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/tile-to-sequential-for.mlir
@@ -4,10 +4,10 @@
// CHECK-DAG: #[[$ID1_MAP:.*]] = affine_map<(d0) -> (d0)>
module {
- // CHECK-LABEL: func @static_tile
+ // CHECK-LABEL: func.func @static_tile
// CHECK-SAME: %[[CHUNK_SIZE:[0-9a-z]+]]: index
// CHECK-SAME: %[[IN:[0-9a-z]+]]: tensor<?xf32>
- func @static_tile(%chunk_size: index, %in: tensor<?xf32>, %out: tensor<?xf32>, %out2: tensor<?xf32>) -> (tensor<?xf32>) {
+ func.func @static_tile(%chunk_size: index, %in: tensor<?xf32>, %out: tensor<?xf32>, %out2: tensor<?xf32>) -> (tensor<?xf32>) {
%c0 = arith.constant 0: index
// CHECK: %[[M:.*]] = tensor.dim %{{.*}}, %{{.*}} : tensor<?xf32>
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/tiling-to-tile-op.mlir b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/tiling-to-tile-op.mlir
index d320701..84fe089 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/tiling-to-tile-op.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/tiling-to-tile-op.mlir
@@ -6,7 +6,7 @@
// CHECK-SAME: %[[A:[0-9a-z]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[B:[0-9a-z]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[C:[0-9a-z]+]]: tensor<?x?xf32>
- func @matmul(%A: tensor<?x?xf32>, %B: tensor<?x?xf32>, %C: tensor<?x?xf32>) -> tensor<?x?xf32> {
+ func.func @matmul(%A: tensor<?x?xf32>, %B: tensor<?x?xf32>, %C: tensor<?x?xf32>) -> tensor<?x?xf32> {
// CHECK: %[[C10:.*]] = arith.constant 10 : index
// CHECK: iree_linalg_ext.tile %[[C10]] outs(%[[C]]: tensor<?x?xf32>) -> (tensor<?x?xf32>) {
// CHECK: ^bb0(%[[OFF:.*]]: index, %[[SZ:.*]]: index, %[[C_ITER:.*]]: tensor<?x?xf32>):
@@ -45,7 +45,7 @@
// CHECK-SAME: %[[A:[0-9a-z]+]]: tensor<100x200xf32>
// CHECK-SAME: %[[B:[0-9a-z]+]]: tensor<200x300xf32>
// CHECK-SAME: %[[C:[0-9a-z]+]]: tensor<100x300xf32>
- func @matmul_static(%A: tensor<100x200xf32>, %B: tensor<200x300xf32>, %C: tensor<100x300xf32>) -> tensor<100x300xf32> {
+ func.func @matmul_static(%A: tensor<100x200xf32>, %B: tensor<200x300xf32>, %C: tensor<100x300xf32>) -> tensor<100x300xf32> {
// CHECK: %[[C10:.*]] = arith.constant 10 : index
// CHECK: iree_linalg_ext.tile %[[C10]] outs(%[[C]]: tensor<100x300xf32>) -> (tensor<100x300xf32>) {
// CHECK: ^bb0(%[[OFF:.*]]: index, %[[SZ:.*]]: index, %[[C_ITER:.*]]: tensor<?x?xf32>):
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/tiling.mlir b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/tiling.mlir
index fe13e0b..4cb07d2 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/tiling.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/iree_linalg_ext/tiling.mlir
@@ -1,6 +1,6 @@
// RUN: iree-dialects-opt --iree-linalg-ext-tile --split-input-file --verify-diagnostics %s | FileCheck %s
-func @scatter_tiling(
+func.func @scatter_tiling(
%original: tensor<?x?xf32>, %indices: tensor<?x1xi32>,
%update : tensor<?x?xf32>) -> tensor<?x?xf32> {
%0 = iree_linalg_ext.scatter
@@ -16,7 +16,7 @@
}
// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0)[s0, s1] -> (10, -d0 + s1)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0)[s0, s1] -> (20, -d0 + s1)>
-// CHECK: func @scatter_tiling(
+// CHECK: func.func @scatter_tiling(
// CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[INDICES:[a-zA-Z0-9_]+]]: tensor<?x1xi32>
// CHECK-SAME: %[[UPDATES:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
@@ -52,7 +52,7 @@
// -----
-func @scatter_tiling_memref(
+func.func @scatter_tiling_memref(
%original: memref<?x?xf32>, %indices: memref<?x1xi32>,
%update : memref<?x?xf32>) {
iree_linalg_ext.scatter
@@ -68,7 +68,7 @@
}
// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0)[s0, s1] -> (10, -d0 + s1)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0)[s0, s1] -> (20, -d0 + s1)>
-// CHECK: func @scatter_tiling_memref(
+// CHECK: func.func @scatter_tiling_memref(
// CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9_]+]]: memref<?x?xf32>
// CHECK-SAME: %[[INDICES:[a-zA-Z0-9_]+]]: memref<?x1xi32>
// CHECK-SAME: %[[UPDATES:[a-zA-Z0-9_]+]]: memref<?x?xf32>
@@ -97,7 +97,7 @@
// -----
-func @scatter_tiling_distribution(
+func.func @scatter_tiling_distribution(
%original: tensor<?x?xf32>, %indices: tensor<?x1xi32>,
%update : tensor<?x?xf32>) -> tensor<?x?xf32> {
%0 = iree_linalg_ext.scatter
@@ -113,7 +113,7 @@
}
// CHECK-DAG: #[[MAP0:.+]] = affine_map<()[s0] -> (s0 * 10)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0)[s0, s1] -> (10, -d0 + s1)>
-// CHECK: func @scatter_tiling_distribution(
+// CHECK: func.func @scatter_tiling_distribution(
// CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[INDICES:[a-zA-Z0-9_]+]]: tensor<?x1xi32>
// CHECK-SAME: %[[UPDATES:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
@@ -147,7 +147,7 @@
// -----
-func @scatter_no_tiling(
+func.func @scatter_no_tiling(
%original: tensor<?x?xf32>, %indices: tensor<?x1xi32>,
%update : tensor<?x?xf32>) -> tensor<?x?xf32> {
%0 = iree_linalg_ext.scatter
@@ -161,7 +161,7 @@
} -> tensor<?x?xf32>
return %0 : tensor<?x?xf32>
}
-// CHECK: func @scatter_no_tiling
+// CHECK: func.func @scatter_no_tiling
// CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[INDICES:[a-zA-Z0-9_]+]]: tensor<?x1xi32>
// CHECK-SAME: %[[UPDATES:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
@@ -174,7 +174,7 @@
// -----
-func @scatter_repeated_indices_tiling(
+func.func @scatter_repeated_indices_tiling(
%original: tensor<?x?xf32>, %indices: tensor<?x1xi32>,
%update : tensor<?x?xf32>) -> tensor<?x?xf32> {
%0 = iree_linalg_ext.scatter
@@ -190,7 +190,7 @@
}
// CHECK-DAG: #[[MAP:.+]] = affine_map<(d0)[s0, s1] -> (20, -d0 + s1)>
-// CHECK: func @scatter_repeated_indices_tiling
+// CHECK: func.func @scatter_repeated_indices_tiling
// CHECK-SAME: %[[ORIGINAL:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[INDICES:[a-zA-Z0-9_]+]]: tensor<?x1xi32>
// CHECK-SAME: %[[UPDATES:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
@@ -221,7 +221,7 @@
// -----
-func @scatter_repeated_indices_no_tiling(
+func.func @scatter_repeated_indices_no_tiling(
%original: tensor<?x?xf32>, %indices: tensor<?x1xi32>,
%update : tensor<?x?xf32>) -> tensor<?x?xf32> {
// expected-error @+1 {{unimplemented tiling of non-parallel loop iterator type}}
@@ -239,7 +239,7 @@
// -----
-func @sort_1d(%arg0: tensor<?xi32>) -> tensor<?xi32> {
+func.func @sort_1d(%arg0: tensor<?xi32>) -> tensor<?xi32> {
%0 = iree_linalg_ext.sort
{__internal_linalg_transform__ = "outer_reduce_input"}
dimension(0)
@@ -250,7 +250,7 @@
} -> tensor<?xi32>
return %0 : tensor<?xi32>
}
-// CHECK: func @sort_1d(
+// CHECK: func.func @sort_1d(
// CHECK-SAME: %[[OPERAND:.+]]: tensor<?xi32>
// CHECK: %[[RESULT:.+]] = iree_linalg_ext.sort
// CHECK-SAME: {__internal_linalg_transform__ = "outer_reduce_output"}
@@ -259,7 +259,7 @@
// -----
-func @sort_2d(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32> {
+func.func @sort_2d(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32> {
%0 = iree_linalg_ext.sort
{__internal_linalg_transform__ = "inner_reduce_input"}
dimension(1)
@@ -271,7 +271,7 @@
return %0 : tensor<?x?xi32>
}
// CHECK: #[[MAP:.+]] = affine_map<(d0)[s0, s1] -> (10, -d0 + s1)>
-// CHECK: func @sort_2d(
+// CHECK: func.func @sort_2d(
// CHECK-SAME: %[[OPERAND:.+]]: tensor<?x?xi32>
// CHECK-DAG: %[[TILESIZE:.+]] = arith.constant 10 : index
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
@@ -293,7 +293,7 @@
// -----
-func @sort_2d_inner_parallel(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32> {
+func.func @sort_2d_inner_parallel(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32> {
%0 = iree_linalg_ext.sort
{__internal_linalg_transform__ = "outer_reduce_input"}
dimension(0)
@@ -305,7 +305,7 @@
return %0 : tensor<?x?xi32>
}
// CHECK: #[[MAP:.+]] = affine_map<(d0)[s0, s1] -> (20, -d0 + s1)>
-// CHECK: func @sort_2d_inner_parallel(
+// CHECK: func.func @sort_2d_inner_parallel(
// CHECK-SAME: %[[OPERAND:.+]]: tensor<?x?xi32>
// CHECK-DAG: %[[TILESIZE:.+]] = arith.constant 20 : index
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
@@ -327,7 +327,7 @@
// -----
-func @sort_2d_multi_result(
+func.func @sort_2d_multi_result(
%arg0: tensor<?x?xi32>, %arg1: tensor<?x?xf32>)
-> (tensor<?x?xi32>, tensor<?x?xf32>) {
%0:2 = iree_linalg_ext.sort
@@ -341,7 +341,7 @@
return %0#0, %0#1 : tensor<?x?xi32>, tensor<?x?xf32>
}
// CHECK: #[[MAP:.+]] = affine_map<(d0)[s0, s1] -> (10, -d0 + s1)>
-// CHECK: func @sort_2d_multi_result(
+// CHECK: func.func @sort_2d_multi_result(
// CHECK-SAME: %[[OPERAND1:.+]]: tensor<?x?xi32>
// CHECK-SAME: %[[OPERAND2:.+]]: tensor<?x?xf32>
// CHECK-DAG: %[[TILESIZE:.+]] = arith.constant 10 : index
@@ -368,7 +368,7 @@
// -----
-func @sort_2d_multi_result_memref(
+func.func @sort_2d_multi_result_memref(
%arg0: memref<?x?xi32>, %arg1: memref<?x?xf32>) {
iree_linalg_ext.sort
{__internal_linalg_transform__ = "outer_reduce_input"}
@@ -381,7 +381,7 @@
return
}
// CHECK: #[[MAP:.+]] = affine_map<(d0)[s0, s1] -> (20, -d0 + s1)>
-// CHECK: func @sort_2d_multi_result_memref(
+// CHECK: func.func @sort_2d_multi_result_memref(
// CHECK-SAME: %[[OPERAND1:.+]]: memref<?x?xi32>
// CHECK-SAME: %[[OPERAND2:.+]]: memref<?x?xf32>
// CHECK-DAG: %[[TILESIZE:.+]] = arith.constant 20 : index
@@ -401,7 +401,7 @@
// -----
-func @sort_3d_multi_result_distribute(
+func.func @sort_3d_multi_result_distribute(
%arg0: tensor<?x?x?xi32>, %arg1 : tensor<?x?x?xf32>)
-> (tensor<?x?x?xi32>, tensor<?x?x?xf32>) {
%0, %1 = iree_linalg_ext.sort
@@ -418,7 +418,7 @@
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0)[s0, s1] -> (10, -d0 + s1)>
// CHECK-DAG: #[[MAP2:.+]] = affine_map<()[s0] -> (s0 * 30)>
// CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0)[s0, s1] -> (30, -d0 + s1)>
-// CHECK: func @sort_3d_multi_result_distribute(
+// CHECK: func.func @sort_3d_multi_result_distribute(
// CHECK-SAME: %[[OPERAND1:[a-zA-Z0-9_]+]]: tensor<?x?x?xi32>
// CHECK-SAME: %[[OPERAND2:[a-zA-Z0-9_]+]]: tensor<?x?x?xf32>
// CHECK-DAG: %[[TILESIZE1:.+]] = arith.constant 10 : index
@@ -460,7 +460,7 @@
// -----
-func @sort_3d_multi_result_distribute_memref(
+func.func @sort_3d_multi_result_distribute_memref(
%arg0: memref<?x?x?xi32>, %arg1 : memref<?x?x?xf32>) {
iree_linalg_ext.sort
{__internal_linalg_transform__ = "distribute_input"}
@@ -476,7 +476,7 @@
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0)[s0, s1] -> (10, -d0 + s1)>
// CHECK-DAG: #[[MAP2:.+]] = affine_map<()[s0] -> (s0 * 30)>
// CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0)[s0, s1] -> (30, -d0 + s1)>
-// CHECK: func @sort_3d_multi_result_distribute_memref(
+// CHECK: func.func @sort_3d_multi_result_distribute_memref(
// CHECK-SAME: %[[OPERAND1:[a-zA-Z0-9_]+]]: memref<?x?x?xi32>
// CHECK-SAME: %[[OPERAND2:[a-zA-Z0-9_]+]]: memref<?x?x?xf32>
// CHECK-DAG: %[[TILESIZE1:.+]] = arith.constant 10 : index
@@ -509,7 +509,7 @@
// -----
-func @fft_1d_stage_5(%arg0: tensor<1024xf32>, %arg1: tensor<1024xf32>,
+func.func @fft_1d_stage_5(%arg0: tensor<1024xf32>, %arg1: tensor<1024xf32>,
%arg2: tensor<16xf32>, %arg3: tensor<16xf32>) -> (tensor<1024xf32>, tensor<1024xf32>) {
%cst1 = arith.constant 5 : index
%0:2 = iree_linalg_ext.fft
@@ -520,7 +520,7 @@
return %0#0, %0#1 : tensor<1024xf32>, tensor<1024xf32>
}
// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0)[s0, s1] -> (32, -d0 + s1)>
-// CHECK: func @fft_1d_stage_5(
+// CHECK: func.func @fft_1d_stage_5(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[COEF_REAL:[a-zA-Z0-9_]+]]
@@ -546,7 +546,7 @@
// -----
-func @fft_2d_stage_5(%arg0: tensor<3x1024xf32>, %arg1: tensor<3x1024xf32>,
+func.func @fft_2d_stage_5(%arg0: tensor<3x1024xf32>, %arg1: tensor<3x1024xf32>,
%arg2: tensor<16xf32>, %arg3: tensor<16xf32>) -> (tensor<3x1024xf32>, tensor<3x1024xf32>) {
%cst1 = arith.constant 5 : index
%0:2 = iree_linalg_ext.fft
@@ -558,7 +558,7 @@
}
// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0)[s0, s1] -> (10, -d0 + s1)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0)[s0, s1] -> (32, -d0 + s1)>
-// CHECK: func @fft_2d_stage_5(
+// CHECK: func.func @fft_2d_stage_5(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[COEF_REAL:[a-zA-Z0-9_]+]]
@@ -588,7 +588,7 @@
// -----
-func @fft_1d_stage_5_memref(%arg0: memref<1024xf32>, %arg1: memref<1024xf32>,
+func.func @fft_1d_stage_5_memref(%arg0: memref<1024xf32>, %arg1: memref<1024xf32>,
%arg2: memref<16xf32>, %arg3: memref<16xf32>) {
%cst1 = arith.constant 5 : index
iree_linalg_ext.fft
@@ -599,7 +599,7 @@
}
// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0)[s0, s1] -> (32, -d0 + s1)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0)[s0] -> (d0 + s0)>
-// CHECK: func @fft_1d_stage_5_memref(
+// CHECK: func.func @fft_1d_stage_5_memref(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[COEF_REAL:[a-zA-Z0-9_]+]]
@@ -619,7 +619,7 @@
// -----
-func @reverse_memref(%arg0: memref<?xi32>, %arg1: memref<?xi32>) {
+func.func @reverse_memref(%arg0: memref<?xi32>, %arg1: memref<?xi32>) {
iree_linalg_ext.reverse
{__internal_linalg_transform__ = "tiling_input"}
dimensions(dense<0> : tensor<1xi64>)
@@ -630,7 +630,7 @@
// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0)[s0, s1] -> (10, -d0 + s1)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0)[s0] -> (d0 + s0)>
// CHECK-DAG: #[[MAP2:.+]] = affine_map<()[s0, s1, s2] -> (s0 - s1 - s2)>
-// CHECK: func @reverse_memref(
+// CHECK: func.func @reverse_memref(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
@@ -650,7 +650,7 @@
// -----
-func @reverse_tensor_multi_dim(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32> {
+func.func @reverse_tensor_multi_dim(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32> {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%d0 = tensor.dim %arg0, %c0 : tensor<?x?xi32>
@@ -666,7 +666,7 @@
// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0)[s0, s1] -> (10, -d0 + s1)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0)[s0, s1] -> (20, -d0 + s1)>
// CHECK-DAG: #[[MAP2:.+]] = affine_map<()[s0, s1, s2] -> (s0 - s1 - s2)>
-// CHECK: func @reverse_tensor_multi_dim(
+// CHECK: func.func @reverse_tensor_multi_dim(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
@@ -704,7 +704,7 @@
// -----
-func @scan_1d(%0: tensor<128xi32>) -> tensor<128xi32> {
+func.func @scan_1d(%0: tensor<128xi32>) -> tensor<128xi32> {
%c0 = linalg.init_tensor [] : tensor<i32>
%1 = linalg.init_tensor [128] : tensor<128xi32>
%2:2 = iree_linalg_ext.scan
@@ -717,7 +717,7 @@
} -> tensor<128xi32>, tensor<i32>
return %2#0 : tensor<128xi32>
}
-// CHECK: func @scan_1d(
+// CHECK: func.func @scan_1d(
// CHECK-SAME: %[[OPERAND:.+]]: tensor<128xi32>
// CHECK: %[[ACC:.+]] = linalg.init_tensor [] : tensor<i32>
// CHECK: %[[OUTPUT:.+]] = linalg.init_tensor [128] : tensor<128xi32>
@@ -729,7 +729,7 @@
// -----
-func @scan_2d(%0: tensor<16x32xi32>) -> tensor<16x32xi32> {
+func.func @scan_2d(%0: tensor<16x32xi32>) -> tensor<16x32xi32> {
%c0 = linalg.init_tensor [32] : tensor<32xi32>
%1 = linalg.init_tensor [16, 32] : tensor<16x32xi32>
%2:2 = iree_linalg_ext.scan
@@ -743,7 +743,7 @@
return %2#0 : tensor<16x32xi32>
}
// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0)[s0, s1] -> (20, -d0 + s1)>
-// CHECK: func @scan_2d(
+// CHECK: func.func @scan_2d(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]
// CHECK: %[[C0:.+]] = arith.constant 0 : index
// CHECK: %[[C16:.+]] = arith.constant 16 : index
@@ -771,7 +771,7 @@
// -----
-func @scan_2d_memref(%0: memref<16x32xi32>, %1: memref<16x32xi32>) {
+func.func @scan_2d_memref(%0: memref<16x32xi32>, %1: memref<16x32xi32>) {
%c0 = memref.alloc() : memref<32xi32>
iree_linalg_ext.scan
{__internal_linalg_transform__ = "outer_reduce_input"}
@@ -785,7 +785,7 @@
}
// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0)[s0, s1] -> (20, -d0 + s1)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1)[s0] -> (d0 * 32 + s0 + d1)>
-// CHECK: func @scan_2d_memref(
+// CHECK: func.func @scan_2d_memref(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]
// CHECK: %[[C0:.+]] = arith.constant 0 : index
@@ -807,7 +807,7 @@
// -----
-func @topk_tile_tensor(%input_values: tensor<?x?xf32>, %input_indices: tensor<?x?xi32>, %out_values: tensor<?x3xf32> , %out_indices: tensor<?x3xi32>) -> (tensor<?x3xf32>, tensor<?x3xi32>) {
+func.func @topk_tile_tensor(%input_values: tensor<?x?xf32>, %input_indices: tensor<?x?xi32>, %out_values: tensor<?x3xf32> , %out_indices: tensor<?x3xi32>) -> (tensor<?x3xf32>, tensor<?x3xi32>) {
%0:2 = iree_linalg_ext.topk
{__internal_linalg_transform__ = "inner_reduce_input"}
dimension(1)
@@ -821,7 +821,7 @@
}
// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0)[s0, s1] -> (10, -d0 + s1)>
-// CHECK-LABEL: func @topk_tile_tensor
+// CHECK-LABEL: func.func @topk_tile_tensor
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]+]]
@@ -849,7 +849,7 @@
// -----
-func @topk_tile_memref(%input_values: memref<?x?xf32>, %input_indices: memref<?x?xi32>, %out_values: memref<?x3xf32>, %out_indices: memref<?x3xi32>) {
+func.func @topk_tile_memref(%input_values: memref<?x?xf32>, %input_indices: memref<?x?xi32>, %out_values: memref<?x3xf32>, %out_indices: memref<?x3xi32>) {
iree_linalg_ext.topk
{__internal_linalg_transform__ = "inner_reduce_input"}
dimension(1)
@@ -865,7 +865,7 @@
// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0)[s0, s1] -> (10, -d0 + s1)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1)[s0] -> (d0 * 3 + s0 + d1)>
-// CHECK-LABEL: func @topk_tile_memref
+// CHECK-LABEL: func.func @topk_tile_memref
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]+]]
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/bufferize.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/bufferize.mlir
index f0ecdc2..5f23d0b 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/bufferize.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/bufferize.mlir
@@ -1,11 +1,11 @@
// RUN: iree-dialects-opt --linalg-transform-interp %s | FileCheck %s
-// CHECK-LABEL: func @matmul_tensors(
+// CHECK-LABEL: func.func @matmul_tensors(
// CHECK-SAME: %[[TA:[0-9a-z]+]]: memref<128x128xf32
// CHECK-SAME: %[[TB:[0-9a-z]+]]: memref<128x128xf32
// CHECK-SAME: %[[TC:[0-9a-z]+]]: memref<128x128xf32
// CHECK-NOT: -> tensor
-func @matmul_tensors(
+func.func @matmul_tensors(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32> { linalg.inplaceable = true})
-> tensor<128x128xf32> {
// CHECK: linalg.matmul ins(%[[TA]], %[[TB]] : memref{{.*}}, memref{{.*}} outs(%[[TC]] : memref{{.*}})
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/double-tiling.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/double-tiling.mlir
index b043203..77022e6 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/double-tiling.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/double-tiling.mlir
@@ -2,8 +2,8 @@
// This test is verifying that a non-trivial 2*tiling+padding+vectorization transformation completes successfully
-// CHECK-LABEL: func @matmul_tensors(
-func @matmul_tensors(
+// CHECK-LABEL: func.func @matmul_tensors(
+func.func @matmul_tensors(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32> { linalg.inplaceable = true})
-> tensor<128x128xf32> {
// Pack transposed padding of 1st operand.
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/drop-schedule.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/drop-schedule.mlir
index 55366f5..eb464fb 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/drop-schedule.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/drop-schedule.mlir
@@ -1,6 +1,6 @@
// RUN: iree-dialects-opt --linalg-drop-schedule %s | FileCheck %s
-func @matmul_tensors(
+func.func @matmul_tensors(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32> { linalg.inplaceable = true})
-> tensor<128x128xf32> {
%0 = linalg.matmul ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>)
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/expert.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/expert.mlir
index 5febba9..a191979 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/expert.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/expert.mlir
@@ -2,10 +2,10 @@
// _UN: iree-dialects-opt --linalg-transform-expert-expansion --linalg-interp-transforms --split-input-file %s | FileCheck %s
// RUN: true
-// CHECK-LABEL: func @matmul_tensors
+// CHECK-LABEL: func.func @matmul_tensors
// CHECK-NOT: linalg
// CHECK: llvm
-func @matmul_tensors(
+func.func @matmul_tensors(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32> { linalg.inplaceable = true})
-> tensor<128x128xf32> {
%0 = linalg.matmul ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>)
@@ -85,10 +85,10 @@
// -----
-// CHECK-LABEL: func @matmul_tensors2
+// CHECK-LABEL: func.func @matmul_tensors2
// CHECK-NOT: linalg
// CHECK: llvm
-func @matmul_tensors2(
+func.func @matmul_tensors2(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32> { linalg.inplaceable = true})
-> tensor<128x128xf32> {
%0 = linalg.matmul ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>)
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/failure.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/failure.mlir
index 6d68b84..4ec045a 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/failure.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/failure.mlir
@@ -2,7 +2,7 @@
// This cannot be vectorized because of dynamic tensor shapes. We expect the
// pass to fail and report an error at the vectorization operation below.
-func public @non_vectorizable(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
+func.func public @non_vectorizable(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
%0 = linalg.generic {
indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>],
iterator_types = ["parallel"]}
@@ -33,7 +33,7 @@
// -----
-func public @no_loop(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
+func.func public @no_loop(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
%0 = linalg.generic {
indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>],
iterator_types = ["parallel"]}
@@ -64,11 +64,11 @@
// -----
-func private @prevent_dce()
+func.func private @prevent_dce()
-func public @loop(%lb: index, %ub: index, %step: index) {
+func.func public @loop(%lb: index, %ub: index, %step: index) {
scf.for %i = %lb to %ub step %step {
- call @prevent_dce() : () -> ()
+ func.call @prevent_dce() : () -> ()
}
return
}
@@ -93,7 +93,7 @@
// -----
-func public @no_outlining() {
+func.func public @no_outlining() {
"some.operation"() ({}, {}) : () -> ()
return
}
@@ -116,7 +116,7 @@
// -----
-func @no_replacement(
+func.func @no_replacement(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>,
%arg2: tensor<128x128xf32> {linalg.inplaceable = true})
-> tensor<128x128xf32> {
@@ -151,7 +151,7 @@
// -----
-func @repeated_match(
+func.func @repeated_match(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>,
%arg2: tensor<128x128xf32> {linalg.inplaceable = true})
-> tensor<128x128xf32> {
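The failure.mlir hunks above show the one place this rename goes beyond function headers: func.func declares the func dialect as the default for its own body, so a bare "return" directly inside a function keeps parsing, while a call nested in a region owned by another dialect (the scf.for body) must be fully qualified as "func.call". A small sketch of the distinction, assuming a trivial callee:

  func.func private @callee()
  func.func @caller(%lb: index, %ub: index, %step: index) {
    scf.for %i = %lb to %ub step %step {
      // The scf.for body does not default to the func dialect, so the
      // qualified spelling is required here.
      func.call @callee() : () -> ()
    }
    // Directly under func.func, bare return still parses as func.return.
    return
  }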
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/fuse-and-peel.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/fuse-and-peel.mlir
index 1d8fdf2..8e38f7f 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/fuse-and-peel.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/fuse-and-peel.mlir
@@ -1,7 +1,7 @@
// RUN: iree-dialects-opt --linalg-transform-interp %s | FileCheck %s
-// CHECK-LABEL: func @fuse_unary
-func @fuse_unary(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
+// CHECK-LABEL: func.func @fuse_unary
+func.func @fuse_unary(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
// CHECK: scf.for
// CHECK: scf.for
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/fuse.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/fuse.mlir
index 8af4353..036bc46 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/fuse.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/fuse.mlir
@@ -1,8 +1,8 @@
// RUN: iree-dialects-opt --linalg-transform-interp %s | FileCheck %s
-// CHECK-LABEL: func @fuse_unary
-func @fuse_unary(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
+// CHECK-LABEL: func.func @fuse_unary
+func.func @fuse_unary(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
// CHECK: scf.for
// CHECK: scf.for
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/generalize.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/generalize.mlir
index d4d50cc..1ede861 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/generalize.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/generalize.mlir
@@ -1,8 +1,8 @@
// RUN: iree-dialects-opt --linalg-transform-interp %s | FileCheck %s
-// CHECK-LABEL: func @generalize_unary
-func @generalize_unary(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
+// CHECK-LABEL: func.func @generalize_unary
+func.func @generalize_unary(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
// CHECK-NOT: linalg.elemwise_unary
// CHECK: linalg.generic
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/interchange.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/interchange.mlir
index 71eb65a..47e43b4 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/interchange.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/interchange.mlir
@@ -2,8 +2,8 @@
// CHECK: #[[$MAP:.*]] = affine_map<(d0, d1) -> (d1, d0)>
-// CHECK-LABEL: func @interchange_generic
-func @interchange_generic(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
+// CHECK-LABEL: func.func @interchange_generic
+func.func @interchange_generic(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
// CHECK: linalg.generic
// CHECK-SAME: indexing_maps = [#[[$MAP]], #[[$MAP]]
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/pad.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/pad.mlir
index 8132f04..72799fa 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/pad.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/pad.mlir
@@ -2,8 +2,8 @@
#map = affine_map<()[s0] -> (-s0 + 12, 5)>
-// CHECK-LABEL: func @pad_unary
-func @pad_unary(%arg0: tensor<24x12xf32>,
+// CHECK-LABEL: func.func @pad_unary
+func.func @pad_unary(%arg0: tensor<24x12xf32>,
%arg1: tensor<24x12xf32>) -> tensor<24x12xf32> {
// CHECK: %[[C0:.*]] = arith.constant 0 : index
%c0 = arith.constant 0 : index
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/peel.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/peel.mlir
index 4487894..7075686 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/peel.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/peel.mlir
@@ -3,7 +3,7 @@
// CHECK-DAG: #[[MAP0:.*]] = affine_map<()[s0, s1, s2] -> (s1 - (-s0 + s1) mod s2)>
// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0)[s0] -> (-d0 + s0)>
-// CHECK: func @fully_dynamic_bounds(
+// CHECK: func.func @fully_dynamic_bounds(
// CHECK-SAME: %[[LB:.*]]: index, %[[UB:.*]]: index, %[[STEP:.*]]: index
// CHECK: %[[C0_I32:.*]] = arith.constant 0 : i32
// CHECK: %[[NEW_UB:.*]] = affine.apply #[[MAP0]]()[%[[LB]], %[[UB]], %[[STEP]]]
@@ -22,7 +22,7 @@
// CHECK: }
// CHECK: return %[[RESULT]]
#map = affine_map<(d0, d1)[s0] -> (s0, d0 - d1)>
-func @fully_dynamic_bounds(%lb : index, %ub: index, %step: index) -> i32 {
+func.func @fully_dynamic_bounds(%lb : index, %ub: index, %step: index) -> i32 {
%c0 = arith.constant 0 : i32
%r = scf.for %iv = %lb to %ub step %step iter_args(%arg = %c0) -> i32 {
%s = affine.min #map(%ub, %iv)[%step]
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/scalarize.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/scalarize.mlir
index 0faa579..07b17dd 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/scalarize.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/scalarize.mlir
@@ -1,6 +1,6 @@
// RUN: iree-dialects-opt --linalg-transform-interp %s | FileCheck %s
-func @fun_to_benchmark(%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32>) ->
+func.func @fun_to_benchmark(%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32>) ->
tensor<128x128xf32> attributes {passthrough = ["noinline", ["target-cpu", "skylake-avx512"], ["prefer-vector-width", "512"]]} {
// With scalarization we expect vectorization to still work albeit with a leading
// `1` dimension.
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/scoped.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/scoped.mlir
index f2f837e..7e66f09 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/scoped.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/scoped.mlir
@@ -3,7 +3,7 @@
// WRAP-LABEL: @test_wrap
// WRAP-SAME: (%[[ARG0:.*]]: i32) -> i32
-func @test_wrap(%arg0: i32) -> i32 {
+func.func @test_wrap(%arg0: i32) -> i32 {
// WRAP: %[[V:.*]] = iree_linalg_transform.util.scope(%[[ARG0]], %[[ARG0]]) {
// WRAP-NEXT: ^[[B:.*]](%[[ARG1:.*]]: i32, %[[ARG2:.*]]: i32):
// WRAP-NEXT: %[[ADD:.*]] = arith.addi %[[ARG2]], %[[ARG2]]
@@ -16,7 +16,7 @@
// UNWRAP-LABEL: @test_unwrap
// UNWRAP-SAME: (%[[ARG0:.*]]: i32) -> (i32, i32)
-func @test_unwrap(%arg0: i32) -> (i32, i32) {
+func.func @test_unwrap(%arg0: i32) -> (i32, i32) {
// UNWRAP: %[[V0:.*]] = arith.addi %[[ARG0]], %[[ARG0]]
// UNWRAP-NEXT: %[[V1:.*]] = arith.addi %[[V0]], %[[ARG0]]
%0:2 = iree_linalg_transform.util.scope(%arg0) {
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/selective-targeting.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/selective-targeting.mlir
index 7e468bb..31d950e 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/selective-targeting.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/selective-targeting.mlir
@@ -1,7 +1,7 @@
// RUN: iree-dialects-opt %s --linalg-transform-interp --split-input-file | FileCheck %s
-// CHECK-LABEL: func @matmul_tensors(
-func @matmul_tensors(
+// CHECK-LABEL: func.func @matmul_tensors(
+func.func @matmul_tensors(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32>,
%arg3: tensor<128x128xf32>, %arg4: tensor<128x128xf32>, %arg5: tensor<128x128xf32>,
%arg6: tensor<128x128xf32> {linalg.inplaceable = true})
@@ -84,7 +84,7 @@
// -----
// CHECK-LABEL: @vectorize_one
-func @vectorize_one(
+func.func @vectorize_one(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32>,
%arg3: tensor<128x128xf32> {linalg.inplaceable = true})
-> tensor<128x128xf32> {
@@ -123,7 +123,7 @@
// -----
// CHECK-LABEL: @vectorize_all
-func @vectorize_all(
+func.func @vectorize_all(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32>,
%arg3: tensor<128x128xf32> {linalg.inplaceable = true})
-> tensor<128x128xf32> {
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/single-tiling-full-script.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/single-tiling-full-script.mlir
index 5f067a0..4099c29 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/single-tiling-full-script.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/single-tiling-full-script.mlir
@@ -3,7 +3,7 @@
// CHECK-LABEL: func @matmul_tensors
// CHECK-NOT: linalg
// CHECK: llvm
-func @matmul_tensors(
+func.func @matmul_tensors(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32> { linalg.inplaceable = true})
-> tensor<128x128xf32> {
%0 = linalg.matmul ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>)
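One stale-looking line in the file above, "CHECK-LABEL: func @matmul_tensors", was left without the new spelling and the test still passes: FileCheck matches its pattern anywhere within a line, and "func @matmul_tensors" occurs as a substring of "func.func @matmul_tensors". A minimal illustration:

  // CHECK-LABEL: func @matmul_tensors
  func.func @matmul_tensors() {
    return
  }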
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/tile-and-peel.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/tile-and-peel.mlir
index 89fd033..bb6aead 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/tile-and-peel.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/tile-and-peel.mlir
@@ -1,7 +1,7 @@
// RUN: iree-dialects-opt --linalg-transform-interp %s | FileCheck %s
-// CHECK-LABEL: func @matmul_tensors(
-func @matmul_tensors(
+// CHECK-LABEL: func.func @matmul_tensors(
+func.func @matmul_tensors(
%arg0: tensor<126x127xf32>, %arg1: tensor<127x128xf32>, %arg2: tensor<126x128xf32> { linalg.inplaceable = true})
-> tensor<126x128xf32> {
// CHECK-DAG: %[[c124:.*]] = arith.constant 124 : index
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/tile-interchange.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/tile-interchange.mlir
index 35371f5..1b1d0b3 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/tile-interchange.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/tile-interchange.mlir
@@ -9,7 +9,7 @@
// CHECK-LABEL: @matmul_021
// CHECK-NOT: linalg.generic
// CHECK: vector.contract
-func public @matmul_021(%arg0: tensor<39x154xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg1: tensor<154x5xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg2: tensor<39x5xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true}) -> tensor<39x5xf32> attributes {passthrough = ["noinline", ["target-cpu", "skylake-avx512"], ["prefer-vector-width", "512"]]} {
+func.func public @matmul_021(%arg0: tensor<39x154xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg1: tensor<154x5xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg2: tensor<39x5xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true}) -> tensor<39x5xf32> attributes {passthrough = ["noinline", ["target-cpu", "skylake-avx512"], ["prefer-vector-width", "512"]]} {
%0 = linalg.generic {indexing_maps = [#map0, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%arg0, %arg1 : tensor<39x154xf32>, tensor<154x5xf32>) outs(%arg2 : tensor<39x5xf32>) {
^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
%1 = arith.mulf %arg3, %arg4 : f32
@@ -50,7 +50,7 @@
// CHECK-LABEL: @matmul_210
// CHECK-NOT: linalg.generic
// CHECK: vector.contract
-func public @matmul_210(%arg0: tensor<39x154xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg1: tensor<154x5xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg2: tensor<39x5xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true}) -> tensor<39x5xf32> attributes {passthrough = ["noinline", ["target-cpu", "skylake-avx512"], ["prefer-vector-width", "512"]]} {
+func.func public @matmul_210(%arg0: tensor<39x154xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg1: tensor<154x5xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg2: tensor<39x5xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true}) -> tensor<39x5xf32> attributes {passthrough = ["noinline", ["target-cpu", "skylake-avx512"], ["prefer-vector-width", "512"]]} {
%0 = linalg.generic {indexing_maps = [#map0, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%arg0, %arg1 : tensor<39x154xf32>, tensor<154x5xf32>) outs(%arg2 : tensor<39x5xf32>) {
^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
%1 = arith.mulf %arg3, %arg4 : f32
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/tile.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/tile.mlir
index dd3b720..3b10917 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/tile.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/tile.mlir
@@ -1,11 +1,11 @@
// RUN: iree-dialects-opt --linalg-transform-interp %s | FileCheck %s
-// CHECK-LABEL: func @matmul_tensors(
+// CHECK-LABEL: func.func @matmul_tensors(
// CHECK-SAME: %[[TA:[0-9a-z]+]]: tensor<128x128xf32>
// CHECK-SAME: %[[TB:[0-9a-z]+]]: tensor<128x128xf32>
// CHECK-SAME: %[[TC:[0-9a-z]+]]: tensor<128x128xf32>
// CHECK-SAME: -> tensor<128x128xf32> {
-func @matmul_tensors(
+func.func @matmul_tensors(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32> { linalg.inplaceable = true})
-> tensor<128x128xf32> {
// CHECK: %[[TD0:.*]] = scf.for {{.*}} to {{.*}} step {{.*}} iter_args(%[[TC0:.*]] = %[[TC]]) -> (tensor<128x128xf32>) {
diff --git a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/vectorize.mlir b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/vectorize.mlir
index d298256..ef7520f 100644
--- a/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/vectorize.mlir
+++ b/llvm-external-projects/iree-dialects/test/Dialect/linalg_transform/vectorize.mlir
@@ -1,11 +1,11 @@
// RUN: iree-dialects-opt --linalg-transform-interp --linalg-transform-file-name=%p/vectorize-transforms.mlir %s | FileCheck %s
-// CHECK-LABEL: func @matmul_tensors(
+// CHECK-LABEL: func.func @matmul_tensors(
// CHECK-SAME: %[[TA:[0-9a-z]+]]: tensor<128x128xf32>
// CHECK-SAME: %[[TB:[0-9a-z]+]]: tensor<128x128xf32>
// CHECK-SAME: %[[TC:[0-9a-z]+]]: tensor<128x128xf32>
// CHECK-SAME: -> tensor<128x128xf32> {
-func @matmul_tensors(
+func.func @matmul_tensors(
%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32> { linalg.inplaceable = true})
-> tensor<128x128xf32> {
// CHECK: %[[VA:.*]] = vector.transfer_read %[[TA]]
diff --git a/llvm-external-projects/iree-dialects/test/Transforms/test-listener-canonicalize.mlir b/llvm-external-projects/iree-dialects/test/Transforms/test-listener-canonicalize.mlir
index 9de08af..40d7cda 100644
--- a/llvm-external-projects/iree-dialects/test/Transforms/test-listener-canonicalize.mlir
+++ b/llvm-external-projects/iree-dialects/test/Transforms/test-listener-canonicalize.mlir
@@ -6,7 +6,7 @@
// CHECK-LABEL: @select_same_val
// CHECK: return %arg1
-func @select_same_val(%arg0: i1, %arg1: i64) -> i64 {
+func.func @select_same_val(%arg0: i1, %arg1: i64) -> i64 {
%0 = arith.select %arg0, %arg1, %arg1 : i64
return %0 : i64
}
@@ -15,7 +15,7 @@
// CHECK-LABEL: @select_cmp_eq_select
// CHECK: return %arg1
-func @select_cmp_eq_select(%arg0: i64, %arg1: i64) -> i64 {
+func.func @select_cmp_eq_select(%arg0: i64, %arg1: i64) -> i64 {
%0 = arith.cmpi eq, %arg0, %arg1 : i64
%1 = arith.select %0, %arg0, %arg1 : i64
return %1 : i64
@@ -25,7 +25,7 @@
// CHECK-LABEL: @select_cmp_ne_select
// CHECK: return %arg0
-func @select_cmp_ne_select(%arg0: i64, %arg1: i64) -> i64 {
+func.func @select_cmp_ne_select(%arg0: i64, %arg1: i64) -> i64 {
%0 = arith.cmpi ne, %arg0, %arg1 : i64
%1 = arith.select %0, %arg0, %arg1 : i64
return %1 : i64
@@ -36,7 +36,7 @@
// CHECK-LABEL: @select_extui
// CHECK: %[[res:.+]] = arith.extui %arg0 : i1 to i64
// CHECK: return %[[res]]
-func @select_extui(%arg0: i1) -> i64 {
+func.func @select_extui(%arg0: i1) -> i64 {
%c0_i64 = arith.constant 0 : i64
%c1_i64 = arith.constant 1 : i64
%res = arith.select %arg0, %c1_i64, %c0_i64 : i64
@@ -48,7 +48,7 @@
// CHECK-DAG: %[[xor:.+]] = arith.xori %arg0, %true : i1
// CHECK-DAG: %[[res:.+]] = arith.extui %[[xor]] : i1 to i64
// CHECK: return %[[res]]
-func @select_extui2(%arg0: i1) -> i64 {
+func.func @select_extui2(%arg0: i1) -> i64 {
%c0_i64 = arith.constant 0 : i64
%c1_i64 = arith.constant 1 : i64
%res = arith.select %arg0, %c0_i64, %c1_i64 : i64
@@ -59,7 +59,7 @@
// CHECK-LABEL: @select_extui_i1
// CHECK-NEXT: return %arg0
-func @select_extui_i1(%arg0: i1) -> i1 {
+func.func @select_extui_i1(%arg0: i1) -> i1 {
%c0_i1 = arith.constant false
%c1_i1 = arith.constant true
%res = arith.select %arg0, %c1_i1, %c0_i1 : i1
@@ -73,7 +73,7 @@
// CHECK: %[[falseval:.+]] = arith.constant false
// CHECK: "test.consumer1"(%[[trueval]]) : (i1) -> ()
// CHECK: "test.consumer2"(%[[falseval]]) : (i1) -> ()
-func @branchCondProp(%arg0: i1) {
+func.func @branchCondProp(%arg0: i1) {
cf.cond_br %arg0, ^trueB, ^falseB
^trueB:
@@ -93,7 +93,7 @@
// CHECK-LABEL: @selToNot
// CHECK: %[[trueval:.+]] = arith.constant true
// CHECK: %{{.+}} = arith.xori %arg0, %[[trueval]] : i1
-func @selToNot(%arg0: i1) -> i1 {
+func.func @selToNot(%arg0: i1) -> i1 {
%true = arith.constant true
%false = arith.constant false
%res = arith.select %arg0, %false, %true : i1
diff --git a/llvm-external-projects/iree-dialects/test/Transforms/test-listener-cse.mlir b/llvm-external-projects/iree-dialects/test/Transforms/test-listener-cse.mlir
index 434add5..a272968 100644
--- a/llvm-external-projects/iree-dialects/test/Transforms/test-listener-cse.mlir
+++ b/llvm-external-projects/iree-dialects/test/Transforms/test-listener-cse.mlir
@@ -8,7 +8,7 @@
#map0 = affine_map<(d0) -> (d0 mod 2)>
// CHECK-LABEL: @simple_constant
-func @simple_constant() -> (i32, i32) {
+func.func @simple_constant() -> (i32, i32) {
// CHECK-NEXT: %c1_i32 = arith.constant 1 : i32
%0 = arith.constant 1 : i32
@@ -18,7 +18,7 @@
}
// CHECK-LABEL: @basic
-func @basic() -> (index, index) {
+func.func @basic() -> (index, index) {
// CHECK: %c0 = arith.constant 0 : index
%c0 = arith.constant 0 : index
%c1 = arith.constant 0 : index
@@ -32,7 +32,7 @@
}
// CHECK-LABEL: @many
-func @many(f32, f32) -> (f32) {
+func.func @many(f32, f32) -> (f32) {
^bb0(%a : f32, %b : f32):
// CHECK-NEXT: %0 = arith.addf %arg0, %arg1 : f32
%c = arith.addf %a, %b : f32
@@ -58,7 +58,7 @@
/// Check that operations are not eliminated if they have different operands.
// CHECK-LABEL: @different_ops
-func @different_ops() -> (i32, i32) {
+func.func @different_ops() -> (i32, i32) {
// CHECK: %c0_i32 = arith.constant 0 : i32
// CHECK: %c1_i32 = arith.constant 1 : i32
%0 = arith.constant 0 : i32
@@ -71,7 +71,7 @@
/// Check that operations are not eliminated if they have different result
/// types.
// CHECK-LABEL: @different_results
-func @different_results(%arg0: tensor<*xf32>) -> (tensor<?x?xf32>, tensor<4x?xf32>) {
+func.func @different_results(%arg0: tensor<*xf32>) -> (tensor<?x?xf32>, tensor<4x?xf32>) {
// CHECK: %0 = tensor.cast %arg0 : tensor<*xf32> to tensor<?x?xf32>
// CHECK-NEXT: %1 = tensor.cast %arg0 : tensor<*xf32> to tensor<4x?xf32>
%0 = tensor.cast %arg0 : tensor<*xf32> to tensor<?x?xf32>
@@ -83,7 +83,7 @@
/// Check that operations are not eliminated if they have different attributes.
// CHECK-LABEL: @different_attributes
-func @different_attributes(index, index) -> (i1, i1, i1) {
+func.func @different_attributes(index, index) -> (i1, i1, i1) {
^bb0(%a : index, %b : index):
// CHECK: %0 = arith.cmpi slt, %arg0, %arg1 : index
%0 = arith.cmpi slt, %a, %b : index
@@ -99,7 +99,7 @@
/// Check that operations with side effects are not eliminated.
// CHECK-LABEL: @side_effect
-func @side_effect() -> (memref<2x1xf32>, memref<2x1xf32>) {
+func.func @side_effect() -> (memref<2x1xf32>, memref<2x1xf32>) {
// CHECK: %0 = memref.alloc() : memref<2x1xf32>
%0 = memref.alloc() : memref<2x1xf32>
@@ -113,7 +113,7 @@
/// Check that operation definitions are properly propagated down the dominance
/// tree.
// CHECK-LABEL: @down_propagate_for
-func @down_propagate_for() {
+func.func @down_propagate_for() {
// CHECK: %c1_i32 = arith.constant 1 : i32
%0 = arith.constant 1 : i32
@@ -127,7 +127,7 @@
}
// CHECK-LABEL: @down_propagate
-func @down_propagate() -> i32 {
+func.func @down_propagate() -> i32 {
// CHECK-NEXT: %c1_i32 = arith.constant 1 : i32
%0 = arith.constant 1 : i32
@@ -148,7 +148,7 @@
/// Check that operation definitions are NOT propagated up the dominance tree.
// CHECK-LABEL: @up_propagate_for
-func @up_propagate_for() -> i32 {
+func.func @up_propagate_for() -> i32 {
// CHECK: affine.for {{.*}} = 0 to 4 {
affine.for %i = 0 to 4 {
// CHECK-NEXT: %c1_i32_0 = arith.constant 1 : i32
@@ -163,8 +163,8 @@
return %1 : i32
}
-// CHECK-LABEL: func @up_propagate
-func @up_propagate() -> i32 {
+// CHECK-LABEL: func.func @up_propagate
+func.func @up_propagate() -> i32 {
// CHECK-NEXT: %c0_i32 = arith.constant 0 : i32
%0 = arith.constant 0 : i32
@@ -194,8 +194,8 @@
/// The same test as above except that we are testing on a cfg embedded within
/// an operation region.
-// CHECK-LABEL: func @up_propagate_region
-func @up_propagate_region() -> i32 {
+// CHECK-LABEL: func.func @up_propagate_region
+func.func @up_propagate_region() -> i32 {
// CHECK-NEXT: %0 = "foo.region"
%0 = "foo.region"() ({
// CHECK-NEXT: %c0_i32 = arith.constant 0 : i32
@@ -228,7 +228,7 @@
/// This test checks that nested regions that are isolated from above are
/// properly handled.
// CHECK-LABEL: @nested_isolated
-func @nested_isolated() -> i32 {
+func.func @nested_isolated() -> i32 {
// CHECK-NEXT: arith.constant 1
%0 = arith.constant 1 : i32
diff --git a/llvm-external-projects/iree-dialects/test/Transforms/test-with-listener.mlir b/llvm-external-projects/iree-dialects/test/Transforms/test-with-listener.mlir
index f8dcc30..16ba1fe 100644
--- a/llvm-external-projects/iree-dialects/test/Transforms/test-with-listener.mlir
+++ b/llvm-external-projects/iree-dialects/test/Transforms/test-with-listener.mlir
@@ -1,7 +1,7 @@
// RUN: iree-dialects-opt --test-listener-canonicalize='listener=1' %s | FileCheck %s --check-prefix CANON
// RUN: iree-dialects-opt --test-listener-cse='listener=1' %s | FileCheck %s --check-prefix CSE
-func @test_canonicalize(%arg0: i32) -> (i32, i32) {
+func.func @test_canonicalize(%arg0: i32) -> (i32, i32) {
// CANON: REPLACED arith.addi
// CANON: REMOVED arith.addi
%c5 = arith.constant -5 : i32
@@ -10,7 +10,7 @@
return %0, %1 : i32, i32
}
-func @test_cse(%arg0: i32) -> (i32, i32) {
+func.func @test_cse(%arg0: i32) -> (i32, i32) {
// CSE: REPLACED arith.addi
// CSE: REMOVED arith.addi
%c5 = arith.constant -5 : i32
diff --git a/llvm-external-projects/iree-dialects/test/iree_pydm/optimize/variables_to_ssa.mlir b/llvm-external-projects/iree-dialects/test/iree_pydm/optimize/variables_to_ssa.mlir
index 2924957..60e8b8e 100644
--- a/llvm-external-projects/iree-dialects/test/iree_pydm/optimize/variables_to_ssa.mlir
+++ b/llvm-external-projects/iree-dialects/test/iree_pydm/optimize/variables_to_ssa.mlir
@@ -2,7 +2,7 @@
// CHECK-LABEL: @entry_block_does_not_hoist
// Hoisting must be disabled for the entry block as that would change the
-// func signature (which is tested because it would fail verification).
+// func.func signature (which is tested because it would fail verification).
iree_pydm.func @entry_block_does_not_hoist() -> (!iree_pydm.exception_result, !iree_pydm.none) {
// CHECK: load_var
%a = alloc_free_var "a" -> !iree_pydm.free_var_ref
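The variables_to_ssa.mlir hunk above touches only a comment: its functions are iree_pydm.func ops, which already carry a dialect prefix, so the rename does not apply to them. Only the former builtin function op changed spelling, as in this sketch:

  // Renamed by this integrate:
  func.func @renamed(%arg0: tensor<2xf32>) -> tensor<2xf32> {
    return %arg0 : tensor<2xf32>
  }
  // Not renamed: function ops owned by other dialects, such as the
  // iree_pydm.func above.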
diff --git a/llvm-external-projects/iree-dialects/test/iree_pydm/to_iree/numeric.mlir b/llvm-external-projects/iree-dialects/test/iree_pydm/to_iree/numeric.mlir
index 2024688..f4e4ce5 100644
--- a/llvm-external-projects/iree-dialects/test/iree_pydm/to_iree/numeric.mlir
+++ b/llvm-external-projects/iree-dialects/test/iree_pydm/to_iree/numeric.mlir
@@ -1,6 +1,6 @@
// RUN: iree-dialects-opt --split-input-file --convert-iree-pydm-to-iree %s | FileCheck --dump-input-filter=all %s
-// CHECK-LABEL: func @neg_integer(
+// CHECK-LABEL: func.func @neg_integer(
// CHECK-SAME: %[[VAL_0:.*]]: i32) -> (i32, i32) {
// CHECK: %[[VAL_1:.*]] = arith.constant 0 : i32
// CHECK: %[[VAL_2:.*]] = arith.subi %[[VAL_1]], %[[VAL_0]] : i32
@@ -13,7 +13,7 @@
}
// -----
-// CHECK-LABEL: func @integer_add(
+// CHECK-LABEL: func.func @integer_add(
// CHECK-SAME: %[[VAL_0:.*]]: i32,
// CHECK-SAME: %[[VAL_1:.*]]: i32) -> (i32, i32) {
// CHECK: %[[VAL_2:.*]] = arith.addi %[[VAL_0]], %[[VAL_1]] : i32
@@ -74,7 +74,7 @@
}
// -----
-// CHECK-LABEL: func @real_add(
+// CHECK-LABEL: func.func @real_add(
// CHECK-SAME: %[[VAL_0:.*]]: f32,
// CHECK-SAME: %[[VAL_1:.*]]: f32) -> (i32, f32) {
// CHECK: %[[VAL_2:.*]] = arith.addf %[[VAL_0]], %[[VAL_1]] : f32
diff --git a/runtime/bindings/tflite/java/org/tensorflow/lite/tests/simple_add.mlir b/runtime/bindings/tflite/java/org/tensorflow/lite/tests/simple_add.mlir
index b05eb45..8723136 100644
--- a/runtime/bindings/tflite/java/org/tensorflow/lite/tests/simple_add.mlir
+++ b/runtime/bindings/tflite/java/org/tensorflow/lite/tests/simple_add.mlir
@@ -1,4 +1,4 @@
-func @main(
+func.func @main(
%input : tensor<2xf32> {iree.identifier = "input"}
) -> (
tensor<2xf32> {iree.identifier = "output"}
diff --git a/runtime/bindings/tflite/testdata/add_dynamic.mlir b/runtime/bindings/tflite/testdata/add_dynamic.mlir
index f274abc..d06efe5 100644
--- a/runtime/bindings/tflite/testdata/add_dynamic.mlir
+++ b/runtime/bindings/tflite/testdata/add_dynamic.mlir
@@ -1,4 +1,4 @@
-func @main(
+func.func @main(
%input : tensor<?xf32> {iree.identifier = "input"}
) -> (
tensor<?xf32> {iree.identifier = "output"}
diff --git a/runtime/bindings/tflite/testdata/add_multi.mlir b/runtime/bindings/tflite/testdata/add_multi.mlir
index 8a75b8e..a18a7a9 100644
--- a/runtime/bindings/tflite/testdata/add_multi.mlir
+++ b/runtime/bindings/tflite/testdata/add_multi.mlir
@@ -1,4 +1,4 @@
-func @main(
+func.func @main(
%arg0: tensor<1x8x8x3xf32> {iree.identifier = "a"},
%arg1: tensor<1x8x8x3xf32> {iree.identifier = "b"},
%arg2: tensor<1x8x8x3xf32> {iree.identifier = "c"},
diff --git a/runtime/bindings/tflite/testdata/add_static.mlir b/runtime/bindings/tflite/testdata/add_static.mlir
index 6a764a6..7dc9d0c 100644
--- a/runtime/bindings/tflite/testdata/add_static.mlir
+++ b/runtime/bindings/tflite/testdata/add_static.mlir
@@ -1,4 +1,4 @@
-func @main(
+func.func @main(
%input : tensor<1x8x8x3xf32> {iree.identifier = "input"}
) -> (
tensor<1x8x8x3xf32> {iree.identifier = "output"}
diff --git a/third_party/llvm-project b/third_party/llvm-project
index e2ed3fd..25cd6fb 160000
--- a/third_party/llvm-project
+++ b/third_party/llvm-project
@@ -1 +1 @@
-Subproject commit e2ed3fd71e08ac50ca326c79f31247e7e4a16b7b
+Subproject commit 25cd6fba983a606145912e25b0f3d37d1197970c
diff --git a/third_party/mlir-hlo b/third_party/mlir-hlo
index 40d9c13..900a4cd 160000
--- a/third_party/mlir-hlo
+++ b/third_party/mlir-hlo
@@ -1 +1 @@
-Subproject commit 40d9c1338e8f023ccce0b0241d664a347aa7c438
+Subproject commit 900a4cda2b27b5103587d08d3bf8b9cc4a1b8fa6