Removing the ordinal requirement on hal.executable.entry_point.
The ordinal is only required for serialization, so dropping the
requirement lets us assign ordinals later in the pipeline (post-linking).
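
For reference, the entry point syntax changes roughly as below (a
before/after sketch taken from the first hunk of
remove_trivial_loops.mlir; optional attributes such as workgroup_size or
translation.info appear only where a test sets them):

  // Before: interface and ordinal were required attributes.
  hal.executable.entry_point @dispatch_0 attributes {
    interface = @io,
    ordinal = 0 : index,
    workgroup_size = [64: index, 1: index, 1:index]}

  // After: interface(@io) moves into the declaration and no ordinal is
  // needed; other attributes, when present, follow in braces.
  hal.executable.entry_point @dispatch_0 interface(@io) {
    workgroup_size = [64: index, 1: index, 1:index]
  }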
diff --git a/iree/compiler/Codegen/Common/test/remove_trivial_loops.mlir b/iree/compiler/Codegen/Common/test/remove_trivial_loops.mlir
index 1d453ba..c88964e 100644
--- a/iree/compiler/Codegen/Common/test/remove_trivial_loops.mlir
+++ b/iree/compiler/Codegen/Common/test/remove_trivial_loops.mlir
@@ -4,10 +4,9 @@
// CHECK-LABEL: func @dispatch_0()
hal.executable private @dispatch_0 {
hal.executable.variant @cuda, target = #hal.executable.target<"cuda", "cuda-nvptx-fb"> {
- hal.executable.entry_point @dispatch_0 attributes {
- interface = @io,
- ordinal = 0 : index,
- workgroup_size = [64: index, 1: index, 1:index]}
+ hal.executable.entry_point @dispatch_0 interface(@io) {
+ workgroup_size = [64: index, 1: index, 1:index]
+ }
builtin.module {
builtin.func @dispatch_0() {
%c2 = arith.constant 2 : index
@@ -45,9 +44,7 @@
#translation = #iree_codegen.translation.info<"LLVMGPUDistribute", workload_per_wg = [32]>
hal.executable private @workgroup_tile_loop {
hal.executable.variant @cuda, target = #hal.executable.target<"cuda", "cuda-nvptx-fb"> {
- hal.executable.entry_point @workgroup_tile_loop attributes {
- interface = @io,
- ordinal = 0 : index,
+ hal.executable.entry_point @workgroup_tile_loop interface(@io) {
translation.info = #translation
}
builtin.module {
@@ -75,9 +72,7 @@
#translation = #iree_codegen.translation.info<"LLVMGPUDistribute", workload_per_wg = [16]>
hal.executable private @workgroup_tile_loop_negative {
hal.executable.variant @cuda, target = #hal.executable.target<"cuda", "cuda-nvptx-fb"> {
- hal.executable.entry_point @workgroup_tile_loop_negative attributes {
- interface = @io,
- ordinal = 0 : index,
+ hal.executable.entry_point @workgroup_tile_loop_negative interface(@io) {
translation.info = #translation
}
builtin.module {
@@ -107,8 +102,7 @@
#translation = #iree_codegen.translation.info<"LLVMGPUDistribute", workload_per_wg = [32, 8, 1]>
hal.executable private @both_workgroup_and_workitem {
hal.executable.variant @cuda, target = #hal.executable.target<"cuda", "cuda-nvptx-fb"> {
- hal.executable.entry_point @both_workgroup_and_workitem attributes {
- interface = @io, ordinal = 0 : index,
+ hal.executable.entry_point @both_workgroup_and_workitem interface(@io) {
translation.info = #translation,
workgroup_size = [8: index, 2: index, 1: index]
}
diff --git a/iree/compiler/Codegen/LLVMCPU/test/illegal_configuration.mlir b/iree/compiler/Codegen/LLVMCPU/test/illegal_configuration.mlir
index 8445de0..ba966ea 100644
--- a/iree/compiler/Codegen/LLVMCPU/test/illegal_configuration.mlir
+++ b/iree/compiler/Codegen/LLVMCPU/test/illegal_configuration.mlir
@@ -9,10 +9,8 @@
hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @llvm, target = #hal.executable.target<"llvm", "embedded-elf-x86_64", {}> {
- hal.executable.entry_point @illegal attributes {
- translation.info = #translation,
- interface = @io,
- ordinal = 0 : index
+ hal.executable.entry_point @illegal interface(@io) {
+ translation.info = #translation
}
builtin.module {
func @illegal() {
@@ -40,10 +38,8 @@
hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @llvm, target = #hal.executable.target<"llvm", "embedded-elf-x86_64", {}> {
- hal.executable.entry_point @illegal attributes {
- translation.info = #translation,
- interface = @io,
- ordinal = 0 : index
+ hal.executable.entry_point @illegal interface(@io) {
+ translation.info = #translation
}
builtin.module {
func @illegal() {
@@ -71,10 +67,8 @@
hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @llvm, target = #hal.executable.target<"llvm", "embedded-elf-x86_64", {}> {
- hal.executable.entry_point @illegal attributes {
- translation.info = #translation,
- interface = @io,
- ordinal = 0 : index
+ hal.executable.entry_point @illegal interface(@io) {
+ translation.info = #translation
}
builtin.module {
func @illegal() {
@@ -102,10 +96,8 @@
hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @llvm, target = #hal.executable.target<"llvm", "embedded-elf-x86_64", {}> {
- hal.executable.entry_point @illegal attributes {
- translation.info = #translation,
- interface = @io,
- ordinal = 0 : index
+ hal.executable.entry_point @illegal interface(@io) {
+ translation.info = #translation
}
builtin.module {
func @illegal() {
@@ -133,10 +125,8 @@
hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @llvm, target = #hal.executable.target<"llvm", "embedded-elf-x86_64", {}> {
- hal.executable.entry_point @illegal attributes {
- translation.info = #translation,
- interface = @io,
- ordinal = 0 : index
+ hal.executable.entry_point @illegal interface(@io) {
+ translation.info = #translation
}
builtin.module {
func @illegal() {
@@ -164,10 +154,8 @@
hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @llvm, target = #hal.executable.target<"llvm", "embedded-elf-x86_64", {}> {
- hal.executable.entry_point @illegal attributes {
- translation.info = #translation,
- interface = @io,
- ordinal = 0 : index
+ hal.executable.entry_point @illegal interface(@io) {
+ translation.info = #translation
}
builtin.module {
func @illegal() {
diff --git a/iree/compiler/Codegen/LLVMCPU/test/materialize_launch_configuration.mlir b/iree/compiler/Codegen/LLVMCPU/test/materialize_launch_configuration.mlir
index b3318b4..3899b29 100644
--- a/iree/compiler/Codegen/LLVMCPU/test/materialize_launch_configuration.mlir
+++ b/iree/compiler/Codegen/LLVMCPU/test/materialize_launch_configuration.mlir
@@ -11,10 +11,7 @@
native_vector_size = 16 : index,
target_triple = "aarch64-unknown-unknown-eabi-elf"
}> {
- hal.executable.entry_point @matmul_tensors attributes {
- interface = @io,
- ordinal = 0 : index
- }
+ hal.executable.entry_point @matmul_tensors interface(@io)
builtin.module {
func @matmul_tensors() {
%c0 = arith.constant 0 : index
@@ -81,11 +78,8 @@
native_vector_size = 16 : index,
target_triple = "x86_64-unknown-linux-gnu"
}> {
- hal.executable.entry_point @add_no_config attributes {
- interface = @io,
- ordinal = 0 : index
- }
- builtin.module {
+ hal.executable.entry_point @add_no_config interface(@io)
+ builtin.module {
func @add_no_config() {
%c0 = arith.constant 0 : index
%dim0 = hal.interface.load.constant offset = 0 : index
@@ -139,10 +133,8 @@
native_vector_size = 16 : index,
target_triple = "x86_64-unknown-linux-gnu"
}> {
- hal.executable.entry_point @add4D attributes {
- interface = @io, ordinal = 0 : index
- }
- builtin.module {
+ hal.executable.entry_point @add4D interface(@io)
+ builtin.module {
func @add4D() {
%c0 = arith.constant 0 : index
%0 = hal.interface.load.constant offset = 0 : index
@@ -220,11 +212,8 @@
native_vector_size = 16 : index,
target_triple = "aarch64-unknown-unknown-eabi-elf"
}> {
- hal.executable.entry_point @batch_matmul_tensors attributes {
- interface = @io,
- ordinal = 0 : index
- }
- builtin.module {
+ hal.executable.entry_point @batch_matmul_tensors interface(@io)
+ builtin.module {
func @batch_matmul_tensors() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
@@ -297,8 +286,8 @@
workgroup_size = []>
hal.executable private @preset_config_matmul_tensors {
hal.executable.variant @system_elf_x86_64, target = <"llvm", "system-elf-x86_64"> {
- hal.executable.entry_point @preset_config attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @preset_config interface(@io)
+ builtin.module {
builtin.func @preset_config() {
%c0 = arith.constant 0 : index
%c512 = arith.constant 512 : index
@@ -375,8 +364,8 @@
hal.executable @tensor_insert {
hal.executable.variant @system_elf_x86_64, target = <"llvm", "system-elf-x86_64"> {
- hal.executable.entry_point @tensor_insert_slice attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @tensor_insert_slice interface(@io)
+ builtin.module {
builtin.func @tensor_insert_slice() {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan type(StorageBuffer) set(0) binding(0) : !flow.dispatch.tensor<readonly:?x?xi32>
@@ -430,7 +419,7 @@
hal.interface.binding @s0b1_rw_external, set=0, binding=1, type="StorageBuffer"
}
hal.executable.variant @system_elf_x86_64, target = <"llvm", "system-elf-x86_64"> {
- hal.executable.entry_point @static_1d_fft_stage2 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point @static_1d_fft_stage2 interface(@io)
builtin.module {
builtin.func @static_1d_fft_stage2() {
%c0 = arith.constant 0 : index
@@ -473,7 +462,7 @@
hal.interface.binding @s0b1_rw_external, set=0, binding=1, type="StorageBuffer"
}
hal.executable.variant @system_elf_x86_64, target = <"llvm", "system-elf-x86_64"> {
- hal.executable.entry_point @static_3d_fft_stage3 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point @static_3d_fft_stage3 interface(@io)
builtin.module {
builtin.func @static_3d_fft_stage3() {
%c0 = arith.constant 0 : index
@@ -545,7 +534,7 @@
hal.interface.binding @arg2, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @system_elf_x86_64, target = <"llvm", "system-elf-x86_64"> {
- hal.executable.entry_point @outs_fusion_fn attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point @outs_fusion_fn interface(@io)
builtin.module {
builtin.func @outs_fusion_fn() {
%c0 = arith.constant 0 : index
@@ -609,8 +598,8 @@
hal.executable private @conv {
hal.executable.variant public @system_elf_x86_64, target = <"llvm", "system-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "x86_64-unknown-linux-gnu"}> {
- hal.executable.entry_point public @conv attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @conv interface(@io)
+ builtin.module {
func @conv() {
%c0 = arith.constant 0 : index
%0 = hal.interface.load.constant offset = 0 : index
@@ -668,7 +657,7 @@
// CHECK-DAG: #[[MAP:.+]] = affine_map<()[s0] -> (s0 ceildiv 64)>
// CHECK-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation.info<"CPUDefault", workload_per_wg = [64, 64, 64]>
-// CHECK: hal.executable.entry_point public @conv attributes
+// CHECK: hal.executable.entry_point public @conv
// CHECK-SAME: translation.info = #[[TRANSLATION]]
// CHECK-NEXT: ^bb0(%[[ARG0:[a-zA-Z0-9]+]]: index, %[[ARG1:[a-zA-Z0-9]+]]: index, %[[ARG2:[a-zA-Z0-9]+]]: index)
// CHECK-DAG: %[[D0:.+]] = affine.apply #[[MAP0]]()[%[[ARG0]]
@@ -682,8 +671,8 @@
hal.executable private @conv_static {
hal.executable.variant public @system_elf_x86_64, target = <"llvm", "system-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 64 : index, target_triple = "x86_64-pc-linux-gnu"}> {
- hal.executable.entry_point public @conv_static attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @conv_static interface(@io)
+ builtin.module {
func @conv_static() {
%cst = arith.constant 0.000000e+00 : f32
%c80 = arith.constant 80 : index
@@ -740,7 +729,7 @@
// CHECK-DAG: #[[MAP0:.+]] = affine_map<()[s0] -> (s0 ceildiv 64)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<()[s0] -> (s0 ceildiv 32)>
// CHECK-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation.info<"CPUDefault", workload_per_wg = [64, 64, 32]>
-// CHECK: hal.executable.entry_point public @conv_static attributes
+// CHECK: hal.executable.entry_point public @conv_static
// CHECK-SAME: translation.info = #[[TRANSLATION]]
// CHECK-NEXT: ^bb0(%[[ARG0:[a-zA-Z0-9]+]]: index, %[[ARG1:[a-zA-Z0-9]+]]: index, %[[ARG2:[a-zA-Z0-9]+]]: index)
// CHECK-DAG: %[[D0:.+]] = affine.apply #[[MAP0]]()[%[[ARG0]]
@@ -754,8 +743,8 @@
hal.executable private @generic_static {
hal.executable.variant public @system_elf_x86_64, target = <"llvm", "system-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 64 : index, target_triple = "x86_64-pc-linux-gnu"}> {
- hal.executable.entry_point public @generic_static attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @generic_static interface(@io)
+ builtin.module {
func @generic_static() {
%c16 = arith.constant 16 : index
%c96 = arith.constant 96 : index
@@ -796,7 +785,7 @@
// CHECK-DAG: #[[MAP0:.+]] = affine_map<()[s0] -> (s0 ceildiv 32)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<()[s0] -> (s0 ceildiv 8)>
// CHECK-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation.info<"CPUDefault", workload_per_wg = [32, 8]>
-// CHECK: hal.executable.entry_point public @generic_static attributes
+// CHECK: hal.executable.entry_point public @generic_static
// CHECK-SAME: translation.info = #[[TRANSLATION]]
// CHECK-NEXT: ^bb0(%[[ARG0:[a-zA-Z0-9]+]]: index, %[[ARG1:[a-zA-Z0-9]+]]: index, %[[ARG2:[a-zA-Z0-9]+]]: index)
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
@@ -810,8 +799,8 @@
hal.executable private @matmul_static {
hal.executable.variant public @system_elf_arm_64, target = <"llvm", "system-elf-arm_64", {data_layout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-linux-android30"}> {
- hal.executable.entry_point public @matmul_static attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @matmul_static interface(@io)
+ builtin.module {
func @matmul_static() {
%cst = arith.constant 0.000000e+00 : f32
%c196 = arith.constant 196 : index
@@ -854,7 +843,7 @@
// CHECK-DAG: #[[MAP0:.+]] = affine_map<()[s0] -> (s0 ceildiv 8)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<()[s0] -> (s0 ceildiv 28)>
// CHECK-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation.info<"CPUTileFuseAndVectorize", workload_per_wg = [8, 28]>
-// CHECK: hal.executable.entry_point public @matmul_static attributes
+// CHECK: hal.executable.entry_point public @matmul_static
// CHECK-SAME: translation.info = #[[TRANSLATION]]
// CHECK-NEXT: ^bb0(%[[ARG0:[a-zA-Z0-9]+]]: index, %[[ARG1:[a-zA-Z0-9]+]]: index, %[[ARG2:[a-zA-Z0-9]+]]: index)
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
@@ -868,8 +857,8 @@
hal.executable private @restrict_num_workgroups {
hal.executable.variant public @system_elf_arm_64, target = <"llvm", "system-elf-arm_64", {data_layout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-linux-android30"}> {
- hal.executable.entry_point public @restrict_num_workgroups attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @restrict_num_workgroups interface(@io)
+ builtin.module {
func @restrict_num_workgroups() {
%cst = arith.constant 0.000000e+00 : f32
%c7 = arith.constant 7 : index
@@ -923,7 +912,7 @@
// CHECK-DAG: #[[MAP1:.+]] = affine_map<()[s0] -> (s0 ceildiv 8)>
// CHECK-DAG: #[[MAP2:.+]] = affine_map<()[s0] -> (s0 ceildiv 4)>
// CHECK-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation.info<"CPUDefault", workload_per_wg = [64, 8, 4]>
-// CHECK: hal.executable.entry_point public @restrict_num_workgroups attributes
+// CHECK: hal.executable.entry_point public @restrict_num_workgroups
// CHECK-SAME: translation.info = #[[TRANSLATION]]
// CHECK-NEXT: ^bb0(%[[ARG0:[a-zA-Z0-9]+]]: index, %[[ARG1:[a-zA-Z0-9]+]]: index, %[[ARG2:[a-zA-Z0-9]+]]: index)
// CHECK-DAG: %[[D0:.+]] = affine.apply #[[MAP0]]()[%[[ARG0]]]
@@ -935,8 +924,8 @@
hal.executable private @test_exp_0 {
hal.executable.variant public @system_elf_arm_64, target = <"llvm", "system-elf-arm_64", {data_layout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-linux-android30"}> {
- hal.executable.entry_point public @test_exp_0 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @test_exp_0 interface(@io)
+ builtin.module {
func @test_exp_0() {
%c0 = arith.constant 0 : index
%size = hal.interface.workgroup.size[0] : index
@@ -961,7 +950,7 @@
// CHECK-DAG: #[[MAP:.+]] = affine_map<()[s0] -> (s0 ceildiv 64)>
// CHECK-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation.info<"CPUDefault", workload_per_wg = [64]>
-// CHECK: hal.executable.entry_point public @test_exp_0 attributes
+// CHECK: hal.executable.entry_point public @test_exp_0
// CHECK-SAME: translation.info = #[[TRANSLATION]]
// CHECK-NEXT: ^bb0(%[[ARG0:[a-zA-Z0-9]+]]: index
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
@@ -972,8 +961,8 @@
hal.executable private @test_exp_1 {
hal.executable.variant public @system_elf_arm_64, target = <"llvm", "system-elf-arm_64", {data_layout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-linux-android30"}> {
- hal.executable.entry_point public @test_exp_1 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @test_exp_1 interface(@io)
+ builtin.module {
func @test_exp_1() {
%c0 = arith.constant 0 : index
%size = hal.interface.workgroup.size[0] : index
@@ -998,7 +987,7 @@
// CHECK-DAG: #[[MAP:.+]] = affine_map<()[s0] -> (s0 ceildiv 64)>
// CHECK-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation.info<"CPUDefault", workload_per_wg = [64]>
-// CHECK: hal.executable.entry_point public @test_exp_1 attributes
+// CHECK: hal.executable.entry_point public @test_exp_1
// CHECK-SAME: translation.info = #[[TRANSLATION]]
// CHECK-NEXT: ^bb0(%[[ARG0:[a-zA-Z0-9]+]]: index
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
@@ -1009,8 +998,8 @@
hal.executable private @test_exp_2 {
hal.executable.variant public @system_elf_arm_64, target = <"llvm", "system-elf-arm_64", {data_layout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-linux-android30"}> {
- hal.executable.entry_point public @test_exp_3 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @test_exp_3 interface(@io)
+ builtin.module {
func @test_exp_3() {
%c0 = arith.constant 0 : index
%size = hal.interface.workgroup.size[0] : index
@@ -1035,7 +1024,7 @@
// CHECK-DAG: #[[MAP:.+]] = affine_map<()[s0] -> (s0 ceildiv 64)>
// CHECK-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation.info<"CPUDefault", workload_per_wg = [64]>
-// CHECK: hal.executable.entry_point public @test_exp_3 attributes
+// CHECK: hal.executable.entry_point public @test_exp_3
// CHECK-SAME: translation.info = #[[TRANSLATION]]
// CHECK-NEXT: ^bb0(%[[ARG0:[a-zA-Z0-9]+]]: index
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
@@ -1046,8 +1035,8 @@
hal.executable private @test_exp_3 {
hal.executable.variant public @system_elf_arm_64, target = <"llvm", "system-elf-arm_64", {data_layout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-linux-android30"}> {
- hal.executable.entry_point public @test_exp_4 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @test_exp_4 interface(@io)
+ builtin.module {
func @test_exp_4() {
%c0 = arith.constant 0 : index
%size = hal.interface.workgroup.size[0] : index
@@ -1072,7 +1061,7 @@
// CHECK-DAG: #[[MAP:.+]] = affine_map<()[s0] -> (s0 ceildiv 64)>
// CHECK-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation.info<"CPUDefault", workload_per_wg = [64]>
-// CHECK: hal.executable.entry_point public @test_exp_4 attributes
+// CHECK: hal.executable.entry_point public @test_exp_4
// CHECK-SAME: translation.info = #[[TRANSLATION]]
// CHECK-NEXT: ^bb0(%[[ARG0:[a-zA-Z0-9]+]]: index
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
@@ -1083,8 +1072,8 @@
hal.executable private @test_exp_4 {
hal.executable.variant public @system_elf_arm_64, target = <"llvm", "system-elf-arm_64", {data_layout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-linux-android30"}> {
- hal.executable.entry_point public @test_exp_5 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @test_exp_5 interface(@io)
+ builtin.module {
func @test_exp_5() {
%c0 = arith.constant 0 : index
%size = hal.interface.workgroup.size[0] : index
@@ -1109,7 +1098,7 @@
// CHECK-DAG: #[[MAP:.+]] = affine_map<()[s0] -> (s0 ceildiv 64)>
// CHECK-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation.info<"CPUDefault", workload_per_wg = [64]>
-// CHECK: hal.executable.entry_point public @test_exp_5 attributes
+// CHECK: hal.executable.entry_point public @test_exp_5
// CHECK-SAME: translation.info = #[[TRANSLATION]]
// CHECK-NEXT: ^bb0(%[[ARG0:[a-zA-Z0-9]+]]: index
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
@@ -1126,8 +1115,8 @@
native_vector_size = 16 : index,
target_triple = "x86_64-unknown-unknown-eabi-elf"
}> {
- hal.executable.entry_point public @matmul_x86 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @matmul_x86 interface(@io)
+ builtin.module {
func @matmul_x86() {
%c128 = arith.constant 128 : index
%c384 = arith.constant 384 : index
diff --git a/iree/compiler/Codegen/LLVMCPU/test/test_config_mmt4d.mlir b/iree/compiler/Codegen/LLVMCPU/test/test_config_mmt4d.mlir
index 6853009..4d526f8 100644
--- a/iree/compiler/Codegen/LLVMCPU/test/test_config_mmt4d.mlir
+++ b/iree/compiler/Codegen/LLVMCPU/test/test_config_mmt4d.mlir
@@ -11,7 +11,7 @@
hal.interface.binding public @s0b2_rw_external, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant public @embedded_elf_arm_64, target = #executable_target_embedded_elf_arm_64_ {
- hal.executable.entry_point public @mmt4d_384x384x512_4x1x4_dispatch_0 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point public @mmt4d_384x384x512_4x1x4_dispatch_0 interface(@io)
builtin.module {
func @mmt4d_384x384x512_4x1x4_dispatch_0() {
%c0 = arith.constant 0 : index
diff --git a/iree/compiler/Codegen/LLVMGPU/test/distribute_to_thread.mlir b/iree/compiler/Codegen/LLVMGPU/test/distribute_to_thread.mlir
index 71000f7..d5057fd 100644
--- a/iree/compiler/Codegen/LLVMGPU/test/distribute_to_thread.mlir
+++ b/iree/compiler/Codegen/LLVMGPU/test/distribute_to_thread.mlir
@@ -10,11 +10,10 @@
#map4 = affine_map<(d0, d1)[s0] -> (d0 * 1024 + s0 + d1)>
hal.executable private @dot_dispatch_0 {
hal.executable.variant @cuda, target = #executable_target_cuda_nvptx_fb {
- hal.executable.entry_point @dot_dispatch_0 attributes {
- interface = @legacy_io,
- ordinal = 0 : index,
+ hal.executable.entry_point @dot_dispatch_0 interface(@io) {
translation.info = #translation,
- workgroup_size = [64 : index, 1 : index, 1 : index]}
+ workgroup_size = [64 : index, 1 : index, 1 : index]
+ }
builtin.module {
builtin.func @dot_dispatch_0() {
%cst = arith.constant 0.000000e+00 : f32
@@ -85,11 +84,10 @@
// Pure reduction case, skip tiling.
hal.executable @reduction_dispatch {
hal.executable.variant @cuda, target = <"cuda", "cuda-nvptx-fb"> {
- hal.executable.entry_point @predict_dispatch_153 attributes {
- interface = @io,
- ordinal = 0 : index,
+ hal.executable.entry_point @predict_dispatch_153 interface(@io) {
translation.info = #translation,
- workgroup_size = [1: index, 1: index, 1: index]}
+ workgroup_size = [1: index, 1: index, 1: index]
+ }
builtin.module {
builtin.func @predict_dispatch_153() {
%c0 = arith.constant 0 : index
diff --git a/iree/compiler/Codegen/LLVMGPU/test/distribute_wg_copy.mlir b/iree/compiler/Codegen/LLVMGPU/test/distribute_wg_copy.mlir
index 29e1a35..23b0dcc 100644
--- a/iree/compiler/Codegen/LLVMGPU/test/distribute_wg_copy.mlir
+++ b/iree/compiler/Codegen/LLVMGPU/test/distribute_wg_copy.mlir
@@ -9,10 +9,9 @@
hal.executable private @shared_mem_cpy {
hal.executable.variant @cuda, target = <"cuda", "cuda-nvptx-fb"> {
- hal.executable.entry_point @shared_mem_cpy attributes {
- interface = @io,
- ordinal = 0 : index,
- workgroup_size = [32: index, 4: index, 1:index]}
+ hal.executable.entry_point @shared_mem_cpy interface(@io) {
+ workgroup_size = [32: index, 4: index, 1:index]
+ }
builtin.module {
memref.global "private" @__shared_memory___1 : memref<3x512xf32, 3>
memref.global "private" @__shared_memory___0 : memref<256x4xf32, 3>
diff --git a/iree/compiler/Codegen/LLVMGPU/test/gpu_set_num_workgroups.mlir b/iree/compiler/Codegen/LLVMGPU/test/gpu_set_num_workgroups.mlir
index e48df39..3864a27 100644
--- a/iree/compiler/Codegen/LLVMGPU/test/gpu_set_num_workgroups.mlir
+++ b/iree/compiler/Codegen/LLVMGPU/test/gpu_set_num_workgroups.mlir
@@ -6,8 +6,8 @@
hal.interface.binding @ret0, set=0, binding=1, type="StorageBuffer"
}
hal.executable.variant @cuda, target = <"cuda", "cuda-nvptx-fb"> {
- hal.executable.entry_point @add_dispatch_0 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @add_dispatch_0 interface(@io)
+ builtin.module {
func @add_dispatch_0() {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan type(StorageBuffer) set(0) binding(0) : !flow.dispatch.tensor<readonly:16384xf32>
@@ -46,8 +46,8 @@
hal.executable private @dot_dispatch_1 {
hal.executable.variant @cuda, target = <"cuda", "cuda-nvptx-fb"> {
- hal.executable.entry_point @dot_dispatch_1 attributes {interface = @legacy_io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @dot_dispatch_1 interface(@io)
+ builtin.module {
func @dot_dispatch_1() {
%c0 = arith.constant 0 : index
%c4 = arith.constant 4 : index
@@ -105,10 +105,8 @@
hal.executable @reduction_dispatch {
hal.executable.variant @cuda, target = <"cuda", "cuda-nvptx-fb"> {
- hal.executable.entry_point @predict_dispatch_153 attributes {
- interface = @io,
- ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @predict_dispatch_153 interface(@io)
+ builtin.module {
func @predict_dispatch_153() {
%c0 = arith.constant 0 : index
%cst = arith.constant 0x7FC00000 : f32
@@ -147,8 +145,8 @@
hal.executable @tensor_insert {
hal.executable.variant @cuda, target = <"cuda", "cuda-nvptx-fb"> {
- hal.executable.entry_point @tensor_insert_slice attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @tensor_insert_slice interface(@io)
+ builtin.module {
builtin.func @tensor_insert_slice() {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan type(StorageBuffer) set(0) binding(0) : !flow.dispatch.tensor<readonly:?x?xi32>
@@ -197,8 +195,8 @@
hal.executable @tensor_insert {
hal.executable.variant @cuda, target = <"cuda", "cuda-nvptx-fb"> {
- hal.executable.entry_point @tensor_insert_slice attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @tensor_insert_slice interface(@io)
+ builtin.module {
builtin.func @tensor_insert_slice() {
%c0 = arith.constant 0 : index
%d0 = hal.interface.load.constant offset = 0 : index
@@ -253,7 +251,7 @@
hal.interface.binding @s0b1_rw_external, set=0, binding=1, type="StorageBuffer"
}
hal.executable.variant @cuda, target = <"cuda", "cuda-nvptx-fb"> {
- hal.executable.entry_point @static_1d_fft_stage2 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point @static_1d_fft_stage2 interface(@io)
builtin.module {
builtin.func @static_1d_fft_stage2() {
%c0 = arith.constant 0 : index
@@ -297,7 +295,7 @@
hal.interface.binding @s0b1_rw_external, set=0, binding=1, type="StorageBuffer"
}
hal.executable.variant @cuda, target = <"cuda", "cuda-nvptx-fb"> {
- hal.executable.entry_point @static_3d_fft_stage3 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point @static_3d_fft_stage3 interface(@io)
builtin.module {
builtin.func @static_3d_fft_stage3() {
%c0 = arith.constant 0 : index
@@ -360,8 +358,8 @@
workgroup_size = [16, 8, 1]>
hal.executable @user_config {
hal.executable.variant public @cuda_nvptx_fb, target = <"cuda", "cuda-nvptx-fb"> {
- hal.executable.entry_point public @_lowering_config_test_dispatch_1 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @_lowering_config_test_dispatch_1 interface(@io)
+ builtin.module {
func @_lowering_config_test_dispatch_1() {
%cst = arith.constant 0.000000e+00 : f32
%c128 = arith.constant 128 : index
@@ -423,8 +421,8 @@
hal.interface.binding public @s0b3, set=0, binding=3, type="StorageBuffer"
}
hal.executable.variant public @cuda_nvptx_fb, target = <"cuda", "cuda-nvptx-fb", {target_arch = "sm_35"}> {
- hal.executable.entry_point public @sort_op attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @sort_op interface(@io)
+ builtin.module {
func @sort_op() {
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
diff --git a/iree/compiler/Codegen/LLVMGPU/test/nvvm_pipeline_test.mlir b/iree/compiler/Codegen/LLVMGPU/test/nvvm_pipeline_test.mlir
index 9192c7d..41cae36 100644
--- a/iree/compiler/Codegen/LLVMGPU/test/nvvm_pipeline_test.mlir
+++ b/iree/compiler/Codegen/LLVMGPU/test/nvvm_pipeline_test.mlir
@@ -10,8 +10,8 @@
hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @cuda, target = <"cuda", "cuda-nvptx-fb"> {
- hal.executable.entry_point @add_dispatch_0 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @add_dispatch_0 interface(@io)
+ builtin.module {
func @add_dispatch_0() {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan type(StorageBuffer) set(0) binding(0) : !flow.dispatch.tensor<readonly:16xf32>
@@ -48,8 +48,8 @@
hal.interface.binding @wo2, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @cuda, target = <"cuda", "cuda-nvptx-fb"> {
- hal.executable.entry_point @dot_dispatch_0 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @dot_dispatch_0 interface(@io)
+ builtin.module {
func @dot_dispatch_0() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
@@ -131,8 +131,8 @@
hal.interface.binding @wo2, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @cuda, target = <"cuda", "cuda-nvptx-fb"> {
- hal.executable.entry_point @dot_dispatch_0 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @dot_dispatch_0 interface(@io)
+ builtin.module {
func @dot_dispatch_0() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
@@ -194,8 +194,8 @@
hal.interface.binding @ro1, set=0, binding=1, type="StorageBuffer"
hal.interface.binding @wo2, set=0, binding=2, type="StorageBuffer"
}
- hal.executable.entry_point @conv2d_dispatch_0 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @conv2d_dispatch_0 interface(@io)
+ builtin.module {
func @conv2d_dispatch_0() {
%c0 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
@@ -262,8 +262,8 @@
hal.interface.binding @ret0, set=0, binding=1, type="StorageBuffer"
}
hal.executable.variant @cuda, target = <"cuda", "cuda-nvptx-fb"> {
- hal.executable.entry_point @add_dispatch_0 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @add_dispatch_0 interface(@io)
+ builtin.module {
func @add_dispatch_0() {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan type(StorageBuffer) set(0) binding(0) : !flow.dispatch.tensor<readonly:16xf32>
@@ -292,8 +292,8 @@
hal.executable @reduction_dispatch {
hal.executable.variant @cuda, target = <"cuda", "cuda-nvptx-fb"> {
- hal.executable.entry_point @reduction attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @reduction interface(@io)
+ builtin.module {
func @reduction() {
%c0 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
@@ -333,8 +333,8 @@
hal.executable @vector_add_dispatch {
hal.executable.variant @cuda, target = <"cuda", "cuda-nvptx-fb"> {
- hal.executable.entry_point @vector_add_dispatch attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @vector_add_dispatch interface(@io)
+ builtin.module {
builtin.func @vector_add_dispatch() {
%c0 = arith.constant 0 : index
%c16384 = arith.constant 16384 : index
@@ -381,8 +381,8 @@
hal.executable @vector_reduction_dispatch {
hal.executable.variant @cuda, target = <"cuda", "cuda-nvptx-fb"> {
- hal.executable.entry_point @vector_reduction_dispatch attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @vector_reduction_dispatch interface(@io)
+ builtin.module {
builtin.func @vector_reduction_dispatch() {
%c0 = arith.constant 0 : index
%c16384 = arith.constant 16384 : index
@@ -431,8 +431,8 @@
hal.interface.binding @wo2, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @cuda, target = #hal.executable.target<"cuda", "cuda-nvptx-fb", {target_arch = "sm_80"}> {
- hal.executable.entry_point @mma_fused attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @mma_fused interface(@io)
+ builtin.module {
func @mma_fused() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
diff --git a/iree/compiler/Codegen/LLVMGPU/test/rocdl_pipeline_test.mlir b/iree/compiler/Codegen/LLVMGPU/test/rocdl_pipeline_test.mlir
index 9c4d5b1..3ab60ed 100644
--- a/iree/compiler/Codegen/LLVMGPU/test/rocdl_pipeline_test.mlir
+++ b/iree/compiler/Codegen/LLVMGPU/test/rocdl_pipeline_test.mlir
@@ -10,8 +10,8 @@
hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @rocm, target = <"rocm", "rocm-hsaco-fb"> {
- hal.executable.entry_point @add_dispatch_0 attributes {interface = @io, ordinal = 0 : index, signature = (!flow.dispatch.tensor<readonly:16xf32>, !flow.dispatch.tensor<readonly:16xf32>, !flow.dispatch.tensor<writeonly:16xf32>) -> ()}
- builtin.module {
+ hal.executable.entry_point @add_dispatch_0 interface(@io)
+ builtin.module {
func @add_dispatch_0() {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan type(StorageBuffer) set(0) binding(0) : !flow.dispatch.tensor<readonly:16xf32>
@@ -48,8 +48,8 @@
hal.interface.binding @wo2, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @rocm, target = <"rocm", "rocm-hsaco-fb"> {
- hal.executable.entry_point @dot_dispatch_0 attributes {interface = @io, ordinal = 0 : index, signature = (!flow.dispatch.tensor<readonly:1024x1024xf32>, !flow.dispatch.tensor<readonly:1024x1024xf32>, !flow.dispatch.tensor<writeonly:1024x1024xf32>) -> ()}
- builtin.module {
+ hal.executable.entry_point @dot_dispatch_0 interface(@io)
+ builtin.module {
func @dot_dispatch_0() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
diff --git a/iree/compiler/Codegen/SPIRV/test/config_adreno_conv.mlir b/iree/compiler/Codegen/SPIRV/test/config_adreno_conv.mlir
index b038482..86930d9 100644
--- a/iree/compiler/Codegen/SPIRV/test/config_adreno_conv.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/config_adreno_conv.mlir
@@ -15,7 +15,7 @@
max_compute_workgroup_size = dense<[1024, 1024, 64]> : vector<3xi32>,
subgroup_size = 64 : i32}>
}> {
- hal.executable.entry_point public @conv_112x112x512 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point public @conv_112x112x512 interface(@io)
builtin.module {
func @conv_112x112x512() {
%c0 = arith.constant 0 : index
@@ -102,7 +102,7 @@
max_compute_workgroup_size = dense<[1024, 1024, 64]> : vector<3xi32>,
subgroup_size = 64 : i32}>
}> {
- hal.executable.entry_point public @conv_112x112x32 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point public @conv_112x112x32 interface(@io)
builtin.module {
func @conv_112x112x32() {
%c0 = arith.constant 0 : index
@@ -196,7 +196,7 @@
max_compute_workgroup_size = dense<[1024, 1024, 64]> : vector<3xi32>,
subgroup_size = 64 : i32}>
}> {
- hal.executable.entry_point public @conv_16x16x16 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point public @conv_16x16x16 interface(@io)
builtin.module {
func @conv_16x16x16() {
%c0 = arith.constant 0 : index
@@ -283,7 +283,7 @@
max_compute_workgroup_size = dense<[1024, 1024, 64]> : vector<3xi32>,
subgroup_size = 64 : i32}>
}> {
- hal.executable.entry_point public @dwconv_28x28x144 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point public @dwconv_28x28x144 interface(@io)
builtin.module {
func @dwconv_28x28x144() {
%c0 = arith.constant 0 : index
@@ -372,7 +372,7 @@
max_compute_workgroup_size = dense<[1024, 1024, 64]> : vector<3xi32>,
subgroup_size = 64 : i32}>
}> {
- hal.executable.entry_point public @dwconv_4x4x8 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point public @dwconv_4x4x8 interface(@io)
builtin.module {
func @dwconv_4x4x8() {
%c0 = arith.constant 0 : index
diff --git a/iree/compiler/Codegen/SPIRV/test/config_adreno_matmul.mlir b/iree/compiler/Codegen/SPIRV/test/config_adreno_matmul.mlir
index 41a4d0c..1bbf7b2 100644
--- a/iree/compiler/Codegen/SPIRV/test/config_adreno_matmul.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/config_adreno_matmul.mlir
@@ -15,8 +15,8 @@
max_compute_workgroup_size = dense<[1024, 1024, 64]> : vector<3xi32>,
subgroup_size = 64 : i32}>
}> {
- hal.executable.entry_point @matmul_1024x2048x512 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @matmul_1024x2048x512 interface(@io)
+ builtin.module {
func @matmul_1024x2048x512() {
%c0 = arith.constant 0 : index
%c2048 = arith.constant 2048 : index
@@ -91,8 +91,8 @@
max_compute_workgroup_size = dense<[1024, 1024, 64]> : vector<3xi32>,
subgroup_size = 64 : i32}>
}> {
- hal.executable.entry_point @matmul_3136x24x96 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @matmul_3136x24x96 interface(@io)
+ builtin.module {
func @matmul_3136x24x96() {
%c0 = arith.constant 0 : index
%c24 = arith.constant 24 : index
@@ -167,8 +167,8 @@
max_compute_workgroup_size = dense<[1024, 1024, 64]> : vector<3xi32>,
subgroup_size = 64 : i32}>
}> {
- hal.executable.entry_point @matmul_196x64x192 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @matmul_196x64x192 interface(@io)
+ builtin.module {
func @matmul_196x64x192() {
%c0 = arith.constant 0 : index
%c64 = arith.constant 64 : index
@@ -243,8 +243,8 @@
max_compute_workgroup_size = dense<[1024, 1024, 64]> : vector<3xi32>,
subgroup_size = 64 : i32}>
}> {
- hal.executable.entry_point @matmul_12544x96x16 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @matmul_12544x96x16 interface(@io)
+ builtin.module {
func @matmul_12544x96x16() {
%c0 = arith.constant 0 : index
%c96 = arith.constant 96 : index
@@ -314,8 +314,8 @@
max_compute_workgroup_size = dense<[1024, 1024, 64]> : vector<3xi32>,
subgroup_size = 64 : i32}>
}> {
- hal.executable.entry_point @matmul_49x160x576 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @matmul_49x160x576 interface(@io)
+ builtin.module {
func @matmul_49x160x576() {
%c0 = arith.constant 0 : index
%c160 = arith.constant 160 : index
@@ -390,8 +390,8 @@
max_compute_workgroup_size = dense<[1024, 1024, 64]> : vector<3xi32>,
subgroup_size = 64 : i32}>
}> {
- hal.executable.entry_point @batch_matmul_4x384x384 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @batch_matmul_4x384x384 interface(@io)
+ builtin.module {
func @batch_matmul_4x384x384() {
%c0 = arith.constant 0 : index
%c384 = arith.constant 384 : index
@@ -476,8 +476,8 @@
max_compute_workgroup_size = dense<[1024, 1024, 64]> : vector<3xi32>,
subgroup_size = 64 : i32}>
}> {
- hal.executable.entry_point @batch_matmul_4x8x8 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @batch_matmul_4x8x8 interface(@io)
+ builtin.module {
func @batch_matmul_4x8x8() {
%c0 = arith.constant 0 : index
%c8 = arith.constant 8 : index
diff --git a/iree/compiler/Codegen/SPIRV/test/config_default_conv.mlir b/iree/compiler/Codegen/SPIRV/test/config_default_conv.mlir
index b345955..c766289 100644
--- a/iree/compiler/Codegen/SPIRV/test/config_default_conv.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/config_default_conv.mlir
@@ -25,8 +25,8 @@
max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>,
subgroup_size = 32 : i32}>
}> {
- hal.executable.entry_point public @conv_pointwise_112x112x32 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @conv_pointwise_112x112x32 interface(@io)
+ builtin.module {
func @conv_pointwise_112x112x32() {
%c0 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
diff --git a/iree/compiler/Codegen/SPIRV/test/config_default_linalg_ext_ops.mlir b/iree/compiler/Codegen/SPIRV/test/config_default_linalg_ext_ops.mlir
index 7166173..6c3f12e 100644
--- a/iree/compiler/Codegen/SPIRV/test/config_default_linalg_ext_ops.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/config_default_linalg_ext_ops.mlir
@@ -10,7 +10,7 @@
max_compute_workgroup_size = dense<512> : vector<3xi32>,
subgroup_size = 16 : i32}>
}> {
- hal.executable.entry_point @static_1d_sort attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point @static_1d_sort interface(@io)
builtin.module {
builtin.func @static_1d_sort() {
%c0 = arith.constant 0 : index
@@ -57,7 +57,7 @@
max_compute_workgroup_size = dense<512> : vector<3xi32>,
subgroup_size = 16 : i32}>
}> {
- hal.executable.entry_point @static_3d_sort attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point @static_3d_sort interface(@io)
builtin.module {
builtin.func @static_3d_sort() {
%c64 = arith.constant 64 : index
@@ -125,7 +125,7 @@
max_compute_workgroup_size = dense<512> : vector<3xi32>,
subgroup_size = 16 : i32}>
}> {
- hal.executable.entry_point @static_1d_fft_stage2 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point @static_1d_fft_stage2 interface(@io)
builtin.module {
builtin.func @static_1d_fft_stage2() {
%c0 = arith.constant 0 : index
@@ -175,7 +175,7 @@
max_compute_workgroup_size = dense<512> : vector<3xi32>,
subgroup_size = 16 : i32}>
}> {
- hal.executable.entry_point @static_3d_fft_stage3 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point @static_3d_fft_stage3 interface(@io)
builtin.module {
builtin.func @static_3d_fft_stage3() {
%c0 = arith.constant 0 : index
diff --git a/iree/compiler/Codegen/SPIRV/test/config_default_linalg_ops.mlir b/iree/compiler/Codegen/SPIRV/test/config_default_linalg_ops.mlir
index 1064aa3..4f8ab6b 100644
--- a/iree/compiler/Codegen/SPIRV/test/config_default_linalg_ops.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/config_default_linalg_ops.mlir
@@ -8,8 +8,8 @@
max_compute_workgroup_size = dense<512> : vector<3xi32>,
subgroup_size = 16 : i32}>
}> {
- hal.executable.entry_point @tensor_insert_slice attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @tensor_insert_slice interface(@io)
+ builtin.module {
builtin.func @tensor_insert_slice() {
%c0 = arith.constant 0 : index
%1 = hal.interface.load.constant offset = 0 : index
@@ -59,8 +59,8 @@
max_compute_workgroup_size = dense<512> : vector<3xi32>,
subgroup_size = 16 : i32}>
}> {
- hal.executable.entry_point @tensor_insert_slice attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @tensor_insert_slice interface(@io)
+ builtin.module {
builtin.func @tensor_insert_slice() {
%c0 = arith.constant 0 : index
%d0 = hal.interface.load.constant offset = 0 : index
@@ -117,8 +117,8 @@
max_compute_workgroup_size = dense<512> : vector<3xi32>,
subgroup_size = 64 : i32}>
}> {
- hal.executable.entry_point @copy attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @copy interface(@io)
+ builtin.module {
builtin.func @copy() {
%c0 = arith.constant 0 : index
%c224 = arith.constant 224 : index
@@ -196,8 +196,8 @@
max_compute_workgroup_size = dense<512> : vector<3xi32>,
subgroup_size = 32 : i32}>
}> {
- hal.executable.entry_point public @avg_pool attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @avg_pool interface(@io)
+ builtin.module {
func @avg_pool() {
%c0 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
@@ -279,8 +279,8 @@
max_compute_workgroup_size = dense<512> : vector<3xi32>,
subgroup_size = 32 : i32}>
}> {
- hal.executable.entry_point public @elementwise attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @elementwise interface(@io)
+ builtin.module {
func @elementwise() {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
diff --git a/iree/compiler/Codegen/SPIRV/test/config_default_matmul.mlir b/iree/compiler/Codegen/SPIRV/test/config_default_matmul.mlir
index 7cc0a33..0bcb6c6 100644
--- a/iree/compiler/Codegen/SPIRV/test/config_default_matmul.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/config_default_matmul.mlir
@@ -15,8 +15,8 @@
max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>,
subgroup_size = 32 : i32}>
}> {
- hal.executable.entry_point public @batch_matmul_1x3x32 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @batch_matmul_1x3x32 interface(@io)
+ builtin.module {
func @batch_matmul_1x3x32() {
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
@@ -100,8 +100,8 @@
max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>,
subgroup_size = 64 : i32}>
}> {
- hal.executable.entry_point public @matmul_64x16 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @matmul_64x16 interface(@io)
+ builtin.module {
func @matmul_64x16() {
%c0 = arith.constant 0 : index
%c16 = arith.constant 16 : index
@@ -176,8 +176,8 @@
max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>,
subgroup_size = 64 : i32}>
}> {
- hal.executable.entry_point public @matmul_400x273 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @matmul_400x273 interface(@io)
+ builtin.module {
func @matmul_400x273() {
%c0 = arith.constant 0 : index
%c11775744 = arith.constant 11775744 : index
@@ -260,8 +260,8 @@
max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>,
subgroup_size = 64 : i32}>
}> {
- hal.executable.entry_point public @matmul_25x546 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @matmul_25x546 interface(@io)
+ builtin.module {
func @matmul_25x546() {
%c0 = arith.constant 0 : index
%c15842560 = arith.constant 15842560 : index
@@ -352,8 +352,8 @@
max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>,
subgroup_size = 32 : i32}>
}> {
- hal.executable.entry_point public @matmul_pointwise_256x1024 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @matmul_pointwise_256x1024 interface(@io)
+ builtin.module {
func @matmul_pointwise_256x1024() {
%c0 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f16
diff --git a/iree/compiler/Codegen/SPIRV/test/config_mali_conv.mlir b/iree/compiler/Codegen/SPIRV/test/config_mali_conv.mlir
index b191169..e559b1f 100644
--- a/iree/compiler/Codegen/SPIRV/test/config_mali_conv.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/config_mali_conv.mlir
@@ -15,8 +15,8 @@
max_compute_workgroup_size = dense<512> : vector<3xi32>,
subgroup_size = 16 : i32}>
}> {
- hal.executable.entry_point public @conv_112x112x512 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @conv_112x112x512 interface(@io)
+ builtin.module {
func @conv_112x112x512() {
%c0 = arith.constant 0 : index
%c512 = arith.constant 512 : index
@@ -102,8 +102,8 @@
max_compute_workgroup_size = dense<512> : vector<3xi32>,
subgroup_size = 16 : i32}>
}> {
- hal.executable.entry_point public @conv_112x112x32 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @conv_112x112x32 interface(@io)
+ builtin.module {
func @conv_112x112x32() {
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
@@ -189,8 +189,8 @@
max_compute_workgroup_size = dense<512> : vector<3xi32>,
subgroup_size = 16 : i32}>
}> {
- hal.executable.entry_point public @conv_16x16x16 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @conv_16x16x16 interface(@io)
+ builtin.module {
func @conv_16x16x16() {
%c0 = arith.constant 0 : index
%c16 = arith.constant 16 : index
@@ -276,8 +276,8 @@
max_compute_workgroup_size = dense<512> : vector<3xi32>,
subgroup_size = 16 : i32}>
}> {
- hal.executable.entry_point public @dwconv_28x28x144 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @dwconv_28x28x144 interface(@io)
+ builtin.module {
func @dwconv_28x28x144() {
%c0 = arith.constant 0 : index
%c144 = arith.constant 144 : index
@@ -365,8 +365,8 @@
max_compute_workgroup_size = dense<512> : vector<3xi32>,
subgroup_size = 16 : i32}>
}> {
- hal.executable.entry_point public @dwconv_1x2x8 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point public @dwconv_1x2x8 interface(@io)
+ builtin.module {
func @dwconv_1x2x8() {
%c0 = arith.constant 0 : index
%c8 = arith.constant 8 : index
diff --git a/iree/compiler/Codegen/SPIRV/test/config_mali_matmul.mlir b/iree/compiler/Codegen/SPIRV/test/config_mali_matmul.mlir
index 187b7f3..c4098e1 100644
--- a/iree/compiler/Codegen/SPIRV/test/config_mali_matmul.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/config_mali_matmul.mlir
@@ -15,8 +15,8 @@
max_compute_workgroup_size = dense<512> : vector<3xi32>,
subgroup_size = 16 : i32}>
}> {
- hal.executable.entry_point @matmul_1024x2048x512 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @matmul_1024x2048x512 interface(@io)
+ builtin.module {
func @matmul_1024x2048x512() {
%c0 = arith.constant 0 : index
%c2048 = arith.constant 2048 : index
@@ -91,8 +91,8 @@
max_compute_workgroup_size = dense<512> : vector<3xi32>,
subgroup_size = 16 : i32}>
}> {
- hal.executable.entry_point @matmul_3136x24x96 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @matmul_3136x24x96 interface(@io)
+ builtin.module {
func @matmul_3136x24x96() {
%c0 = arith.constant 0 : index
%c24 = arith.constant 24 : index
@@ -167,8 +167,8 @@
max_compute_workgroup_size = dense<512> : vector<3xi32>,
subgroup_size = 16 : i32}>
}> {
- hal.executable.entry_point @matmul_196x64x192 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @matmul_196x64x192 interface(@io)
+ builtin.module {
func @matmul_196x64x192() {
%c0 = arith.constant 0 : index
%c64 = arith.constant 64 : index
@@ -243,8 +243,8 @@
max_compute_workgroup_size = dense<512> : vector<3xi32>,
subgroup_size = 16 : i32}>
}> {
- hal.executable.entry_point @matmul_12544x96x16 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @matmul_12544x96x16 interface(@io)
+ builtin.module {
func @matmul_12544x96x16() {
%c0 = arith.constant 0 : index
%c96 = arith.constant 96 : index
@@ -314,8 +314,8 @@
max_compute_workgroup_size = dense<512> : vector<3xi32>,
subgroup_size = 16 : i32}>
}> {
- hal.executable.entry_point @matmul_49x160x576 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @matmul_49x160x576 interface(@io)
+ builtin.module {
func @matmul_49x160x576() {
%c0 = arith.constant 0 : index
%c160 = arith.constant 160 : index
@@ -388,8 +388,8 @@
max_compute_workgroup_size = dense<512> : vector<3xi32>,
subgroup_size = 16 : i32}>
}> {
- hal.executable.entry_point @batch_matmul_4x384x384 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @batch_matmul_4x384x384 interface(@io)
+ builtin.module {
func @batch_matmul_4x384x384() {
%c0 = arith.constant 0 : index
%c384 = arith.constant 384 : index
@@ -474,8 +474,8 @@
max_compute_workgroup_size = dense<512> : vector<3xi32>,
subgroup_size = 16 : i32}>
}> {
- hal.executable.entry_point @batch_matmul_4x2x8 attributes {interface = @io, ordinal = 0 : index}
- builtin.module {
+ hal.executable.entry_point @batch_matmul_4x2x8 interface(@io)
+ builtin.module {
func @batch_matmul_4x2x8() {
%c0 = arith.constant 0 : index
%c8 = arith.constant 8 : index
diff --git a/iree/compiler/Codegen/SPIRV/test/config_nvidia_matmul_cooperative_ops.mlir b/iree/compiler/Codegen/SPIRV/test/config_nvidia_matmul_cooperative_ops.mlir
index 9543810..6392357 100644
--- a/iree/compiler/Codegen/SPIRV/test/config_nvidia_matmul_cooperative_ops.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/config_nvidia_matmul_cooperative_ops.mlir
@@ -33,7 +33,7 @@
max_compute_workgroup_invocations = 1024 : i32,
max_compute_workgroup_size = dense<[2147483647, 65535, 65535]> : vector<3xi32>,
subgroup_size = 32 : i32}>}> {
- hal.executable.entry_point public @matmul_256x1024x128_div_sub attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point public @matmul_256x1024x128_div_sub interface(@io)
builtin.module {
func @matmul_256x1024x128_div_sub() {
%c0 = arith.constant 0 : index
@@ -143,7 +143,7 @@
max_compute_workgroup_invocations = 1024 : i32,
max_compute_workgroup_size = dense<[2147483647, 65535, 65535]> : vector<3xi32>,
subgroup_size = 32 : i32}>}> {
- hal.executable.entry_point public @matmul_256x1024x8 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point public @matmul_256x1024x8 interface(@io)
builtin.module {
func @matmul_256x1024x8() {
%c0 = arith.constant 0 : index
diff --git a/iree/compiler/Codegen/SPIRV/test/convert_to_spirv.mlir b/iree/compiler/Codegen/SPIRV/test/convert_to_spirv.mlir
index 763fb87..25a502c 100644
--- a/iree/compiler/Codegen/SPIRV/test/convert_to_spirv.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/convert_to_spirv.mlir
@@ -7,8 +7,7 @@
}
hal.executable.variant @vulkan, target = <"vulkan-spirv", "vulkan-spirv-fb", {
spv.target_env = #spv.target_env<#spv.vce<v1.3, [Shader], []>, {}>}> {
- hal.executable.entry_point @push_constant attributes {
- interface = @io, ordinal = 0 : index,
+ hal.executable.entry_point @push_constant interface(@io) {
workgroup_size = [32: index, 1: index, 1: index]
}
builtin.module {
@@ -43,8 +42,7 @@
}
hal.executable.variant @vulkan, target = <"vulkan-spirv", "vulkan-spirv-fb", {
spv.target_env = #spv.target_env<#spv.vce<v1.3, [Shader], []>, {}>}> {
- hal.executable.entry_point @resource_bindings_in_same_func attributes {
- interface = @io, ordinal = 0 : index,
+ hal.executable.entry_point @resource_bindings_in_same_func interface(@io) {
workgroup_size = [32: index, 1: index, 1: index]
}
builtin.module {
@@ -95,12 +93,10 @@
}
hal.executable.variant @vulkan, target = <"vulkan-spirv", "vulkan-spirv-fb", {
spv.target_env = #spv.target_env<#spv.vce<v1.3, [Shader], []>, {}>}> {
- hal.executable.entry_point @resource_bindings_in_entry_func1 attributes {
- interface = @io, ordinal = 0 : index,
+ hal.executable.entry_point @resource_bindings_in_entry_func1 interface(@io) {
workgroup_size = [32: index, 1: index, 1: index]
}
- hal.executable.entry_point @resource_bindings_in_entry_func2 attributes {
- interface = @io, ordinal = 0 : index,
+ hal.executable.entry_point @resource_bindings_in_entry_func2 interface(@io) {
workgroup_size = [32: index, 1: index, 1: index]
}
builtin.module {
@@ -151,8 +147,7 @@
}
hal.executable.variant @vulkan, target = <"vulkan-spirv", "vulkan-spirv-fb", {
spv.target_env = #spv.target_env<#spv.vce<v1.3, [Shader], []>, {}>}> {
- hal.executable.entry_point @interface_binding attributes {
- interface = @io, ordinal = 0 : index,
+ hal.executable.entry_point @interface_binding interface(@io) {
workgroup_size = [32: index, 1: index, 1: index]
}
builtin.module {
@@ -193,8 +188,7 @@
}
hal.executable.variant @vulkan, target = <"vulkan-spirv", "vulkan-spirv-fb", {
spv.target_env = #spv.target_env<#spv.vce<v1.3, [Shader], []>, {}>}> {
- hal.executable.entry_point @interface_wg_id attributes {
- interface = @io, ordinal = 0 : index,
+ hal.executable.entry_point @interface_wg_id interface(@io) {
workgroup_size = [32: index, 1: index, 1: index]
}
builtin.module {
@@ -227,8 +221,7 @@
}
hal.executable.variant @vulkan, target = <"vulkan-spirv", "vulkan-spirv-fb", {
spv.target_env = #spv.target_env<#spv.vce<v1.3, [Shader], []>, {}>}> {
- hal.executable.entry_point @interface_wg_count attributes {
- interface = @io, ordinal = 0 : index,
+ hal.executable.entry_point @interface_wg_count interface(@io) {
workgroup_size = [32: index, 1: index, 1: index]
}
builtin.module {
diff --git a/iree/compiler/Codegen/SPIRV/test/pipeline_matmul_cooperative_ops.mlir b/iree/compiler/Codegen/SPIRV/test/pipeline_matmul_cooperative_ops.mlir
index c2632dc..4459496 100644
--- a/iree/compiler/Codegen/SPIRV/test/pipeline_matmul_cooperative_ops.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/pipeline_matmul_cooperative_ops.mlir
@@ -33,7 +33,7 @@
max_compute_workgroup_invocations = 1024 : i32,
max_compute_workgroup_size = dense<[2147483647, 65535, 65535]> : vector<3xi32>,
subgroup_size = 32 : i32}>}> {
- hal.executable.entry_point public @matmul_256x1024x128_div_sub attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point public @matmul_256x1024x128_div_sub interface(@io)
builtin.module {
func @matmul_256x1024x128_div_sub() {
%c0 = arith.constant 0 : index
diff --git a/iree/compiler/Codegen/SPIRV/test/pipeline_matmul_vectorization.mlir b/iree/compiler/Codegen/SPIRV/test/pipeline_matmul_vectorization.mlir
index e0f4089..0d1ba3e 100644
--- a/iree/compiler/Codegen/SPIRV/test/pipeline_matmul_vectorization.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/pipeline_matmul_vectorization.mlir
@@ -13,7 +13,7 @@
max_compute_workgroup_size = dense<512> : vector<3xi32>,
subgroup_size = 16 : i32}>
}> {
- hal.executable.entry_point @fuse_and_vectorize_fill_matmul attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point @fuse_and_vectorize_fill_matmul interface(@io)
builtin.module {
func @fuse_and_vectorize_fill_matmul() {
%c0 = arith.constant 0 : index
@@ -77,7 +77,7 @@
max_compute_workgroup_size = dense<512> : vector<3xi32>,
subgroup_size = 16 : i32}>
}> {
- hal.executable.entry_point @fuse_and_vectorize_matmul_add attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point @fuse_and_vectorize_matmul_add interface(@io)
builtin.module {
func @fuse_and_vectorize_matmul_add() {
%c0 = arith.constant 0 : index
diff --git a/iree/compiler/Codegen/SPIRV/test/promote_workgroup_memory.mlir b/iree/compiler/Codegen/SPIRV/test/promote_workgroup_memory.mlir
index aac32b9..6c3f8de 100644
--- a/iree/compiler/Codegen/SPIRV/test/promote_workgroup_memory.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/promote_workgroup_memory.mlir
@@ -8,8 +8,7 @@
hal.interface.binding @s0b2_xw_external, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @vulkan, target = <"vulkan-spirv", "vulkan-spirv-fb"> {
- hal.executable.entry_point @matmul_promote_workgroup_memory attributes {
- interface = @io, ordinal = 0 : index,
+ hal.executable.entry_point @matmul_promote_workgroup_memory interface(@io) {
workgroup_size = [16: index, 8: index, 1: index]
}
builtin.module {
@@ -82,8 +81,7 @@
hal.interface.binding @s0b2_xw_external, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @vulkan, target = <"vulkan-spirv", "vulkan-spirv-fb"> {
- hal.executable.entry_point @conv_promote_workgroup_memory attributes {
- interface = @io, ordinal = 0 : index,
+ hal.executable.entry_point @conv_promote_workgroup_memory interface(@io) {
workgroup_size = [32: index, 4: index, 1: index]
}
builtin.module {
diff --git a/iree/compiler/Codegen/SPIRV/test/tile_and_distribute.mlir b/iree/compiler/Codegen/SPIRV/test/tile_and_distribute.mlir
index 1bda127..86408d4 100644
--- a/iree/compiler/Codegen/SPIRV/test/tile_and_distribute.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/tile_and_distribute.mlir
@@ -18,8 +18,7 @@
hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @vulkan, target = <"vulkan-spirv", "vulkan-spirv-fb"> {
- hal.executable.entry_point @matmul attributes {
- interface = @io, ordinal = 0 : index,
+ hal.executable.entry_point @matmul interface(@io) {
workgroup_size = [16: index, 8: index, 1: index],
translation.info = #translation
}
@@ -63,6 +62,7 @@
}
}
}
+
// CHECK-LABEL: func @matmul
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
@@ -86,8 +86,7 @@
hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @vulkan, target = <"vulkan-spirv", "vulkan-spirv-fb"> {
- hal.executable.entry_point @conv_1d attributes {
- interface = @io, ordinal = 0 : index,
+ hal.executable.entry_point @conv_1d interface(@io) {
workgroup_size = [32: index, 4: index, 1: index],
translation.info = #translation
}
@@ -165,8 +164,7 @@
hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @vulkan, target = <"vulkan-spirv", "vulkan-spirv-fb"> {
- hal.executable.entry_point @conv_2d attributes {
- interface = @io, ordinal = 0 : index,
+ hal.executable.entry_point @conv_2d interface(@io) {
workgroup_size = [32: index, 4: index, 1: index],
translation.info = #translation
}
@@ -279,8 +277,7 @@
hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @vulkan, target = <"vulkan-spirv", "vulkan-spirv-fb"> {
- hal.executable.entry_point @conv_3d attributes {
- interface = @io, ordinal = 0 : index,
+ hal.executable.entry_point @conv_3d interface(@io) {
workgroup_size = [32: index, 4: index, 1: index],
translation.info = #translation
}
@@ -349,8 +346,7 @@
hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @vulkan, target = <"vulkan-spirv", "vulkan-spirv-fb"> {
- hal.executable.entry_point @pooling_nhwc_max attributes {
- interface = @io, ordinal = 0 : index,
+ hal.executable.entry_point @pooling_nhwc_max interface(@io) {
workgroup_size = [32: index, 4: index, 1: index],
translation.info = #translation
}
diff --git a/iree/compiler/Codegen/SPIRV/test/tile_and_distribute_scatter.mlir b/iree/compiler/Codegen/SPIRV/test/tile_and_distribute_scatter.mlir
index 9ad1b10..dcc163b 100644
--- a/iree/compiler/Codegen/SPIRV/test/tile_and_distribute_scatter.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/tile_and_distribute_scatter.mlir
@@ -10,12 +10,10 @@
}
hal.executable.variant @vulkan_spirv_fb, target = <"vulkan", "vulkan-spirv-fb"> {
- hal.executable.entry_point @static_scatter_update_slice attributes {
- interface = @io, ordinal = 0 : index,
+ hal.executable.entry_point @static_scatter_update_slice interface(@io) {
translation.info = #translation,
workgroup_size = [16 : index, 1 : index, 1 : index]
}
-
builtin.module {
builtin.func @static_scatter_update_slice() {
%c40 = arith.constant 40 : index
diff --git a/iree/compiler/Codegen/SPIRV/test/tile_and_distribute_sort.mlir b/iree/compiler/Codegen/SPIRV/test/tile_and_distribute_sort.mlir
index f56b2f6..499e080 100644
--- a/iree/compiler/Codegen/SPIRV/test/tile_and_distribute_sort.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/tile_and_distribute_sort.mlir
@@ -8,8 +8,7 @@
hal.interface.binding @s0b1_xw_external, set=0, binding=1, type="StorageBuffer"
}
hal.executable.variant @vulkan_spirv_fb, target = <"vulkan-spirv", "vulkan-spirv-fb"> {
- hal.executable.entry_point @static_3d_sort attributes {
- interface = @io, ordinal = 0 : index,
+ hal.executable.entry_point @static_3d_sort interface(@io) {
translation.info = #translation,
workgroup_size = [16 : index, 1 : index, 1 : index]
}
diff --git a/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_batch_matmul.mlir b/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_batch_matmul.mlir
index 1e7d073..6f87fce 100644
--- a/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_batch_matmul.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_batch_matmul.mlir
@@ -10,8 +10,7 @@
hal.interface.binding public @out0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @vulkan, target = <"vulkan-spirv", "vulkan-spirv-fb"> {
- hal.executable.entry_point @fused_fill_batch_matmul attributes {
- interface = @io, ordinal = 0 : index,
+ hal.executable.entry_point @fused_fill_batch_matmul interface(@io) {
workgroup_size = [16: index, 1: index, 1: index],
translation.info = #translation
}
diff --git a/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_conv.mlir b/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_conv.mlir
index 8f07633..97107d2 100644
--- a/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_conv.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_conv.mlir
@@ -10,8 +10,7 @@
hal.interface.binding public @ret0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @vulkan, target = <"vulkan-spirv", "vulkan-spirv-fb"> {
- hal.executable.entry_point @conv_static_shape_f32 attributes {
- interface = @io, ordinal = 0 : index,
+ hal.executable.entry_point @conv_static_shape_f32 interface(@io) {
workgroup_size = [4: index, 4: index, 1: index],
translation.info = #translation
}
@@ -101,8 +100,7 @@
hal.interface.binding public @ret0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @vulkan, target = <"vulkan-spirv", "vulkan-spirv-fb"> {
- hal.executable.entry_point @depthwise_conv_static_shape_f32 attributes {
- interface = @io, ordinal = 0 : index,
+ hal.executable.entry_point @depthwise_conv_static_shape_f32 interface(@io) {
workgroup_size = [4: index, 4: index, 4: index],
translation.info = #translation
}
diff --git a/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_matmul.mlir b/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_matmul.mlir
index d0f8a94..034b419 100644
--- a/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_matmul.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_matmul.mlir
@@ -9,8 +9,7 @@
hal.interface.binding public @ret0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @vulkan, target = <"vulkan-spirv", "vulkan-spirv-fb"> {
- hal.executable.entry_point @matmul_static_shape_f16 attributes {
- interface = @io, ordinal = 0 : index,
+ hal.executable.entry_point @matmul_static_shape_f16 interface(@io) {
workgroup_size = [16: index, 1: index, 1: index],
translation.info = #translation
}
@@ -73,8 +72,7 @@
hal.interface.binding public @ret0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @vulkan, target = <"vulkan-spirv", "vulkan-spirv-fb"> {
- hal.executable.entry_point @matmul_static_shape_f32 attributes {
- interface = @io, ordinal = 0 : index,
+ hal.executable.entry_point @matmul_static_shape_f32 interface(@io) {
workgroup_size = [16: index, 1: index, 1: index],
translation.info = #translation
}
diff --git a/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_to_cooperative_ops.mlir b/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_to_cooperative_ops.mlir
index cdb5148..5793c05 100644
--- a/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_to_cooperative_ops.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_to_cooperative_ops.mlir
@@ -28,8 +28,7 @@
max_compute_workgroup_invocations = 1024 : i32,
max_compute_workgroup_size = dense<[2147483647, 65535, 65535]> : vector<3xi32>,
subgroup_size = 32 : i32}>}> {
- hal.executable.entry_point public @matmul_256x1024x128_div_sub attributes {
- interface = @io, ordinal = 0 : index,
+ hal.executable.entry_point public @matmul_256x1024x128_div_sub interface(@io) {
translation.info = #translation,
workgroup_size = [32 : index, 1 : index, 1 : index]
} {
diff --git a/iree/compiler/Dialect/HAL/IR/HALOps.cpp b/iree/compiler/Dialect/HAL/IR/HALOps.cpp
index 4d8c5e5..4380d46 100644
--- a/iree/compiler/Dialect/HAL/IR/HALOps.cpp
+++ b/iree/compiler/Dialect/HAL/IR/HALOps.cpp
@@ -698,12 +698,19 @@
}
StringAttr nameAttr;
+ SymbolRefAttr interfaceAttr;
if (failed(parser.parseSymbolName(nameAttr,
mlir::SymbolTable::getSymbolAttrName(),
result->attributes)) ||
- failed(parser.parseOptionalAttrDictWithKeyword(result->attributes))) {
+ failed(parser.parseKeyword("interface")) ||
+ failed(parser.parseLParen()) ||
+ failed(parser.parseAttribute(interfaceAttr)) ||
+ failed(parser.parseRParen()) ||
+ failed(parser.parseOptionalAttrDict(result->attributes))) {
return failure();
}
+ result->addAttribute("interface", interfaceAttr);
+
// For now assume that the workload is at max 3D. So arguments to the region
// are workload along x, y and z.
std::unique_ptr<Region> region;
@@ -714,6 +721,7 @@
if (!parseResult.hasValue()) return success();
if (failed(*parseResult)) return failure();
result->addRegion(std::move(region));
+
return success();
}
@@ -723,8 +731,11 @@
printSymbolVisibility(p, op, op->getAttrOfType<StringAttr>("sym_visibility"));
p << ' ';
p.printSymbolName(op.sym_name());
- p.printOptionalAttrDictWithKeyword(op->getAttrs(),
- /*elidedAttrs=*/{"sym_name"});
+ p << " interface(";
+ p.printAttributeWithoutType(op.interfaceAttr());
+ p << ")";
+ p.printOptionalAttrDict(op->getAttrs(),
+ /*elidedAttrs=*/{"sym_name", "interface"});
if (op.workgroup_count_region().empty()) return;
p.printRegion(op.workgroup_count_region().front());
}
diff --git a/iree/compiler/Dialect/HAL/IR/HALOps.td b/iree/compiler/Dialect/HAL/IR/HALOps.td
index b7a6459..01ae682 100644
--- a/iree/compiler/Dialect/HAL/IR/HALOps.td
+++ b/iree/compiler/Dialect/HAL/IR/HALOps.td
@@ -1478,7 +1478,7 @@
let arguments = (ins
OptionalAttr<StrAttr>:$sym_visibility,
SymbolNameAttr:$sym_name,
- HAL_OrdinalAttr:$ordinal,
+ OptionalAttr<HAL_OrdinalAttr>:$ordinal,
FlatSymbolRefAttr:$interface,
OptionalAttr<HAL_WorkgroupSizeAttr>:$workgroup_size,
OptionalAttr<IndexAttr>:$workgroup_local_memory
@@ -1488,16 +1488,6 @@
let builders = [
OpBuilder<(ins
- "::llvm::StringRef":$sym_name,
- "::llvm::APInt":$ordinal,
- "::llvm::StringRef":$interface,
- "::mlir::ArrayAttr":$workgroup_size,
- "::mlir::IntegerAttr":$workgroup_local_memory
- ), [{
- build($_builder, $_state, nullptr, sym_name, ordinal, interface,
- workgroup_size, workgroup_local_memory, 0);
- }]>,
- OpBuilder<(ins
"::mlir::StringAttr":$sym_name,
"::mlir::IntegerAttr":$ordinal,
"::mlir::FlatSymbolRefAttr":$interface,
diff --git a/iree/compiler/Dialect/HAL/IR/test/command_buffer_ops.mlir b/iree/compiler/Dialect/HAL/IR/test/command_buffer_ops.mlir
index b2db2d1..3438f3c 100644
--- a/iree/compiler/Dialect/HAL/IR/test/command_buffer_ops.mlir
+++ b/iree/compiler/Dialect/HAL/IR/test/command_buffer_ops.mlir
@@ -135,8 +135,7 @@
hal.executable @ex {
hal.executable.variant @backend, target = <"backend", "format"> {
- hal.executable.entry_point @entry0 attributes {
- interface = @interface,
+ hal.executable.entry_point @entry0 interface(@interface) {
ordinal = 0 : index
}
}
@@ -164,8 +163,7 @@
hal.executable @ex {
hal.executable.variant @backend, target = <"backend", "format"> {
- hal.executable.entry_point @entry0 attributes {
- interface = @interface,
+ hal.executable.entry_point @entry0 interface(@interface) {
ordinal = 0 : index
}
}
diff --git a/iree/compiler/Dialect/HAL/IR/test/executable_ops.mlir b/iree/compiler/Dialect/HAL/IR/test/executable_ops.mlir
index 7ec0088..7599fc6 100644
--- a/iree/compiler/Dialect/HAL/IR/test/executable_ops.mlir
+++ b/iree/compiler/Dialect/HAL/IR/test/executable_ops.mlir
@@ -6,12 +6,10 @@
hal.executable @ex {
// CHECK: hal.executable.variant public @backend, target = #executable_target_format
hal.executable.variant @backend, target = #executable_target_format {
- // CHECK-DAG: hal.executable.entry_point public @entry0 attributes {
- // CHECK-SAME: interface = @interface
+ // CHECK-DAG: hal.executable.entry_point public @entry0 interface(@interface) {
// CHECK-SAME: ordinal = 0 : index
// CHECK-SAME: workgroup_size = [4 : index, 1 : index, 1 : index]
- hal.executable.entry_point @entry0 attributes {
- interface = @interface,
+ hal.executable.entry_point @entry0 interface(@interface) {
ordinal = 0 : index,
workgroup_size = [4 : index, 1 : index, 1 : index]
}
@@ -40,12 +38,10 @@
hal.executable @ex_with_workgroup_count_region {
// CHECK: hal.executable.variant public @backend, target = #executable_target_format
hal.executable.variant @backend, target = #executable_target_format {
- // CHECK-DAG: hal.executable.entry_point public @entry0 attributes {
- // CHECK-SAME: interface = @interface
+ // CHECK-DAG: hal.executable.entry_point public @entry0 interface(@interface) {
// CHECK-SAME: ordinal = 0 : index
// CHECK-SAME: workgroup_size = [4 : index, 1 : index, 1 : index]
- hal.executable.entry_point @entry0 attributes {
- interface = @interface,
+ hal.executable.entry_point @entry0 interface(@interface) {
ordinal = 0 : index,
workgroup_size = [4 : index, 1 : index, 1 : index]
} {
diff --git a/iree/compiler/Dialect/HAL/Target/TargetBackend.h b/iree/compiler/Dialect/HAL/Target/TargetBackend.h
index bb45c4b..6caf416 100644
--- a/iree/compiler/Dialect/HAL/Target/TargetBackend.h
+++ b/iree/compiler/Dialect/HAL/Target/TargetBackend.h
@@ -136,8 +136,7 @@
// hal.interface.binding @arg1, set=0, binding=1, ...
// }
// hal.executable.variant @target, target="target-backend" {
- // hal.executable.entry_point @main attributes {
- // interface = @main_io,
+ // hal.executable.entry_point @main interface(@main_io) {
// ordinal = 0 : index
// }
// module { ... }
diff --git a/iree/compiler/Dialect/HAL/Target/VMVX/test/linking.mlir b/iree/compiler/Dialect/HAL/Target/VMVX/test/linking.mlir
index 340fd9d..f0f60a4 100644
--- a/iree/compiler/Dialect/HAL/Target/VMVX/test/linking.mlir
+++ b/iree/compiler/Dialect/HAL/Target/VMVX/test/linking.mlir
@@ -9,7 +9,7 @@
hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @vmvx, target = #vmvx_target {
- hal.executable.entry_point @dispatch_0 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point @dispatch_0 interface(@io) {ordinal = 0 : index}
builtin.module {
vm.module @module {
vm.func @dispatch_0() {
@@ -27,7 +27,7 @@
hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @vmvx, target = #vmvx_target {
- hal.executable.entry_point @dispatch_1 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point @dispatch_1 interface(@io) {ordinal = 0 : index}
builtin.module {
vm.module @module {
vm.func @dispatch_1() {
@@ -46,7 +46,7 @@
hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @vmvx, target = #vmvx_target {
- hal.executable.entry_point @dispatch_2 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point @dispatch_2 interface(@io) {ordinal = 0 : index}
builtin.module {
vm.module @module {
vm.func @dispatch_2() {
@@ -94,9 +94,9 @@
// CHECK-NEXT: hal.interface.binding public @ret0, set=0, binding=2, type="StorageBuffer"
// CHECK-NEXT: }
// CHECK-NEXT: hal.executable.variant public @vmvx_bytecode_fb, target = #executable_target_vmvx_bytecode_fb {
-// CHECK-NEXT: hal.executable.entry_point public @dispatch_0 attributes {interface = @io_0, ordinal = 0 : index}
-// CHECK-NEXT: hal.executable.entry_point public @dispatch_1 attributes {interface = @io_0, ordinal = 1 : index}
-// CHECK-NEXT: hal.executable.entry_point public @dispatch_2 attributes {interface = @io_1, ordinal = 2 : index}
+// CHECK-NEXT: hal.executable.entry_point public @dispatch_0 interface(@io_0) {ordinal = 0 : index}
+// CHECK-NEXT: hal.executable.entry_point public @dispatch_1 interface(@io_0) {ordinal = 1 : index}
+// CHECK-NEXT: hal.executable.entry_point public @dispatch_2 interface(@io_1) {ordinal = 2 : index}
// CHECK-NEXT: module {
// CHECK-NEXT: vm.module public @linked_module {
// CHECK-NEXT: vm.func @dispatch_0() {
@@ -141,7 +141,7 @@
hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @vmvx, target = #vmvx_target {
- hal.executable.entry_point @dispatch_0 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point @dispatch_0 interface(@io) {ordinal = 0 : index}
builtin.module {
vm.module @module {
vm.func @dispatch_0() {
@@ -162,7 +162,7 @@
hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @vmvx, target = #vmvx_target {
- hal.executable.entry_point @dispatch_1 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point @dispatch_1 interface(@io) {ordinal = 0 : index}
builtin.module {
vm.module @module {
vm.func @dispatch_1() {
@@ -209,8 +209,8 @@
// CHECK-NEXT: hal.interface.binding public @ret0, set=0, binding=2, type="StorageBuffer"
// CHECK-NEXT: }
// CHECK-NEXT: hal.executable.variant public @vmvx_bytecode_fb, target = #executable_target_vmvx_bytecode_fb {
-// CHECK-NEXT: hal.executable.entry_point public @dispatch_0 attributes {interface = @io_0, ordinal = 0 : index}
-// CHECK-NEXT: hal.executable.entry_point public @dispatch_1 attributes {interface = @io_1, ordinal = 1 : index}
+// CHECK-NEXT: hal.executable.entry_point public @dispatch_0 interface(@io_0) {ordinal = 0 : index}
+// CHECK-NEXT: hal.executable.entry_point public @dispatch_1 interface(@io_1) {ordinal = 1 : index}
// CHECK-NEXT: module {
// CHECK-NEXT: vm.module public @linked_module {
// CHECK-NEXT: vm.func @dispatch_0() {
@@ -263,7 +263,7 @@
hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @vmvx, target = #vmvx_target {
- hal.executable.entry_point @dispatch_0 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point @dispatch_0 interface(@io) {ordinal = 0 : index}
builtin.module {
vm.module @module {}
}
@@ -276,7 +276,7 @@
hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @vmvx, target = #vmvx_target {
- hal.executable.entry_point @dispatch_1 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point @dispatch_1 interface(@io) {ordinal = 0 : index}
builtin.module {
vm.module @module {}
}
@@ -289,7 +289,7 @@
hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @vmvx, target = #vmvx_target {
- hal.executable.entry_point @dispatch_2 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point @dispatch_2 interface(@io) {ordinal = 0 : index}
builtin.module {
vm.module @module {}
}
@@ -320,7 +320,7 @@
hal.executable private @dispatch_0 {
hal.interface @io {}
hal.executable.variant @vmvx, target = #vmvx_target {
- hal.executable.entry_point @dispatch_0 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point @dispatch_0 interface(@io) {ordinal = 0 : index}
builtin.module {
vm.module @module {
vm.rodata public @rodata_a dense<[0]> : tensor<1xi32>
@@ -343,7 +343,7 @@
hal.executable private @dispatch_1 {
hal.interface @io {}
hal.executable.variant @vmvx, target = #vmvx_target {
- hal.executable.entry_point @dispatch_1 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point @dispatch_1 interface(@io) {ordinal = 0 : index}
builtin.module {
vm.module @module {
// Conflict with a public symbol, this should be renamed when linked.
diff --git a/iree/compiler/Dialect/HAL/Target/VMVX/test/smoketest.mlir b/iree/compiler/Dialect/HAL/Target/VMVX/test/smoketest.mlir
index 3762ac9..7313ed1 100644
--- a/iree/compiler/Dialect/HAL/Target/VMVX/test/smoketest.mlir
+++ b/iree/compiler/Dialect/HAL/Target/VMVX/test/smoketest.mlir
@@ -43,8 +43,7 @@
// CHECK-NEXT: hal.interface.binding public @s0b2, set=0, binding=2, type="StorageBuffer"
// CHECK-NEXT: }
// CHECK-NEXT: hal.executable.variant public @vmvx_bytecode_fb, target = #executable_target_vmvx_bytecode_fb {
-// CHECK-NEXT: hal.executable.entry_point public @add_dispatch_0 attributes {
-// CHECK-SAME: interface = @io,
+// CHECK-NEXT: hal.executable.entry_point public @add_dispatch_0 interface(@io) {
// CHECK-SAME: ordinal = 0 : index
// CHECK-SAME: }
// CHECK: module attributes {vm.toplevel} {
diff --git a/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/test/linking.mlir b/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/test/linking.mlir
index ac44fd8..7865ad5 100644
--- a/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/test/linking.mlir
+++ b/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/test/linking.mlir
@@ -11,7 +11,7 @@
hal.interface.binding @s0b1_rw_external, set=0, binding=1, type="StorageBuffer"
}
hal.executable.variant @vulkan_spirv_fb, target = #executable_target_vulkan_spirv_fb {
- hal.executable.entry_point @call_dispatch_0 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point @call_dispatch_0 interface(@io) {ordinal = 0 : index}
builtin.module {
spv.module Logical GLSL450 requires #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]> {
spv.func @call_dispatch_0() "None" {
@@ -34,7 +34,7 @@
hal.interface.binding @s0b2_xw_external, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @vulkan_spirv_fb, target = #executable_target_vulkan_spirv_fb {
- hal.executable.entry_point @call_dispatch_1 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point @call_dispatch_1 interface(@io) {ordinal = 0 : index}
builtin.module {
spv.module Logical GLSL450 requires #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]> {
spv.func @call_dispatch_1() "None" {
@@ -57,7 +57,7 @@
hal.interface.binding @s0b1_rw_external, set=0, binding=1, type="StorageBuffer"
}
hal.executable.variant @vulkan_spirv_fb, target = #executable_target_vulkan_spirv_fb {
- hal.executable.entry_point @call_dispatch_2 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point @call_dispatch_2 interface(@io) {ordinal = 0 : index}
builtin.module {
spv.module Logical GLSL450 requires #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]> {
spv.func @call_dispatch_2() "None" {
@@ -80,7 +80,7 @@
hal.interface.binding @s0b2_xw_external, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @vulkan_spirv_fb, target = #executable_target_vulkan_spirv_fb {
- hal.executable.entry_point @call_dispatch_3 attributes {interface = @io, ordinal = 0 : index} {
+ hal.executable.entry_point @call_dispatch_3 interface(@io) {ordinal = 0 : index} {
^bb0(%arg0: index, %arg1: index, %arg2: index): // no predecessors
%c1 = arith.constant 1 : index
%c56 = arith.constant 56 : index
@@ -110,7 +110,7 @@
hal.interface.binding @s0b2_xw_external, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @vulkan_spirv_fb, target = #executable_target_vulkan_spirv_fb {
- hal.executable.entry_point @call_dispatch_4 attributes {interface = @io, ordinal = 0 : index}
+ hal.executable.entry_point @call_dispatch_4 interface(@io) {ordinal = 0 : index}
builtin.module {
spv.module Logical GLSL450 requires #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]> {
spv.func @call_dispatch_4() "None" {
@@ -137,9 +137,9 @@
// CHECK-NEXT: hal.interface.binding public @s0b2_xw_external, set=0, binding=2, type="StorageBuffer"
// CHECK-NEXT: }
// CHECK-NEXT: hal.executable.variant public @vulkan_spirv_fb, target = #executable_target_vulkan_spirv_fb {
-// CHECK-NEXT: hal.executable.entry_point public @call_dispatch_1 attributes {interface = @io_0, ordinal = 0 : index}
-// CHECK-NEXT: hal.executable.entry_point public @call_dispatch_3 attributes {interface = @io_0, ordinal = 1 : index}
-// CHECK-NEXT: hal.executable.entry_point public @call_dispatch_4 attributes {interface = @io_0, ordinal = 2 : index}
+// CHECK-NEXT: hal.executable.entry_point public @call_dispatch_1 interface(@io_0) {ordinal = 0 : index}
+// CHECK-NEXT: hal.executable.entry_point public @call_dispatch_3 interface(@io_0) {ordinal = 1 : index}
+// CHECK-NEXT: hal.executable.entry_point public @call_dispatch_4 interface(@io_0) {ordinal = 2 : index}
// CHECK-NEXT: module {
// CHECK-NEXT: spv.module Logical GLSL450 requires #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]> {
// CHECK-NEXT: spv.func @call_dispatch_1() "None" {
@@ -168,8 +168,8 @@
// CHECK-NEXT: hal.interface.binding public @s0b1_rw_external, set=0, binding=1, type="StorageBuffer"
// CHECK-NEXT: }
// CHECK-NEXT: hal.executable.variant public @vulkan_spirv_fb, target = #executable_target_vulkan_spirv_fb {
-// CHECK-NEXT: hal.executable.entry_point public @call_dispatch_0 attributes {interface = @io_0, ordinal = 0 : index}
-// CHECK-NEXT: hal.executable.entry_point public @call_dispatch_2 attributes {interface = @io_0, ordinal = 1 : index}
+// CHECK-NEXT: hal.executable.entry_point public @call_dispatch_0 interface(@io_0) {ordinal = 0 : index}
+// CHECK-NEXT: hal.executable.entry_point public @call_dispatch_2 interface(@io_0) {ordinal = 1 : index}
// CHECK-NEXT: module {
// CHECK-NEXT: spv.module Logical GLSL450 requires #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]> {
// CHECK-NEXT: spv.func @call_dispatch_0() "None" {
diff --git a/iree/compiler/Dialect/HAL/Transforms/test/convert_to_hal.mlir b/iree/compiler/Dialect/HAL/Transforms/test/convert_to_hal.mlir
index 99603f3..eea1434 100644
--- a/iree/compiler/Dialect/HAL/Transforms/test/convert_to_hal.mlir
+++ b/iree/compiler/Dialect/HAL/Transforms/test/convert_to_hal.mlir
@@ -21,8 +21,7 @@
hal.interface.binding public @s0b2_wo, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant public @embedded_elf_x86_64, target = #executable_target_embedded_elf_x86_64_ {
- hal.executable.entry_point public @dispatch attributes {
- interface = @io,
+ hal.executable.entry_point public @dispatch interface(@io) {
ordinal = 0 : index,
translation.info = #translation
} {
diff --git a/iree/compiler/Dialect/HAL/Transforms/test/materialize_resource_caches.mlir b/iree/compiler/Dialect/HAL/Transforms/test/materialize_resource_caches.mlir
index 85e6aed..2bed1a2 100644
--- a/iree/compiler/Dialect/HAL/Transforms/test/materialize_resource_caches.mlir
+++ b/iree/compiler/Dialect/HAL/Transforms/test/materialize_resource_caches.mlir
@@ -117,18 +117,15 @@
hal.interface.binding @s0b2, set=0, binding=2, type="StorageBuffer"
}
hal.executable.variant @vmvx, target = <"vmvx", "vmvx-bytecode-fb"> {
- hal.executable.entry_point @entry0 attributes {
- interface = @interface0,
+ hal.executable.entry_point @entry0 interface(@interface0) {
ordinal = 0 : index,
workgroup_size = [32 : index, 1 : index, 1 : index]
}
- hal.executable.entry_point @entry0_alias attributes {
- interface = @interface0,
+ hal.executable.entry_point @entry0_alias interface(@interface0) {
ordinal = 0 : index,
workgroup_size = [32 : index, 1 : index, 1 : index]
}
- hal.executable.entry_point @entry1 attributes {
- interface = @interface1,
+ hal.executable.entry_point @entry1 interface(@interface1) {
ordinal = 1 : index,
workgroup_size = [32 : index, 1 : index, 1 : index]
}
diff --git a/iree/compiler/Dialect/HAL/Transforms/test/resolve_entry_point_ordinals.mlir b/iree/compiler/Dialect/HAL/Transforms/test/resolve_entry_point_ordinals.mlir
index 6e88a4f..127b007 100644
--- a/iree/compiler/Dialect/HAL/Transforms/test/resolve_entry_point_ordinals.mlir
+++ b/iree/compiler/Dialect/HAL/Transforms/test/resolve_entry_point_ordinals.mlir
@@ -6,8 +6,7 @@
hal.interface.binding @s0b1, set=0, binding=1, type="StorageBuffer"
}
hal.executable.variant @target, target = <"vmvx", "vmvx-bytecode-fb"> {
- hal.executable.entry_point @entry attributes {
- interface = @interface,
+ hal.executable.entry_point @entry interface(@interface) {
ordinal = 0 : index,
workgroup_size = [32 : index, 1 : index, 1 : index]
}
@@ -62,8 +61,7 @@
hal.interface.binding @s0b1, set=0, binding=1, type="StorageBuffer"
}
hal.executable.variant @target, target = <"vmvx", "vmvx-bytecode-fb"> {
- hal.executable.entry_point @entry attributes {
- interface = @interface,
+ hal.executable.entry_point @entry interface(@interface) {
ordinal = 0 : index,
workgroup_size = [32 : index, 1 : index, 1 : index]
}