Integrate LLVM at llvm/llvm-project@1cbf8e89b54d

Updates LLVM usage to match
[1cbf8e89b54d](https://github.com/llvm/llvm-project/commit/1cbf8e89b54d)
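
Most of the test churn here tracks the upstream MLIR change that prints
function visibility inline in the `func` syntax rather than as a
`sym_visibility` attribute, so FileCheck expectations are updated to the
new form. A minimal sketch of that syntax change, as reflected in the
test updates below (the `@foo` name is illustrative only):

  // Before: visibility carried as an attribute.
  func @foo() attributes {sym_visibility = "private"}

  // After: visibility printed as part of the op syntax.
  func private @foo()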

PiperOrigin-RevId: 341867974
diff --git a/SUBMODULE_VERSIONS b/SUBMODULE_VERSIONS
index a1dc4c9..38e0fdd 100644
--- a/SUBMODULE_VERSIONS
+++ b/SUBMODULE_VERSIONS
@@ -6,7 +6,7 @@
 4fb0ff7069bd88ee85902f4d0bb62794e5f6d021 third_party/flatcc
 f2fb48c3b3d79a75a88a99fba6576b25d42ec528 third_party/googletest
 e3f662922d8340a7fd46a4f75cad0e72163bf219 third_party/llvm-bazel
-f147f59cd377a6be68e5ca5c343eb11df8e7ee6f third_party/llvm-project
+1cbf8e89b54de939420d53d7a528bec6fbaf0a55 third_party/llvm-project
 17b12a4481daa150e2d1ea3ada086b551b856707 third_party/marl
 9e970e18057e80e8af07f96d73714e395bac98d1 third_party/mlir-emitc
 d8c7ee00a687ac369e62e2032514a93a9b413502 third_party/pybind11
diff --git a/iree/compiler/Conversion/HLOToLinalg/test/pipeline_test.mlir b/iree/compiler/Conversion/HLOToLinalg/test/pipeline_test.mlir
index 402166f..15c4ec5 100644
--- a/iree/compiler/Conversion/HLOToLinalg/test/pipeline_test.mlir
+++ b/iree/compiler/Conversion/HLOToLinalg/test/pipeline_test.mlir
@@ -87,7 +87,7 @@
 //   CHECK-DAG:   %[[ARG1:.+]] = iree.placeholder for "interface buffer" {binding = @legacy_io::@arg1} : memref<512xf32>
 //   CHECK-DAG:   %[[RET0:.+]] = iree.placeholder for "interface buffer" {binding = @legacy_io::@ret0} : memref<4x512xf32>
 //       CHECK:   linalg.generic
-//  CHECK-SAME:     ins(%[[ARG0]], %[[ARG1]], %[[ARG1]], %[[ARG1]] :
+//  CHECK-SAME:     ins(%[[ARG0]], %[[ARG1]] :
 //  CHECK-SAME:     outs(%[[RET0]] :
 
 // -----
diff --git a/iree/compiler/Conversion/HLOToLinalg/test/pw_linalg_fusion.mlir b/iree/compiler/Conversion/HLOToLinalg/test/pw_linalg_fusion.mlir
deleted file mode 100644
index b65cf46..0000000
--- a/iree/compiler/Conversion/HLOToLinalg/test/pw_linalg_fusion.mlir
+++ /dev/null
@@ -1,76 +0,0 @@
-// RUN: iree-opt -split-input-file -iree-codegen-hlo-to-linalg-on-tensors -linalg-fusion-for-tensor-ops %s | IreeFileCheck %s
-
-// CHECK-LABEL: @pw_fusion_two
-func @pw_fusion_two(%arg0: tensor<4x8xi32>, %arg1: tensor<4x8xi32>, %arg2 : tensor<4x8xi32>) -> tensor<4x8xi32> {
-  // CHECK: linalg.generic
-  // CHECK: ^{{[a-zA-Z0-9$._-]+}}
-  // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]]: i32
-  // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]]: i32
-  // CHECK-SAME: %[[ARG2:[a-zA-Z0-9$._-]+]]: i32
-  // CHECK: %[[TEMP:[a-zA-Z0-9$._-]+]] = muli %[[ARG0]], %[[ARG1]]
-  // CHECK: addi %[[TEMP]], %[[ARG2]]
-  // CHECK-NOT: linalg.generic
-  %4 = "mhlo.multiply"(%arg0, %arg1) : (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
-  %5 = "mhlo.add"(%4, %arg2) :  (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
-  return %5 : tensor<4x8xi32>
-}
-
-// -----
-
-// CHECK-LABEL: @pw_fusion_three
-func @pw_fusion_three(%arg0: tensor<4x8xi32>, %arg1: tensor<4x8xi32>, %arg2 : tensor<4x8xi32>, %arg3: tensor<4x8xi32>) -> tensor<4x8xi32> {
-  // CHECK: linalg.generic
-  // CHECK: ^{{[a-zA-Z0-9$._-]+}}
-  // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]]: i32
-  // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]]: i32
-  // CHECK-SAME: %[[ARG2:[a-zA-Z0-9$._-]+]]: i32
-  // CHECK-SAME: %[[ARG3:[a-zA-Z0-9$._-]+]]: i32
-  // CHECK: %[[TEMP1:[a-zA-Z0-9$._-]+]] = muli %[[ARG0]], %[[ARG1]]
-  // CHECK: %[[TEMP2:[a-zA-Z0-9$._-]+]] = addi %[[TEMP1]], %[[ARG2]]
-  // CHECK: subi %[[TEMP2]], %[[ARG3]]
-  // CHECK-NOT: linalg.generic
-  %4 = "mhlo.multiply"(%arg0, %arg1) : (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
-  %5 = "mhlo.add"(%4, %arg2) :  (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
-  %6 = "mhlo.subtract"(%5, %arg3) :  (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
-  return %6: tensor<4x8xi32>
-}
-
-// -----
-
-// CHECK-LABEL: @pw_fusion_dag
-func @pw_fusion_dag(%arg0: tensor<4x8xi32>, %arg1: tensor<4x8xi32>, %arg2 : tensor<4x8xi32>, %arg3: tensor<4x8xi32>) -> tensor<4x8xi32> {
-  // CHECK: linalg.generic
-  // CHECK: ^{{[a-zA-Z0-9$._-]+}}
-  // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]]: i32
-  // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]]: i32
-  // CHECK-SAME: %[[ARG2:[a-zA-Z0-9$._-]+]]: i32
-  // CHECK-SAME: %[[ARG3:[a-zA-Z0-9$._-]+]]: i32
-  // CHECK-DAG: %[[TEMP1:[a-zA-Z0-9$._-]+]] = muli %[[ARG0]], %[[ARG1]]
-  // CHECK-DAG: %[[TEMP2:[a-zA-Z0-9$._-]+]] = addi %[[ARG2]], %[[ARG3]]
-  // CHECK: subi %[[TEMP1]], %[[TEMP2]]
-  // CHECK-NOT: linalg.generic
-  %4 = "mhlo.multiply"(%arg0, %arg1) : (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
-  %5 = "mhlo.add"(%arg2, %arg3) :  (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
-  %6 = "mhlo.subtract"(%4, %5) :  (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
-  return %6: tensor<4x8xi32>
-}
-
-// -----
-
-// CHECK-LABEL: @pw_fusion_dag2
-func @pw_fusion_dag2(%arg0: tensor<4x8xi32>, %arg1: tensor<4x8xi32>, %arg2 : tensor<4x8xi32>) -> tensor<4x8xi32> {
-  // CHECK: linalg.generic
-  // CHECK: ^{{[a-zA-Z0-9$._-]+}}
-  // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]]: i32
-  // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]]: i32
-  // CHECK-SAME: %[[ARG2:[a-zA-Z0-9$._-]+]]: i32
-  // CHECK-SAME: %[[ARG3:[a-zA-Z0-9$._-]+]]: i32
-  // CHECK-DAG: %[[TEMP1:[a-zA-Z0-9$._-]+]] = muli %[[ARG0]], %[[ARG1]]
-  // CHECK-DAG: %[[TEMP2:[a-zA-Z0-9$._-]+]] = addi %[[ARG2]], %[[ARG3]]
-  // CHECK: subi %[[TEMP1]], %[[TEMP2]]
-  // CHECK-NOT: linalg.generic
-  %3 = "mhlo.multiply"(%arg0, %arg1) : (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
-  %4 = "mhlo.add"(%arg0, %arg2) :  (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
-  %5 = "mhlo.subtract"(%3, %4) :  (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
-  return %5: tensor<4x8xi32>
-}
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/test/convert_to_gpu.mlir b/iree/compiler/Conversion/LinalgToSPIRV/test/convert_to_gpu.mlir
index c4419d8..a63c723 100644
--- a/iree/compiler/Conversion/LinalgToSPIRV/test/convert_to_gpu.mlir
+++ b/iree/compiler/Conversion/LinalgToSPIRV/test/convert_to_gpu.mlir
@@ -126,7 +126,7 @@
 //       CHECK:       load %{{.+}}[%[[IV0]], %[[IV1]], %[[IV2]], %[[IV3]]]
 //       CHECK:       store %{{.+}}[%[[IV0]], %[[IV1]], %[[IV2]], %[[IV3]]]
 
-//       CHECK: func @[[NUM_WORKGROUPS_FN]]
+//       CHECK: func private @[[NUM_WORKGROUPS_FN]]
 //   CHECK-DAG:   %[[C1:.+]] = constant 1 : index
 //   CHECK-DAG:   %[[C12:.+]] = constant 12 : index
 //       CHECK:   return %[[C12]], %[[C1]], %[[C1]]
@@ -179,7 +179,7 @@
 //  CHECK-NEXT:     store
 //  CHECK-NEXT:     return
 
-//       CHECK: func @[[NUM_WORKGROUPS_FN]]
+//       CHECK: func private @[[NUM_WORKGROUPS_FN]]
 //   CHECK-DAG:   %[[C1:.+]] = constant 1 : index
 //       CHECK:   return %[[C1]], %[[C1]], %[[C1]]
 
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/test/linalg_tile_and_fuse.mlir b/iree/compiler/Conversion/LinalgToSPIRV/test/linalg_tile_and_fuse.mlir
index 1f27ed9..a115363 100644
--- a/iree/compiler/Conversion/LinalgToSPIRV/test/linalg_tile_and_fuse.mlir
+++ b/iree/compiler/Conversion/LinalgToSPIRV/test/linalg_tile_and_fuse.mlir
@@ -84,7 +84,7 @@
 //       CHECK:   linalg.conv
 //  CHECK-SAME:     %[[ARG0]], %[[VIEW1]], %[[VIEW2]]
 //  CHECK-SAME:     "workgroup"
-//       CHECK: func @[[NUM_WORKGROUPS_FN]]
+//       CHECK: func private @[[NUM_WORKGROUPS_FN]]
 //   CHECK-DAG:   %[[C0:.+]] = constant 0
 //   CHECK-DAG:   %[[C1:.+]] = constant 1
 //   CHECK-DAG:   %[[C2:.+]] = constant 2
@@ -158,7 +158,7 @@
 //  CHECK-SAME:     "workgroup"
 //  CHECK-SAME:     ins(%[[VIEW0]], %[[VIEW1]]
 //  CHECK-SAME:     outs(%[[VIEW2]]
-//       CHECK: func @[[NUM_WORKGROUPS_FN]]
+//       CHECK: func private @[[NUM_WORKGROUPS_FN]]
 //   CHECK-DAG:   %[[C8:.+]] = constant 8 : index
 //   CHECK-DAG:   %[[C7:.+]] = constant 7 : index
 //   CHECK-DAG:   %[[C0:.+]] = constant 0 : index
@@ -222,7 +222,7 @@
 //       CHECK:   linalg.pooling_max
 //  CHECK-SAME:     %[[VIEW0]], %[[ARG1]], %[[VIEW2]]
 //  CHECK-SAME:     "workgroup"
-//       CHECK: func @[[NUM_WORKGROUPS_FN]]
+//       CHECK: func private @[[NUM_WORKGROUPS_FN]]
 //   CHECK-DAG:   %[[C0:.+]] = constant 0
 //   CHECK-DAG:   %[[C1:.+]] = constant 1
 //   CHECK-DAG:   %[[C32:.+]] = constant 32
@@ -288,7 +288,7 @@
 //       CHECK:   linalg.pooling_max
 //  CHECK-SAME:     %[[VIEW0]], %[[ARG1]], %[[VIEW2]]
 //  CHECK-SAME:     "workgroup"
-//       CHECK: func @[[NUM_WORKGROUPS_FN]]
+//       CHECK: func private @[[NUM_WORKGROUPS_FN]]
 //   CHECK-DAG:   %[[C1:.+]] = constant 1
 //   CHECK-DAG:   %[[C32:.+]] = constant 32
 //   CHECK-DAG:   %[[C31:.+]] = constant 31
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/test/split_dispatch_function.mlir b/iree/compiler/Conversion/LinalgToSPIRV/test/split_dispatch_function.mlir
index c543614..7f7ad16 100644
--- a/iree/compiler/Conversion/LinalgToSPIRV/test/split_dispatch_function.mlir
+++ b/iree/compiler/Conversion/LinalgToSPIRV/test/split_dispatch_function.mlir
@@ -166,7 +166,7 @@
 //      CHECK:   linalg.conv(%[[IN2]], %[[TS1]], %[[TS2]])
 //      CHECK:   return
 
-//      CHECK: func @[[NUM_WORKGROUPS_FN2]]
+//      CHECK: func private @[[NUM_WORKGROUPS_FN2]]
 
 //      CHECK: func @kernel_dispatch_1()
 // CHECK-SAME: {hal.num_workgroups_fn = @[[NUM_WORKGROUPS_FN1:.+]]}
@@ -176,7 +176,7 @@
 //      CHECK:     scf.yield
 //      CHECK:   return
 
-//      CHECK: func @[[NUM_WORKGROUPS_FN1]]
+//      CHECK: func private @[[NUM_WORKGROUPS_FN1]]
 
 //      CHECK: func @kernel_dispatch_0()
 // CHECK-SAME: {hal.num_workgroups_fn = @[[NUM_WORKGROUPS_FN0:.+]]}
@@ -188,7 +188,7 @@
 //      CHECK:   linalg.fill(%[[TS]], %[[ZERO]])
 //      CHECK:   return
 
-//      CHECK: func @[[NUM_WORKGROUPS_FN0]]
+//      CHECK: func private @[[NUM_WORKGROUPS_FN0]]
 
   func @kernel() attributes {hal.num_workgroups_fn = @kernel__num_workgroups__} {
     %cst = constant 0.000000e+00 : f32
diff --git a/iree/compiler/Dialect/HAL/Transforms/test/identify_constant_pools.mlir b/iree/compiler/Dialect/HAL/Transforms/test/identify_constant_pools.mlir
index 3c58678..56a3474 100644
--- a/iree/compiler/Dialect/HAL/Transforms/test/identify_constant_pools.mlir
+++ b/iree/compiler/Dialect/HAL/Transforms/test/identify_constant_pools.mlir
@@ -27,7 +27,7 @@
 
 // CHECK: flow.variable @variable_0 mutable init(@variable_0_initializer)
 flow.variable @variable_0 mutable dense<3.0> : tensor<128xf32>
-// CHECK-NEXT: func @variable_0_initializer() -> tensor<128xf32>
+// CHECK-NEXT: func private @variable_0_initializer() -> tensor<128xf32>
 // CHECK-NEXT:   [[CONST:%.+]] = hal.constant_pool.load @_const_pool_init::@variable_0 : tensor<128xf32>
 // CHECK-NEXT:   return [[CONST]] : tensor<128xf32>
 // CHECK-NEXT: }
diff --git a/iree/compiler/Dialect/HAL/Transforms/test/materialize_constant_pool_buffers.mlir b/iree/compiler/Dialect/HAL/Transforms/test/materialize_constant_pool_buffers.mlir
index 54b5ae2..3232083 100644
--- a/iree/compiler/Dialect/HAL/Transforms/test/materialize_constant_pool_buffers.mlir
+++ b/iree/compiler/Dialect/HAL/Transforms/test/materialize_constant_pool_buffers.mlir
@@ -10,7 +10,7 @@
 }
 
 //      CHECK: hal.variable @dense_variable_init_storage_buffer init(@dense_variable_init_storage_buffer_initializer) : !hal.buffer
-// CHECK-NEXT: func @dense_variable_init_storage_buffer_initializer() -> !hal.buffer
+// CHECK-NEXT: func private @dense_variable_init_storage_buffer_initializer() -> !hal.buffer
 //      CHECK: [[STORAGE:%.+]] = hal.constant_storage.lookup @dense_variable_init::@_storage : !iree.byte_buffer
 //      CHECK: = hal.allocator.map {{.+}} [[STORAGE]][%c0, %c768] : !iree.byte_buffer -> !hal.buffer
 
@@ -25,7 +25,7 @@
 }
 
 //      CHECK: hal.variable @splat_variable_init_splats init(@splat_variable_init_splats_initializer) : !hal.buffer
-// CHECK-NEXT: func @splat_variable_init_splats_initializer() -> !hal.buffer
+// CHECK-NEXT: func private @splat_variable_init_splats_initializer() -> !hal.buffer
 //      CHECK: [[BUFFER:%.+]] = hal.allocator.allocate {{.+}} %c64 : !hal.buffer
 //      CHECK: hal.buffer.fill [[BUFFER]], %c0, %c4, %c1065353216_i32
 //      CHECK: hal.buffer.fill [[BUFFER]], %c32, %c32_0, %c1234567890_i32
@@ -47,15 +47,15 @@
 }
 
 //      CHECK: hal.variable @pool_storage0_buffer init(@pool_storage0_buffer_initializer) : !hal.buffer
-// CHECK-NEXT: func @pool_storage0_buffer_initializer() -> !hal.buffer
+// CHECK-NEXT: func private @pool_storage0_buffer_initializer() -> !hal.buffer
 //      CHECK: [[STORAGE:%.+]] = hal.constant_storage.lookup @pool::@_storage0 : !iree.byte_buffer
 //      CHECK: = hal.allocator.map {{.+}} [[STORAGE]][%c0, %c16] : !iree.byte_buffer -> !hal.buffer
 
 //      CHECK: hal.variable @pool_storage1_buffer init(@pool_storage1_buffer_initializer) : !hal.buffer
-// CHECK-NEXT: func @pool_storage1_buffer_initializer() -> !hal.buffer
+// CHECK-NEXT: func private @pool_storage1_buffer_initializer() -> !hal.buffer
 
 //      CHECK: hal.variable @pool_splats init(@pool_splats_initializer) : !hal.buffer
-// CHECK-NEXT: func @pool_splats_initializer() -> !hal.buffer
+// CHECK-NEXT: func private @pool_splats_initializer() -> !hal.buffer
 //      CHECK: [[BUFFER:%.+]] = hal.allocator.allocate %allocator, "HostVisible|DeviceVisible|DeviceLocal", "Constant|Transfer|Mapping|Dispatch", %c64 : !hal.buffer
 //      CHECK: hal.buffer.fill [[BUFFER]], %c0, %c4, %c1065353216_i32
 //      CHECK: hal.buffer.fill [[BUFFER]], %c32, %c32_0, %c1234567890_i32
diff --git a/iree/compiler/Dialect/HAL/Transforms/test/materialize_interfaces.mlir b/iree/compiler/Dialect/HAL/Transforms/test/materialize_interfaces.mlir
index 499d36f..e2640e7 100644
--- a/iree/compiler/Dialect/HAL/Transforms/test/materialize_interfaces.mlir
+++ b/iree/compiler/Dialect/HAL/Transforms/test/materialize_interfaces.mlir
@@ -24,7 +24,7 @@
     // CHECK-NEXT:   hal.interface.store.tensor %[[RET0]], @legacy_io::@ret0, offset = %[[ZERO]] : tensor<4xf32>
     // CHECK-NEXT:   return
     // CHECK-NEXT: }
-    // CHECK-NEXT: func @simpleMath_rgn_dispatch_0_impl
+    // CHECK-NEXT: func private @simpleMath_rgn_dispatch_0_impl
     func @simpleMath_rgn_dispatch_0(%arg0: tensor<4xf32>) -> tensor<4xf32> {
       %0 = mhlo.add %arg0, %arg0 : tensor<4xf32>
       return %0 : tensor<4xf32>
@@ -64,7 +64,7 @@
     // CHECK-NEXT:   hal.interface.store.tensor %[[RET0_I8]], @legacy_io::@ret0, offset = %[[ZERO]] : tensor<4xi8>
     // CHECK-NEXT:   return
     // CHECK-NEXT: }
-    // CHECK-NEXT: func @bools_rgn_dispatch_0_impl(%arg0: tensor<4xi1>, %arg1: tensor<4xi1>) -> tensor<4xi1>
+    // CHECK-NEXT: func private @bools_rgn_dispatch_0_impl(%arg0: tensor<4xi1>, %arg1: tensor<4xi1>) -> tensor<4xi1>
     func @bools_rgn_dispatch_0(%arg0: tensor<4xi1>, %arg1: tensor<4xi1>) -> tensor<4xi1> {
       %0 = mhlo.and %arg0, %arg1 : tensor<4xi1>
       %c = mhlo.constant dense<[false, false, true, false]> : tensor<4xi1>
@@ -96,7 +96,7 @@
     //  CHECK-NEXT:   hal.interface.store.tensor %[[RET0]], @legacy_io::@ret0, offset = %[[ZERO]] : tensor<7x?x10xf32>
     //  CHECK-NEXT:   return
     //  CHECK-NEXT: }
-    //  CHECK-NEXT: func @entry_impl
+    //  CHECK-NEXT: func private @entry_impl
     func @entry(%arg0: tensor<?x7x10xf32>, %arg1: index, %arg2: index) -> tensor<7x?x10xf32> {
       %0 = shapex.make_ranked_shape %arg1 : (index) -> !shapex.ranked_shape<[?,7,10]>
       %1 = shapex.make_ranked_shape %arg2 : (index) -> !shapex.ranked_shape<[7,?,10]>
diff --git a/iree/compiler/Dialect/HAL/Transforms/test/materialize_resource_caches.mlir b/iree/compiler/Dialect/HAL/Transforms/test/materialize_resource_caches.mlir
index 590df15..aff893c 100644
--- a/iree/compiler/Dialect/HAL/Transforms/test/materialize_resource_caches.mlir
+++ b/iree/compiler/Dialect/HAL/Transforms/test/materialize_resource_caches.mlir
@@ -1,7 +1,7 @@
 // RUN: iree-opt -split-input-file -iree-hal-materialize-resource-caches %s -iree-hal-target-backends=vmla | IreeFileCheck %s
 
 //      CHECK: hal.variable @_descriptor_set_layout_0 init(@_descriptor_set_layout_0_initializer) : !hal.descriptor_set_layout
-// CHECK-NEXT: func @_descriptor_set_layout_0_initializer() -> !hal.descriptor_set_layout attributes {sym_visibility = "private"} {
+// CHECK-NEXT: func private @_descriptor_set_layout_0_initializer() -> !hal.descriptor_set_layout {
 // CHECK-NEXT:   %dev = hal.ex.shared_device : !hal.device
 // CHECK-NEXT:   %descriptor_set_layout = hal.descriptor_set_layout.create %dev, "PushOnly", bindings = [#hal.descriptor_set_layout_binding<0, "StorageBuffer", "Read">, #hal.descriptor_set_layout_binding<1, "StorageBuffer", "Write">] : !hal.descriptor_set_layout
 // CHECK-NEXT:   return %descriptor_set_layout : !hal.descriptor_set_layout
@@ -23,7 +23,7 @@
 // CHECK: hal.variable @_descriptor_set_layout_0 init(@_descriptor_set_layout_0_initializer) : !hal.descriptor_set_layout
 
 //      CHECK: hal.variable @_executable_layout_0 init(@_executable_layout_0_initializer) : !hal.executable_layout
-// CHECK-NEXT: func @_executable_layout_0_initializer() -> !hal.executable_layout attributes {sym_visibility = "private"} {
+// CHECK-NEXT: func private @_executable_layout_0_initializer() -> !hal.executable_layout {
 // CHECK-NEXT:   %0 = hal.variable.load @_descriptor_set_layout_0 : !hal.descriptor_set_layout
 // CHECK-NEXT:   %dev = hal.ex.shared_device : !hal.device
 // CHECK-NEXT:   %executable_layout = hal.executable_layout.create %dev, set_layouts = [%0], push_constants = 0 : !hal.executable_layout
@@ -49,7 +49,7 @@
 // CHECK: hal.variable @_descriptor_set_layout_1
 
 //      CHECK: hal.variable @_executable_layout_0 init(@_executable_layout_0_initializer) : !hal.executable_layout
-// CHECK-NEXT: func @_executable_layout_0_initializer() -> !hal.executable_layout attributes {sym_visibility = "private"} {
+// CHECK-NEXT: func private @_executable_layout_0_initializer() -> !hal.executable_layout {
 // CHECK-NEXT:   %0 = hal.variable.load @_descriptor_set_layout_0 : !hal.descriptor_set_layout
 // CHECK-NEXT:   %1 = hal.variable.load @_descriptor_set_layout_1 : !hal.descriptor_set_layout
 // CHECK-NEXT:   %dev = hal.ex.shared_device : !hal.device
@@ -119,7 +119,7 @@
 }
 
 //      CHECK: hal.variable @_executable_cache init(@_executable_cache_initializer) : !hal.executable_cache
-// CHECK-NEXT: func @_executable_cache_initializer
+// CHECK-NEXT: func private @_executable_cache_initializer
 //      CHECK: %[[CACHE:.+]] = hal.executable_cache.create %dev, identifier = "default" : !hal.executable_cache
 // CHECK-NEXT: hal.device.switch(%dev : !hal.device)
 // CHECK-NEXT: #hal.device.match.id<"vmla">(%[[CACHE_CAPTURE:.+]] = %executable_cache_default : !hal.executable_cache) {
diff --git a/iree/compiler/Dialect/HAL/Transforms/test/memoize_device_queries.mlir b/iree/compiler/Dialect/HAL/Transforms/test/memoize_device_queries.mlir
index a081a4c..b6b7339 100644
--- a/iree/compiler/Dialect/HAL/Transforms/test/memoize_device_queries.mlir
+++ b/iree/compiler/Dialect/HAL/Transforms/test/memoize_device_queries.mlir
@@ -1,7 +1,7 @@
 // RUN: iree-opt -split-input-file -iree-hal-memoize-device-queries %s | IreeFileCheck %s
 
 //      CHECK: hal.variable @_device_match_id_0 init(@_device_match_id_0_initializer) : i1
-//      CHECK: func @_device_match_id_0_initializer() -> i1
+//      CHECK: func private @_device_match_id_0_initializer() -> i1
 // CHECK-NEXT:   %[[DEVICE:.+]] = hal.ex.shared_device : !hal.device
 // CHECK-NEXT:   %[[IS_MATCH:.+]] = hal.device.match.id %[[DEVICE]], pattern = ["vulkan-v1.?-*"] : (!hal.device) -> i1
 // CHECK-NEXT:   return %[[IS_MATCH]] : i1
diff --git a/iree/compiler/Dialect/VM/Transforms/test/mark_public_symbols_exported.mlir b/iree/compiler/Dialect/VM/Transforms/test/mark_public_symbols_exported.mlir
index 8bd5256..3310761 100644
--- a/iree/compiler/Dialect/VM/Transforms/test/mark_public_symbols_exported.mlir
+++ b/iree/compiler/Dialect/VM/Transforms/test/mark_public_symbols_exported.mlir
@@ -1,8 +1,7 @@
 // RUN: iree-opt -split-input-file -iree-vm-mark-public-symbols-exported %s | IreeFileCheck %s
 
-// CHECK-LABEL: @private_symbol
-// CHECK-SAME: {sym_visibility = "private"}
-func @private_symbol() attributes {sym_visibility = "private"}
+// CHECK-LABEL: private @private_symbol
+func private @private_symbol()
 
 // CHECK-LABEL: @public_symbol
 // CHECK-SAME: {iree.module.export}
diff --git a/iree/compiler/Dialect/VMLA/Conversion/HLOToVMLA/test/convert.mlir b/iree/compiler/Dialect/VMLA/Conversion/HLOToVMLA/test/convert.mlir
index 95759c6..8abb0f5 100644
--- a/iree/compiler/Dialect/VMLA/Conversion/HLOToVMLA/test/convert.mlir
+++ b/iree/compiler/Dialect/VMLA/Conversion/HLOToVMLA/test/convert.mlir
@@ -1,14 +1,14 @@
 // RUN: iree-opt -split-input-file -iree-vmla-conversion %s | IreeFileCheck %s
 
-// CHECK-LABEL: func @basic
-func @basic(%arg0 : tensor<5xf32>) -> (tensor<5xi32>) attributes { sym_visibility = "private" } {
+// CHECK-LABEL: func private @basic
+func private @basic(%arg0 : tensor<5xf32>) -> (tensor<5xi32>) {
   // CHECK: vmla.convert
   %0 = "mhlo.convert"(%arg0) : (tensor<5xf32>) -> tensor<5xi32>
   return %0 : tensor<5xi32>
 }
 
-// CHECK-LABEL: func @noop
-func @noop(%arg0 : tensor<?xf32>) -> (tensor<5xf32>) attributes { sym_visibility = "private" } {
+// CHECK-LABEL: func private @noop
+func private @noop(%arg0 : tensor<?xf32>) -> (tensor<5xf32>) {
   // CHECK: return %arg0
   %0 = "mhlo.convert"(%arg0) : (tensor<?xf32>) -> tensor<5xf32>
   return %0 : tensor<5xf32>
diff --git a/iree/compiler/Dialect/VMLA/Conversion/HLOToVMLA/test/sort.mlir b/iree/compiler/Dialect/VMLA/Conversion/HLOToVMLA/test/sort.mlir
index 0903793..826cfc8 100644
--- a/iree/compiler/Dialect/VMLA/Conversion/HLOToVMLA/test/sort.mlir
+++ b/iree/compiler/Dialect/VMLA/Conversion/HLOToVMLA/test/sort.mlir
@@ -1,6 +1,6 @@
 // RUN: iree-opt -split-input-file -iree-vmla-pre-conversion-lowering -iree-vmla-conversion -canonicalize %s | IreeFileCheck %s
 
-func @sort1D(%arg0 : tensor<4xf32>) -> tensor<4xf32> attributes { sym_visibility = "private" } {
+func private @sort1D(%arg0 : tensor<4xf32>) -> tensor<4xf32> {
   // CHECK-DAG: [[C16:%.+]] = constant 16 : index
   // CHECK-DAG: [[RS:%.+]] = shapex.const_ranked_shape : !shapex.ranked_shape<[4]>
   // CHECK-DAG: [[BL:%.+]] = vmla.buffer.alloc byte_length = [[C16]] : !vmla.buffer
@@ -18,8 +18,8 @@
 }
 
 
-// CHECK-LABEL: func @sort2D
-func @sort2D(%arg0 : tensor<4x4xf32>) -> tensor<4x4xf32> attributes { sym_visibility = "private" } {
+// CHECK-LABEL: func private @sort2D
+func private @sort2D(%arg0 : tensor<4x4xf32>) -> tensor<4x4xf32> {
   // CHECK-DAG: [[C64:%.+]] = constant 64 : index
   // CHECK-DAG: [[RS:%.+]] = shapex.const_ranked_shape : !shapex.ranked_shape<[4,4]>
   // CHECK-DAG: [[BL:%.+]] = vmla.buffer.alloc byte_length = [[C64]] : !vmla.buffer
diff --git a/iree/compiler/Dialect/VMLA/Transforms/test/pre_conversion_lowering.mlir b/iree/compiler/Dialect/VMLA/Transforms/test/pre_conversion_lowering.mlir
index 7d9e5ee..366dae2 100644
--- a/iree/compiler/Dialect/VMLA/Transforms/test/pre_conversion_lowering.mlir
+++ b/iree/compiler/Dialect/VMLA/Transforms/test/pre_conversion_lowering.mlir
@@ -16,8 +16,8 @@
 
 // -----
 
-// CHECK-LABEL: func @f
-func @f(%arg0 : tensor<4xf32>) -> tensor<4xf32> attributes { sym_visibility = "private" } {
+// CHECK-LABEL: func private @f
+func private @f(%arg0 : tensor<4xf32>) -> tensor<4xf32> {
   // CHECK-DAG: [[SORT:%.+]] = vmla.sort.pseudo %arg0
   // CHECK-DAG: [[GATHER:%.+]] = "mhlo.torch_index_select"(%arg0, [[SORT]]) {batch_dims = 0 : i64, dim = 0 : i64}
   %sort = "mhlo.sort"(%arg0) ( {
@@ -32,8 +32,8 @@
 
 // -----
 
-// CHECK-LABEL: func @f
-func @f(%arg0 : tensor<4x4xf32>) -> tensor<4x4xf32> attributes { sym_visibility = "private" } {
+// CHECK-LABEL: func private @f
+func private @f(%arg0 : tensor<4x4xf32>) -> tensor<4x4xf32> {
   // CHECK-DAG: [[SORT:%.+]] = vmla.sort.pseudo %arg0
   // CHECK-DAG: [[GATHER:%.+]] = "mhlo.torch_index_select"(%arg0, [[SORT]]) {batch_dims = 1 : i64, dim = 1 : i64}
   %sort = "mhlo.sort"(%arg0) ( {
@@ -66,8 +66,8 @@
 
 // -----
 
-// CHECK-LABEL: func @f
-func @f(%arg0: tensor<8xcomplex<f32>>) -> tensor<8xcomplex<f32>> attributes { sym_visibility = "private" } {
+// CHECK-LABEL: func private @f
+func private @f(%arg0: tensor<8xcomplex<f32>>) -> tensor<8xcomplex<f32>> {
   // CHECK-DAG: [[REAL:%.+]] = "mhlo.real"(%arg0)
   // CHECK-DAG: [[IMAG:%.+]] = "mhlo.imag"(%arg0)
   // CHECK-DAG: [[REAL_OUT:%.+]], [[IMAG_OUT:%.+]] = vmla.fft.pseudo [[REAL]], [[IMAG]]