Integrate LLVM at llvm/llvm-project@93fd30bac334

Updates LLVM usage to match
[93fd30bac334](https://github.com/llvm/llvm-project/commit/93fd30bac334)
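
The bulk of this change adapts to the upstream switch of the linalg
named ops (linalg.matmul, linalg.batch_matmul) to the explicit
ins/outs operand form. A minimal before/after sketch of the assembly
syntax, using the shapes exercised by the updated dot.mlir test below:

    // Before this integrate:
    linalg.matmul %A, %B, %C
      : (memref<2x3xf32>, memref<3x2xf32>, memref<2x2xf32>)

    // After this integrate:
    linalg.matmul ins(%A, %B : memref<2x3xf32>, memref<3x2xf32>)
                 outs(%C : memref<2x2xf32>)

On the C++ side, the matching builders drop the leading TypeRange and
take the inputs and outputs as two separate ranges, e.g.
rewriter.create<linalg::BatchMatmulOp>(loc, inputBuffers, resultBuffers)
as in HLOToLinalgOnBuffers.cpp below.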

PiperOrigin-RevId: 332436415
diff --git a/SUBMODULE_VERSIONS b/SUBMODULE_VERSIONS
index c463090..82aa271 100644
--- a/SUBMODULE_VERSIONS
+++ b/SUBMODULE_VERSIONS
@@ -4,7 +4,7 @@
 a5d9d0f7d368054fd1691aedf1db4116efcc233e third_party/flatbuffers
 4fb0ff7069bd88ee85902f4d0bb62794e5f6d021 third_party/flatcc
 f2fb48c3b3d79a75a88a99fba6576b25d42ec528 third_party/googletest
-c0e7f64685789520ad732d9dd6bf388dc916e518 third_party/llvm-project
+93fd30bac3345fea4f5beba3241f1ef4f2f5f419 third_party/llvm-project
 17b12a4481daa150e2d1ea3ada086b551b856707 third_party/marl
 5dfc6b09104202d662efc7cb93d17c94eb269df7 third_party/mlir-emitc
 d8c7ee00a687ac369e62e2032514a93a9b413502 third_party/pybind11
diff --git a/experimental/ModelBuilder/test/BenchMatMulVectorGPU.cpp b/experimental/ModelBuilder/test/BenchMatMulVectorGPU.cpp
index b2e6dc4..c3aa9ad 100644
--- a/experimental/ModelBuilder/test/BenchMatMulVectorGPU.cpp
+++ b/experimental/ModelBuilder/test/BenchMatMulVectorGPU.cpp
@@ -282,7 +282,7 @@
     auto B = kernelFunc.getArgument(1);
     auto C = kernelFunc.getArgument(2);
 
-    linalg_matmul(TypeRange{}, ValueRange{A, B, C});
+    linalg_matmul(ValueRange{A, B}, ValueRange{C});
     std_ret();
   }
 
diff --git a/experimental/ModelBuilder/test/TestMatMulVulkan.cpp b/experimental/ModelBuilder/test/TestMatMulVulkan.cpp
index 72c0bcc..a8590ac 100644
--- a/experimental/ModelBuilder/test/TestMatMulVulkan.cpp
+++ b/experimental/ModelBuilder/test/TestMatMulVulkan.cpp
@@ -89,7 +89,7 @@
     Value A = kernelFunc.getArgument(0);
     Value B = kernelFunc.getArgument(1);
     Value C = kernelFunc.getArgument(2);
-    (linalg_matmul(TypeRange{}, ValueRange{A, B, C}));
+    (linalg_matmul(ValueRange{A, B}, ValueRange{C}));
     std_ret();
   }
   // 2. Compile the function, pass in runtime support library
diff --git a/iree/compiler/Conversion/HLOToLinalg/HLOToLinalgOnBuffers.cpp b/iree/compiler/Conversion/HLOToLinalg/HLOToLinalgOnBuffers.cpp
index 78e2101..37ed038 100644
--- a/iree/compiler/Conversion/HLOToLinalg/HLOToLinalgOnBuffers.cpp
+++ b/iree/compiler/Conversion/HLOToLinalg/HLOToLinalgOnBuffers.cpp
@@ -280,9 +280,7 @@
         rewriter.notifyMatchFailure(op, "failed to zero fill result buffer");
         return failure();
       }
-      rewriter.create<LinalgOpTy>(
-          op.getLoc(), TypeRange{},
-          ValueRange{inputBuffers[0], inputBuffers[1], resultBuffers[0]});
+      rewriter.create<LinalgOpTy>(op.getLoc(), inputBuffers, resultBuffers);
       return success();
     }
     return failure();
@@ -340,9 +338,8 @@
       return rewriter.notifyMatchFailure(op,
                                          "failed to zero fill result buffer");
     }
-    rewriter.create<linalg::BatchMatmulOp>(
-        op.getLoc(), TypeRange{},
-        ValueRange{inputBuffers[0], inputBuffers[1], resultBuffers[0]});
+    rewriter.create<linalg::BatchMatmulOp>(op.getLoc(), inputBuffers,
+                                           resultBuffers);
     return success();
   }
 };
diff --git a/iree/compiler/Conversion/HLOToLinalg/test/dot.mlir b/iree/compiler/Conversion/HLOToLinalg/test/dot.mlir
index 142fce0..021fa90 100644
--- a/iree/compiler/Conversion/HLOToLinalg/test/dot.mlir
+++ b/iree/compiler/Conversion/HLOToLinalg/test/dot.mlir
@@ -6,7 +6,7 @@
     %c0 = constant 0 : index
     %0 = hal.interface.load.tensor @legacy_io::@arg0, offset = %c0 : tensor<2x3xf32>
     %1 = hal.interface.load.tensor @legacy_io::@arg1, offset = %c0 : tensor<3x2xf32>
-    // CHECK: linalg.matmul %{{.+}}, %{{.+}}, %{{.+}} : (memref<2x3xf32>, memref<3x2xf32>, memref<2x2xf32>)
+    // CHECK: linalg.matmul ins(%{{.+}}, %{{.+}} : memref<2x3xf32>, memref<3x2xf32>) outs(%{{.+}} : memref<2x2xf32>)
     %result = "mhlo.dot"(%0, %1) : (tensor<2x3xf32>, tensor<3x2xf32>) -> tensor<2x2xf32>
     hal.interface.store.tensor %result, @legacy_io::@ret0, offset = %c0 : tensor<2x2xf32>
     return
diff --git a/iree/compiler/Conversion/HLOToLinalg/test/dot_general.mlir b/iree/compiler/Conversion/HLOToLinalg/test/dot_general.mlir
index b8ea260..95499bc 100644
--- a/iree/compiler/Conversion/HLOToLinalg/test/dot_general.mlir
+++ b/iree/compiler/Conversion/HLOToLinalg/test/dot_general.mlir
@@ -6,7 +6,7 @@
     %c0 = constant 0 : index
     %0 = hal.interface.load.tensor @legacy_io::@arg0, offset = %c0 : tensor<2x2x3xf32>
     %1 = hal.interface.load.tensor @legacy_io::@arg1, offset = %c0 : tensor<2x3x4xf32>
-    // CHECK: linalg.batch_matmul %{{.+}}, %{{.+}}, %{{.+}} : (memref<2x2x3xf32>, memref<2x3x4xf32>, memref<2x2x4xf32>)
+    // CHECK: linalg.batch_matmul ins(%{{.+}}, %{{.+}} : memref<2x2x3xf32>, memref<2x3x4xf32>) outs(%{{.+}} : memref<2x2x4xf32>)
     %result ="mhlo.dot_general"(%0, %1) {
         dot_dimension_numbers = {
             lhs_batching_dimensions = dense<0> : tensor<1xi64>,
diff --git a/iree/compiler/Conversion/LinalgToLLVM/test/matmul_vectorization.mlir b/iree/compiler/Conversion/LinalgToLLVM/test/matmul_vectorization.mlir
index 123a1e0..8b9e735 100644
--- a/iree/compiler/Conversion/LinalgToLLVM/test/matmul_vectorization.mlir
+++ b/iree/compiler/Conversion/LinalgToLLVM/test/matmul_vectorization.mlir
@@ -1,7 +1,7 @@
 // RUN: iree-opt --iree-codegen-linalg-to-llvm-matmul-vectorization-pass -split-input-file %s | IreeFileCheck %s
 
 func @matmul_128x128x128(%arg0 : memref<128x128xf32>, %arg1: memref<128x128xf32>, %arg2: memref<128x128xf32>) {
-    linalg.matmul %arg0, %arg1, %arg2 : (memref<128x128xf32>, memref<128x128xf32>, memref<128x128xf32>)
+    linalg.matmul ins(%arg0, %arg1 : memref<128x128xf32>, memref<128x128xf32>) outs(%arg2 : memref<128x128xf32>)
     return
 }
 // CHECK-LABEL: func @matmul_128x128x128
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/test/convert_to_gpu.mlir b/iree/compiler/Conversion/LinalgToSPIRV/test/convert_to_gpu.mlir
index daa92e2..2437491 100644
--- a/iree/compiler/Conversion/LinalgToSPIRV/test/convert_to_gpu.mlir
+++ b/iree/compiler/Conversion/LinalgToSPIRV/test/convert_to_gpu.mlir
@@ -275,8 +275,9 @@
       %16 = dim %arg2, %c1 : memref<?x?xf32>
       %17 = affine.min #map1()[%1, %16]
       %18 = subview %arg2[%3, %10] [%15, %17] [1, 1]  : memref<?x?xf32> to memref<?x?xf32, #map3>
-      linalg.matmul  %7, %13, %18 {__internal_linalg_transform__ = "workgroup_numprocs_ge_numiters"}
-        : (memref<?x?xf32, #map3>, memref<?x?xf32, #map3>, memref<?x?xf32, #map3>)
+      linalg.matmul {__internal_linalg_transform__ = "workgroup_numprocs_ge_numiters"}
+        ins(%7, %13 : memref<?x?xf32, #map3>, memref<?x?xf32, #map3>)
+       outs(%18 : memref<?x?xf32, #map3>)
     }
     return
   }
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/test/linalg_tile_and_fuse.mlir b/iree/compiler/Conversion/LinalgToSPIRV/test/linalg_tile_and_fuse.mlir
index f6604bf..9a2602c 100644
--- a/iree/compiler/Conversion/LinalgToSPIRV/test/linalg_tile_and_fuse.mlir
+++ b/iree/compiler/Conversion/LinalgToSPIRV/test/linalg_tile_and_fuse.mlir
@@ -99,8 +99,8 @@
       {binding = @legacy_io::@arg1, operand_result_index = 1 : i32} : memref<?x?xf32>
     %2 = iree.placeholder for "interace buffer"
       {binding = @legacy_io::@ret0, operand_result_index = 2 : i32} : memref<?x?xf32>
-    linalg.matmul %0, %1, %2 :
-      (memref<?x?xf32>, memref<?x?xf32>, memref<?x?xf32>)
+    linalg.matmul ins(%0, %1 : memref<?x?xf32>, memref<?x?xf32>)
+                 outs(%2 : memref<?x?xf32>)
     return
   }
   func @matmul__num_workgroups__
@@ -135,7 +135,8 @@
 //       CHECK:   %[[VIEW2:.+]] = subview %[[RET0]][%[[LBY_2]], %[[LBX_2]]]
 //       CHECK:   linalg.matmul
 //  CHECK-SAME:     "workgroup_numprocs_ge_numiters"
-//  CHECK-SAME:     %[[VIEW0]], %[[VIEW1]], %[[VIEW2]]
+//  CHECK-SAME:     ins(%[[VIEW0]], %[[VIEW1]]
+//  CHECK-SAME:    outs(%[[VIEW2]]
 //       CHECK: func @[[NUM_WORKGROUPS_FN]]
 //   CHECK-DAG:   %[[C8:.+]] = constant 8 : index
 //   CHECK-DAG:   %[[C7:.+]] = constant 7 : index
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/test/matmul_subgroup.mlir b/iree/compiler/Conversion/LinalgToSPIRV/test/matmul_subgroup.mlir
index 8421f81..31b3cea 100644
--- a/iree/compiler/Conversion/LinalgToSPIRV/test/matmul_subgroup.mlir
+++ b/iree/compiler/Conversion/LinalgToSPIRV/test/matmul_subgroup.mlir
@@ -15,8 +15,8 @@
       {binding = @legacy_io::@arg1, operand_result_num = 1 : i32} : memref<64x256xf16>
     %ret0 = iree.placeholder for "interface buffer"
       {binding = @legacy_io::@ret0, operand_result_num = 2 : i32} : memref<128x256xf16>
-    linalg.matmul %arg0, %arg1, %ret0 :
-      (memref<128x64xf16>, memref<64x256xf16>, memref<128x256xf16>)
+    linalg.matmul ins(%arg0, %arg1 : memref<128x64xf16>, memref<64x256xf16>)
+                 outs(%ret0 : memref<128x256xf16>)
     return
   }
   func @matmul_static_shape__num_workgroups__
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/test/matmul_vectorization.mlir b/iree/compiler/Conversion/LinalgToSPIRV/test/matmul_vectorization.mlir
index c642f62..dd846ce 100644
--- a/iree/compiler/Conversion/LinalgToSPIRV/test/matmul_vectorization.mlir
+++ b/iree/compiler/Conversion/LinalgToSPIRV/test/matmul_vectorization.mlir
@@ -5,7 +5,7 @@
 // CHECK-LABEL: func @matmul_128x128x128
 // CHECK-SAME: (%[[ARG0:.+]]: memref<128x128xf32>, %[[ARG1:.+]]: memref<128x128xf32>, %[[ARG2:.+]]: memref<128x128xf32>)
 func @matmul_128x128x128(%arg0 : memref<128x128xf32>, %arg1: memref<128x128xf32>, %arg2: memref<128x128xf32>) {
-    linalg.matmul %arg0, %arg1, %arg2 : (memref<128x128xf32>, memref<128x128xf32>, memref<128x128xf32>)
+    linalg.matmul ins(%arg0, %arg1 : memref<128x128xf32>, memref<128x128xf32>) outs(%arg2 : memref<128x128xf32>)
     return
 }
 
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/test/pipeline_test.mlir b/iree/compiler/Conversion/LinalgToSPIRV/test/pipeline_test.mlir
index ddd2530..fd05ac6 100644
--- a/iree/compiler/Conversion/LinalgToSPIRV/test/pipeline_test.mlir
+++ b/iree/compiler/Conversion/LinalgToSPIRV/test/pipeline_test.mlir
@@ -15,8 +15,8 @@
       {binding = @legacy_io::@arg1, operand_result_num = 1} : memref<64x256xf16>
     %2 = iree.placeholder for "interface buffer"
       {binding = @legacy_io::@ret0, operand_result_num = 2} : memref<128x256xf16>
-    linalg.matmul %0, %1, %2 :
-      (memref<128x64xf16>, memref<64x256xf16>, memref<128x256xf16>)
+    linalg.matmul ins(%0, %1 : memref<128x64xf16>, memref<64x256xf16>)
+                 outs(%2 : memref<128x256xf16>)
     return
   }
   func @matmul_static_shape__num_workgroups__
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/test/workgroup_memory_promotion.mlir b/iree/compiler/Conversion/LinalgToSPIRV/test/workgroup_memory_promotion.mlir
index 5b1d440..993b491 100644
--- a/iree/compiler/Conversion/LinalgToSPIRV/test/workgroup_memory_promotion.mlir
+++ b/iree/compiler/Conversion/LinalgToSPIRV/test/workgroup_memory_promotion.mlir
@@ -12,7 +12,8 @@
       {binding = @legacy_io::@arg1, operand_result_index = 1 : i32} : memref<?x?xf32>
     %2 = iree.placeholder for "interace buffer"
       {binding = @legacy_io::@ret0, operand_result_index = 2 : i32} : memref<?x?xf32>
-    linalg.matmul %0, %1, %2 : (memref<?x?xf32>, memref<?x?xf32>, memref<?x?xf32>)
+    linalg.matmul ins(%0, %1 : memref<?x?xf32>, memref<?x?xf32>)
+                 outs(%2 : memref<?x?xf32>)
     return
   }
   func @matmul_tile__num_workgroups__
@@ -43,7 +44,8 @@
 //  CHECK-SAME:       "copy_to_workgroup_memory"
 //       CHECK:     linalg.matmul
 //  CHECK-SAME:       "workgroup_memory_numprocs_ge_numiters"
-//  CHECK-SAME:       %[[SUBVIEW1]], %[[SUBVIEW2]], %[[RET0SV]]
+//  CHECK-SAME:       ins(%[[SUBVIEW1]], %[[SUBVIEW2]]
+//  CHECK-SAME:      outs(%[[RET0SV]]
 //   CHECK-DAG:     dealloc %[[ALLOC1]] : memref<8x32xf32, 3>
 //   CHECK-DAG:     dealloc %[[ALLOC2]] : memref<32x16xf32, 3>