Bump LLVM to llvm/llvm-project@6b7afaa9db8f (#18197)

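This pulls in upstream MLIR API updates that the IREE codegen code and the
Flow mesh tests have to follow:

* vector::MultiDimReductionOp::getReductionDims() now returns
  ArrayRef<int64_t> instead of an ArrayAttr of IntegerAttr, so the
  getAsRange<IntegerAttr>()/getInt() unwrapping is dropped.
* The mesh.all_reduce assembly no longer wraps the reduction kind in angle
  brackets (reduction = sum instead of reduction = <sum>), so the
  MeshToFlow tests are updated accordingly.

A minimal sketch of the new accessor shape (assumes MLIR headers are on the
include path; the helper name is illustrative only and is not part of this
patch):

  #include <optional>
  #include "llvm/ADT/ArrayRef.h"
  #include "mlir/Dialect/Vector/IR/VectorOps.h"

  static std::optional<int64_t>
  getSingleReductionDim(mlir::vector::MultiDimReductionOp op) {
    // Reduction dims are plain int64_t values now; no IntegerAttr
    // unwrapping via getAsRange()/getInt() is needed.
    llvm::ArrayRef<int64_t> dims = op.getReductionDims();
    if (dims.size() != 1)
      return std::nullopt;
    return dims.front();
  }
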
Signed-off-by: Stanley Winata <stanley@nod-labs.com>
diff --git a/compiler/src/iree/compiler/Codegen/Common/GPU/GPUDistributionPatterns.cpp b/compiler/src/iree/compiler/Codegen/Common/GPU/GPUDistributionPatterns.cpp
index 17d6535..ad0dbc2 100644
--- a/compiler/src/iree/compiler/Codegen/Common/GPU/GPUDistributionPatterns.cpp
+++ b/compiler/src/iree/compiler/Codegen/Common/GPU/GPUDistributionPatterns.cpp
@@ -423,8 +423,7 @@
   LogicalResult matchAndRewrite(vector::MultiDimReductionOp reductionOp,
                                 DistributionSignature &signature,
                                 PatternRewriter &rewriter) const override {
-    auto reductionDims = llvm::to_vector<4>(
-        reductionOp.getReductionDims().getAsRange<IntegerAttr>());
+    ArrayRef<int64_t> reductionDims = reductionOp.getReductionDims();
     // TODO: Add support for reductions along multiple dimensions.
     if (reductionDims.size() > 1)
       return failure();
@@ -461,7 +460,7 @@
     Value storeVec = rewriter.create<arith::ConstantOp>(
         loc, storeVectorType, rewriter.getZeroAttr(storeVectorType));
 
-    int reductionDim = reductionDims[0].getInt();
+    int reductionDim = reductionDims[0];
     int parallelDim = reductionDim ^ 1;
     if (!sourceLayout.getLane(reductionDim))
       return failure();
diff --git a/compiler/src/iree/compiler/Codegen/LLVMGPU/Utils/LLVMGPULayoutAnalysisAndDistribution.cpp b/compiler/src/iree/compiler/Codegen/LLVMGPU/Utils/LLVMGPULayoutAnalysisAndDistribution.cpp
index 9767b71..7123e4d 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMGPU/Utils/LLVMGPULayoutAnalysisAndDistribution.cpp
+++ b/compiler/src/iree/compiler/Codegen/LLVMGPU/Utils/LLVMGPULayoutAnalysisAndDistribution.cpp
@@ -297,8 +297,7 @@
   if (!layoutMap.count(reductionSrc))
     return;
   // Get the reduction dims
-  auto reductionDims =
-      llvm::to_vector(reductionOp.getReductionDims().getAsRange<IntegerAttr>());
+  ArrayRef<int64_t> reductionDims = reductionOp.getReductionDims();
   // Get the transpose permutation
   ArrayRef<int64_t> perm = transposeOp.getPermutation();
   // Don't support dim-1 broadcasted dims
@@ -325,8 +324,7 @@
     return;
   // Check that transpose(reductionDim) == broadcastDim
   // and that the shapes match
-  for (IntegerAttr dimAttr : reductionDims) {
-    int64_t dim = dimAttr.getInt();
+  for (int64_t dim : reductionDims) {
     int64_t transposedDim = perm[dim];
     if (!broadcastedDims.contains(transposedDim))
       return;
@@ -816,13 +814,12 @@
     return;
   Location loc = reductionOp.getLoc();
   Layout layout = layoutMap.at(source);
-  auto reductionDims =
-      llvm::to_vector(reductionOp.getReductionDims().getAsRange<IntegerAttr>());
+  ArrayRef<int64_t> reductionDims = reductionOp.getReductionDims();
   vector::CombiningKind combiningKind = reductionOp.getKind();
   // Only support reduction on one dimension
   if (reductionDims.size() > 1)
     return;
-  int reductionDim = reductionDims[0].getInt();
+  int reductionDim = reductionDims[0];
   std::array<int, 4> reductionOrder = layout.order[reductionDim];
   std::array<int, 4> parallelOrder = layout.order[!reductionDim];
   Value acc = reductionOp.getAcc();
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/SPIRVInitialVectorLowering.cpp b/compiler/src/iree/compiler/Codegen/SPIRV/SPIRVInitialVectorLowering.cpp
index c39d4f8..500edac 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/SPIRVInitialVectorLowering.cpp
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/SPIRVInitialVectorLowering.cpp
@@ -202,9 +202,9 @@
   // Unroll all reduction dimensions by size 1 for vector.multi_reduction.
   VectorType srcVectorType = op.getSourceVectorType();
   auto nativeSize = llvm::to_vector(srcVectorType.getShape());
-  auto dims = op.getReductionDims().getAsValueRange<IntegerAttr>();
-  for (const auto &dimAttr : dims) {
-    nativeSize[dimAttr.getZExtValue()] = 1;
+  ArrayRef<int64_t> dims = op.getReductionDims();
+  for (const int64_t dim : dims) {
+    nativeSize[dim] = 1;
   }
   return nativeSize;
 }
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Conversion/MeshToFlow/test/channel_creation.mlir b/compiler/src/iree/compiler/Dialect/Flow/Conversion/MeshToFlow/test/channel_creation.mlir
index 4894e06..a0f52b2 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Conversion/MeshToFlow/test/channel_creation.mlir
+++ b/compiler/src/iree/compiler/Dialect/Flow/Conversion/MeshToFlow/test/channel_creation.mlir
@@ -6,7 +6,7 @@
   // CHECK-NOT: util.global private @_mesh_mesh_1d_axes_0
   mesh.mesh @mesh_1d(shape = 2)
   util.func public @f(%arg0: tensor<1xi8>) -> tensor<1xi8> {
-    %0 = mesh.all_reduce %arg0 on @mesh_1d mesh_axes = [0] reduction = <sum> : tensor<1xi8> -> tensor<1xi8>
+    %0 = mesh.all_reduce %arg0 on @mesh_1d mesh_axes = [0] reduction = sum : tensor<1xi8> -> tensor<1xi8>
     util.return %0 : tensor<1xi8>
   }
 }
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Conversion/MeshToFlow/test/collectives.mlir b/compiler/src/iree/compiler/Dialect/Flow/Conversion/MeshToFlow/test/collectives.mlir
index 53471b2..f7aa51f 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Conversion/MeshToFlow/test/collectives.mlir
+++ b/compiler/src/iree/compiler/Dialect/Flow/Conversion/MeshToFlow/test/collectives.mlir
@@ -50,7 +50,7 @@
   // CHECK-DAG: %[[INITIAL_VAL:.+]] = tensor.empty() : tensor<1xi8>
   //      CHECK: %[[RES:.+]] = flow.collective.all_reduce minimum, ui8, %[[INITIAL_VAL]], %[[ARG]], %[[CHANNEL]]
   // CHECK-SAME: (tensor<1xi8>, tensor<1xi8>, !flow.channel) -> %[[INITIAL_VAL]] as tensor<1xi8>
-  %0 = mesh.all_reduce %arg on @mesh_2d mesh_axes = [1, 0] reduction = <min>
+  %0 = mesh.all_reduce %arg on @mesh_2d mesh_axes = [1, 0] reduction = min
       : tensor<1xi8> -> tensor<1xi8>
   //      CHECK: util.return %[[RES]] : tensor<1xi8>
   util.return %0 : tensor<1xi8>
diff --git a/third_party/llvm-project b/third_party/llvm-project
index 4369eee..6b7afaa 160000
--- a/third_party/llvm-project
+++ b/third_party/llvm-project
@@ -1 +1 @@
-Subproject commit 4369eee315d571f4f67f19d3fd05e42d921f26c9
+Subproject commit 6b7afaa9db8f904ebf0262774e38e54b36598782