Integrate LLVM at llvm/llvm-project@8825fec37e73
Updates LLVM usage to match
[8825fec37e73](https://github.com/llvm/llvm-project/commit/8825fec37e73)
Adapt to upstream subview / subtensor changes: the loop-range struct formerly nested as `SubViewOp::Range` is now the standalone `Range`, so the linalg distribution callbacks take `ArrayRef<Range>` instead.
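
For reference, a minimal standalone sketch of the signature migration. The MLIR types are stubbed out so the example compiles on its own; the stub `Value` and the `offset`/`size`/`stride` field names are assumptions for illustration, not the upstream definitions, and `std::vector` stands in for `ArrayRef`:

```c++
// Standalone sketch (not IREE code) of the callback-signature change in
// this integrate: the distribution callback now takes the top-level
// `Range` struct rather than the nested `SubViewOp::Range`.
#include <cstdio>
#include <functional>
#include <vector>

struct Value {};  // stand-in for mlir::Value (assumption)

// Upstream now defines the loop-range struct at namespace scope
// (previously nested inside SubViewOp). Field names assumed here.
struct Range {
  Value offset, size, stride;
};

// Shape of a distribution callback after this integrate
// (builder/location parameters omitted for brevity).
using ProcInfoFn = std::function<void(const std::vector<Range> &)>;

int main() {
  ProcInfoFn fn = [](const std::vector<Range> &parallelLoopRanges) {
    std::printf("distributing %zu parallel loops\n",
                parallelLoopRanges.size());
  };
  fn({Range{}, Range{}});  // e.g. the two parallel loops of a matmul
  return 0;
}
```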
PiperOrigin-RevId: 335023452
diff --git a/SUBMODULE_VERSIONS b/SUBMODULE_VERSIONS
index 4d9d076..71317b6 100644
--- a/SUBMODULE_VERSIONS
+++ b/SUBMODULE_VERSIONS
@@ -5,7 +5,7 @@
a5d9d0f7d368054fd1691aedf1db4116efcc233e third_party/flatbuffers
4fb0ff7069bd88ee85902f4d0bb62794e5f6d021 third_party/flatcc
f2fb48c3b3d79a75a88a99fba6576b25d42ec528 third_party/googletest
-bfd7ee92ccec2904d98b20b475f48addadc4ec5f third_party/llvm-project
+8825fec37e73eea1bc3e4f5c125e1fd02d002d6c third_party/llvm-project
17b12a4481daa150e2d1ea3ada086b551b856707 third_party/marl
08c41f61f4f5f17728c9e0f48640eaa32588b63b third_party/mlir-emitc
d8c7ee00a687ac369e62e2032514a93a9b413502 third_party/pybind11
diff --git a/experimental/ModelBuilder/test/BenchMatMulVectorGPU.cpp b/experimental/ModelBuilder/test/BenchMatMulVectorGPU.cpp
index c3aa9ad..07d720a 100644
--- a/experimental/ModelBuilder/test/BenchMatMulVectorGPU.cpp
+++ b/experimental/ModelBuilder/test/BenchMatMulVectorGPU.cpp
@@ -112,7 +112,7 @@
template <typename IdOp, typename NProcsOp>
static SmallVector<linalg::ProcInfo, 2> getGpuProcIds(
- OpBuilder &b, Location loc, ArrayRef<SubViewOp::Range> parallelLoopRanges) {
+ OpBuilder &b, Location loc, ArrayRef<Range> parallelLoopRanges) {
if (parallelLoopRanges.size() != 2)
llvm_unreachable("expected two parallel loops for matmul operation");
Type indexType = b.getIndexType();
@@ -128,7 +128,7 @@
constexpr int numSubgroupY = 2;
static SmallVector<linalg::ProcInfo, 2> getSubgroupIds(
- OpBuilder &b, Location loc, ArrayRef<SubViewOp::Range> parallelLoopRanges) {
+ OpBuilder &b, Location loc, ArrayRef<Range> parallelLoopRanges) {
if (parallelLoopRanges.size() != 2)
llvm_unreachable("expected two parallel loops for matmul operation");
Type indexType = b.getIndexType();
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/LinalgTileAndFusePass.cpp b/iree/compiler/Conversion/LinalgToSPIRV/LinalgTileAndFusePass.cpp
index 135961a..219104e 100644
--- a/iree/compiler/Conversion/LinalgToSPIRV/LinalgTileAndFusePass.cpp
+++ b/iree/compiler/Conversion/LinalgToSPIRV/LinalgTileAndFusePass.cpp
@@ -112,8 +112,7 @@
/// Distribution options for linalg.matmul when targeting workgroups.
static linalg::LinalgLoopDistributionOptions matmulDistributionOptions = {
- [](OpBuilder &builder, Location loc,
- ArrayRef<SubViewOp::Range> parallelLoopRanges) {
+ [](OpBuilder &builder, Location loc, ArrayRef<Range> parallelLoopRanges) {
return getGPUProcessorIdsAndCounts<gpu::BlockIdOp, gpu::GridDimOp>(
builder, loc, parallelLoopRanges.size());
},
@@ -176,8 +175,7 @@
/// Distribution options for targeting workgroups for convolution/pooling
/// operations.
static linalg::LinalgLoopDistributionOptions convPoolDistributionOptions = {
- [](OpBuilder &builder, Location loc,
- ArrayRef<SubViewOp::Range> parallelLoopRanges) {
+ [](OpBuilder &builder, Location loc, ArrayRef<Range> parallelLoopRanges) {
return getGPUProcessorIdsAndCounts<gpu::BlockIdOp, gpu::GridDimOp>(
builder, loc, parallelLoopRanges.size());
},
@@ -353,7 +351,7 @@
auto getSubgroupProcInfoFn =
[&launchConfig](OpBuilder &builder, Location loc,
- ArrayRef<SubViewOp::Range> parallelLoopRanges) {
+ ArrayRef<Range> parallelLoopRanges) {
ArrayRef<int64_t> numSubgroups =
launchConfig.getNumSubgroups().take_front(
parallelLoopRanges.size());