Merge google -> main (#7429)
* 5747b0e Synchronize submodules with LLVM at llvm/llvm-project@94213bc
* 043f2b5 Integrate LLVM at llvm/llvm-project@94213bc
* 4b4f5a0 Merge pull request #7426 from google:main-to-google (Merge main -> google)
* 4ef5eb9 Merge pull request #7416 from google:main-to-google (Merge main -> google)
* c4dbdf2 Integrate LLVM at llvm/llvm-project@7bbd7e9
* 35c0f05 Integrate LLVM at llvm/llvm-project@76db6d8
* 3f9a6d2 Integrate LLVM at llvm/llvm-project@6b6564f
* 7b8788b Synchronize submodules with LLVM at llvm/llvm-project@57e0081
* 9f3fbbb Integrate LLVM at llvm/llvm-project@57e0081
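
For reviewers: most of the C++ churn below is mechanical fallout from an upstream MLIR API change picked up by this integrate. A MemRefType no longer carries its layout as a list of affine maps (`ArrayRef<AffineMap>`) but as a single layout attribute, and its memory space is an opaque `Attribute` rather than an `unsigned`. A minimal sketch of the new calls (illustration only, not code from this patch; `makeIdentityMemRef` is a made-up name):

    #include "mlir/IR/AffineMap.h"
    #include "mlir/IR/BuiltinTypes.h"

    // Old: MemRefType::get(shape, elemTy, ArrayRef<AffineMap>(), /*memorySpace=*/0);
    //      with the identity layout spelled as an empty map list, checked via
    //      type.getAffineMaps().empty().
    // New: a default AffineMap() selects the identity layout, a null Attribute
    //      the default memory space; the check is type.getLayout().isIdentity().
    static mlir::MemRefType makeIdentityMemRef(llvm::ArrayRef<int64_t> shape,
                                               mlir::Type elementType) {
      return mlir::MemRefType::get(shape, elementType, mlir::AffineMap(),
                                   /*memorySpace=*/mlir::Attribute());
    }
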
diff --git a/SUBMODULE_VERSIONS.txt b/SUBMODULE_VERSIONS.txt
index abe87ce..71c0afd 100644
--- a/SUBMODULE_VERSIONS.txt
+++ b/SUBMODULE_VERSIONS.txt
@@ -4,14 +4,14 @@
aa533abfd4232b01f9e57041d70114d5a77e6de0 third_party/googletest
88b845dee001723c4a0db1fe5477de735b6d3bb0 third_party/liburing
acd6f6f014c25e46363e718381e0b35205df2d83 third_party/libyaml
-b9cfa016daae725e4ed8173b431afb5e01cb80a6 third_party/llvm-project
-b13ef5a9a950d5ab0bf07b7cb8f6a84d17c63387 third_party/mlir-hlo
+94213bc7decb1d4c131fb6feb5101e859640478f third_party/llvm-project
+a387844caf52d7d5b985b653ce8f49c00f4d3a8e third_party/mlir-hlo
3f701faace7addc75d16dea8a6cd769fa5b3f260 third_party/musl
4c7697dbe973ed01ae6fbec37d186ebd05982e1f third_party/pybind11
2e1b5fb39ebc2ef4cb77005f8267e4f3a6241ba1 third_party/spirv_cross
f5417a4b6633c3217c9a1bc2f0c70b1454975ba7 third_party/spirv_headers
b42009b3b9d4ca35bc703f5310eedc74f584be58 third_party/stblib
-c0f1c92b7fdecca04c35d6b13604d33ea7d548ad third_party/tensorflow
+ff7b66c3103ccdee55badaa825d282355b0c860e third_party/tensorflow
058e89011fceca912d43638ebb6b85992147fcfe third_party/tracy
9d10a96f2d57c3c37e167f2e73c9a31ac2e51fa5 third_party/vulkan_headers
8d4a9e9174a9c6ad6a3a3ae981b915ef13fc12c4 third_party/vulkan_memory_allocator
diff --git a/iree/compiler/Codegen/Common/FlattenMemRefSubspanPass.cpp b/iree/compiler/Codegen/Common/FlattenMemRefSubspanPass.cpp
index d5408ee..ff2375a 100644
--- a/iree/compiler/Codegen/Common/FlattenMemRefSubspanPass.cpp
+++ b/iree/compiler/Codegen/Common/FlattenMemRefSubspanPass.cpp
@@ -90,7 +90,7 @@
// byte buffer with potentially unknown total size, as transformation
// passes can concatenate buffers, etc.
return MemRefType::get(ShapedType::kDynamicSize, type.getElementType(),
- ArrayRef<AffineMap>(), type.getMemorySpace());
+ AffineMap(), type.getMemorySpace());
});
}
};
@@ -135,7 +135,7 @@
AllocOpTy allocOp, ArrayRef<Value> operands,
ConversionPatternRewriter &rewriter) const override {
auto oldType = allocOp.getType().template dyn_cast<MemRefType>();
- if (!oldType || !oldType.getAffineMaps().empty()) return failure();
+ if (!oldType || !oldType.getLayout().isIdentity()) return failure();
Value dynamicDim = createTotalElementCountValue(
oldType, allocOp.getDynamicSizes(), allocOp.getLoc(), rewriter);
@@ -166,13 +166,13 @@
memref::GlobalOp globalOp, ArrayRef<Value> operands,
ConversionPatternRewriter &rewriter) const override {
auto oldType = globalOp.type().dyn_cast<MemRefType>();
- if (!oldType || !oldType.getAffineMaps().empty()) return failure();
+ if (!oldType || !oldType.getLayout().isIdentity()) return failure();
auto tensorType = RankedTensorType::get({oldType.getNumElements()},
oldType.getElementType());
auto memRefType =
MemRefType::get({oldType.getNumElements()}, oldType.getElementType(),
- {}, oldType.getMemorySpace());
+ AffineMap(), oldType.getMemorySpace());
auto newInitialValue =
flattenAttribute(globalOp.initial_valueAttr(), tensorType);
rewriter.replaceOpWithNewOp<memref::GlobalOp>(
@@ -192,7 +192,7 @@
memref::GetGlobalOp getOp, ArrayRef<Value> operands,
ConversionPatternRewriter &rewriter) const override {
auto oldType = getOp.getType().dyn_cast<MemRefType>();
- if (!oldType || !oldType.getAffineMaps().empty()) return failure();
+ if (!oldType || !oldType.getLayout().isIdentity()) return failure();
auto globalOp = dyn_cast_or_null<memref::GlobalOp>(
SymbolTable::lookupNearestSymbolFrom(getOp, getOp.nameAttr()));
@@ -218,7 +218,7 @@
auto oldType = subspanOp.getType().dyn_cast<MemRefType>();
// IREE subspan ops only use memref types with the default identity
// layout maps.
- if (!oldType || !oldType.getAffineMaps().empty()) return failure();
+ if (!oldType || !oldType.getLayout().isIdentity()) return failure();
Value dynamicDim = createTotalElementCountValue(
oldType, subspanOp.dynamic_dims(), subspanOp.getLoc(), rewriter);
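
Reviewer note on the guard rewrite above: `getAffineMaps().empty()` and `getLayout().isIdentity()` are not textually the same predicate, but they should coincide for the memref types these patterns see, since an empty map list used to denote the default row-major layout and the new layout interface reports that same default as identity. A hypothetical sanity check (assumes an MLIRContext; `defaultLayoutIsIdentity` is not part of this patch):

    #include "mlir/IR/BuiltinTypes.h"
    #include "mlir/IR/MLIRContext.h"

    // A memref built with no explicit layout reports an identity layout under
    // the new interface, matching the old "empty affine map list" convention.
    static bool defaultLayoutIsIdentity(mlir::MLIRContext *ctx) {
      auto f32 = mlir::FloatType::getF32(ctx);
      auto type = mlir::MemRefType::get({4, 8}, f32);  // no layout given
      return type.getLayout().isIdentity();            // expected: true
    }
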
diff --git a/iree/compiler/Codegen/Common/LinalgBufferizePass.cpp b/iree/compiler/Codegen/Common/LinalgBufferizePass.cpp
index abc8454..aba1332 100644
--- a/iree/compiler/Codegen/Common/LinalgBufferizePass.cpp
+++ b/iree/compiler/Codegen/Common/LinalgBufferizePass.cpp
@@ -767,8 +767,8 @@
template <typename TensorType>
static MemRefType getMemrefTypeForTensor(TensorType tensorType,
- ArrayRef<AffineMap> layout = {},
- unsigned memorySpace = 0) {
+ Attribute layout = Attribute(),
+ Attribute memorySpace = nullptr) {
return MemRefType::get(tensorType.getShape(), tensorType.getElementType(),
layout, memorySpace);
}
@@ -907,7 +907,7 @@
Value resultBuffer) {
auto memrefType = getMemrefTypeForTensor(
reshapeOp.getSrcType(), {},
- resultBuffer.getType().cast<MemRefType>().getMemorySpaceAsInt());
+ resultBuffer.getType().cast<MemRefType>().getMemorySpace());
using ReverseReshapeOpTy = typename std::conditional<
std::is_same<TensorReshapeOpTy, linalg::TensorCollapseShapeOp>::value,
memref::ExpandShapeOp, memref::CollapseShapeOp>::type;
@@ -921,8 +921,8 @@
Value resultBuffer) {
auto memrefType = getMemrefTypeForTensor(
castOp.source().getType().cast<RankedTensorType>(),
- resultBuffer.getType().cast<MemRefType>().getAffineMaps(),
- resultBuffer.getType().cast<MemRefType>().getMemorySpaceAsInt());
+ resultBuffer.getType().cast<MemRefType>().getLayout(),
+ resultBuffer.getType().cast<MemRefType>().getMemorySpace());
return b.create<memref::CastOp>(castOp.getLoc(), memrefType, resultBuffer);
}
@@ -1027,7 +1027,7 @@
Value resultTensor = castOp.dest();
auto outputType = getMemrefTypeForTensor(
resultTensor.getType().cast<RankedTensorType>(), {},
- inputBuffer.getType().cast<MemRefType>().getMemorySpaceAsInt());
+ inputBuffer.getType().cast<MemRefType>().getMemorySpace());
return b.create<memref::CastOp>(castOp.getLoc(), outputType, inputBuffer);
}
@@ -1058,7 +1058,7 @@
// Create the reshape op.
MemRefType inputBufferType = inputBuffer.getType().cast<MemRefType>();
auto reshapeResultType = getMemrefTypeForTensor(
- resultTensorType, {}, inputBufferType.getMemorySpaceAsInt());
+ resultTensorType, {}, inputBufferType.getMemorySpace());
using ReshapeOpTy = typename std::conditional<
std::is_same<TensorReshapeOpTy, linalg::TensorCollapseShapeOp>::value,
memref::CollapseShapeOp, memref::ExpandShapeOp>::type;
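
Same migration in this file, with one extra wrinkle: the memory space is now threaded through as an opaque `Attribute` (`getMemorySpace()`) instead of an `unsigned` (`getMemorySpaceAsInt()`), so non-integer memory space attributes survive bufferization unmangled. A standalone, non-template rendering of the updated helper (illustration only; the real one is templated over the tensor type):

    #include "mlir/IR/BuiltinTypes.h"

    // Layout and memory space are both attributes now; default-constructed
    // values mean "identity layout" and "default memory space".
    static mlir::MemRefType
    getMemrefTypeForTensorSketch(mlir::RankedTensorType tensorType,
                                 mlir::Attribute layout = {},
                                 mlir::Attribute memorySpace = {}) {
      return mlir::MemRefType::get(tensorType.getShape(),
                                   tensorType.getElementType(), layout,
                                   memorySpace);
    }
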
diff --git a/iree/compiler/Codegen/LLVMCPU/LLVMCPUVectorization.cpp b/iree/compiler/Codegen/LLVMCPU/LLVMCPUVectorization.cpp
index e0df89c..2fc0185 100644
--- a/iree/compiler/Codegen/LLVMCPU/LLVMCPUVectorization.cpp
+++ b/iree/compiler/Codegen/LLVMCPU/LLVMCPUVectorization.cpp
@@ -130,7 +130,7 @@
}));
if (llvm::any_of(shape, [](int64_t v) { return v == -1; })) return {};
MemRefType allocType =
- MemRefType::get(shape, subview.getType().getElementType(), {});
+ MemRefType::get(shape, subview.getType().getElementType(), AffineMap());
Value buffer = b.create<memref::AllocaOp>(subview.getLoc(), allocType);
return buffer;
}
diff --git a/iree/compiler/Codegen/SPIRV/SPIRVVectorToCooperativeOps.cpp b/iree/compiler/Codegen/SPIRV/SPIRVVectorToCooperativeOps.cpp
index 81a9ca0..470170e 100644
--- a/iree/compiler/Codegen/SPIRV/SPIRVVectorToCooperativeOps.cpp
+++ b/iree/compiler/Codegen/SPIRV/SPIRVVectorToCooperativeOps.cpp
@@ -232,10 +232,13 @@
typeConverter.addConversion(
[&typeConverter](MemRefType type) -> Optional<Type> {
if (!type.hasStaticShape()) return llvm::None;
- auto flattenType =
+ // In IREE, all MemRefs originate from subspan ops, which should
+ // have an identity layout.
+ if (!type.getLayout().isIdentity()) return llvm::None;
+ auto flattenedType =
MemRefType::get(ShapedType::kDynamicSize, type.getElementType(),
- type.getAffineMaps(), type.getMemorySpace());
- return typeConverter.convertType(flattenType);
+ AffineMap(), type.getMemorySpace());
+ return typeConverter.convertType(flattenedType);
});
// Add unrealized conversion cast ops to bridge type conversions: we are
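
Behavioral note on this hunk: the old code forwarded `type.getAffineMaps()` into the flattened type, silently carrying any non-identity layout along; the new code rejects such types up front, per the comment about subspan-originated memrefs. A hypothetical example of a type that now takes the early-out (`rejectedByConversion` is illustrative, not from this patch):

    #include "mlir/IR/AffineMap.h"
    #include "mlir/IR/BuiltinTypes.h"
    #include "mlir/IR/MLIRContext.h"

    // A transposed layout, (d0, d1) -> (d1, d0), is not an identity, so the
    // conversion above now returns llvm::None instead of flattening the type.
    static bool rejectedByConversion(mlir::MLIRContext *ctx) {
      auto transposed = mlir::AffineMap::getPermutationMap({1u, 0u}, ctx);
      auto type = mlir::MemRefType::get({4, 8}, mlir::FloatType::getF32(ctx),
                                        transposed);
      return !type.getLayout().isIdentity();  // true: hits the new early-out
    }
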
diff --git a/iree/compiler/Codegen/SPIRV/test/BUILD b/iree/compiler/Codegen/SPIRV/test/BUILD
index a2b5454..f3c95cb 100644
--- a/iree/compiler/Codegen/SPIRV/test/BUILD
+++ b/iree/compiler/Codegen/SPIRV/test/BUILD
@@ -47,7 +47,10 @@
"vectorize_load_store.mlir",
],
include = ["*.mlir"],
- exclude = ["promote_workgroup_memory.mlir"],
+ # TODO(b/203528778): re-enable
+ exclude = [
+ "promote_workgroup_memory.mlir",
+ ],
),
data = [
"//iree/tools:IreeFileCheck",
diff --git a/iree/tools/test/BUILD b/iree/tools/test/BUILD
index 7321e5f..41489e0 100644
--- a/iree/tools/test/BUILD
+++ b/iree/tools/test/BUILD
@@ -36,7 +36,9 @@
"//iree/tools:iree-run-module",
"//iree/tools:iree-translate",
],
- tags = ["hostonly"],
+ tags = [
+ "hostonly",
+ ],
)
iree_lit_test_suite(
@@ -47,5 +49,7 @@
"//iree/tools:iree-benchmark-module",
"//iree/tools:iree-translate",
],
- tags = ["hostonly"],
+ tags = [
+ "hostonly",
+ ],
)
diff --git a/third_party/llvm-project b/third_party/llvm-project
index b9cfa01..94213bc 160000
--- a/third_party/llvm-project
+++ b/third_party/llvm-project
@@ -1 +1 @@
-Subproject commit b9cfa016daae725e4ed8173b431afb5e01cb80a6
+Subproject commit 94213bc7decb1d4c131fb6feb5101e859640478f
diff --git a/third_party/mlir-hlo b/third_party/mlir-hlo
index b13ef5a..a387844 160000
--- a/third_party/mlir-hlo
+++ b/third_party/mlir-hlo
@@ -1 +1 @@
-Subproject commit b13ef5a9a950d5ab0bf07b7cb8f6a84d17c63387
+Subproject commit a387844caf52d7d5b985b653ce8f49c00f4d3a8e
diff --git a/third_party/tensorflow b/third_party/tensorflow
index c0f1c92..ff7b66c 160000
--- a/third_party/tensorflow
+++ b/third_party/tensorflow
@@ -1 +1 @@
-Subproject commit c0f1c92b7fdecca04c35d6b13604d33ea7d548ad
+Subproject commit ff7b66c3103ccdee55badaa825d282355b0c860e