Integrate llvm-project and bump dependencies. (#10140)
* llvm-project: 619fd8c2ab505d8f79cbbbe3fd09b02f6640e1b1
* mlir-hlo: cb55a7168c1841d05287677746a39a5de7cb855f
* tensorflow: fc4021a8dd654606cd95e61a033691157853e122
Additional changes:
* rename member functions for tensor ops
* Remove reluN tosa tests
* carry patches for llvm and mhlo
diff --git a/compiler/src/iree/compiler/Codegen/Common/TileDispatchUsingInterface.cpp b/compiler/src/iree/compiler/Codegen/Common/TileDispatchUsingInterface.cpp
index fc28200..b89da7a 100644
--- a/compiler/src/iree/compiler/Codegen/Common/TileDispatchUsingInterface.cpp
+++ b/compiler/src/iree/compiler/Codegen/Common/TileDispatchUsingInterface.cpp
@@ -445,7 +445,7 @@
LogicalResult matchAndRewrite(tensor::ExtractSliceOp sliceOp,
PatternRewriter &rewriter) const override {
- OpResult producer = sliceOp.source().dyn_cast<OpResult>();
+ OpResult producer = sliceOp.getSource().dyn_cast<OpResult>();
if (!producer) {
return rewriter.notifyMatchFailure(sliceOp, "source uses bb arg");
}
@@ -473,7 +473,7 @@
LogicalResult matchAndRewrite(tensor::ExtractSliceOp sliceOp,
PatternRewriter &rewriter) const override {
auto loadOp =
- sliceOp.source().getDefiningOp<IREE::Flow::DispatchTensorLoadOp>();
+ sliceOp.getSource().getDefiningOp<IREE::Flow::DispatchTensorLoadOp>();
if (!loadOp) return failure();
SmallVector<OpFoldResult> combinedOffsets, combinedSizes, combinedStrides;
diff --git a/compiler/src/iree/compiler/InputConversion/MHLO/MHLOToLinalgOnTensors.cpp b/compiler/src/iree/compiler/InputConversion/MHLO/MHLOToLinalgOnTensors.cpp
index ccd7633..ec0aabd 100644
--- a/compiler/src/iree/compiler/InputConversion/MHLO/MHLOToLinalgOnTensors.cpp
+++ b/compiler/src/iree/compiler/InputConversion/MHLO/MHLOToLinalgOnTensors.cpp
@@ -461,7 +461,7 @@
RewritePatternSet patterns(&getContext());
MLIRContext *context = &getContext();
- auto typeConverter = mhlo::createHloToLinalgSignedIntegerConverter();
+ auto typeConverter = mhlo::createHloToLinalgTypeConverter();
typeConverter->addArgumentMaterialization(scalarToTensor);
// NOTE: not using corresponding setupMHLOToFlowPatterns because the entire
// MHLO dialects are marked illegal by this pass.
diff --git a/integrations/tensorflow/WORKSPACE b/integrations/tensorflow/WORKSPACE
index ab13b28..560384c 100644
--- a/integrations/tensorflow/WORKSPACE
+++ b/integrations/tensorflow/WORKSPACE
@@ -7,7 +7,7 @@
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
-TENSORFLOW_COMMIT = "1986967601040a368e44b79339567d49f72a368f"
+TENSORFLOW_COMMIT = "fc4021a8dd654606cd95e61a033691157853e122"
git_repository(
name = "org_tensorflow",
diff --git a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtOps.cpp b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtOps.cpp
index 5f7d24a..2f670cb 100644
--- a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtOps.cpp
+++ b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtOps.cpp
@@ -1486,7 +1486,7 @@
for (OpOperand *opOperand : op.getInputOperands()) {
auto tensorCastOp = opOperand->get().getDefiningOp<tensor::CastOp>();
newOperands.push_back(canFoldIntoConsumerOp(tensorCastOp)
- ? tensorCastOp.source()
+ ? tensorCastOp.getSource()
: opOperand->get());
}
// Init tensors may fold, in which case the resultType must also change.
diff --git a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Transforms/Fusion.cpp b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Transforms/Fusion.cpp
index c4860bc..9ab0798 100644
--- a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Transforms/Fusion.cpp
+++ b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Transforms/Fusion.cpp
@@ -78,7 +78,7 @@
.getDefiningOp<tensor::ExtractSliceOp>();
if (!sliceOp)
return failure();
- auto producerOp = sliceOp.source().getDefiningOp<TilingInterface>();
+ auto producerOp = sliceOp.getSource().getDefiningOp<TilingInterface>();
if (!producerOp || producerOp->getNumResults() != 1)
return failure();
diff --git a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Transforms/Tiling.cpp b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Transforms/Tiling.cpp
index ae42b30..0c71b15 100644
--- a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Transforms/Tiling.cpp
+++ b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Transforms/Tiling.cpp
@@ -192,7 +192,7 @@
/// tensor.extract_slice` to `tensor.extract_slice -> `TilingInterface`.
FailureOr<Operation *> SwapTilingInterfaceOp::returningMatchAndRewrite(
tensor::ExtractSliceOp sliceOp, PatternRewriter &rewriter) const {
- auto sourceOp = sliceOp.source().getDefiningOp<TilingInterface>();
+ auto sourceOp = sliceOp.getSource().getDefiningOp<TilingInterface>();
if (!sourceOp)
return failure();
SmallVector<Operation *> tiledOps = sourceOp.getTiledImplementation(
diff --git a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Transforms/Utils.cpp b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Transforms/Utils.cpp
index d76d4fa..4248ed2 100644
--- a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Transforms/Utils.cpp
+++ b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Transforms/Utils.cpp
@@ -81,7 +81,7 @@
OpBuilder &b, Location loc, tensor::ExtractSliceOp subsetExtractOp,
Value source, Value dest) {
return b.create<tensor::InsertSliceOp>(
- loc, subsetExtractOp.source().getType(), source, dest,
+ loc, subsetExtractOp.getSource().getType(), source, dest,
subsetExtractOp.offsets(), subsetExtractOp.sizes(),
subsetExtractOp.strides(), subsetExtractOp.static_offsets(),
subsetExtractOp.static_sizes(), subsetExtractOp.static_strides());
diff --git a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgTransform/IR/StructuredTransformOpsExt.cpp b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgTransform/IR/StructuredTransformOpsExt.cpp
index 50bce4a..ffc93c9 100644
--- a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgTransform/IR/StructuredTransformOpsExt.cpp
+++ b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgTransform/IR/StructuredTransformOpsExt.cpp
@@ -335,7 +335,7 @@
// mechanism of tracking op replacement at creation, or even different
// patterns that identify the "main" result of a transformation.
while (auto castOp = value.getDefiningOp<tensor::CastOp>())
- value = castOp.source();
+ value = castOp.getSource();
if (auto currentSourceOp = value.getDefiningOp<linalg::LinalgOp>()) {
if (!sourceOp || sourceOp == currentSourceOp) {
diff --git a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtOps.cpp b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtOps.cpp
index 25344d7..8be1db9 100644
--- a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtOps.cpp
+++ b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtOps.cpp
@@ -1487,7 +1487,7 @@
for (OpOperand *opOperand : op.getInputOperands()) {
auto tensorCastOp = opOperand->get().getDefiningOp<tensor::CastOp>();
newOperands.push_back(canFoldIntoConsumerOp(tensorCastOp)
- ? tensorCastOp.source()
+ ? tensorCastOp.getSource()
: opOperand->get());
}
// Init tensors may fold, in which case the resultType must also change.
diff --git a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Passes/Tiling.cpp b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Passes/Tiling.cpp
index f69bb7c..f0b23e1 100644
--- a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Passes/Tiling.cpp
+++ b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Passes/Tiling.cpp
@@ -362,8 +362,7 @@
buildFlowWorkgroupInfoOp<IREE::Input::DispatchWorkgroupIDOp>(
builder, dim),
buildFlowWorkgroupInfoOp<IREE::Input::DispatchWorkgroupCountOp>(
- builder, dim),
- linalg::DistributionMethod::Cyclic};
+ builder, dim)};
}
return procInfo;
}};
diff --git a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Transforms/Fusion.cpp b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Transforms/Fusion.cpp
index 7e05463..52efaa6 100644
--- a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Transforms/Fusion.cpp
+++ b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Transforms/Fusion.cpp
@@ -33,7 +33,7 @@
.getDefiningOp<tensor::ExtractSliceOp>();
if (!sliceOp)
return failure();
- auto producerOp = sliceOp.source().getDefiningOp<TilingInterface>();
+ auto producerOp = sliceOp.getSource().getDefiningOp<TilingInterface>();
if (!producerOp || producerOp->getNumResults() != 1)
return failure();
diff --git a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Transforms/Tiling.cpp b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Transforms/Tiling.cpp
index ae42b30..0c71b15 100644
--- a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Transforms/Tiling.cpp
+++ b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Transforms/Tiling.cpp
@@ -192,7 +192,7 @@
/// tensor.extract_slice` to `tensor.extract_slice -> `TilingInterface`.
FailureOr<Operation *> SwapTilingInterfaceOp::returningMatchAndRewrite(
tensor::ExtractSliceOp sliceOp, PatternRewriter &rewriter) const {
- auto sourceOp = sliceOp.source().getDefiningOp<TilingInterface>();
+ auto sourceOp = sliceOp.getSource().getDefiningOp<TilingInterface>();
if (!sourceOp)
return failure();
SmallVector<Operation *> tiledOps = sourceOp.getTiledImplementation(
diff --git a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Transforms/Utils.cpp b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Transforms/Utils.cpp
index d76d4fa..4248ed2 100644
--- a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Transforms/Utils.cpp
+++ b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Transforms/Utils.cpp
@@ -81,7 +81,7 @@
OpBuilder &b, Location loc, tensor::ExtractSliceOp subsetExtractOp,
Value source, Value dest) {
return b.create<tensor::InsertSliceOp>(
- loc, subsetExtractOp.source().getType(), source, dest,
+ loc, subsetExtractOp.getSource().getType(), source, dest,
subsetExtractOp.offsets(), subsetExtractOp.sizes(),
subsetExtractOp.strides(), subsetExtractOp.static_offsets(),
subsetExtractOp.static_sizes(), subsetExtractOp.static_strides());
diff --git a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgTransform/IR/StructuredTransformOpsExt.cpp b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgTransform/IR/StructuredTransformOpsExt.cpp
index 50bce4a..ffc93c9 100644
--- a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgTransform/IR/StructuredTransformOpsExt.cpp
+++ b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgTransform/IR/StructuredTransformOpsExt.cpp
@@ -335,7 +335,7 @@
// mechanism of tracking op replacement at creation, or even different
// patterns that identify the "main" result of a transformation.
while (auto castOp = value.getDefiningOp<tensor::CastOp>())
- value = castOp.source();
+ value = castOp.getSource();
if (auto currentSourceOp = value.getDefiningOp<linalg::LinalgOp>()) {
if (!sourceOp || sourceOp == currentSourceOp) {
diff --git a/tests/e2e/tosa_ops/BUILD b/tests/e2e/tosa_ops/BUILD
index 66791bb..2ee394f 100644
--- a/tests/e2e/tosa_ops/BUILD
+++ b/tests/e2e/tosa_ops/BUILD
@@ -50,7 +50,6 @@
"pad.mlir",
"reciprocal.mlir",
"reduce.mlir",
- "reluN.mlir",
"reshape.mlir",
"rsqrt.mlir",
"select.mlir",
@@ -103,7 +102,6 @@
"negate.mlir",
"pad.mlir",
"reciprocal.mlir",
- "reluN.mlir",
"reshape.mlir",
"rsqrt.mlir",
"select.mlir",
@@ -162,7 +160,6 @@
"pad.mlir",
"reciprocal.mlir",
"reduce.mlir",
- "reluN.mlir",
"reshape.mlir",
"rsqrt.mlir",
"select.mlir",
@@ -220,7 +217,6 @@
"pad.mlir",
"reciprocal.mlir",
"reduce.mlir",
- "reluN.mlir",
"reshape.mlir",
"rsqrt.mlir",
"select.mlir",
diff --git a/tests/e2e/tosa_ops/CMakeLists.txt b/tests/e2e/tosa_ops/CMakeLists.txt
index b4d9bcb..260da42 100644
--- a/tests/e2e/tosa_ops/CMakeLists.txt
+++ b/tests/e2e/tosa_ops/CMakeLists.txt
@@ -44,7 +44,6 @@
"pad.mlir"
"reciprocal.mlir"
"reduce.mlir"
- "reluN.mlir"
"reshape.mlir"
"rsqrt.mlir"
"select.mlir"
@@ -95,7 +94,6 @@
"negate.mlir"
"pad.mlir"
"reciprocal.mlir"
- "reluN.mlir"
"reshape.mlir"
"rsqrt.mlir"
"select.mlir"
@@ -147,7 +145,6 @@
"pad.mlir"
"reciprocal.mlir"
"reduce.mlir"
- "reluN.mlir"
"reshape.mlir"
"rsqrt.mlir"
"select.mlir"
@@ -200,7 +197,6 @@
"pad.mlir"
"reciprocal.mlir"
"reduce.mlir"
- "reluN.mlir"
"reshape.mlir"
"rsqrt.mlir"
"select.mlir"
diff --git a/tests/e2e/tosa_ops/reluN.mlir b/tests/e2e/tosa_ops/reluN.mlir
deleted file mode 100644
index d6a89d9..0000000
--- a/tests/e2e/tosa_ops/reluN.mlir
+++ /dev/null
@@ -1,13 +0,0 @@
-func.func @tensor_float() {
- %0 = util.unfoldable_constant dense<[1.0, -1.0, 3.0, 5.0]> : tensor<4xf32>
- %result = "tosa.reluN"(%0) {max_fp = 4.0 : f32, max_int = 4 : i64} : (tensor<4xf32>) -> tensor<4xf32>
- check.expect_almost_eq_const(%result, dense<[1.0, 0.0, 3.0, 4.0]> : tensor<4xf32>) : tensor<4xf32>
- return
-}
-
-func.func @tensor_int() {
- %0 = util.unfoldable_constant dense<[1, -1, 3, 5]> : tensor<4xi32>
- %result = "tosa.reluN"(%0) {max_fp = 4.0 : f32, max_int = 4 : i64} : (tensor<4xi32>) -> tensor<4xi32>
- check.expect_eq_const(%result, dense<[1, 0, 3, 4]> : tensor<4xi32>) : tensor<4xi32>
- return
-}
diff --git a/third_party/llvm-project b/third_party/llvm-project
index 8741b1a..fddf9c4 160000
--- a/third_party/llvm-project
+++ b/third_party/llvm-project
@@ -1 +1 @@
-Subproject commit 8741b1a9c42b72e7758c272c94cde0e6f72c7eb3
+Subproject commit fddf9c42793208ebc6b25b4a079d84aa1d5601e7
diff --git a/third_party/mlir-hlo b/third_party/mlir-hlo
index c7e9ad2..dadfe35 160000
--- a/third_party/mlir-hlo
+++ b/third_party/mlir-hlo
@@ -1 +1 @@
-Subproject commit c7e9ad27d419231db90e4ef9bbbe989ccf5c5538
+Subproject commit dadfe3574b68225ae21710393248282a0a4c8c34
diff --git a/third_party/torch-mlir-dialects/lib/Dialect/TMTensor/IR/TMTensorOps.cpp b/third_party/torch-mlir-dialects/lib/Dialect/TMTensor/IR/TMTensorOps.cpp
index c852ab2..b224491 100644
--- a/third_party/torch-mlir-dialects/lib/Dialect/TMTensor/IR/TMTensorOps.cpp
+++ b/third_party/torch-mlir-dialects/lib/Dialect/TMTensor/IR/TMTensorOps.cpp
@@ -478,7 +478,7 @@
for (OpOperand *opOperand : op.getInputOperands()) {
auto tensorCastOp = opOperand->get().getDefiningOp<tensor::CastOp>();
newOperands.push_back(canFoldIntoConsumerOp(tensorCastOp)
- ? tensorCastOp.source()
+ ? tensorCastOp.getSource()
: opOperand->get());
}
// Init tensors may fold, in which case the resultType must also change.