Integrate LLVM at llvm/llvm-project@0fc1aa22ee6a

Updates LLVM usage to match
[0fc1aa22ee6a](https://github.com/llvm/llvm-project/commit/0fc1aa22ee6a)

PiperOrigin-RevId: 339239851
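The upstream change this integrate tracks (as reflected in the edits below) moves `applyPatternsAndFoldGreedily` into `mlir/Transforms/GreedyPatternRewriteDriver.h` and has the greedy driver and `applyPartialConversion`/`applyFullConversion` consume pattern lists by value; a list that is applied more than once is first frozen into a `FrozenRewritePatternList` and reused. A minimal sketch of the two call shapes used throughout this patch, with hypothetical pattern and function names:

```cpp
// Illustrative only; MyPattern/MyConversion and the wrapper functions are
// hypothetical, not part of this patch.
#include "mlir/IR/Function.h"
#include "mlir/IR/Module.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"  // now declares applyPatternsAndFoldGreedily

using namespace mlir;

// Single use: the pattern list is consumed, so it is moved into the driver.
void runGreedilyOnce(FuncOp funcOp) {
  OwningRewritePatternList patterns;
  // patterns.insert<MyPattern>(funcOp.getContext());  // hypothetical pattern
  applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
}

// Repeated use: freeze once, then hand the same frozen list to every application.
void convertAllFuncs(ModuleOp module, ConversionTarget &target) {
  OwningRewritePatternList patterns;
  // patterns.insert<MyConversion>(module.getContext());  // hypothetical pattern
  FrozenRewritePatternList frozenPatterns(std::move(patterns));
  for (FuncOp fn : module.getOps<FuncOp>())
    if (failed(applyFullConversion(fn, target, frozenPatterns)))
      return;  // a real pass would call signalPassFailure() here
}
```

This matches how the loops in ConvertToGPUPass.cpp, ConvertToSPIRVPass.cpp, and LegalizeNumWorkgroupsFnPass.cpp below construct the frozen list once outside the loop instead of passing the owning list on each iteration.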
diff --git a/SUBMODULE_VERSIONS b/SUBMODULE_VERSIONS
index d146ee4..9ca3f02 100644
--- a/SUBMODULE_VERSIONS
+++ b/SUBMODULE_VERSIONS
@@ -5,7 +5,7 @@
a5d9d0f7d368054fd1691aedf1db4116efcc233e third_party/flatbuffers
4fb0ff7069bd88ee85902f4d0bb62794e5f6d021 third_party/flatcc
f2fb48c3b3d79a75a88a99fba6576b25d42ec528 third_party/googletest
-26750a1264b3df114a1efae7cde6f0784206b2ce third_party/llvm-project
+0fc1aa22ee6ac337a5d51fa5666c9cd61da61b07 third_party/llvm-project
17b12a4481daa150e2d1ea3ada086b551b856707 third_party/marl
d2cdb70e038370b5e28f353fe98ccd70af1cbc25 third_party/mlir-emitc
d8c7ee00a687ac369e62e2032514a93a9b413502 third_party/pybind11
diff --git a/experimental/ModelBuilder/ModelRunner.cpp b/experimental/ModelBuilder/ModelRunner.cpp
index f0845fc..d7a7e49 100644
--- a/experimental/ModelBuilder/ModelRunner.cpp
+++ b/experimental/ModelBuilder/ModelRunner.cpp
@@ -32,6 +32,7 @@
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Target/LLVMIR.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/Passes.h"
static llvm::cl::opt<bool> mlirDebug(
@@ -63,7 +64,7 @@
mlir::vector::populateVectorContractLoweringPatterns(
patterns, module->getContext(),
compilationOptions.vectorTransformsOptions);
- mlir::applyPatternsAndFoldGreedily(*module, patterns);
+ mlir::applyPatternsAndFoldGreedily(*module, std::move(patterns));
}
runLoweringPass(compilationOptions.loweringPasses
? compilationOptions.loweringPasses
diff --git a/integrations/tensorflow/compiler/LegalizeTF.cpp b/integrations/tensorflow/compiler/LegalizeTF.cpp
index 9531521..e93c85c 100644
--- a/integrations/tensorflow/compiler/LegalizeTF.cpp
+++ b/integrations/tensorflow/compiler/LegalizeTF.cpp
@@ -16,6 +16,7 @@
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/mhlo/IR/chlo_ops.h"
#include "tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/mhlo/IR/hlo_ops.h"
#include "tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/mhlo/transforms/rewriters.h"
@@ -81,16 +82,20 @@
DenseSet<Operation *> prevUnconvertedOps;
DenseSet<Operation *> unconvertedOps;
+ FrozenRewritePatternList frozenPatterns(std::move(patterns));
+ FrozenRewritePatternList frozenCanonicalizePatterns(
+ std::move(canonicalizePatterns));
while (true) {
- if (failed(
- applyPartialConversion(op, target, patterns, &unconvertedOps))) {
+ if (failed(applyPartialConversion(op, target, frozenPatterns,
+ &unconvertedOps))) {
return signalPassFailure();
}
if (prevUnconvertedOps == unconvertedOps) break;
prevUnconvertedOps = std::move(unconvertedOps);
- if (failed(applyPatternsAndFoldGreedily(op, canonicalizePatterns))) {
+ if (failed(
+ applyPatternsAndFoldGreedily(op, frozenCanonicalizePatterns))) {
return signalPassFailure();
}
}
diff --git a/integrations/tensorflow/compiler/dialect/tf_strings/conversion/convert_tf_to_tf_strings.cc b/integrations/tensorflow/compiler/dialect/tf_strings/conversion/convert_tf_to_tf_strings.cc
index d5c3961..c893721 100644
--- a/integrations/tensorflow/compiler/dialect/tf_strings/conversion/convert_tf_to_tf_strings.cc
+++ b/integrations/tensorflow/compiler/dialect/tf_strings/conversion/convert_tf_to_tf_strings.cc
@@ -133,8 +133,8 @@
populateCallOpTypeConversionPattern(patterns, &getContext(), typeConverter);
populateTFToTFStringsPatterns(&getContext(), patterns);
- auto result =
- applyPartialConversion(module.getOperation(), target, patterns);
+ auto result = applyPartialConversion(module.getOperation(), target,
+ std::move(patterns));
// Partial conversion doesn't include return types. Update in a separate
// walk.
diff --git a/integrations/tensorflow/compiler/dialect/tf_tensorlist/conversion/convert_tf_to_tf_tensorlist.cc b/integrations/tensorflow/compiler/dialect/tf_tensorlist/conversion/convert_tf_to_tf_tensorlist.cc
index 17d2230..71e42f6 100644
--- a/integrations/tensorflow/compiler/dialect/tf_tensorlist/conversion/convert_tf_to_tf_tensorlist.cc
+++ b/integrations/tensorflow/compiler/dialect/tf_tensorlist/conversion/convert_tf_to_tf_tensorlist.cc
@@ -97,7 +97,7 @@
target.addIllegalOp<TF::TensorListConcatV2Op>();
target.addIllegalOp<TF::TensorListStackOp>();
- if (failed(applyPartialConversion(func, target, patterns))) {
+ if (failed(applyPartialConversion(func, target, std::move(patterns)))) {
func.emitError() << "unable to lower to tf_tensorlist dialect";
return signalPassFailure();
}
diff --git a/iree/compiler/Conversion/CodegenUtils/MatmulCodegenStrategy.cpp b/iree/compiler/Conversion/CodegenUtils/MatmulCodegenStrategy.cpp
index 7cb58c7..f06fb14 100644
--- a/iree/compiler/Conversion/CodegenUtils/MatmulCodegenStrategy.cpp
+++ b/iree/compiler/Conversion/CodegenUtils/MatmulCodegenStrategy.cpp
@@ -49,6 +49,7 @@
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/LoopUtils.h"
#include "mlir/Transforms/Passes.h"
@@ -212,7 +213,7 @@
// Emplace patterns one at a time while also maintaining a simple chained
// state transition.
unsigned stepCount = 0;
- SmallVector<OwningRewritePatternList, 4> stage1Patterns;
+ SmallVector<FrozenRewritePatternList, 4> stage1Patterns;
auto zeroState = Identifier::get(std::to_string(stepCount), context);
auto currentState = zeroState;
for (auto &t : transformationSequence) {
@@ -235,7 +236,7 @@
promoteSingleIterationLoops(cast<FuncOp>(op));
return success();
};
- linalg::applyStagedPatterns(func, stage1Patterns, stage2Patterns,
+ linalg::applyStagedPatterns(func, stage1Patterns, std::move(stage2Patterns),
stage3Transforms);
auto postStageTransforms = [this](Operation *op) {
@@ -267,13 +268,13 @@
ContractionOpToMatmulOpLowering, ContractionOpLowering>(
vectorTransformsOptions, context);
- applyPatternsAndFoldGreedily(func, vectorContractLoweringPatterns);
+ applyPatternsAndFoldGreedily(func, std::move(vectorContractLoweringPatterns));
// Programmatic controlled lowering of vector.transfer only.
OwningRewritePatternList vectorToLoopsPatterns;
populateVectorToSCFConversionPatterns(vectorToLoopsPatterns, context,
vectorToSCFOptions);
- applyPatternsAndFoldGreedily(func, vectorToLoopsPatterns);
+ applyPatternsAndFoldGreedily(func, std::move(vectorToLoopsPatterns));
}
MatmulCodegenStrategy &MatmulCodegenStrategy::setDefaultCPULowering() {
diff --git a/iree/compiler/Conversion/HLOToHLO/BUILD b/iree/compiler/Conversion/HLOToHLO/BUILD
index b7e7ce1..fccd391 100644
--- a/iree/compiler/Conversion/HLOToHLO/BUILD
+++ b/iree/compiler/Conversion/HLOToHLO/BUILD
@@ -27,9 +27,9 @@
"Passes.h",
],
deps = [
- "@llvm-project//mlir:CFGTransforms",
"@llvm-project//mlir:IR",
"@llvm-project//mlir:Pass",
+ "@llvm-project//mlir:TransformUtils",
"@org_tensorflow//tensorflow/compiler/mlir/hlo",
],
)
diff --git a/iree/compiler/Conversion/HLOToHLO/DecomposeHLOClamp.cpp b/iree/compiler/Conversion/HLOToHLO/DecomposeHLOClamp.cpp
index 5a9cbef..8a143f5 100644
--- a/iree/compiler/Conversion/HLOToHLO/DecomposeHLOClamp.cpp
+++ b/iree/compiler/Conversion/HLOToHLO/DecomposeHLOClamp.cpp
@@ -14,6 +14,7 @@
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/mhlo/IR/hlo_ops.h"
namespace mlir {
@@ -61,7 +62,7 @@
MLIRContext *context = &getContext();
OwningRewritePatternList patterns;
patterns.insert<DecomposeClampOp>(context);
- applyPatternsAndFoldGreedily(getOperation(), patterns);
+ applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
}
};
} // namespace
diff --git a/iree/compiler/Conversion/HLOToLinalg/FusionOfTensorOps.cpp b/iree/compiler/Conversion/HLOToLinalg/FusionOfTensorOps.cpp
index 8350d11..f2f7f3f 100644
--- a/iree/compiler/Conversion/HLOToLinalg/FusionOfTensorOps.cpp
+++ b/iree/compiler/Conversion/HLOToLinalg/FusionOfTensorOps.cpp
@@ -25,7 +25,7 @@
#include "iree/compiler/Dialect/HAL/IR/HALOps.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/Passes.h"
-#include "mlir/IR/PatternMatch.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
namespace mlir {
namespace iree_compiler {
@@ -76,12 +76,15 @@
MLIRContext *context = op->getContext();
interfacePatterns.insert<FuseWithHALInterfaceLoadTensor,
FuseWithHALInterfaceStoreTensor>(context);
- applyPatternsAndFoldGreedily(op->getRegions(), interfacePatterns);
+ FrozenRewritePatternList frozenInterfacePatterns(
+ std::move(interfacePatterns));
+
+ applyPatternsAndFoldGreedily(op->getRegions(), frozenInterfacePatterns);
populateLinalgTensorOpsFusionPatterns(context, fusionPatterns);
- applyPatternsAndFoldGreedily(op->getRegions(), fusionPatterns);
+ applyPatternsAndFoldGreedily(op->getRegions(), std::move(fusionPatterns));
- applyPatternsAndFoldGreedily(op->getRegions(), interfacePatterns);
+ applyPatternsAndFoldGreedily(op->getRegions(), frozenInterfacePatterns);
}
};
} // namespace
diff --git a/iree/compiler/Conversion/HLOToLinalg/HLOToLinalgOnBuffers.cpp b/iree/compiler/Conversion/HLOToLinalg/HLOToLinalgOnBuffers.cpp
index 8c8650b..9f33bd1 100644
--- a/iree/compiler/Conversion/HLOToLinalg/HLOToLinalgOnBuffers.cpp
+++ b/iree/compiler/Conversion/HLOToLinalg/HLOToLinalgOnBuffers.cpp
@@ -1470,7 +1470,7 @@
// Let the rest fall through.
target.markUnknownOpDynamicallyLegal([](Operation *) { return true; });
- if (failed(applyFullConversion(getFunction(), target, patterns))) {
+ if (failed(applyFullConversion(getFunction(), target, std::move(patterns)))) {
return signalPassFailure();
}
}
diff --git a/iree/compiler/Conversion/HLOToLinalg/HLOToLinalgOnTensors.cpp b/iree/compiler/Conversion/HLOToLinalg/HLOToLinalgOnTensors.cpp
index f7878ac..9ae86c7 100644
--- a/iree/compiler/Conversion/HLOToLinalg/HLOToLinalgOnTensors.cpp
+++ b/iree/compiler/Conversion/HLOToLinalg/HLOToLinalgOnTensors.cpp
@@ -145,7 +145,8 @@
// Let the rest fall through.
target.markUnknownOpDynamicallyLegal([](Operation *) { return true; });
- if (failed(applyPartialConversion(getFunction(), target, patterns))) {
+ if (failed(applyPartialConversion(getFunction(), target,
+ std::move(patterns)))) {
signalPassFailure();
}
}
diff --git a/iree/compiler/Conversion/HLOToLinalg/ResolveShapeOps.cpp b/iree/compiler/Conversion/HLOToLinalg/ResolveShapeOps.cpp
index d7223f6..88716ca 100644
--- a/iree/compiler/Conversion/HLOToLinalg/ResolveShapeOps.cpp
+++ b/iree/compiler/Conversion/HLOToLinalg/ResolveShapeOps.cpp
@@ -26,6 +26,7 @@
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
namespace mlir {
namespace iree_compiler {
@@ -104,7 +105,8 @@
ConversionTarget target(*context);
target.addIllegalOp<DimOp>();
target.markUnknownOpDynamicallyLegal([](Operation *) { return true; });
- if (failed(applyFullConversion(getFunction(), target, dimPatterns))) {
+ if (failed(
+ applyFullConversion(getFunction(), target, std::move(dimPatterns)))) {
return signalPassFailure();
}
@@ -114,7 +116,7 @@
// Then elide all shapex.tie_shape ops and canonicalize shapex.ranked_dim
// given that we don't need the shape annotation anymore.
- applyPatternsAndFoldGreedily(getFunction(), shapePatterns);
+ applyPatternsAndFoldGreedily(getFunction(), std::move(shapePatterns));
}
std::unique_ptr<OperationPass<FuncOp>> createResolveShapeOpsPass() {
diff --git a/iree/compiler/Conversion/LinalgToLLVM/ConvImg2ColMatmulConversion.cpp b/iree/compiler/Conversion/LinalgToLLVM/ConvImg2ColMatmulConversion.cpp
index 3601d99..6697372 100644
--- a/iree/compiler/Conversion/LinalgToLLVM/ConvImg2ColMatmulConversion.cpp
+++ b/iree/compiler/Conversion/LinalgToLLVM/ConvImg2ColMatmulConversion.cpp
@@ -14,8 +14,8 @@
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/IR/Builders.h"
-#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
namespace mlir {
namespace iree_compiler {
@@ -203,7 +203,7 @@
auto context = funcOp.getContext();
OwningRewritePatternList patterns;
populateConvImg2ColMatmulConversionPatterns(context, patterns);
- applyPatternsAndFoldGreedily(funcOp, patterns);
+ applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
}
std::unique_ptr<FunctionPass> createConvImg2ColMatmulConversionPass() {
diff --git a/iree/compiler/Conversion/LinalgToLLVM/ConvertToLLVM.cpp b/iree/compiler/Conversion/LinalgToLLVM/ConvertToLLVM.cpp
index f2525d4..16c399c 100644
--- a/iree/compiler/Conversion/LinalgToLLVM/ConvertToLLVM.cpp
+++ b/iree/compiler/Conversion/LinalgToLLVM/ConvertToLLVM.cpp
@@ -30,6 +30,7 @@
#include "mlir/Dialect/StandardOps/Transforms/Passes.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/Pass/Pass.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
namespace mlir {
namespace iree_compiler {
@@ -324,7 +325,7 @@
&getContext());
vector::populateVectorSlicesLoweringPatterns(patterns, &getContext());
vector::populateVectorContractLoweringPatterns(patterns, &getContext());
- applyPatternsAndFoldGreedily(getOperation(), patterns);
+ applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
}
//
auto module = getOperation();
@@ -358,7 +359,7 @@
funcOp.walk([&](IREE::PlaceholderOp placeholderOp) { any = true; });
return any ? false : true;
});
- if (failed(applyPartialConversion(module, target, patterns))) {
+ if (failed(applyPartialConversion(module, target, std::move(patterns)))) {
signalPassFailure();
}
}
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/ConvertToGPUPass.cpp b/iree/compiler/Conversion/LinalgToSPIRV/ConvertToGPUPass.cpp
index 5421cc9..b5af0c5 100644
--- a/iree/compiler/Conversion/LinalgToSPIRV/ConvertToGPUPass.cpp
+++ b/iree/compiler/Conversion/LinalgToSPIRV/ConvertToGPUPass.cpp
@@ -759,6 +759,7 @@
MapLinalgOpToLocalInvocationId<linalg::PoolingMinOp>,
MapLinalgOpToLocalInvocationId<linalg::PoolingSumOp>,
RemoveLinalgRange, SerializeParallelLoopPattern>(context);
+ FrozenRewritePatternList frozenPatterns(std::move(patterns));
for (FuncOp funcOp : getOperation().getOps<FuncOp>()) {
if (!isEntryPoint(funcOp)) continue;
@@ -767,7 +768,7 @@
funcOp.emitError("unhandled dispatch function with multiple blocks");
return signalPassFailure();
}
- if (failed(applyFullConversion(funcOp, target, patterns)))
+ if (failed(applyFullConversion(funcOp, target, frozenPatterns)))
return signalPassFailure();
}
}
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/ConvertToSPIRVPass.cpp b/iree/compiler/Conversion/LinalgToSPIRV/ConvertToSPIRVPass.cpp
index 508d0dc..4bb9d8d 100644
--- a/iree/compiler/Conversion/LinalgToSPIRV/ConvertToSPIRVPass.cpp
+++ b/iree/compiler/Conversion/LinalgToSPIRV/ConvertToSPIRVPass.cpp
@@ -425,8 +425,9 @@
functions.push_back(fn);
}
+ FrozenRewritePatternList frozenPatterns(std::move(patterns));
for (FuncOp fn : functions)
- if (failed(applyFullConversion(fn, *target, patterns)))
+ if (failed(applyFullConversion(fn, *target, frozenPatterns)))
return signalPassFailure();
// Collect all SPIR-V ops into a spv.module.
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/DeclareNumWorkgroupsFnPass.cpp b/iree/compiler/Conversion/LinalgToSPIRV/DeclareNumWorkgroupsFnPass.cpp
index 3bd8074..0cc1637 100644
--- a/iree/compiler/Conversion/LinalgToSPIRV/DeclareNumWorkgroupsFnPass.cpp
+++ b/iree/compiler/Conversion/LinalgToSPIRV/DeclareNumWorkgroupsFnPass.cpp
@@ -26,9 +26,9 @@
#include "iree/compiler/Dialect/Shape/IR/ShapeOps.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/Function.h"
-#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/StandardTypes.h"
#include "mlir/Pass/Pass.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
namespace mlir {
namespace iree_compiler {
@@ -136,7 +136,7 @@
OwningRewritePatternList patterns;
MLIRContext *context = &getContext();
patterns.insert<DeclareNumWorkgroupsFn>(context);
- applyPatternsAndFoldGreedily(getOperation(), patterns);
+ applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
}
std::unique_ptr<OperationPass<ModuleOp>> createDeclareNumWorkgroupsFnPass() {
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/LegalizeNumWorkgroupsFnPass.cpp b/iree/compiler/Conversion/LinalgToSPIRV/LegalizeNumWorkgroupsFnPass.cpp
index 1d832c1..f118b78 100644
--- a/iree/compiler/Conversion/LinalgToSPIRV/LegalizeNumWorkgroupsFnPass.cpp
+++ b/iree/compiler/Conversion/LinalgToSPIRV/LegalizeNumWorkgroupsFnPass.cpp
@@ -26,6 +26,7 @@
#include "mlir/IR/Module.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
namespace mlir {
namespace iree_compiler {
@@ -94,6 +95,7 @@
MLIRContext *context = &getContext();
populateLegalizeNumWorkgroupsFnPattern(context, patterns);
+ FrozenRewritePatternList frozenPatterns(std::move(patterns));
SymbolTable symbolTable(module.getOperation());
for (FuncOp fn : fns) {
if (!isEntryPoint(fn)) continue;
@@ -107,7 +109,7 @@
<< numWorkgroupsFnName;
return signalPassFailure();
}
- if (failed(applyPatternsAndFoldGreedily(numWorkgroupsFn, patterns)))
+ if (failed(applyPatternsAndFoldGreedily(numWorkgroupsFn, frozenPatterns)))
return signalPassFailure();
}
}
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/LinalgTileAndFusePass.cpp b/iree/compiler/Conversion/LinalgToSPIRV/LinalgTileAndFusePass.cpp
index b8a34af..0b6dabf 100644
--- a/iree/compiler/Conversion/LinalgToSPIRV/LinalgTileAndFusePass.cpp
+++ b/iree/compiler/Conversion/LinalgToSPIRV/LinalgTileAndFusePass.cpp
@@ -37,6 +37,7 @@
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/FoldUtils.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#define DEBUG_TYPE "iree-linalg-tile-and-fuse"
@@ -401,7 +402,7 @@
AffineApplyOp::getCanonicalizationPatterns(canonicalizationPatterns, context);
AffineMinOp::getCanonicalizationPatterns(canonicalizationPatterns, context);
SubViewOp::getCanonicalizationPatterns(canonicalizationPatterns, context);
- applyPatternsAndFoldGreedily(op, canonicalizationPatterns);
+ applyPatternsAndFoldGreedily(op, std::move(canonicalizationPatterns));
}
void LinalgTileAndFusePass::runOnOperation() {
@@ -466,7 +467,7 @@
OwningRewritePatternList firstLevelTilingPatterns;
populateTilingToWorkgroupPatterns(context, dependenceGraph, launchConfig,
firstLevelTilingPatterns);
- applyPatternsAndFoldGreedily(funcOp, firstLevelTilingPatterns);
+ applyPatternsAndFoldGreedily(funcOp, std::move(firstLevelTilingPatterns));
applyCanonicalizationPatterns(context, funcOp);
// Delete the ops that are marked for deletion.
@@ -482,7 +483,7 @@
// which requires some folding to trigger.
OwningRewritePatternList promotionPatterns;
populatePromotionPatterns(context, promotionPatterns);
- applyPatternsAndFoldGreedily(funcOp, promotionPatterns);
+ applyPatternsAndFoldGreedily(funcOp, std::move(promotionPatterns));
applyCanonicalizationPatterns(context, funcOp);
}
@@ -490,13 +491,14 @@
OwningRewritePatternList secondLevelTilingPatterns;
populateTilingToSubgroupPatterns(context, launchConfig,
secondLevelTilingPatterns);
- applyPatternsAndFoldGreedily(funcOp, secondLevelTilingPatterns);
+ applyPatternsAndFoldGreedily(funcOp,
+ std::move(secondLevelTilingPatterns));
applyCanonicalizationPatterns(context, funcOp);
OwningRewritePatternList vectorizationPatterns;
populateVectorizationPatterns(context, launchConfig,
vectorizationPatterns);
- applyPatternsAndFoldGreedily(funcOp, vectorizationPatterns);
+ applyPatternsAndFoldGreedily(funcOp, std::move(vectorizationPatterns));
}
launchConfig.finalize(funcOp);
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/VectorToGPUPass.cpp b/iree/compiler/Conversion/LinalgToSPIRV/VectorToGPUPass.cpp
index eb2a32e..5d2acb3 100644
--- a/iree/compiler/Conversion/LinalgToSPIRV/VectorToGPUPass.cpp
+++ b/iree/compiler/Conversion/LinalgToSPIRV/VectorToGPUPass.cpp
@@ -42,6 +42,7 @@
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
namespace mlir {
namespace iree_compiler {
@@ -182,8 +183,8 @@
target->markUnknownOpDynamicallyLegal([](Operation *) { return true; });
OwningRewritePatternList tileAndDistributePattern;
populateLinalgTileAndDistributePatterns(context, tileAndDistributePattern);
- if (failed(
- applyPartialConversion(funcOp, *target, tileAndDistributePattern))) {
+ if (failed(applyPartialConversion(funcOp, *target,
+ std::move(tileAndDistributePattern)))) {
return signalPassFailure();
}
@@ -193,7 +194,7 @@
canonicalizePatterns.insert<AffineMinCanonicalizationPattern,
linalg::AffineMinSCFCanonicalizationPattern>(
context);
- applyPatternsAndFoldGreedily(funcOp, canonicalizePatterns);
+ applyPatternsAndFoldGreedily(funcOp, std::move(canonicalizePatterns));
// 3. Vectorize the tiled linalg to be able to map it to load/store vector.
OwningRewritePatternList vectorizationPatterns;
@@ -201,7 +202,7 @@
.insert<linalg::LinalgVectorizationPattern<linalg::CopyOp>>(
context, linalg::LinalgMarker(
Identifier::get(getVectorizeMarker(), context), {}));
- applyPatternsAndFoldGreedily(funcOp, vectorizationPatterns);
+ applyPatternsAndFoldGreedily(funcOp, std::move(vectorizationPatterns));
}
// Convert vector transfer_read to a load if possible. This is the case only if
@@ -329,7 +330,7 @@
OwningRewritePatternList patterns;
patterns.insert<VectorContractLowering, VectorTransferReadToLoad,
VectorTransferWriteToStore, ExtractStridedLowering>(context);
- applyPatternsAndFoldGreedily(funcOp, patterns);
+ applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
}
void ConvertVectorToGPUPass::runOnOperation() {
@@ -351,7 +352,7 @@
target->addLegalOp<scf::YieldOp>();
target->addLegalOp<scf::ForOp>();
target->addLegalDialect<gpu::GPUDialect>();
- if (failed(applyPartialConversion(funcOp, *target, patterns)))
+ if (failed(applyPartialConversion(funcOp, *target, std::move(patterns))))
return signalPassFailure();
}
} // namespace
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/VectorizeMemref.cpp b/iree/compiler/Conversion/LinalgToSPIRV/VectorizeMemref.cpp
index 7ff1530..9391676 100644
--- a/iree/compiler/Conversion/LinalgToSPIRV/VectorizeMemref.cpp
+++ b/iree/compiler/Conversion/LinalgToSPIRV/VectorizeMemref.cpp
@@ -360,7 +360,7 @@
return !memrefUsageAnalysis->transferConvert(op);
return true;
});
- if (failed(applyPartialConversion(module, target, patterns)))
+ if (failed(applyPartialConversion(module, target, std::move(patterns))))
return signalPassFailure();
}
diff --git a/iree/compiler/Conversion/LinalgToVector/LoadStoreVectorization.cpp b/iree/compiler/Conversion/LinalgToVector/LoadStoreVectorization.cpp
index fe53860..075837c 100644
--- a/iree/compiler/Conversion/LinalgToVector/LoadStoreVectorization.cpp
+++ b/iree/compiler/Conversion/LinalgToVector/LoadStoreVectorization.cpp
@@ -294,7 +294,8 @@
return llvm::any_of(op->getOperandTypes(), isVectorType) ||
llvm::any_of(op->getResultTypes(), isVectorType);
}));
- if (failed(applyPartialConversion(getOperation(), target, patterns)))
+ if (failed(applyPartialConversion(getOperation(), target,
+ std::move(patterns))))
return signalPassFailure();
}
};
diff --git a/iree/compiler/Dialect/Flow/Transforms/HLOToHLOPreprocessing.cpp b/iree/compiler/Dialect/Flow/Transforms/HLOToHLOPreprocessing.cpp
index ada669a..10965ed 100644
--- a/iree/compiler/Dialect/Flow/Transforms/HLOToHLOPreprocessing.cpp
+++ b/iree/compiler/Dialect/Flow/Transforms/HLOToHLOPreprocessing.cpp
@@ -23,6 +23,7 @@
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LogicalResult.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/mhlo/IR/hlo_ops.h"
#include "tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/mhlo/transforms/rewriters.h"
@@ -787,7 +788,7 @@
if (conv1x1toDot) {
patterns.insert<Lower1x1ConvolutionToDotOp>(context);
}
- applyPatternsAndFoldGreedily(getOperation(), patterns);
+ applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
}
};
diff --git a/iree/compiler/Dialect/Flow/Transforms/PrePostPartitioningConversion.cpp b/iree/compiler/Dialect/Flow/Transforms/PrePostPartitioningConversion.cpp
index 818f64a..f25f67e 100644
--- a/iree/compiler/Dialect/Flow/Transforms/PrePostPartitioningConversion.cpp
+++ b/iree/compiler/Dialect/Flow/Transforms/PrePostPartitioningConversion.cpp
@@ -103,7 +103,7 @@
populateStandardToFlowPatterns(context, conversionPatterns);
if (failed(applyPartialConversion(getFunction(), conversionTarget,
- conversionPatterns))) {
+ std::move(conversionPatterns)))) {
getFunction().emitError() << "module is not in a compatible input format";
return signalPassFailure();
}
@@ -133,7 +133,7 @@
populateStandardToFlowPatterns(context, conversionPatterns);
if (failed(applyPartialConversion(getFunction(), conversionTarget,
- conversionPatterns))) {
+ std::move(conversionPatterns)))) {
getFunction().emitError() << "module is not in a compatible input format";
return signalPassFailure();
}
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertHALToVM.cpp b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertHALToVM.cpp
index 759e7fc..f8bbbda 100644
--- a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertHALToVM.cpp
+++ b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertHALToVM.cpp
@@ -138,7 +138,7 @@
typeConverter);
if (failed(applyPartialConversion(outerModuleOp, conversionTarget,
- conversionPatterns))) {
+ std::move(conversionPatterns)))) {
outerModuleOp.emitError() << "conversion to vm.module failed";
return signalPassFailure();
}
diff --git a/iree/compiler/Dialect/HAL/Transforms/ConvertToHAL.cpp b/iree/compiler/Dialect/HAL/Transforms/ConvertToHAL.cpp
index 340ff06..f50e261 100644
--- a/iree/compiler/Dialect/HAL/Transforms/ConvertToHAL.cpp
+++ b/iree/compiler/Dialect/HAL/Transforms/ConvertToHAL.cpp
@@ -100,7 +100,7 @@
// that don't need anything HAL-specific to pass through. This is handled by
// the fallback type legality support of the
if (failed(applyPartialConversion(getOperation(), conversionTarget,
- patterns))) {
+ std::move(patterns)))) {
return signalPassFailure();
}
}
diff --git a/iree/compiler/Dialect/HAL/Transforms/ResolveEntryPointOrdinals.cpp b/iree/compiler/Dialect/HAL/Transforms/ResolveEntryPointOrdinals.cpp
index 03030d2..35b3d1e 100644
--- a/iree/compiler/Dialect/HAL/Transforms/ResolveEntryPointOrdinals.cpp
+++ b/iree/compiler/Dialect/HAL/Transforms/ResolveEntryPointOrdinals.cpp
@@ -14,8 +14,8 @@
#include "iree/compiler/Dialect/HAL/IR/HALOps.h"
#include "iree/compiler/Dialect/HAL/Transforms/Passes.h"
-#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
namespace mlir {
namespace iree_compiler {
@@ -87,7 +87,7 @@
OwningRewritePatternList patterns;
patterns.insert<ResolveCommandBufferDispatchOrdinals>(context);
patterns.insert<ResolveCommandBufferDispatchIndirectOrdinals>(context);
- applyPatternsAndFoldGreedily(getOperation(), patterns);
+ applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
}
};
diff --git a/iree/compiler/Dialect/Shape/Conversion/ConvertShapeToShapex.cpp b/iree/compiler/Dialect/Shape/Conversion/ConvertShapeToShapex.cpp
index e9034ea..7802918 100644
--- a/iree/compiler/Dialect/Shape/Conversion/ConvertShapeToShapex.cpp
+++ b/iree/compiler/Dialect/Shape/Conversion/ConvertShapeToShapex.cpp
@@ -250,7 +250,8 @@
patterns.insert<ConvertToExtentTensorOp>(context);
patterns.insert<ConvertTensorCastOp>(context);
- if (failed(applyPartialConversion(module, conversionTarget, patterns))) {
+ if (failed(applyPartialConversion(module, conversionTarget,
+ std::move(patterns)))) {
return signalPassFailure();
}
}
diff --git a/iree/compiler/Dialect/Shape/Transforms/CleanupPlaceholdersPass.cpp b/iree/compiler/Dialect/Shape/Transforms/CleanupPlaceholdersPass.cpp
index 2889a91..4419683 100644
--- a/iree/compiler/Dialect/Shape/Transforms/CleanupPlaceholdersPass.cpp
+++ b/iree/compiler/Dialect/Shape/Transforms/CleanupPlaceholdersPass.cpp
@@ -17,6 +17,7 @@
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/PassRegistry.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
namespace mlir {
namespace iree_compiler {
@@ -39,7 +40,7 @@
void runOnFunction() override {
OwningRewritePatternList patterns;
patterns.insert<CleanupTieShapePattern>(&getContext());
- applyPatternsAndFoldGreedily(getFunction(), patterns);
+ applyPatternsAndFoldGreedily(getFunction(), std::move(patterns));
}
};
diff --git a/iree/compiler/Dialect/Shape/Transforms/ConvertHLOToShapeDialectPass.cpp b/iree/compiler/Dialect/Shape/Transforms/ConvertHLOToShapeDialectPass.cpp
index 79c64ab..1b06f2d 100644
--- a/iree/compiler/Dialect/Shape/Transforms/ConvertHLOToShapeDialectPass.cpp
+++ b/iree/compiler/Dialect/Shape/Transforms/ConvertHLOToShapeDialectPass.cpp
@@ -85,7 +85,7 @@
conversionPatterns.insert<ConvertDynamicIota>(&getContext());
if (failed(applyPartialConversion(getFunction(), conversionTarget,
- conversionPatterns))) {
+ std::move(conversionPatterns)))) {
return signalPassFailure();
}
}
diff --git a/iree/compiler/Dialect/Shape/Transforms/MaterializeShapeCalculationsPass.cpp b/iree/compiler/Dialect/Shape/Transforms/MaterializeShapeCalculationsPass.cpp
index 0a78738..5841522 100644
--- a/iree/compiler/Dialect/Shape/Transforms/MaterializeShapeCalculationsPass.cpp
+++ b/iree/compiler/Dialect/Shape/Transforms/MaterializeShapeCalculationsPass.cpp
@@ -28,6 +28,7 @@
#include "mlir/IR/StandardTypes.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Support/LogicalResult.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
namespace mlir {
namespace iree_compiler {
@@ -54,7 +55,7 @@
populateMaterializeShapeCalculationsConversionPatterns(conversionPatterns,
context);
if (failed(applyPartialConversion(getOperation(), target,
- conversionPatterns))) {
+ std::move(conversionPatterns)))) {
signalPassFailure();
return;
}
@@ -70,7 +71,7 @@
RankedDimsOp::getCanonicalizationPatterns(patterns, context);
TieShapeOp::getCanonicalizationPatterns(patterns, context);
FromExtentTensorOp::getCanonicalizationPatterns(patterns, context);
- applyPatternsAndFoldGreedily(getOperation(), patterns);
+ applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
}
};
diff --git a/iree/compiler/Dialect/VM/Conversion/StandardToVM/ConvertStandardToVMTest.cpp b/iree/compiler/Dialect/VM/Conversion/StandardToVM/ConvertStandardToVMTest.cpp
index af87784..15b9ed3 100644
--- a/iree/compiler/Dialect/VM/Conversion/StandardToVM/ConvertStandardToVMTest.cpp
+++ b/iree/compiler/Dialect/VM/Conversion/StandardToVM/ConvertStandardToVMTest.cpp
@@ -47,7 +47,8 @@
// NOTE: we allow other dialects besides just VM during this pass as we are
// only trying to eliminate the std ops. When used as part of a larger set
// of rewrites a full conversion should be used instead.
- if (failed(applyPartialConversion(getOperation(), target, patterns))) {
+ if (failed(applyPartialConversion(getOperation(), target,
+ std::move(patterns)))) {
return signalPassFailure();
}
}
diff --git a/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeModuleTarget.cpp b/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeModuleTarget.cpp
index 82d7983..ecfee52f 100644
--- a/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeModuleTarget.cpp
+++ b/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeModuleTarget.cpp
@@ -173,7 +173,7 @@
}
}
- if (failed(applyFullConversion(moduleOp, target, patterns))) {
+ if (failed(applyFullConversion(moduleOp, target, std::move(patterns)))) {
return moduleOp.emitError() << "unable to fully apply conversion to module";
}
diff --git a/iree/compiler/Dialect/VM/Transforms/Conversion.cpp b/iree/compiler/Dialect/VM/Transforms/Conversion.cpp
index 1837326..68f4c12 100644
--- a/iree/compiler/Dialect/VM/Transforms/Conversion.cpp
+++ b/iree/compiler/Dialect/VM/Transforms/Conversion.cpp
@@ -137,7 +137,7 @@
setupCompilerHintsLegality(context, conversionTarget, typeConverter);
if (failed(applyPartialConversion(outerModuleOp, conversionTarget,
- conversionPatterns))) {
+ std::move(conversionPatterns)))) {
outerModuleOp.emitError() << "conversion to vm.module failed";
return signalPassFailure();
}
diff --git a/iree/compiler/Dialect/VMLA/Conversion/VMLAToVM/ConvertVMLAToVM.cpp b/iree/compiler/Dialect/VMLA/Conversion/VMLAToVM/ConvertVMLAToVM.cpp
index ef9238e..979088d 100644
--- a/iree/compiler/Dialect/VMLA/Conversion/VMLAToVM/ConvertVMLAToVM.cpp
+++ b/iree/compiler/Dialect/VMLA/Conversion/VMLAToVM/ConvertVMLAToVM.cpp
@@ -409,7 +409,7 @@
Shape::populateFoldConversionPatterns(&getContext(), conversionPatterns);
if (failed(applyPartialConversion(outerModuleOp, conversionTarget,
- conversionPatterns))) {
+ std::move(conversionPatterns)))) {
outerModuleOp.emitError() << "conversion to vm.module failed";
return signalPassFailure();
}
diff --git a/iree/compiler/Dialect/VMLA/Transforms/Conversion.cpp b/iree/compiler/Dialect/VMLA/Transforms/Conversion.cpp
index fe87d80..5c5ce94 100644
--- a/iree/compiler/Dialect/VMLA/Transforms/Conversion.cpp
+++ b/iree/compiler/Dialect/VMLA/Transforms/Conversion.cpp
@@ -127,7 +127,7 @@
conversionTarget.addIllegalOp<Shape::RankedBroadcastInDimOp>();
if (failed(applyPartialConversion(getOperation(), conversionTarget,
- conversionPatterns))) {
+ std::move(conversionPatterns)))) {
getOperation().emitError() << "conversion to the VMLA dialect failed";
return signalPassFailure();
}
diff --git a/iree/compiler/Dialect/VMLA/Transforms/PreConversionLowering.cpp b/iree/compiler/Dialect/VMLA/Transforms/PreConversionLowering.cpp
index 4964412..fef7f26 100644
--- a/iree/compiler/Dialect/VMLA/Transforms/PreConversionLowering.cpp
+++ b/iree/compiler/Dialect/VMLA/Transforms/PreConversionLowering.cpp
@@ -33,6 +33,7 @@
#include "mlir/IR/SymbolTable.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/mhlo/IR/hlo_ops.h"
#include "tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/mhlo/transforms/rewriters.h"
@@ -398,7 +399,8 @@
// conversions.
OwningRewritePatternList greedyPatterns;
mhlo::PopulateComplexLoweringPatterns(context, &greedyPatterns);
- if (failed(applyPatternsAndFoldGreedily(getOperation(), greedyPatterns))) {
+ if (failed(applyPatternsAndFoldGreedily(getOperation(),
+ std::move(greedyPatterns)))) {
return signalPassFailure();
}
@@ -422,7 +424,8 @@
target.addIllegalOp<mhlo::FftOp>();
patterns.insert<LowerFftOp>(context);
- if (failed(applyPartialConversion(getOperation(), target, patterns))) {
+ if (failed(applyPartialConversion(getOperation(), target,
+ std::move(patterns)))) {
return signalPassFailure();
}
}