Integrate llvm-project at 9816fada1667ecc3343d285295c95848826693af (#8612)
* Reset third_party/llvm-project: 9816fada1667ecc3343d285295c95848826693af (2022-03-18 10:50:55 +0100): [bazel] Port a954ade8ed41
* Additionally cherry-picked https://github.com/llvm/llvm-project/commit/3b74aac29c97b587c30d940e00b338af384ede59 to fix bufferization issues
* TensorFlow commit: c9ddfac348b1f423e1463ae78b046514a8b03a48
* Additionally cherry-picked https://github.com/tensorflow/tensorflow/commit/9642bbdbb6b27443d339b714f233272eb74ca637 to fix leftover debugging `dump()` calls
* MHLO commit: 467cd37703dc0c4195ce6351617ef320bb60e927
* Updated external model registration to the new `addExtension` + `attachInterface` API (see the C++ sketch after this list)
* Updated FuncOp usages
* The attribute carrying the function type is now named `function_type` instead of `type`.
* `mlir::FuncOp` is now `mlir::func::FuncOp`.
* `builtin.func` in assembly is now `func.func` (see the MLIR sketch after this list).
* Updated MHLO attribute assembly for `comparison_direction`, `precision`, etc.
* Fixed TFL/StripMetadata.cpp to avoid dropping `tf_saved_model` attributes, which carry ABI information
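
For reference, a minimal sketch of the C++ API moves described above. The helper names (`buildCleanupPipeline`, `printEntrySignature`, `SomeModel`) are illustrative only; the real call sites are in the diff below.

```cpp
// Sketch assuming the MLIR headers at this integrate commit.
#include "mlir/Dialect/Func/IR/FuncOps.h"  // mlir::func::FuncOp now lives here
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"
#include "llvm/Support/raw_ostream.h"

using namespace mlir;

// Passes that used to nest on the builtin FuncOp now nest on func::FuncOp.
static void buildCleanupPipeline(OpPassManager &pm) {
  pm.addNestedPass<func::FuncOp>(createCanonicalizerPass());
  pm.addNestedPass<func::FuncOp>(createCSEPass());
}

// The signature accessor was renamed: getType() -> getFunctionType().
static void printEntrySignature(func::FuncOp funcOp) {
  llvm::outs() << funcOp.getFunctionType() << "\n";
}

// External model registration moved from the registry to dialect extensions:
//   registry.addOpInterface<tensor::ExtractSliceOp, SomeModel>();      // old
//   registry.addExtension(+[](MLIRContext *ctx, tensor::TensorDialect *) {
//     tensor::ExtractSliceOp::attachInterface<SomeModel>(*ctx);        // new
//   });
```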
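
The assembly-level rename on a toy function (`@add` is illustrative); pass pipelines likewise nest on `func.func(...)` instead of `builtin.func(...)`, as the updated RUN lines below show.

```mlir
// Old spelling:  builtin.func @add(%arg0: i32, %arg1: i32) -> i32 { ... }
// New spelling after the func dialect split (the generic form now carries the
// signature in a `function_type` attribute rather than `type`):
func.func @add(%arg0: i32, %arg1: i32) -> i32 {
  %0 = arith.addi %arg0, %arg1 : i32
  return %0 : i32
}
```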
diff --git a/integrations/tensorflow/WORKSPACE b/integrations/tensorflow/WORKSPACE
index c947831..8284510 100644
--- a/integrations/tensorflow/WORKSPACE
+++ b/integrations/tensorflow/WORKSPACE
@@ -7,7 +7,7 @@
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
-TENSORFLOW_COMMIT = "05f17fca35623f4ab6d275ed95f0e1363c939f73"
+TENSORFLOW_COMMIT = "9d9e78f6735a933b42febaafe2f303eec25f70ee"
git_repository(
name = "org_tensorflow",
diff --git a/integrations/tensorflow/iree-dialects/BUILD b/integrations/tensorflow/iree-dialects/BUILD
index 3f47bd6..3280a69 100644
--- a/integrations/tensorflow/iree-dialects/BUILD
+++ b/integrations/tensorflow/iree-dialects/BUILD
@@ -631,6 +631,7 @@
"@llvm-project//mlir:ArithmeticDialect",
"@llvm-project//mlir:BufferizationDialect",
"@llvm-project//mlir:BufferizationTransforms",
+ "@llvm-project//mlir:FuncDialect",
"@llvm-project//mlir:LinalgOps",
"@llvm-project//mlir:LLVMDialect",
"@llvm-project//mlir:PDLDialect",
diff --git a/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgExt/Passes/PassDetail.h b/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgExt/Passes/PassDetail.h
index 3f3fe9b..417a582 100644
--- a/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgExt/Passes/PassDetail.h
+++ b/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgExt/Passes/PassDetail.h
@@ -7,6 +7,7 @@
#ifndef IREE_DIALECTS_DIALECT_LINALGEXT_TRANSFORMS_PASS_DETAIL_H_
#define IREE_DIALECTS_DIALECT_LINALGEXT_TRANSFORMS_PASS_DETAIL_H_
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/Pass.h"
namespace mlir {
diff --git a/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgExt/Passes/Passes.h b/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgExt/Passes/Passes.h
index febec87..98e37ac 100644
--- a/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgExt/Passes/Passes.h
+++ b/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgExt/Passes/Passes.h
@@ -7,6 +7,7 @@
#ifndef IREE_DIALECTS_DIALECT_LINALGEXT_TRANSFORMS_PASSES_H_
#define IREE_DIALECTS_DIALECT_LINALGEXT_TRANSFORMS_PASSES_H_
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/Pass.h"
namespace mlir {
@@ -14,9 +15,9 @@
namespace IREE {
namespace LinalgExt {
-std::unique_ptr<OperationPass<FuncOp>> createTiledOpInterfaceTilingPass();
+std::unique_ptr<OperationPass<func::FuncOp>> createTiledOpInterfaceTilingPass();
-std::unique_ptr<OperationPass<FuncOp>> createLinalgExtToLoopsPass();
+std::unique_ptr<OperationPass<func::FuncOp>> createLinalgExtToLoopsPass();
std::unique_ptr<OperationPass<>> createPadContractionToBlockSizePass();
diff --git a/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgExt/Passes/Passes.td b/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgExt/Passes/Passes.td
index 54a0484..95ae571 100644
--- a/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgExt/Passes/Passes.td
+++ b/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgExt/Passes/Passes.td
@@ -10,13 +10,13 @@
include "mlir/Pass/PassBase.td"
def LinalgExtToLoops :
- Pass<"iree-linalg-ext-to-loops", "FuncOp"> {
+ Pass<"iree-linalg-ext-to-loops", "func::FuncOp"> {
let summary = "Convert LinalgExt ops to loops and Linalg ops.";
let constructor = "mlir::iree_compiler::IREE::LinalgExt::createLinalgExtToLoopsPass()";
}
def TiledOpInterfaceTiling :
- Pass<"iree-linalg-ext-tile", "FuncOp"> {
+ Pass<"iree-linalg-ext-tile", "func::FuncOp"> {
let summary = "Test pass for tiling using TiledOpInterface";
let constructor = "mlir::iree_compiler::IREE::LinalgExt::createTiledOpInterfaceTilingPass()";
}
diff --git a/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgTransform/LinalgTransformOps.h b/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgTransform/LinalgTransformOps.h
index 6eda492..7683ae0 100644
--- a/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgTransform/LinalgTransformOps.h
+++ b/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgTransform/LinalgTransformOps.h
@@ -11,6 +11,7 @@
#include "iree-dialects/Dialect/LinalgExt/IR/LinalgExtOps.h"
#include "iree-dialects/Dialect/LinalgTransform/TrackingListener.h"
#include "iree-dialects/Dialect/LinalgTransform/TransformOpInterface.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/PDL/IR/PDLTypes.h"
#include "mlir/IR/BuiltinAttributes.h"
diff --git a/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/PyDM/IR/PyDMOps.h b/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/PyDM/IR/PyDMOps.h
index 905f3a3..c038c0b 100644
--- a/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/PyDM/IR/PyDMOps.h
+++ b/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/PyDM/IR/PyDMOps.h
@@ -12,10 +12,12 @@
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Dialect.h"
+#include "mlir/IR/FunctionInterfaces.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/SymbolTable.h"
+#include "mlir/Interfaces/CallInterfaces.h"
#include "mlir/Interfaces/ControlFlowInterfaces.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
diff --git a/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/PyDM/IR/PyDMOps.td b/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/PyDM/IR/PyDMOps.td
index bc5b181..9b4aca3 100644
--- a/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/PyDM/IR/PyDMOps.td
+++ b/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/PyDM/IR/PyDMOps.td
@@ -124,7 +124,7 @@
}];
let arguments = (ins SymbolNameAttr:$sym_name,
- TypeAttr:$type,
+ TypeAttr:$function_type,
OptionalAttr<StrArrayAttr>:$arg_names,
OptionalAttr<StrArrayAttr>:$free_vars,
OptionalAttr<StrArrayAttr>:$cell_vars,
@@ -143,21 +143,21 @@
}
/// Returns the type of this function.
- FunctionType getType() {
+ FunctionType getFunctionType() {
return getOperation()->getAttrOfType<TypeAttr>(getTypeAttrName())
.getValue()
.cast<FunctionType>();
}
/// Returns the argument types of this function.
- ArrayRef<Type> getArgumentTypes() { return getType().getInputs(); }
+ ArrayRef<Type> getArgumentTypes() { return getFunctionType().getInputs(); }
/// Returns the result types of this function.
- ArrayRef<Type> getResultTypes() { return getType().getResults(); }
+ ArrayRef<Type> getResultTypes() { return getFunctionType().getResults(); }
/// Returns the python return type of the function (second return type).
Type getPyReturnType() {
- return getType().getResult(1);
+ return getFunctionType().getResult(1);
}
/// Hook for Trait::FunctionLike, called after verifying that the 'type'
@@ -167,7 +167,7 @@
Region *getCallableRegion() { return &body(); }
ArrayRef<Type> getCallableResults() {
- return getType().getResults();
+ return getFunctionType().getResults();
}
/// Defines SymbolOpInterface::isDeclaration().
@@ -177,8 +177,8 @@
}];
let builders = [
- OpBuilder<(ins "StringAttr":$name, "FunctionType":$type), [{
- build($_builder, $_state, name, TypeAttr::get(type),
+ OpBuilder<(ins "StringAttr":$name, "FunctionType":$function_type), [{
+ build($_builder, $_state, name, TypeAttr::get(function_type),
nullptr, nullptr, nullptr, nullptr);
}]>
];
diff --git a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/IR/TiledOpInterface.cpp b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/IR/TiledOpInterface.cpp
index e509489..604e498 100644
--- a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/IR/TiledOpInterface.cpp
+++ b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/IR/TiledOpInterface.cpp
@@ -305,9 +305,12 @@
DialectRegistry &registry) {
LLVM_DEBUG(
{ llvm::dbgs() << "Adding external models of tiled op interface\n"; });
- registry
- .addOpInterface<tensor::ExtractSliceOp, ExtractSliceTiledOpInterface>();
- registry.addOpInterface<tensor::InsertSliceOp, InsertSliceTiledOpInterface>();
+
+ registry.addExtension(+[](MLIRContext *ctx, tensor::TensorDialect *dialect) {
+ tensor::ExtractSliceOp::attachInterface<ExtractSliceTiledOpInterface>(*ctx);
+ tensor::InsertSliceOp::attachInterface<InsertSliceTiledOpInterface>(*ctx);
+ });
+
// TODO(ravishankarm): Needs custom PadTiledOpInterface or equiv.
// registry.addOpInterface<tensor::PadOp,
// ForwardToTilingInterface<tensor::PadOp>>();
diff --git a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Transforms/LinalgExtBufferization.cpp b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Transforms/LinalgExtBufferization.cpp
index 2b8f8ec..9b3c2fb 100644
--- a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Transforms/LinalgExtBufferization.cpp
+++ b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Transforms/LinalgExtBufferization.cpp
@@ -6,11 +6,11 @@
#include "iree-dialects/Dialect/LinalgExt/LinalgExtBufferization.h"
-#include <mlir/IR/BuiltinOps.h>
-
+#include "iree-dialects/Dialect/LinalgExt/IR/LinalgExtDialect.h"
#include "iree-dialects/Dialect/LinalgExt/IR/LinalgExtOps.h"
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
+#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/PatternMatch.h"
using namespace mlir;
@@ -22,7 +22,6 @@
using bufferization::BufferRelation;
using bufferization::getMemRefType;
using bufferization::replaceOpWithBufferizedValues;
-using bufferization::replaceOpWithNewBufferizedOp;
using tensor::ExtractSliceOp;
/// Return the destinations that an InParallelOp is inserting into. One per
@@ -341,9 +340,12 @@
void mlir::iree_compiler::IREE::LinalgExt::
registerBufferizableOpInterfaceExternalModels(DialectRegistry &registry) {
- registry.addOpInterface<InParallelOp, InParallelOpInterface>();
- registry
- .addOpInterface<PerformConcurrentlyOp, PerformConcurrentlyOpInterface>();
- registry
- .addOpInterface<ParallelInsertSliceOp, ParallelInsertSliceOpInterface>();
+ registry.addExtension(
+ +[](MLIRContext *ctx, LinalgExt::IREELinalgExtDialect *dialect) {
+ InParallelOp::attachInterface<InParallelOpInterface>(*ctx);
+ PerformConcurrentlyOp::attachInterface<PerformConcurrentlyOpInterface>(
+ *ctx);
+ ParallelInsertSliceOp::attachInterface<ParallelInsertSliceOpInterface>(
+ *ctx);
+ });
}
diff --git a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Transforms/TilingExternalModels.cpp b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Transforms/TilingExternalModels.cpp
index 174d4ff..0758d7f 100644
--- a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Transforms/TilingExternalModels.cpp
+++ b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Transforms/TilingExternalModels.cpp
@@ -157,23 +157,25 @@
} // namespace
template <typename OpType>
-void registerOne(DialectRegistry &registry) {
- registry.addOpInterface<OpType, LinalgOpTilingInterface<OpType>>();
+void registerOne(MLIRContext *ctx) {
+ OpType::template attachInterface<LinalgOpTilingInterface<OpType>>(*ctx);
}
/// Variadic helper function.
template <typename... OpTypes>
-void registerAll(DialectRegistry &registry) {
+void registerAll(MLIRContext *ctx) {
// FIXME: In c++17 this can be simplified by using 'fold expressions'.
- (void)std::initializer_list<int>{0, (registerOne<OpTypes>(registry), 0)...};
+ (void)std::initializer_list<int>{0, (registerOne<OpTypes>(ctx), 0)...};
}
#define GET_OP_LIST
void mlir::iree_compiler::IREE::LinalgExt::
registerTilingInterfaceExternalModels(DialectRegistry &registry) {
- registerOne<linalg::GenericOp>(registry);
- registerAll<
+ registry.addExtension(+[](MLIRContext *ctx, linalg::LinalgDialect *dialect) {
+ registerOne<linalg::GenericOp>(ctx);
+ registerAll<
#include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
- >(registry);
+ >(ctx);
+ });
}
diff --git a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgTransform/IR/LinalgTransformOps.cpp b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgTransform/IR/LinalgTransformOps.cpp
index e28dcd3..03d39ac 100644
--- a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgTransform/IR/LinalgTransformOps.cpp
+++ b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgTransform/IR/LinalgTransformOps.cpp
@@ -31,6 +31,7 @@
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/Transforms/Bufferize.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/Linalg/ComprehensiveBufferize/ModuleBufferization.h"
#include "mlir/Dialect/Linalg/Passes.h"
@@ -581,7 +582,7 @@
// Perform buffer-level hoistings.
state.getTopLevel()->walk(
- [&](FuncOp funcOp) { hoistRedundantVectorTransfers(funcOp); });
+ [&](func::FuncOp funcOp) { hoistRedundantVectorTransfers(funcOp); });
return success();
}
@@ -597,8 +598,8 @@
// the end. Keep module-level for now.
PassManager pm(getContext());
- pm.addNestedPass<FuncOp>(createConvertVectorToSCFPass());
- pm.addNestedPass<FuncOp>(createConvertLinalgToLoopsPass());
+ pm.addNestedPass<func::FuncOp>(createConvertVectorToSCFPass());
+ pm.addNestedPass<func::FuncOp>(createConvertLinalgToLoopsPass());
if (enable_async()) {
pm.addPass(createAsyncToAsyncRuntimePass());
pm.addPass(createAsyncRuntimeRefCountingPass());
@@ -618,7 +619,7 @@
.enableAMX(enable_amx())
.enableX86Vector(enable_x86vector())));
// clang-format on
- pm.addNestedPass<FuncOp>(createConvertMathToLLVMPass());
+ pm.addNestedPass<func::FuncOp>(createConvertMathToLLVMPass());
pm.addPass(createMemRefToLLVMPass());
if (enable_async())
pm.addPass(createConvertAsyncToLLVMPass());
@@ -631,7 +632,9 @@
// FIXME: this is a terrible hack!
state.getTopLevel()->walk([](LLVM::LLVMFuncOp funcOp) {
for (int64_t i = 0; i < funcOp.getNumArguments(); ++i) {
- if (!funcOp.getType().getParamType(i).isa<LLVM::LLVMPointerType>())
+ if (!funcOp.getFunctionType()
+ .getParamType(i)
+ .isa<LLVM::LLVMPointerType>())
continue;
funcOp.setArgAttr(i, "llvm.noalias", UnitAttr::get(funcOp.getContext()));
}
@@ -760,15 +763,15 @@
return executeRegionOp;
}
-static FailureOr<FuncOp> outlineLoop(scf::ForOp loop, StringRef funcName,
- transform::TransformState &state) {
+static FailureOr<func::FuncOp> outlineLoop(scf::ForOp loop, StringRef funcName,
+ transform::TransformState &state) {
PatternRewriterListener rewriter(loop->getContext());
auto &listener = state.getExtension<TrackingListener>();
rewriter.addListener(&listener);
Location loc = loop.getLoc();
scf::ExecuteRegionOp exec = outlineInExecuteRegion(rewriter, loop);
assert(exec && "failed to produce execute_region");
- FailureOr<FuncOp> outlined =
+ FailureOr<func::FuncOp> outlined =
outlineSingleBlockRegion(rewriter, loc, exec.getRegion(), funcName);
if (failed(listener.checkErrorState()))
return failure();
@@ -781,7 +784,7 @@
SmallVector<Operation *> resultVector;
auto res =
applyTransformToEach(state.getPayloadOps(target()), resultVector,
- [&](scf::ForOp loop) -> FailureOr<FuncOp> {
+ [&](scf::ForOp loop) -> FailureOr<func::FuncOp> {
return outlineLoop(loop, func_name(), state);
});
if (failed(res))
diff --git a/integrations/tensorflow/iree-dialects/lib/Dialect/PyDM/IR/PyDMOps.cpp b/integrations/tensorflow/iree-dialects/lib/Dialect/PyDM/IR/PyDMOps.cpp
index b281874..f4ad03a 100644
--- a/integrations/tensorflow/iree-dialects/lib/Dialect/PyDM/IR/PyDMOps.cpp
+++ b/integrations/tensorflow/iree-dialects/lib/Dialect/PyDM/IR/PyDMOps.cpp
@@ -567,9 +567,7 @@
}
void PyFuncOp::print(OpAsmPrinter &p) {
- FunctionType fnType = getType();
- function_interface_impl::printFunctionOp(
- p, *this, fnType.getInputs(), /*isVariadic=*/false, fnType.getResults());
+ function_interface_impl::printFunctionOp(p, *this, /*isVariadic=*/false);
}
//===----------------------------------------------------------------------===//
@@ -764,7 +762,7 @@
<< "' does not reference a valid function";
// Verify that the operand and result types match the callee.
- auto fnType = fn.getType();
+ auto fnType = fn.getFunctionType();
if (fnType.getNumInputs() != getNumOperands())
return emitOpError("incorrect number of operands for callee");
diff --git a/integrations/tensorflow/iree-dialects/lib/Dialect/PyDM/Transforms/Optimize/FixateWeakNumeric.cpp b/integrations/tensorflow/iree-dialects/lib/Dialect/PyDM/Transforms/Optimize/FixateWeakNumeric.cpp
index b6e5b51..5d4be33 100644
--- a/integrations/tensorflow/iree-dialects/lib/Dialect/PyDM/Transforms/Optimize/FixateWeakNumeric.cpp
+++ b/integrations/tensorflow/iree-dialects/lib/Dialect/PyDM/Transforms/Optimize/FixateWeakNumeric.cpp
@@ -41,7 +41,7 @@
// Special cases for operations.
if (auto funcOp = llvm::dyn_cast<PYDM::FuncOp>(op)) {
- FunctionType existingFt = funcOp.getType();
+ FunctionType existingFt = funcOp.getFunctionType();
FunctionType newFt = convertFunctionType(existingFt);
if (newFt != existingFt) {
funcOp.setType(newFt);
diff --git a/integrations/tensorflow/iree-dialects/lib/Dialect/PyDM/Transforms/ToIREE/LoweringPatterns.cpp b/integrations/tensorflow/iree-dialects/lib/Dialect/PyDM/Transforms/ToIREE/LoweringPatterns.cpp
index 9d92711..2e14e37 100644
--- a/integrations/tensorflow/iree-dialects/lib/Dialect/PyDM/Transforms/ToIREE/LoweringPatterns.cpp
+++ b/integrations/tensorflow/iree-dialects/lib/Dialect/PyDM/Transforms/ToIREE/LoweringPatterns.cpp
@@ -637,7 +637,7 @@
LogicalResult
matchAndRewrite(PYDM::FuncOp srcOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
- FunctionType srcFuncType = srcOp.getType();
+ FunctionType srcFuncType = srcOp.getFunctionType();
TypeConverter::SignatureConversion signatureConversion(
srcOp.getNumArguments());
@@ -839,7 +839,7 @@
auto parentFunc = srcOp->getParentOfType<mlir::FuncOp>();
if (!parentFunc)
return rewriter.notifyMatchFailure(srcOp, "not contained by a func");
- Type convertedReturnType = parentFunc.getType().getResult(1);
+ Type convertedReturnType = parentFunc.getFunctionType().getResult(1);
// Split the entry block.
Block *entryBlock = rewriter.getInsertionBlock();
diff --git a/integrations/tensorflow/iree-dialects/python/iree/compiler/dialects/_iree_pydm_ops_ext.py b/integrations/tensorflow/iree-dialects/python/iree/compiler/dialects/_iree_pydm_ops_ext.py
index 5155972..7fc8653 100644
--- a/integrations/tensorflow/iree-dialects/python/iree/compiler/dialects/_iree_pydm_ops_ext.py
+++ b/integrations/tensorflow/iree-dialects/python/iree/compiler/dialects/_iree_pydm_ops_ext.py
@@ -18,7 +18,7 @@
@property
def type(self):
- return ir.FunctionType(ir.TypeAttr(self.attributes["type"]).value)
+ return ir.FunctionType(ir.TypeAttr(self.attributes["function_type"]).value)
@property
def py_return_type(self) -> ir.Type:
diff --git a/integrations/tensorflow/iree-dialects/python/iree/compiler/dialects/iree_pydm/importer/importer.py b/integrations/tensorflow/iree-dialects/python/iree/compiler/dialects/iree_pydm/importer/importer.py
index 68b1227..fae6591 100644
--- a/integrations/tensorflow/iree-dialects/python/iree/compiler/dialects/iree_pydm/importer/importer.py
+++ b/integrations/tensorflow/iree-dialects/python/iree/compiler/dialects/iree_pydm/importer/importer.py
@@ -85,7 +85,7 @@
context=ic.context)
f_op = d.FuncOp(
ir.StringAttr.get(symbol),
- type=ir.TypeAttr.get(ir_f_type),
+ function_type=ir.TypeAttr.get(ir_f_type),
arg_names=f_arg_names,
free_vars=f_var_names,
cell_vars=ir.ArrayAttr.get([]),
diff --git a/integrations/tensorflow/iree-dialects/test/Transforms/test-listener-cse.mlir b/integrations/tensorflow/iree-dialects/test/Transforms/test-listener-cse.mlir
index 13d2994..ebfc348 100644
--- a/integrations/tensorflow/iree-dialects/test/Transforms/test-listener-cse.mlir
+++ b/integrations/tensorflow/iree-dialects/test/Transforms/test-listener-cse.mlir
@@ -233,7 +233,7 @@
%0 = arith.constant 1 : i32
// CHECK-NEXT: @nested_func
- builtin.func @nested_func() {
+ func.func @nested_func() {
// CHECK-NEXT: arith.constant 1
%foo = arith.constant 1 : i32
"foo.yield"(%foo) : (i32) -> ()
diff --git a/integrations/tensorflow/iree_tf_compiler/MHLO/Passes.cpp b/integrations/tensorflow/iree_tf_compiler/MHLO/Passes.cpp
index d2cd879..d26e8fc 100644
--- a/integrations/tensorflow/iree_tf_compiler/MHLO/Passes.cpp
+++ b/integrations/tensorflow/iree_tf_compiler/MHLO/Passes.cpp
@@ -25,7 +25,7 @@
// Import pipelines should end with canonicalization because they may have
// access to dialects and patterns that the core compiler does not.
- pm.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
+ pm.addNestedPass<func::FuncOp>(mlir::createCanonicalizerPass());
}
void registerMHLOImportPassPipeline() {
diff --git a/integrations/tensorflow/iree_tf_compiler/MHLO/Passes.h b/integrations/tensorflow/iree_tf_compiler/MHLO/Passes.h
index 670104a..8930e85 100644
--- a/integrations/tensorflow/iree_tf_compiler/MHLO/Passes.h
+++ b/integrations/tensorflow/iree_tf_compiler/MHLO/Passes.h
@@ -7,6 +7,7 @@
#ifndef IREE_INTEGRATIONS_TENSORFLOW_IREE_TF_COMPILER_MHLO_PASSES_H_
#define IREE_INTEGRATIONS_TENSORFLOW_IREE_TF_COMPILER_MHLO_PASSES_H_
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/Pass.h"
namespace mlir {
@@ -27,7 +28,7 @@
// Annotates an appropriate iree.abi attribute on public functions that
// operate exclusively on tensor types. This corresponds to the expectations
// of MHLO and is suitable for such programs.
-std::unique_ptr<OperationPass<FuncOp>> createEmitDefaultIREEABIPass();
+std::unique_ptr<OperationPass<func::FuncOp>> createEmitDefaultIREEABIPass();
//===----------------------------------------------------------------------===//
// Registration
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/Passes.h b/integrations/tensorflow/iree_tf_compiler/TF/Passes.h
index 6a3712c..9e9d6c3 100644
--- a/integrations/tensorflow/iree_tf_compiler/TF/Passes.h
+++ b/integrations/tensorflow/iree_tf_compiler/TF/Passes.h
@@ -7,6 +7,7 @@
#ifndef IREE_INTEGRATIONS_TENSORFLOW_IREE_TF_COMPILER_TF_PASSES_H_
#define IREE_INTEGRATIONS_TENSORFLOW_IREE_TF_COMPILER_TF_PASSES_H_
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/Pass.h"
namespace mlir {
@@ -48,14 +49,14 @@
std::unique_ptr<OperationPass<ModuleOp>> createPropagateResourceCastsPass();
// Strips tf.Assert ops.
-std::unique_ptr<OperationPass<FuncOp>> createStripAssertsPass();
+std::unique_ptr<OperationPass<func::FuncOp>> createStripAssertsPass();
// Strips all TF-related attributes; none are needed by IREE.
std::unique_ptr<OperationPass<ModuleOp>> createStripModuleMetadataPass();
-std::unique_ptr<OperationPass<FuncOp>> createStripFunctionMetadataPass();
+std::unique_ptr<OperationPass<func::FuncOp>> createStripFunctionMetadataPass();
// Validates whether any Tensorflow operations remain.
-std::unique_ptr<OperationPass<FuncOp>> createVerifyFullyConvertedPass();
+std::unique_ptr<OperationPass<func::FuncOp>> createVerifyFullyConvertedPass();
//===----------------------------------------------------------------------===//
// Patterns
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/test/convert_to_mhlo.mlir b/integrations/tensorflow/iree_tf_compiler/TF/test/convert_to_mhlo.mlir
index dbc1ec7..87213e8 100644
--- a/integrations/tensorflow/iree_tf_compiler/TF/test/convert_to_mhlo.mlir
+++ b/integrations/tensorflow/iree_tf_compiler/TF/test/convert_to_mhlo.mlir
@@ -2,7 +2,10 @@
// CHECK-LABEL: @sigmoid
func @sigmoid(%arg0: tensor<2xf32>) -> tensor<2xf32> {
- // CHECK-DAG: [[HALF:%.+]] = mhlo.constant dense<5.000000e-01> : tensor<2xf32>
+ // CHECK-DAG: [[SCALAR:%.+]] = mhlo.constant dense<5.000000e-01> : tensor<f32>
+ // CHECK-DAG: [[SHAPE_OF:%.+]] = shape.shape_of %arg0 : tensor<2xf32> -> tensor<1xindex>
+ // CHECK-DAG: [[SHAPE_VAL:%.+]] = shape.to_extent_tensor [[SHAPE_OF]] : tensor<1xindex> -> tensor<1xindex>
+ // CHECK-DAG: [[HALF:%.+]] = "mhlo.dynamic_broadcast_in_dim"([[SCALAR]], [[SHAPE_VAL]]) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>, tensor<1xindex>) -> tensor<2xf32>
// CHECK-DAG: [[R1:%.+]] = mhlo.multiply %arg0, [[HALF]] : tensor<2xf32>
// CHECK-DAG: [[R2:%.+]] = "mhlo.tanh"([[R1]]) : (tensor<2xf32>) -> tensor<2xf32>
// CHECK-DAG: [[R3:%.+]] = mhlo.multiply [[R2]], [[HALF]] : tensor<2xf32>
@@ -13,7 +16,7 @@
// CHECK-LABEL: @sigmoid_complex
func @sigmoid_complex(%arg0: tensor<2xcomplex<f32>>) -> tensor<2xcomplex<f32>> {
- // CHECK: [[R0:%.+]] = mhlo.constant dense<(5.000000e-01,0.000000e+00)> : tensor<2xcomplex<f32>>
+ // CHECK: [[R0:%.+]] = mhlo.constant dense<(5.000000e-01,0.000000e+00)> : tensor<complex<f32>>
// CHECK-NOT: tf.Sigmoid
%0 = "tf.Sigmoid"(%arg0) : (tensor<2xcomplex<f32>>) -> tensor<2xcomplex<f32>>
return %0 : tensor<2xcomplex<f32>>
@@ -22,7 +25,8 @@
// CHECK-LABEL: @sigmoid_unranked
func @sigmoid_unranked(%arg0: tensor<*xf32>) -> tensor<*xf32> {
// CHECK-DAG: [[SCALAR:%.+]] = mhlo.constant dense<5.000000e-01> : tensor<f32>
- // CHECK-DAG: [[SHAPE_VAL:%.+]] = shape.shape_of %arg0 : tensor<*xf32> -> tensor<?xindex>
+ // CHECK-DAG: [[SHAPE_OF:%.+]] = shape.shape_of %arg0 : tensor<*xf32> -> tensor<?xindex>
+ // CHECK-DAG: [[SHAPE_VAL:%.+]] = shape.to_extent_tensor [[SHAPE_OF]] : tensor<?xindex> -> tensor<?xindex>
// CHECK-DAG: [[HALF:%.+]] = "mhlo.dynamic_broadcast_in_dim"([[SCALAR]], [[SHAPE_VAL]]) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>, tensor<?xindex>) -> tensor<*xf32>
// CHECK-DAG: [[R1:%.+]] = mhlo.multiply %arg0, [[HALF]] : tensor<*xf32>
// CHECK-DAG: [[R2:%.+]] = "mhlo.tanh"([[R1]]) : (tensor<*xf32>) -> tensor<*xf32>
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/test/saved_model_to_iree_abi.mlir b/integrations/tensorflow/iree_tf_compiler/TF/test/saved_model_to_iree_abi.mlir
index 029e580..bf5a2e7 100644
--- a/integrations/tensorflow/iree_tf_compiler/TF/test/saved_model_to_iree_abi.mlir
+++ b/integrations/tensorflow/iree_tf_compiler/TF/test/saved_model_to_iree_abi.mlir
@@ -13,7 +13,7 @@
// CHECK: func private @__inference_binary_func_70
// CHECK-NOT: tf_saved_model
builtin.module @binary_func attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 729 : i32}, tf_saved_model.semantics} {
- builtin.func @__inference_binary_func_70(%arg0: tensor<16xf32> {tf._user_specified_name = "a", tf_saved_model.index_path = [0]}, %arg1: tensor<16xf32> {tf._user_specified_name = "b", tf_saved_model.index_path = [1]}) -> (tensor<16xf32> {tf_saved_model.index_path = [0]}, tensor<16xf32> {tf_saved_model.index_path = [1]}) attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<16>, #tf_type.shape<16>], tf_saved_model.exported_names = ["binary_func"]} {
+ func.func @__inference_binary_func_70(%arg0: tensor<16xf32> {tf._user_specified_name = "a", tf_saved_model.index_path = [0]}, %arg1: tensor<16xf32> {tf._user_specified_name = "b", tf_saved_model.index_path = [1]}) -> (tensor<16xf32> {tf_saved_model.index_path = [0]}, tensor<16xf32> {tf_saved_model.index_path = [1]}) attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<16>, #tf_type.shape<16>], tf_saved_model.exported_names = ["binary_func"]} {
%0 = "tf.Identity"(%arg0) {device = ""} : (tensor<16xf32>) -> tensor<16xf32>
%1 = "tf.Identity"(%arg1) {device = ""} : (tensor<16xf32>) -> tensor<16xf32>
return %0, %1 : tensor<16xf32>, tensor<16xf32>
@@ -31,7 +31,7 @@
// CHECK: func private @__inference_unary_func_240
// CHECK-NOT: tf_saved_model
builtin.module @unary_func attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 729 : i32}, tf_saved_model.semantics} {
- builtin.func @__inference_unary_func_240(%arg0: tensor<16xf32> {tf._user_specified_name = "a", tf_saved_model.index_path = [0]}) -> (tensor<16xf32> {tf_saved_model.index_path = []}) attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<16>], tf_saved_model.exported_names = ["unary_func"]} {
+ func.func @__inference_unary_func_240(%arg0: tensor<16xf32> {tf._user_specified_name = "a", tf_saved_model.index_path = [0]}) -> (tensor<16xf32> {tf_saved_model.index_path = []}) attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<16>], tf_saved_model.exported_names = ["unary_func"]} {
%0 = "tf.Identity"(%arg0) {device = ""} : (tensor<16xf32>) -> tensor<16xf32>
return %0 : tensor<16xf32>
}
@@ -50,7 +50,7 @@
// CHECK: func private @__inference_return_list_260
// CHECK-NOT: tf_saved_model
builtin.module @return_list attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 729 : i32}, tf_saved_model.semantics} {
- builtin.func @__inference_return_list_260(%arg0: tensor<16xf32> {tf._user_specified_name = "a", tf_saved_model.index_path = [0]}, %arg1: tensor<16xf32> {tf._user_specified_name = "b", tf_saved_model.index_path = [1]}) -> (tensor<16xf32> {tf_saved_model.index_path = [0]}, tensor<16xf32> {tf_saved_model.index_path = [1]}) attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<16>, #tf_type.shape<16>], tf_saved_model.exported_names = ["return_list"]} {
+ func.func @__inference_return_list_260(%arg0: tensor<16xf32> {tf._user_specified_name = "a", tf_saved_model.index_path = [0]}, %arg1: tensor<16xf32> {tf._user_specified_name = "b", tf_saved_model.index_path = [1]}) -> (tensor<16xf32> {tf_saved_model.index_path = [0]}, tensor<16xf32> {tf_saved_model.index_path = [1]}) attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<16>, #tf_type.shape<16>], tf_saved_model.exported_names = ["return_list"]} {
%0 = "tf.Identity"(%arg0) {device = ""} : (tensor<16xf32>) -> tensor<16xf32>
%1 = "tf.Identity"(%arg1) {device = ""} : (tensor<16xf32>) -> tensor<16xf32>
return %0, %1 : tensor<16xf32>, tensor<16xf32>
@@ -101,7 +101,7 @@
// CHECK: func private @__inference_dict_nest_190
// CHECK-NOT: tf_saved_model
builtin.module @dict_nest attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 729 : i32}, tf_saved_model.semantics} {
- builtin.func @__inference_dict_nest_190(
+ func.func @__inference_dict_nest_190(
%arg0: tensor<16xf32> {tf._user_specified_name = "mapping", tf_saved_model.index_path = [0, "dict", "a"]},
%arg1: tensor<16xf32> {tf._user_specified_name = "mapping", tf_saved_model.index_path = [0, "dict", "b"]},
%arg2: tensor<16xf32> {tf._user_specified_name = "mapping", tf_saved_model.index_path = [0, "list", 0]},
@@ -120,7 +120,7 @@
// CHECK: func @dict_nest(%arg0: !iree_input.buffer_view, %arg1: !iree_input.buffer_view, %arg2: !iree_input.buffer_view) -> !iree_input.list<!iree_input.variant>
// CHECK-SAME{LITERAL}: iree.abi = "{\22a\22:[[\22named\22,\22a\22,[\22ndarray\22,\22f32\22,1,16]],[\22named\22,\22b\22,[\22ndarray\22,\22f32\22,1,16]],[\22named\22,\22scalar\22,[\22ndarray\22,\22f32\22,0]]],\22r\22:[[\22sdict\22,[\22dict\22,[\22sdict\22,[\22a\22,[\22ndarray\22,\22f32\22,1,16]],[\22b\22,[\22ndarray\22,\22f32\22,1,16]],[\22scalar\22,[\22ndarray\22,\22f32\22,0]]]]]],\22v\22:1}"
builtin.module @kwargs attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 729 : i32}, tf_saved_model.semantics} {
- builtin.func @__inference_dict_nest_190(
+ func.func @__inference_dict_nest_190(
%arg0: tensor<16xf32> {tf_saved_model.index_path = ["a"]},
%arg1: tensor<16xf32> {tf_saved_model.index_path = ["b"]},
%arg2: tensor<f32> {tf._user_specified_name = "scalar", tf_saved_model.index_path = ["scalar"]}) ->
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/test/strip_asserts.mlir b/integrations/tensorflow/iree_tf_compiler/TF/test/strip_asserts.mlir
index 2711fd4..d784b98 100644
--- a/integrations/tensorflow/iree_tf_compiler/TF/test/strip_asserts.mlir
+++ b/integrations/tensorflow/iree_tf_compiler/TF/test/strip_asserts.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-tf-opt -split-input-file -verify-diagnostics -pass-pipeline='builtin.func(iree-tf-strip-asserts)' %s | FileCheck %s
+// RUN: iree-tf-opt -split-input-file -verify-diagnostics -pass-pipeline='func.func(iree-tf-strip-asserts)' %s | FileCheck %s
// CHECK-LABEL: @asserts
// CHECK-NOT: tf.Assert
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/test/strip_metadata.mlir b/integrations/tensorflow/iree_tf_compiler/TF/test/strip_metadata.mlir
index db23522..1cc7e83 100644
--- a/integrations/tensorflow/iree_tf_compiler/TF/test/strip_metadata.mlir
+++ b/integrations/tensorflow/iree_tf_compiler/TF/test/strip_metadata.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-tf-opt -split-input-file -verify-diagnostics -pass-pipeline='iree-tf-strip-module-metadata,builtin.func(iree-tf-strip-function-metadata)' %s | FileCheck %s
+// RUN: iree-tf-opt -split-input-file -verify-diagnostics -pass-pipeline='iree-tf-strip-module-metadata,func.func(iree-tf-strip-function-metadata)' %s | FileCheck %s
// CHECK-LABEL: @tf_module
// CHECK-NOT: attributes
diff --git a/integrations/tensorflow/iree_tf_compiler/TFL/LowerGlobalTensors.cpp b/integrations/tensorflow/iree_tf_compiler/TFL/LowerGlobalTensors.cpp
index f7765a1..ecc9ab6 100644
--- a/integrations/tensorflow/iree_tf_compiler/TFL/LowerGlobalTensors.cpp
+++ b/integrations/tensorflow/iree_tf_compiler/TFL/LowerGlobalTensors.cpp
@@ -37,7 +37,7 @@
DenseMap<StringRef, FuncOp> symNameToFunction;
for (auto func : moduleOp.getOps<FuncOp>()) {
- symNameToFunction[func.sym_name()] = func;
+ symNameToFunction[func.getSymName()] = func;
}
DenseMap<StringRef, DenseElementsAttr> sharedNameToConstant;
diff --git a/integrations/tensorflow/iree_tf_compiler/TFL/PassDetail.h b/integrations/tensorflow/iree_tf_compiler/TFL/PassDetail.h
index 0d943d7..5cc38bc 100644
--- a/integrations/tensorflow/iree_tf_compiler/TFL/PassDetail.h
+++ b/integrations/tensorflow/iree_tf_compiler/TFL/PassDetail.h
@@ -7,6 +7,7 @@
#ifndef IREE_INTEGRATIONS_TENSORFLOW_IREE_TF_COMPILER_TFL_PASS_DETAIL_H_
#define IREE_INTEGRATIONS_TENSORFLOW_IREE_TF_COMPILER_TFL_PASS_DETAIL_H_
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/Pass.h"
namespace mlir {
diff --git a/integrations/tensorflow/iree_tf_compiler/TFL/Passes.h b/integrations/tensorflow/iree_tf_compiler/TFL/Passes.h
index bd836f6..f04f64d 100644
--- a/integrations/tensorflow/iree_tf_compiler/TFL/Passes.h
+++ b/integrations/tensorflow/iree_tf_compiler/TFL/Passes.h
@@ -7,6 +7,7 @@
#ifndef IREE_INTEGRATIONS_TENSORFLOW_IREE_TF_COMPILER_TFL_PASSES_H_
#define IREE_INTEGRATIONS_TENSORFLOW_IREE_TF_COMPILER_TFL_PASSES_H_
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/Pass.h"
namespace mlir {
@@ -30,17 +31,18 @@
// Converts TFLite attributes that are useful to corresponding IREE attributes.
std::unique_ptr<OperationPass<ModuleOp>> createConvertModuleMetadataPass();
-std::unique_ptr<OperationPass<FuncOp>> createConvertFunctionMetadataPass();
+std::unique_ptr<OperationPass<func::FuncOp>>
+createConvertFunctionMetadataPass();
// Lowers TFLite's global tensor operations to the Util dialect.
std::unique_ptr<OperationPass<ModuleOp>> createLowerGlobalTensorsPass();
// Strips all leftover TFLite-related attributes; none are needed by IREE.
std::unique_ptr<OperationPass<ModuleOp>> createStripModuleMetadataPass();
-std::unique_ptr<OperationPass<FuncOp>> createStripFunctionMetadataPass();
+std::unique_ptr<OperationPass<func::FuncOp>> createStripFunctionMetadataPass();
// Validates whether any TFLite operations remain.
-std::unique_ptr<OperationPass<FuncOp>> createVerifyFullyConvertedPass();
+std::unique_ptr<OperationPass<func::FuncOp>> createVerifyFullyConvertedPass();
//===----------------------------------------------------------------------===//
// Registration
diff --git a/integrations/tensorflow/iree_tf_compiler/TFL/Passes.td b/integrations/tensorflow/iree_tf_compiler/TFL/Passes.td
index 43ab16a..70eaab7 100644
--- a/integrations/tensorflow/iree_tf_compiler/TFL/Passes.td
+++ b/integrations/tensorflow/iree_tf_compiler/TFL/Passes.td
@@ -10,7 +10,7 @@
include "mlir/Pass/PassBase.td"
def ConvertFunctionMetadata :
- Pass<"iree-tflite-convert-function-metadata", "mlir::FuncOp"> {
+ Pass<"iree-tflite-convert-function-metadata", "mlir::func::FuncOp"> {
let summary = "Converts TFLite attributes to IREE attributes on functions.";
let constructor = "mlir::iree_integrations::TFL::createConvertFunctionMetadataPass()";
}
@@ -34,7 +34,7 @@
}
def StripFunctionMetadata :
- Pass<"iree-tflite-strip-function-metadata", "mlir::FuncOp"> {
+ Pass<"iree-tflite-strip-function-metadata", "mlir::func::FuncOp"> {
let summary = "Guarantees that functions used by tfl.call_once are retained.";
let constructor = "mlir::iree_integrations::TFL::createStripFunctionMetadataPass()";
}
@@ -46,7 +46,7 @@
}
def VerifyFullyConverted :
- Pass<"iree-tflite-verify-fully-converted", "mlir::FuncOp"> {
+ Pass<"iree-tflite-verify-fully-converted", "mlir::func::FuncOp"> {
let summary = "Verifies that all TFLite frontend ops were converted and none remain.";
let constructor = "mlir::iree_integrations::TFL::createVerifyFullyConvertedPass()";
}
diff --git a/integrations/tensorflow/iree_tf_compiler/TFL/RetainCallOnceFuncs.cpp b/integrations/tensorflow/iree_tf_compiler/TFL/RetainCallOnceFuncs.cpp
index f6b9d94..c9a605e 100644
--- a/integrations/tensorflow/iree_tf_compiler/TFL/RetainCallOnceFuncs.cpp
+++ b/integrations/tensorflow/iree_tf_compiler/TFL/RetainCallOnceFuncs.cpp
@@ -29,11 +29,11 @@
auto moduleOp = getOperation();
llvm::DenseMap<StringRef, FuncOp> funcMap;
- for (auto func : moduleOp.getOps<mlir::FuncOp>()) {
- funcMap[func.sym_name()] = func;
+ for (auto func : moduleOp.getOps<mlir::func::FuncOp>()) {
+ funcMap[func.getSymName()] = func;
}
- for (auto func : moduleOp.getOps<mlir::FuncOp>()) {
+ for (auto func : moduleOp.getOps<mlir::func::FuncOp>()) {
for (auto callOnce : func.getOps<mlir::TFL::CallOnceOp>()) {
auto callFunc = funcMap[callOnce.session_init_function()];
callOnce->setAttr("session_init_function_symbol",
diff --git a/integrations/tensorflow/iree_tf_compiler/TFL/StripMetadata.cpp b/integrations/tensorflow/iree_tf_compiler/TFL/StripMetadata.cpp
index af1dbe6..181962c 100644
--- a/integrations/tensorflow/iree_tf_compiler/TFL/StripMetadata.cpp
+++ b/integrations/tensorflow/iree_tf_compiler/TFL/StripMetadata.cpp
@@ -17,6 +17,9 @@
static bool isTFLAttr(NamedAttribute &namedAttr) {
// NOTE: tflite mixes tf and tfl, for some reason.
auto name = namedAttr.getName().strref();
+ // Don't trim attributes from tf_saved_model---they carry ABI information.
+ if (name.startswith("tf_saved_model.")) return false;
+
if (name.startswith("tf.") || name.startswith("tf_") ||
name.startswith("tfl.") || name.startswith("tfl_")) {
return true;
diff --git a/integrations/tensorflow/iree_tf_compiler/TFL/test/convert_metadata.mlir b/integrations/tensorflow/iree_tf_compiler/TFL/test/convert_metadata.mlir
index 4a8825e..84752cd 100644
--- a/integrations/tensorflow/iree_tf_compiler/TFL/test/convert_metadata.mlir
+++ b/integrations/tensorflow/iree_tf_compiler/TFL/test/convert_metadata.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt-tflite -split-input-file -pass-pipeline='iree-tflite-convert-module-metadata,builtin.func(iree-tflite-convert-function-metadata)' %s | FileCheck %s
+// RUN: iree-opt-tflite -split-input-file -pass-pipeline='iree-tflite-convert-module-metadata,func.func(iree-tflite-convert-function-metadata)' %s | FileCheck %s
module attributes {tfl.schema_version = 3 : i32} {
// CHECK: func @main(
diff --git a/integrations/tensorflow/iree_tf_compiler/TFL/test/strip_metadata.mlir b/integrations/tensorflow/iree_tf_compiler/TFL/test/strip_metadata.mlir
index 8cce458..b0b192c 100644
--- a/integrations/tensorflow/iree_tf_compiler/TFL/test/strip_metadata.mlir
+++ b/integrations/tensorflow/iree_tf_compiler/TFL/test/strip_metadata.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt-tflite -split-input-file -verify-diagnostics -pass-pipeline='iree-tflite-strip-module-metadata,builtin.func(iree-tflite-strip-function-metadata)' %s | FileCheck %s
+// RUN: iree-opt-tflite -split-input-file -verify-diagnostics -pass-pipeline='iree-tflite-strip-module-metadata,func.func(iree-tflite-strip-function-metadata)' %s | FileCheck %s
// CHECK-LABEL: module {
// CHECK-NOT: tf.schema_version
diff --git a/iree/compiler/Bindings/Native/Transforms/Passes.cpp b/iree/compiler/Bindings/Native/Transforms/Passes.cpp
index 4ec5152..65dd8e4 100644
--- a/iree/compiler/Bindings/Native/Transforms/Passes.cpp
+++ b/iree/compiler/Bindings/Native/Transforms/Passes.cpp
@@ -8,6 +8,7 @@
#include <memory>
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/PassOptions.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Transforms/Passes.h"
@@ -23,8 +24,8 @@
// Cleanup the IR after manipulating it.
passManager.addPass(createInlinerPass());
- passManager.addNestedPass<FuncOp>(createCanonicalizerPass());
- passManager.addNestedPass<FuncOp>(createCSEPass());
+ passManager.addNestedPass<func::FuncOp>(createCanonicalizerPass());
+ passManager.addNestedPass<func::FuncOp>(createCSEPass());
passManager.addPass(createSymbolDCEPass());
}
diff --git a/iree/compiler/Bindings/Native/Transforms/WrapEntryPoints.cpp b/iree/compiler/Bindings/Native/Transforms/WrapEntryPoints.cpp
index ce174bc..c643cfc 100644
--- a/iree/compiler/Bindings/Native/Transforms/WrapEntryPoints.cpp
+++ b/iree/compiler/Bindings/Native/Transforms/WrapEntryPoints.cpp
@@ -46,7 +46,7 @@
void runOnOperation() override {
auto moduleOp = getOperation();
- SmallVector<FuncOp, 4> entryFuncOps;
+ SmallVector<func::FuncOp, 4> entryFuncOps;
for (auto funcOp : moduleOp.getOps<FuncOp>()) {
if (funcOp.isPublic() && !funcOp->hasAttr("iree.abi.stub")) {
entryFuncOps.push_back(funcOp);
@@ -103,13 +103,13 @@
//
// NOTE: today we only support a single entry point; with minor tweaks we
// could fix this up to support multiple if we wanted.
- FuncOp createWrapperFunc(FuncOp entryFuncOp) {
+ func::FuncOp createWrapperFunc(FuncOp entryFuncOp) {
// Convert argument types to those required by the binding ABI.
//
// NOTE: this is where we could change our signature to provide additional
// values from the runtime bindings as may be required - like semaphores for
// async behavior or cancellation.
- auto entryFuncType = entryFuncOp.getType();
+ auto entryFuncType = entryFuncOp.getFunctionType();
SmallVector<Type> inputTypes;
for (auto oldType : entryFuncType.getInputs()) {
inputTypes.push_back(mapToABIType(oldType));
diff --git a/iree/compiler/Bindings/TFLite/Transforms/Passes.cpp b/iree/compiler/Bindings/TFLite/Transforms/Passes.cpp
index 6ddf1e5..088c4fe 100644
--- a/iree/compiler/Bindings/TFLite/Transforms/Passes.cpp
+++ b/iree/compiler/Bindings/TFLite/Transforms/Passes.cpp
@@ -8,6 +8,7 @@
#include <memory>
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/PassOptions.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Transforms/Passes.h"
@@ -23,8 +24,8 @@
// Cleanup the IR after manipulating it.
passManager.addPass(createInlinerPass());
- passManager.addNestedPass<FuncOp>(createCanonicalizerPass());
- passManager.addNestedPass<FuncOp>(createCSEPass());
+ passManager.addNestedPass<func::FuncOp>(createCanonicalizerPass());
+ passManager.addNestedPass<func::FuncOp>(createCSEPass());
passManager.addPass(createSymbolDCEPass());
}
diff --git a/iree/compiler/Bindings/TFLite/Transforms/WrapEntryPoints.cpp b/iree/compiler/Bindings/TFLite/Transforms/WrapEntryPoints.cpp
index e330c8d..ec94030 100644
--- a/iree/compiler/Bindings/TFLite/Transforms/WrapEntryPoints.cpp
+++ b/iree/compiler/Bindings/TFLite/Transforms/WrapEntryPoints.cpp
@@ -131,8 +131,8 @@
// Creates dynamic dim globals for each input and output of |funcOp|.
static std::pair<SmallVector<DynamicDims>, SmallVector<DynamicDims>>
createDynamicDimGlobals(Location loc, StringRef namePrefix,
- mlir::FuncOp funcOp, OpBuilder &moduleBuilder) {
- auto funcType = funcOp.getType();
+ mlir::func::FuncOp funcOp, OpBuilder &moduleBuilder) {
+ auto funcType = funcOp.getFunctionType();
// TFLite requires the tensor names at runtime. If they've previously been
// extracted into iree.identifiers we use those and otherwise fallback to
@@ -183,14 +183,14 @@
}
// Derives a shape calculation function from the given entry point |funcOp|.
- static mlir::FuncOp createShapeCalculationFunc(
- Location loc, StringRef namePrefix, mlir::FuncOp funcOp,
+ static mlir::func::FuncOp createShapeCalculationFunc(
+ Location loc, StringRef namePrefix, mlir::func::FuncOp funcOp,
ArrayRef<DynamicDims> inputDynamicDims,
ArrayRef<DynamicDims> outputDynamicDims,
IREE::Util::GlobalOp dirtyGlobalOp, OpBuilder &moduleBuilder) {
// Clone the entire entry function with all its IR.
auto calcFuncOp =
- cast<mlir::FuncOp>(moduleBuilder.clone(*funcOp.getOperation()));
+ cast<mlir::func::FuncOp>(moduleBuilder.clone(*funcOp.getOperation()));
calcFuncOp.setName(
moduleBuilder.getStringAttr(namePrefix.str() + "_calculate_shapes"));
calcFuncOp.setPrivate();
@@ -364,7 +364,7 @@
void createQueryInputShapeFunc(Location loc, StringRef namePrefix,
ArrayRef<DynamicDims> inputDynamicDims,
OpBuilder &moduleBuilder) {
- auto queryFuncOp = moduleBuilder.create<mlir::FuncOp>(
+ auto queryFuncOp = moduleBuilder.create<mlir::func::FuncOp>(
loc, namePrefix.str() + "_query_input_shape",
moduleBuilder.getFunctionType(/*inputs=*/
TypeRange{
@@ -397,7 +397,7 @@
ArrayRef<DynamicDims> inputDynamicDims,
IREE::Util::GlobalOp dirtyGlobalOp,
OpBuilder &moduleBuilder) {
- auto resizeFuncOp = moduleBuilder.create<mlir::FuncOp>(
+ auto resizeFuncOp = moduleBuilder.create<mlir::func::FuncOp>(
loc, namePrefix.str() + "_resize_input_shape",
moduleBuilder.getFunctionType(/*inputs=*/
TypeRange{
@@ -432,7 +432,7 @@
// func @_query_output_shape(%index : index, %shape : !util.list<index>)
void createQueryOutputShapeFunc(Location loc, StringRef namePrefix,
ArrayRef<DynamicDims> outputDynamicDims,
- mlir::FuncOp calculateShapeFuncOp,
+ mlir::func::FuncOp calculateShapeFuncOp,
OpBuilder &moduleBuilder) {
auto queryFuncOp = moduleBuilder.create<FuncOp>(
loc, namePrefix.str() + "_query_output_shape",
@@ -474,7 +474,7 @@
//
// NOTE: today we only support a single entry point; with minor tweaks we
// could fix this up to support multiple if we wanted.
- void createWrapperFunc(StringRef namePrefix, mlir::FuncOp entryFuncOp,
+ void createWrapperFunc(StringRef namePrefix, mlir::func::FuncOp entryFuncOp,
ArrayRef<DynamicDims> inputDynamicDims,
ArrayRef<DynamicDims> outputDynamicDims,
IREE::Util::GlobalOp dirtyGlobalOp,
@@ -482,14 +482,14 @@
// NOTE: this is where we could change our signature to provide additional
// values from the runtime bindings as may be required - like semaphores for
// async behavior or cancellation.
- auto entryFuncType = entryFuncOp.getType();
+ auto entryFuncType = entryFuncOp.getFunctionType();
auto bufferType = moduleBuilder.getType<IREE::HAL::BufferType>();
SmallVector<Type> inputTypes(entryFuncType.getNumInputs(), bufferType);
SmallVector<Type> outputTypes(entryFuncType.getNumResults(), bufferType);
auto wrapperFuncType =
moduleBuilder.getFunctionType(inputTypes, outputTypes);
- auto wrapperFuncOp = moduleBuilder.create<mlir::FuncOp>(
+ auto wrapperFuncOp = moduleBuilder.create<mlir::func::FuncOp>(
entryFuncOp.getLoc(), "_tflite_main", wrapperFuncType);
wrapperFuncOp.setPublic();
wrapperFuncOp.getOperation()->setAttr("iree.abi.stub",
@@ -563,7 +563,7 @@
callResults);
}
- void wrapEntryPoint(mlir::FuncOp funcOp) {
+ void wrapEntryPoint(mlir::func::FuncOp funcOp) {
auto loc = funcOp.getLoc();
auto namePrefix = ("_tflite_" + funcOp.getName()).str();
OpBuilder moduleBuilder(funcOp);
@@ -606,8 +606,8 @@
// Populates attributes on |wrapperFuncOp| to support runtime reflection like
// IO tensor names and quantization information.
- void populateReflectionAttrs(mlir::FuncOp entryFuncOp,
- mlir::FuncOp wrapperFuncOp) {
+ void populateReflectionAttrs(mlir::func::FuncOp entryFuncOp,
+ mlir::func::FuncOp wrapperFuncOp) {
SmallVector<NamedAttribute, 4> attrs;
attrs.push_back(buildIONamesAttr(entryFuncOp));
// TODO(#3972): tfl.io.quant: quantization information.
@@ -620,7 +620,7 @@
// tfl.io.names=arg0;arg1;ret0;ret1
//
// Default names will be used if no iree.identifiers are set on the function.
- NamedAttribute buildIONamesAttr(mlir::FuncOp entryFuncOp) {
+ NamedAttribute buildIONamesAttr(mlir::func::FuncOp entryFuncOp) {
SmallVector<std::string, 4> pieces;
for (int i = 0; i < entryFuncOp.getNumArguments(); ++i) {
auto identifierAttr =
diff --git a/iree/compiler/Codegen/Common/BufferizationAnalysis.cpp b/iree/compiler/Codegen/Common/BufferizationAnalysis.cpp
index 17f9d5d..4b2d5de 100644
--- a/iree/compiler/Codegen/Common/BufferizationAnalysis.cpp
+++ b/iree/compiler/Codegen/Common/BufferizationAnalysis.cpp
@@ -513,7 +513,7 @@
}
}
-LogicalResult createTensorEquivalenceClasses(FuncOp funcOp,
+LogicalResult createTensorEquivalenceClasses(func::FuncOp funcOp,
BufferizationPlan &plan) {
auto bufferMappingFn = [&](Operation *op) -> WalkResult {
return TypeSwitch<Operation *, LogicalResult>(op)
diff --git a/iree/compiler/Codegen/Common/BufferizationAnalysis.h b/iree/compiler/Codegen/Common/BufferizationAnalysis.h
index e324eeb..aa936bf 100644
--- a/iree/compiler/Codegen/Common/BufferizationAnalysis.h
+++ b/iree/compiler/Codegen/Common/BufferizationAnalysis.h
@@ -16,6 +16,7 @@
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/Support/Debug.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Value.h"
@@ -97,7 +98,7 @@
/// Analysis the `tensor` values in `funcOp` and groups them together into
/// equivalence classes such that each class contains tensors that can be mapped
/// to the same buffer.
-LogicalResult createTensorEquivalenceClasses(FuncOp funcOp,
+LogicalResult createTensorEquivalenceClasses(func::FuncOp funcOp,
BufferizationPlan &plan);
} // namespace iree_compiler
diff --git a/iree/compiler/Codegen/Common/DemoteF32ToF16.cpp b/iree/compiler/Codegen/Common/DemoteF32ToF16.cpp
index 7e09857..0b586e5 100644
--- a/iree/compiler/Codegen/Common/DemoteF32ToF16.cpp
+++ b/iree/compiler/Codegen/Common/DemoteF32ToF16.cpp
@@ -133,19 +133,19 @@
FloatTypeConverter converter;
RewritePatternSet patterns(&getContext());
patterns.insert<GenericTypeConvert>(context, converter);
- populateFunctionOpInterfaceTypeConversionPattern<FuncOp>(patterns,
- converter);
+ populateFunctionOpInterfaceTypeConversionPattern<func::FuncOp>(patterns,
+ converter);
ConversionTarget target(*context);
// Operations are legal if they don't contain any illegal type.
target.markUnknownOpDynamicallyLegal([](Operation *op) {
if (auto globalOp = dyn_cast<IREE::Util::GlobalOp>(op)) {
return !isIllegalType(globalOp.type());
}
- if (auto funcOp = dyn_cast<FuncOp>(op)) {
- for (Type type : funcOp.getType().getInputs()) {
+ if (auto funcOp = dyn_cast<func::FuncOp>(op)) {
+ for (Type type : funcOp.getFunctionType().getInputs()) {
if (isIllegalType(type)) return false;
}
- for (Type type : funcOp.getType().getResults()) {
+ for (Type type : funcOp.getFunctionType().getResults()) {
if (isIllegalType(type)) return false;
}
}
diff --git a/iree/compiler/Codegen/Common/DestructiveUpdateUtils.cpp b/iree/compiler/Codegen/Common/DestructiveUpdateUtils.cpp
index 592ed8d..5bc2203 100644
--- a/iree/compiler/Codegen/Common/DestructiveUpdateUtils.cpp
+++ b/iree/compiler/Codegen/Common/DestructiveUpdateUtils.cpp
@@ -294,11 +294,11 @@
// Return true if any control flow is found in the DispatchWorkgroupsOp besides
// scf::ForOp.
-static bool hasNonScfForControlFlow(FuncOp funcOp) {
+static bool hasNonScfForControlFlow(func::FuncOp funcOp) {
return funcOp
->walk([&](Operation *op) {
if (isa<BranchOpInterface>(op) || isa<RegionBranchOpInterface>(op)) {
- if (!isa<scf::ForOp, scf::IfOp>(op) &&
+ if (!isa<scf::ForOp, scf::IfOp>(op) && !isa<linalg::LinalgOp>(op) &&
!isa<IREE::Flow::DispatchWorkgroupsOp>(op))
return WalkResult::interrupt();
}
@@ -335,7 +335,7 @@
return success();
}
-LogicalResult rewriteLinalgDestructiveUpdates(FuncOp funcOp) {
+LogicalResult rewriteLinalgDestructiveUpdates(func::FuncOp funcOp) {
// Bail on any control-flow for now.
if (hasNonScfForControlFlow(funcOp)) return success();
diff --git a/iree/compiler/Codegen/Common/DestructiveUpdateUtils.h b/iree/compiler/Codegen/Common/DestructiveUpdateUtils.h
index 04197ec..a0bdc7a 100644
--- a/iree/compiler/Codegen/Common/DestructiveUpdateUtils.h
+++ b/iree/compiler/Codegen/Common/DestructiveUpdateUtils.h
@@ -7,6 +7,7 @@
#ifndef IREE_COMPILER_CODEGEN_COMMON_DESTRUCTIVEUPDATEUTILS_H_
#define IREE_COMPILER_CODEGEN_COMMON_DESTRUCTIVEUPDATEUTILS_H_
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
namespace mlir {
@@ -78,7 +79,7 @@
// tensor<...> -> !flow.dispatch.tensor<writeonly:...>
// ```
// is elided.
-LogicalResult rewriteLinalgDestructiveUpdates(FuncOp parentOp);
+LogicalResult rewriteLinalgDestructiveUpdates(func::FuncOp parentOp);
} // namespace iree_compiler
} // namespace mlir
diff --git a/iree/compiler/Codegen/Common/test/linalg_bufferize.mlir b/iree/compiler/Codegen/Common/test/linalg_bufferize.mlir
index 5e40783..d8f5ae0 100644
--- a/iree/compiler/Codegen/Common/test/linalg_bufferize.mlir
+++ b/iree/compiler/Codegen/Common/test/linalg_bufferize.mlir
@@ -2206,7 +2206,7 @@
// -----
-builtin.func @tensor_insert_slice() {
+func.func @tensor_insert_slice() {
%c0 = arith.constant 0 : index
%1 = hal.interface.constant.load[0] : index
%2 = hal.interface.constant.load[1] : index
@@ -2256,7 +2256,7 @@
// -----
-builtin.func @dynamic_update_slice() {
+func.func @dynamic_update_slice() {
%c0 = arith.constant 0 : index
%c3 = arith.constant 3 : index
%c0_i32 = arith.constant 0 : i32
diff --git a/iree/compiler/Codegen/Common/test/remove_trivial_loops.mlir b/iree/compiler/Codegen/Common/test/remove_trivial_loops.mlir
index c5b6022..0505aae 100644
--- a/iree/compiler/Codegen/Common/test/remove_trivial_loops.mlir
+++ b/iree/compiler/Codegen/Common/test/remove_trivial_loops.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -split-input-file -pass-pipeline='hal.executable(hal.executable.variant(builtin.module(builtin.func(iree-codegen-remove-single-iteration-loop))))' %s | FileCheck %s
+// RUN: iree-opt -split-input-file -pass-pipeline='hal.executable(hal.executable.variant(builtin.module(func.func(iree-codegen-remove-single-iteration-loop))))' %s | FileCheck %s
#executable_layout = #hal.executable.layout<push_constants = 1, sets = [
#hal.descriptor_set.layout<0, bindings = [
@@ -14,7 +14,7 @@
workgroup_size = [64: index, 1: index, 1:index]
}
builtin.module {
- builtin.func @dispatch_0() {
+ func.func @dispatch_0() {
%c2 = arith.constant 2 : index
%c256 = arith.constant 256 : index
// CHECK: %[[C250:.+]] = arith.constant 250 : index
@@ -61,7 +61,7 @@
translation_info = #translation
}
builtin.module {
- builtin.func @workgroup_tile_loop() {
+ func.func @workgroup_tile_loop() {
%c2048 = arith.constant 2048 : index
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
@@ -95,7 +95,7 @@
translation_info = #translation
}
builtin.module {
- builtin.func @workgroup_tile_loop_negative() {
+ func.func @workgroup_tile_loop_negative() {
%c2048 = arith.constant 2048 : index
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
@@ -132,7 +132,7 @@
workgroup_size = [8: index, 2: index, 1: index]
}
builtin.module {
- builtin.func @both_workgroup_and_workitem() {
+ func.func @both_workgroup_and_workitem() {
%c8 = arith.constant 8 : index
%c32 = arith.constant 32 : index
%c112 = arith.constant 112 : index
diff --git a/iree/compiler/Codegen/Common/test/tile_and_distribute_to_workgroups.mlir b/iree/compiler/Codegen/Common/test/tile_and_distribute_to_workgroups.mlir
index 8aef8cb..2a9a331 100644
--- a/iree/compiler/Codegen/Common/test/tile_and_distribute_to_workgroups.mlir
+++ b/iree/compiler/Codegen/Common/test/tile_and_distribute_to_workgroups.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -pass-pipeline='hal.executable(hal.executable.variant(builtin.module(builtin.func(iree-codegen-tile-and-distribute-to-workgroups)))), canonicalize, cse' -split-input-file %s | FileCheck %s
+// RUN: iree-opt -pass-pipeline='hal.executable(hal.executable.variant(builtin.module(func.func(iree-codegen-tile-and-distribute-to-workgroups)))), canonicalize, cse' -split-input-file %s | FileCheck %s
#config = #iree_codegen.lowering_config<tile_sizes = [[64, 64, 0], [16, 4, 64], [4, 4, 4]], native_vector_size = [4, 4, 4]>
#executable_layout = #hal.executable.layout<push_constants = 0, sets = [
diff --git a/iree/compiler/Codegen/Dialect/test/lowering_config_attr.mlir b/iree/compiler/Codegen/Dialect/test/lowering_config_attr.mlir
index 06613df..b8042ef 100644
--- a/iree/compiler/Codegen/Dialect/test/lowering_config_attr.mlir
+++ b/iree/compiler/Codegen/Dialect/test/lowering_config_attr.mlir
@@ -11,7 +11,7 @@
// -----
module {
- builtin.func @test() attributes {
+ func.func @test() attributes {
translation_info = #iree_codegen.translation_info<CPUDefault>} {
return
}
@@ -21,7 +21,7 @@
// -----
module {
- builtin.func @test() attributes {
+ func.func @test() attributes {
lowering_config = #iree_codegen.lowering_config<tile_sizes = []>} {
return
}
diff --git a/iree/compiler/Codegen/Interfaces/BufferizationInterfaces.cpp b/iree/compiler/Codegen/Interfaces/BufferizationInterfaces.cpp
index 9302dd1..1818ddb 100644
--- a/iree/compiler/Codegen/Interfaces/BufferizationInterfaces.cpp
+++ b/iree/compiler/Codegen/Interfaces/BufferizationInterfaces.cpp
@@ -279,10 +279,13 @@
vector::registerBufferizableOpInterfaceExternalModels(registry);
// Register IREE operations.
- registry.addOpInterface<IREE::Flow::DispatchTensorLoadOp,
- DispatchTensorLoadOpInterface>();
- registry.addOpInterface<IREE::Flow::DispatchTensorStoreOp,
- DispatchTensorStoreOpInterface>();
+ registry.addExtension(
+ +[](MLIRContext *ctx, IREE::Flow::FlowDialect *dialect) {
+ IREE::Flow::DispatchTensorLoadOp::attachInterface<
+ DispatchTensorLoadOpInterface>(*ctx);
+ IREE::Flow::DispatchTensorStoreOp::attachInterface<
+ DispatchTensorStoreOpInterface>(*ctx);
+ });
}
void addPostAnalysisTransformations(OneShotBufferizationOptions &options) {
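Note: this hunk and the next show the same migration, from `registry.addOpInterface<OpT, ModelT>()` to a dialect extension that attaches the interface once the owning dialect is loaded. A minimal sketch of the idiom, assuming the MLIR API at this revision (function and dialect choice are illustrative, not part of the change):
```cpp
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"

// Illustrative only: the extension callback runs lazily, when TensorDialect is
// actually loaded into a context, and that is where the
// OpT::attachInterface<ModelT>(*ctx) calls (like the ones in the hunk above)
// now live.
void registerExampleExternalModels(mlir::DialectRegistry &registry) {
  registry.addExtension(
      +[](mlir::MLIRContext *ctx, mlir::tensor::TensorDialect *dialect) {
        // Attach external models here, e.g.
        // tensor::InsertSliceOp::attachInterface<SomeModel>(*ctx);
        (void)ctx;
        (void)dialect;
      });
}
```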
diff --git a/iree/compiler/Codegen/Interfaces/ProcessorOpInterfaces.cpp b/iree/compiler/Codegen/Interfaces/ProcessorOpInterfaces.cpp
index 9fe9962..2fabe05 100644
--- a/iree/compiler/Codegen/Interfaces/ProcessorOpInterfaces.cpp
+++ b/iree/compiler/Codegen/Interfaces/ProcessorOpInterfaces.cpp
@@ -6,6 +6,7 @@
#include "iree/compiler/Codegen/Interfaces/ProcessorOpInterfaces.h"
+#include "iree/compiler/Dialect/HAL/IR/HALDialect.h"
#include "iree/compiler/Dialect/HAL/IR/HALOps.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
@@ -75,15 +76,19 @@
};
void registerProcessorOpInterfaceExternalModels(DialectRegistry &registry) {
- registry.addOpInterface<gpu::ThreadIdOp, ThreadIdOpInterface>();
- registry.addOpInterface<gpu::BlockDimOp, BlockDimOpInterface>();
+ registry.addExtension(+[](MLIRContext *ctx, gpu::GPUDialect *dialect) {
+ gpu::ThreadIdOp::attachInterface<ThreadIdOpInterface>(*ctx);
+ gpu::BlockDimOp::attachInterface<BlockDimOpInterface>(*ctx);
+ });
- registry.addOpInterface<IREE::HAL::InterfaceWorkgroupIDOp,
- WorkgroupIdOpInterface>();
- registry.addOpInterface<IREE::HAL::InterfaceWorkgroupCountOp,
- WorkgroupCountOpInterface>();
- registry.addOpInterface<IREE::HAL::InterfaceWorkgroupSizeOp,
- WorkgroupTileSizeOpInterface>();
+ registry.addExtension(+[](MLIRContext *ctx, IREE::HAL::HALDialect *dialect) {
+ IREE::HAL::InterfaceWorkgroupIDOp::attachInterface<WorkgroupIdOpInterface>(
+ *ctx);
+ IREE::HAL::InterfaceWorkgroupCountOp::attachInterface<
+ WorkgroupCountOpInterface>(*ctx);
+ IREE::HAL::InterfaceWorkgroupSizeOp::attachInterface<
+ WorkgroupTileSizeOpInterface>(*ctx);
+ });
}
} // namespace iree_compiler
diff --git a/iree/compiler/Codegen/LLVMCPU/ConvertToLLVM.cpp b/iree/compiler/Codegen/LLVMCPU/ConvertToLLVM.cpp
index 12963e1..b2f5db9 100644
--- a/iree/compiler/Codegen/LLVMCPU/ConvertToLLVM.cpp
+++ b/iree/compiler/Codegen/LLVMCPU/ConvertToLLVM.cpp
@@ -627,7 +627,7 @@
public:
explicit ConvertHALEntryPointFuncOp(MLIRContext *context,
LLVMTypeConverter &converter)
- : ConvertToLLVMPattern(mlir::FuncOp::getOperationName(), context,
+ : ConvertToLLVMPattern(mlir::func::FuncOp::getOperationName(), context,
converter, 100) {}
LogicalResult matchAndRewrite(
@@ -635,7 +635,7 @@
ConversionPatternRewriter &rewriter) const override {
auto stdFuncOp = cast<FuncOp>(op);
if (!stdFuncOp.isPublic()) return failure();
- FunctionType fnType = stdFuncOp.getType();
+ FunctionType fnType = stdFuncOp.getFunctionType();
if (fnType.getNumInputs() != 0 || fnType.getNumResults() != 0) {
op->emitWarning() << "public functions on executables must be () -> ()";
return failure();
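For reference, the accessor rename in this hunk (and in several later ones) isolated into a self-contained sketch, assuming MLIR at this revision; `isNullaryVoid` is a hypothetical helper, not part of the change:
```cpp
#include "mlir/Dialect/Func/IR/FuncOps.h"

// The signature is now stored in the `function_type` attribute and read via
// getFunctionType(); the old getType() accessor is what this change replaces.
static bool isNullaryVoid(mlir::func::FuncOp funcOp) {
  mlir::FunctionType fnType = funcOp.getFunctionType();
  return fnType.getNumInputs() == 0 && fnType.getNumResults() == 0;
}
```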
diff --git a/iree/compiler/Codegen/LLVMCPU/test/materialize_launch_configuration.mlir b/iree/compiler/Codegen/LLVMCPU/test/materialize_launch_configuration.mlir
index 4f219c4..56bc1b3 100644
--- a/iree/compiler/Codegen/LLVMCPU/test/materialize_launch_configuration.mlir
+++ b/iree/compiler/Codegen/LLVMCPU/test/materialize_launch_configuration.mlir
@@ -227,7 +227,7 @@
hal.executable.variant @system_elf_x86_64, target = <"llvm", "system-elf-x86_64"> {
hal.executable.entry_point @preset_config layout(#executable_layout)
builtin.module {
- builtin.func @preset_config() {
+ func.func @preset_config() {
%cst = arith.constant 0.000000e+00 : f32
%lhs_binding = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer)
: !flow.dispatch.tensor<readonly:128x256xf32>
@@ -272,7 +272,7 @@
hal.executable.variant @system_elf_x86_64, target = <"llvm", "system-elf-x86_64"> {
hal.executable.entry_point @tensor_insert_slice layout(#executable_layout)
builtin.module {
- builtin.func @tensor_insert_slice() {
+ func.func @tensor_insert_slice() {
%d0 = hal.interface.constant.load[0] : index
%d1 = hal.interface.constant.load[1] : index
%d2 = hal.interface.constant.load[2] : index
@@ -315,7 +315,7 @@
hal.executable.variant @system_elf_x86_64, target = <"llvm", "system-elf-x86_64"> {
hal.executable.entry_point @extract_slice layout(#executable_layout)
builtin.module {
- builtin.func @extract_slice() {
+ func.func @extract_slice() {
%d0 = hal.interface.constant.load[0] : index
%d1 = hal.interface.constant.load[1] : index
%d2 = hal.interface.constant.load[2] : index
@@ -359,7 +359,7 @@
hal.executable.variant @system_elf_x86_64, target = <"llvm", "system-elf-x86_64"> {
hal.executable.entry_point @static_1d_fft_stage2 layout(#executable_layout)
builtin.module {
- builtin.func @static_1d_fft_stage2() {
+ func.func @static_1d_fft_stage2() {
%c0 = arith.constant 0 : index
%c2 = arith.constant 2 : index
%cst = arith.constant dense<[1.000000e+00, 6.12323426E-17]> : tensor<2xf32>
@@ -397,7 +397,7 @@
hal.executable.variant @system_elf_x86_64, target = <"llvm", "system-elf-x86_64"> {
hal.executable.entry_point @static_3d_fft_stage3 layout(#executable_layout)
builtin.module {
- builtin.func @static_3d_fft_stage3() {
+ func.func @static_3d_fft_stage3() {
%c3 = arith.constant 3 : index
%cst = arith.constant dense<[1.000000e+00, 0.707106769, 6.12323426E-17, -0.707106769]> : tensor<4xf32>
%cst_0 = arith.constant dense<[-0.000000e+00, -0.707106769, -1.000000e+00, -0.707106769]> : tensor<4xf32>
@@ -435,7 +435,7 @@
hal.executable.variant @system_elf_x86_64, target = <"llvm", "system-elf-x86_64"> {
hal.executable.entry_point @outs_fusion_fn layout(#executable_layout)
builtin.module {
- builtin.func @outs_fusion_fn() {
+ func.func @outs_fusion_fn() {
%cst = arith.constant 0.0 : f32
%d0 = hal.interface.constant.load[0] : index
%d1 = hal.interface.constant.load[1] : index
diff --git a/iree/compiler/Codegen/LLVMGPU/ConvertToLLVM.cpp b/iree/compiler/Codegen/LLVMGPU/ConvertToLLVM.cpp
index 4cce8cf..2ebdf28 100644
--- a/iree/compiler/Codegen/LLVMGPU/ConvertToLLVM.cpp
+++ b/iree/compiler/Codegen/LLVMGPU/ConvertToLLVM.cpp
@@ -210,13 +210,13 @@
class ConvertFunc : public ConvertToLLVMPattern {
public:
explicit ConvertFunc(MLIRContext *context, LLVMTypeConverter &converter)
- : ConvertToLLVMPattern(mlir::FuncOp::getOperationName(), context,
+ : ConvertToLLVMPattern(mlir::func::FuncOp::getOperationName(), context,
converter, 100) {}
LogicalResult matchAndRewrite(
Operation *op, ArrayRef<Value> operands,
ConversionPatternRewriter &rewriter) const override {
- auto funcOp = cast<FuncOp>(op);
- FunctionType fnType = funcOp.getType();
+ auto funcOp = cast<func::FuncOp>(op);
+ FunctionType fnType = funcOp.getFunctionType();
(void)fnType;
if (!funcOp.isPublic()) return failure();
diff --git a/iree/compiler/Codegen/LLVMGPU/LLVMGPUDistributeSharedMemoryCopy.cpp b/iree/compiler/Codegen/LLVMGPU/LLVMGPUDistributeSharedMemoryCopy.cpp
index 88b8457..69795a1 100644
--- a/iree/compiler/Codegen/LLVMGPU/LLVMGPUDistributeSharedMemoryCopy.cpp
+++ b/iree/compiler/Codegen/LLVMGPU/LLVMGPUDistributeSharedMemoryCopy.cpp
@@ -128,7 +128,8 @@
}
/// Return a flattened Id Value by combining the 3D gpu thread IDs.
-static Value createFlatId(FuncOp funcOp, ArrayRef<int64_t> workgroupSize) {
+static Value createFlatId(func::FuncOp funcOp,
+ ArrayRef<int64_t> workgroupSize) {
OpBuilder b(funcOp.getBody());
Type indexType = b.getIndexType();
AffineExpr d0 = getAffineDimExpr(0, b.getContext());
@@ -148,7 +149,7 @@
}
/// Distribute a transfer read operations on the given thread ids.
-static void distributeTransferRead(FuncOp funcOp, Value flatThreadId,
+static void distributeTransferRead(func::FuncOp funcOp, Value flatThreadId,
int64_t flatWorkgroupSize) {
funcOp.walk([&](vector::TransferReadOp readOp) {
OpBuilder b(readOp);
diff --git a/iree/compiler/Codegen/LLVMGPU/LLVMGPUUtils.cpp b/iree/compiler/Codegen/LLVMGPU/LLVMGPUUtils.cpp
index 0963c3b..c7ab1aa 100644
--- a/iree/compiler/Codegen/LLVMGPU/LLVMGPUUtils.cpp
+++ b/iree/compiler/Codegen/LLVMGPU/LLVMGPUUtils.cpp
@@ -52,7 +52,7 @@
return procInfo;
}
-std::array<int64_t, 3> getWorkgroupSize(mlir::FuncOp funcOp) {
+std::array<int64_t, 3> getWorkgroupSize(mlir::func::FuncOp funcOp) {
std::array<int64_t, 3> workgroupSize;
auto entryPointOp = mlir::iree_compiler::getEntryPoint(funcOp);
llvm::Optional<mlir::ArrayAttr> workgroupSizeAttr =
diff --git a/iree/compiler/Codegen/LLVMGPU/LLVMGPUUtils.h b/iree/compiler/Codegen/LLVMGPU/LLVMGPUUtils.h
index 35843f6..aaca6bd 100644
--- a/iree/compiler/Codegen/LLVMGPU/LLVMGPUUtils.h
+++ b/iree/compiler/Codegen/LLVMGPU/LLVMGPUUtils.h
@@ -30,7 +30,7 @@
llvm::ArrayRef<int64_t> numSubgroups);
/// return the workgroup size associated to the funcOp entry point.
-std::array<int64_t, 3> getWorkgroupSize(mlir::FuncOp funcOp);
+std::array<int64_t, 3> getWorkgroupSize(mlir::func::FuncOp funcOp);
} // namespace iree_compiler
} // namespace mlir
diff --git a/iree/compiler/Codegen/LLVMGPU/test/distribute_to_thread.mlir b/iree/compiler/Codegen/LLVMGPU/test/distribute_to_thread.mlir
index 7004ce8..6120f0a 100644
--- a/iree/compiler/Codegen/LLVMGPU/test/distribute_to_thread.mlir
+++ b/iree/compiler/Codegen/LLVMGPU/test/distribute_to_thread.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -split-input-file -pass-pipeline='hal.executable(hal.executable.variant(builtin.module(builtin.func(iree-llvmgpu-tile-and-distribute))))' %s | FileCheck %s
+// RUN: iree-opt -split-input-file -pass-pipeline='hal.executable(hal.executable.variant(builtin.module(func.func(iree-llvmgpu-tile-and-distribute))))' %s | FileCheck %s
#config = #iree_codegen.lowering_config<tile_sizes = [[2, 256, 4]]>
#translation = #iree_codegen.translation_info<LLVMGPUMatmulSimt, workload_per_wg = [256, 2]>
@@ -22,7 +22,7 @@
workgroup_size = [64 : index, 1 : index, 1 : index]
}
builtin.module {
- builtin.func @dot_dispatch_0() {
+ func.func @dot_dispatch_0() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%c1024 = arith.constant 1024 : index
@@ -181,7 +181,7 @@
workgroup_size = [64 : index, 8 : index, 1 : index]
}
builtin.module {
- builtin.func @dot_dispatch_0() {
+ func.func @dot_dispatch_0() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%c1024 = arith.constant 1024 : index
@@ -263,7 +263,7 @@
workgroup_size = [1: index, 1: index, 1: index]
}
builtin.module {
- builtin.func @predict_dispatch_153() {
+ func.func @predict_dispatch_153() {
%c0 = arith.constant 0 : index
%cst = arith.constant 0x7FC00000 : f32
%cst_0 = arith.constant 0xFF800000 : f32
diff --git a/iree/compiler/Codegen/LLVMGPU/test/distribute_wg_copy.mlir b/iree/compiler/Codegen/LLVMGPU/test/distribute_wg_copy.mlir
index 5511cce..dde4b51 100644
--- a/iree/compiler/Codegen/LLVMGPU/test/distribute_wg_copy.mlir
+++ b/iree/compiler/Codegen/LLVMGPU/test/distribute_wg_copy.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -pass-pipeline='hal.executable(hal.executable.variant(builtin.module(builtin.func(iree-llvmgpu-distribute-shared-memory-copy))))' -cse %s | FileCheck %s
+// RUN: iree-opt -pass-pipeline='hal.executable(hal.executable.variant(builtin.module(func.func(iree-llvmgpu-distribute-shared-memory-copy))))' -cse %s | FileCheck %s
// CHECK-DAG: #[[$MAP0:.*]] = affine_map<()[s0, s1, s2] -> (s1 * 8 + s2 * 32 + s0 floordiv 4)>
// CHECK-DAG: #[[$MAP1:.*]] = affine_map<()[s0] -> (s0 * 4 - (s0 floordiv 4) * 16)>
@@ -25,7 +25,7 @@
memref.global "private" @__shared_memory___0 : memref<256x4xf32, 3>
memref.global "private" @__shared_memory__ : memref<64x16xf32, 3>
// CHECK-LABEL: @shared_mem_cpy(
- builtin.func @shared_mem_cpy(
+ func.func @shared_mem_cpy(
%m0 : memref<64x16xf32>, %m1 : memref<256x4xf32>, %m2 : memref<3x512xf32>) {
%c0 = arith.constant 0 : index
diff --git a/iree/compiler/Codegen/LLVMGPU/test/gpu_set_num_workgroups.mlir b/iree/compiler/Codegen/LLVMGPU/test/gpu_set_num_workgroups.mlir
index 25370ff..68af7ed 100644
--- a/iree/compiler/Codegen/LLVMGPU/test/gpu_set_num_workgroups.mlir
+++ b/iree/compiler/Codegen/LLVMGPU/test/gpu_set_num_workgroups.mlir
@@ -134,7 +134,7 @@
hal.executable.variant @cuda, target = <"cuda", "cuda-nvptx-fb"> {
hal.executable.entry_point @tensor_insert_slice layout(#executable_layout)
builtin.module {
- builtin.func @tensor_insert_slice() {
+ func.func @tensor_insert_slice() {
%c0 = arith.constant 0 : index
%size_y = hal.interface.constant.load[0] : index
%size_x = hal.interface.constant.load[1] : index
@@ -178,7 +178,7 @@
hal.executable.variant @cuda, target = <"cuda", "cuda-nvptx-fb"> {
hal.executable.entry_point @copy_as_generic layout(#executable_layout)
builtin.module {
- builtin.func @copy_as_generic() {
+ func.func @copy_as_generic() {
%c0 = arith.constant 0 : index
%d0 = hal.interface.constant.load[0] : index
%d1 = hal.interface.constant.load[1] : index
@@ -214,7 +214,7 @@
hal.executable.variant @cuda, target = <"cuda", "cuda-nvptx-fb"> {
hal.executable.entry_point @static_1d_fft_stage2 layout(#executable_layout)
builtin.module {
- builtin.func @static_1d_fft_stage2() {
+ func.func @static_1d_fft_stage2() {
%c0 = arith.constant 0 : index
%c2 = arith.constant 2 : index
%cst = arith.constant dense<[1.000000e+00, 6.12323426E-17]> : tensor<2xf32>
@@ -251,7 +251,7 @@
hal.executable.variant @cuda, target = <"cuda", "cuda-nvptx-fb"> {
hal.executable.entry_point @static_3d_fft_stage3 layout(#executable_layout)
builtin.module {
- builtin.func @static_3d_fft_stage3() {
+ func.func @static_3d_fft_stage3() {
%c0 = arith.constant 0 : index
%c3 = arith.constant 3 : index
%c64 = arith.constant 64 : index
diff --git a/iree/compiler/Codegen/LLVMGPU/test/nvvm_pipeline_test.mlir b/iree/compiler/Codegen/LLVMGPU/test/nvvm_pipeline_test.mlir
index 0e2cf86..06444d8 100644
--- a/iree/compiler/Codegen/LLVMGPU/test/nvvm_pipeline_test.mlir
+++ b/iree/compiler/Codegen/LLVMGPU/test/nvvm_pipeline_test.mlir
@@ -294,7 +294,7 @@
hal.executable.variant @cuda, target = <"cuda", "cuda-nvptx-fb"> {
hal.executable.entry_point @vector_add_dispatch layout(#executable_layout)
builtin.module {
- builtin.func @vector_add_dispatch() {
+ func.func @vector_add_dispatch() {
%c0 = arith.constant 0 : index
%c16384 = arith.constant 16384 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : !flow.dispatch.tensor<readonly:16384xf32>
@@ -343,7 +343,7 @@
hal.executable.variant @cuda, target = <"cuda", "cuda-nvptx-fb"> {
hal.executable.entry_point @vector_reduction_dispatch layout(#executable_layout)
builtin.module {
- builtin.func @vector_reduction_dispatch() {
+ func.func @vector_reduction_dispatch() {
%c0 = arith.constant 0 : index
%c16384 = arith.constant 16384 : index
%cst = arith.constant 1.000000e+00 : f32
diff --git a/iree/compiler/Codegen/PassDetail.h b/iree/compiler/Codegen/PassDetail.h
index 4ac9280..501417a 100644
--- a/iree/compiler/Codegen/PassDetail.h
+++ b/iree/compiler/Codegen/PassDetail.h
@@ -8,6 +8,7 @@
#define IREE_COMPILER_CONVERSIONS_PASS_DETAIL_H_
#include "iree/compiler/Dialect/HAL/IR/HALOps.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/Pass.h"
namespace mlir {
diff --git a/iree/compiler/Codegen/Passes.td b/iree/compiler/Codegen/Passes.td
index f5d20a4..9637a2f 100644
--- a/iree/compiler/Codegen/Passes.td
+++ b/iree/compiler/Codegen/Passes.td
@@ -14,14 +14,14 @@
//------------------------------------------------------------------------------
def CleanupBufferAllocView :
- Pass<"iree-codegen-cleanup-buffer-alloc-view", "FuncOp"> {
+ Pass<"iree-codegen-cleanup-buffer-alloc-view", "func::FuncOp"> {
let summary =
"Performs cleanups over HAL interface/buffer allocation/view operations";
let constructor = "mlir::iree_compiler::createCleanupBufferAllocViewPass()";
}
def ConvertToDestinationPassingStyle :
- Pass<"iree-codegen-convert-to-destination-passing-style", "FuncOp"> {
+ Pass<"iree-codegen-convert-to-destination-passing-style", "func::FuncOp"> {
let summary =
"Transforms the code to make the dispatch use destination-passing style";
let constructor = "mlir::iree_compiler::createConvertToDestinationPassingStylePass()";
@@ -41,7 +41,7 @@
}
def FoldAffineMinInDistributedLoops :
- Pass<"iree-codegen-fold-affinemin-in-distributed-loops", "FuncOp"> {
+ Pass<"iree-codegen-fold-affinemin-in-distributed-loops", "func::FuncOp"> {
let summary = "Fold `affine.min` ops in distributed loops";
let constructor = "mlir::iree_compiler::createFoldAffineMinInDistributedLoopsPass()";
}
@@ -53,14 +53,14 @@
}
def ForOpCanonicalization :
- Pass<"iree-codegen-canonicalize-scf-for", "FuncOp"> {
+ Pass<"iree-codegen-canonicalize-scf-for", "func::FuncOp"> {
let summary =
"Adhoc canonicalization of selected loop-carried values/dependencies for scf.for ops";
let constructor = "mlir::iree_compiler::createForOpCanonicalizationPass()";
}
def LinalgBufferize :
- Pass<"iree-codegen-linalg-bufferize", "FuncOp"> {
+ Pass<"iree-codegen-linalg-bufferize", "func::FuncOp"> {
let summary = "Convert from to Linalg ops on tensors to buffers";
let constructor = "mlir::iree_compiler::createLinalgBufferizePass(nullptr)";
}
@@ -80,26 +80,26 @@
}
def OptimizeVectorTransfer :
- Pass<"iree-codegen-optimize-vector-transfer", "FuncOp"> {
+ Pass<"iree-codegen-optimize-vector-transfer", "func::FuncOp"> {
let summary =
"Run optimization transformations on vector transfer operations";
let constructor = "mlir::iree_compiler::createOptimizeVectorTransferPass()";
}
def TileAndDistributeToWorkgroups :
- Pass<"iree-codegen-tile-and-distribute-to-workgroups", "FuncOp"> {
+ Pass<"iree-codegen-tile-and-distribute-to-workgroups", "func::FuncOp"> {
let summary = "Tile and distribute operations to workgroups";
let constructor = "mlir::iree_compiler::createTileAndDistributeToWorkgroupsPass()";
}
def TypePropagation :
- Pass<"iree-codegen-type-propagation", "FuncOp"> {
+ Pass<"iree-codegen-type-propagation", "func::FuncOp"> {
let summary = "Propogate the type of tensor to avoid load/stores of illegal bit widths";
let constructor = "mlir::iree_compiler::createTypePropagationPass()";
}
def RemoveSingleIterationLoop :
- Pass<"iree-codegen-remove-single-iteration-loop", "FuncOp"> {
+ Pass<"iree-codegen-remove-single-iteration-loop", "func::FuncOp"> {
let summary = "Remove distributed loop with single iteration.";
let constructor = "mlir::iree_compiler::createRemoveSingleIterationLoopPass()";
}
@@ -116,21 +116,21 @@
// TODO: Rename argument to be fully qualified.
def LinalgToVectorVectorizeConv :
- Pass<"iree-codegen-vectorize-linalg-conv", "FuncOp"> {
+ Pass<"iree-codegen-vectorize-linalg-conv", "func::FuncOp"> {
let summary = "Vectorize a very specific form of linalg.conv";
let constructor =
"mlir::iree_compiler::createLinalgToVectorVectorizeConvPass()";
}
def LinalgToVectorVectorizeMMT4d :
- Pass<"iree-codegen-vectorize-linalg-mmt4d", "FuncOp"> {
+ Pass<"iree-codegen-vectorize-linalg-mmt4d", "func::FuncOp"> {
let summary = "Vectorize a very specific form of linalg.mmt4d";
let constructor =
"mlir::iree_compiler::createLinalgToVectorVectorizeMMT4dPass()";
}
def TensorToVectorVectorizePad :
- Pass<"iree-codegen-vectorize-tensor-pad", "FuncOp"> {
+ Pass<"iree-codegen-vectorize-tensor-pad", "func::FuncOp"> {
let summary = "Vectorize a very specific form of tensor.pad with "
"control flows";
let constructor =
@@ -145,7 +145,7 @@
}
def MemrefCopyToLinalgPass :
- Pass<"iree-codegen-memrefcopy-to-linalg", "FuncOp"> {
+ Pass<"iree-codegen-memrefcopy-to-linalg", "func::FuncOp"> {
let summary = "Convert memref.copy to linalg op";
let constructor =
"mlir::iree_compiler::createMemrefCopyToLinalgPass()";
@@ -183,20 +183,20 @@
}
def LLVMCPUTileFuseAndVectorize
- : Pass<"iree-llvmcpu-tile-fuse-and-vectorize", "FuncOp"> {
+ : Pass<"iree-llvmcpu-tile-fuse-and-vectorize", "func::FuncOp"> {
let summary = "Tile, fuse and vectorize Linalg ops";
let constructor =
"mlir::iree_compiler::createLLVMCPUTileFuseAndVectorizePass()";
}
def LLVMCPUUnfuseFMAOps :
- Pass<"iree-llvmcpu-unfuse-fma-pass", "FuncOp"> {
+ Pass<"iree-llvmcpu-unfuse-fma-pass", "func::FuncOp"> {
let summary = "Convert llvm.fma into unfused mulf and addf ops";
let constructor = "mlir::iree_compiler::createLLVMCPUUnfuseFMAOpsPass()";
}
def VectorContractCustomKernels :
- Pass<"iree-llvmcpu-vector-contract-custom-kernels", "FuncOp"> {
+ Pass<"iree-llvmcpu-vector-contract-custom-kernels", "func::FuncOp"> {
let summary = "Enable custom kernels (inline assembly or intrinsics) for some vector.contract ops";
let constructor = "mlir::iree_compiler::createVectorContractCustomKernelsPass()";
let options = [
@@ -240,49 +240,49 @@
}
def LLVMGPUTileAndDistribute :
- Pass<"iree-llvmgpu-tile-and-distribute", "FuncOp"> {
+ Pass<"iree-llvmgpu-tile-and-distribute", "func::FuncOp"> {
let summary = "Pass to tile and distribute linalg ops within a workgroup.";
let constructor = "mlir::iree_compiler::createLLVMGPUTileAndDistribute()";
}
def LLVMGPUVectorization :
- Pass<"iree-llvmgpu-vectorization", "FuncOp"> {
+ Pass<"iree-llvmgpu-vectorization", "func::FuncOp"> {
let summary = "Pass to convert linalg into Vector.";
let constructor = "mlir::iree_compiler::createLLVMGPUVectorizationPass()";
}
def LLVMGPUTensorCoreVectorization :
- Pass<"iree-llvmgpu-tensorcore-vectorization", "FuncOp"> {
+ Pass<"iree-llvmgpu-tensorcore-vectorization", "func::FuncOp"> {
let summary = "Pass to convert linalg into Vector and transform it to a form that can be lowered to GPU MMA ops";
let constructor = "mlir::iree_compiler::createLLVMGPUTensorCoreVectorizationPass()";
}
def LLVMGPUVectorLowering :
- Pass<"iree-llvmgpu-vector-lowering", "FuncOp"> {
+ Pass<"iree-llvmgpu-vector-lowering", "func::FuncOp"> {
let summary = "Pass to lower Vector ops before conversion to LLVM.";
let constructor = "mlir::iree_compiler::createLLVMGPUVectorLoweringPass()";
}
def LLVMGPUDistributeSharedMemoryCopy :
- Pass<"iree-llvmgpu-distribute-shared-memory-copy", "FuncOp"> {
+ Pass<"iree-llvmgpu-distribute-shared-memory-copy", "func::FuncOp"> {
let summary = "Pass to distribute shared memory copies to threads.";
let constructor = "mlir::iree_compiler::createLLVMGPUDistributeSharedMemoryCopy()";
}
def LLVMGPUPipelining :
- Pass<"iree-llvmgpu-pipelining", "FuncOp"> {
+ Pass<"iree-llvmgpu-pipelining", "func::FuncOp"> {
let summary = "Pass to do software pipelining.";
let constructor = "mlir::iree_compiler::createLLVMGPUPipeliningPass()";
}
def LLVMGPUMultiBuffering :
- Pass<"iree-llvmgpu-multi-buffering", "FuncOp"> {
+ Pass<"iree-llvmgpu-multi-buffering", "func::FuncOp"> {
let summary = "Pass to do multi buffering.";
let constructor = "mlir::iree_compiler::createLLVMGPUMultiBuffering()";
}
def LLVMGPUVectorToGPU :
- Pass<"iree-llvmgpu-vector-to-gpu", "FuncOp"> {
+ Pass<"iree-llvmgpu-vector-to-gpu", "func::FuncOp"> {
let summary = "Pass to convert vector to gpu.";
let constructor = "mlir::iree_compiler::createLLVMGPUVectorToGPU()";
}
@@ -311,24 +311,24 @@
let constructor = "mlir::iree_compiler::createSPIRVInitConfigPass()";
}
-def SPIRVTile : Pass<"iree-spirv-tile", "FuncOp"> {
+def SPIRVTile : Pass<"iree-spirv-tile", "func::FuncOp"> {
let summary = "Tile Linalg ops with tensor semantics to invocations";
let constructor = "mlir::iree_compiler::createSPIRVTilePass()";
}
-def SPIRVDistribute : Pass<"iree-spirv-distribute", "FuncOp"> {
+def SPIRVDistribute : Pass<"iree-spirv-distribute", "func::FuncOp"> {
let summary = "Distribute tiled loop nests to invocations";
let constructor = "mlir::iree_compiler::createSPIRVDistributePass()";
}
-def SPIRVTileAndDistribute : Pass<"iree-spirv-tile-and-distribute", "FuncOp"> {
+def SPIRVTileAndDistribute : Pass<"iree-spirv-tile-and-distribute", "func::FuncOp"> {
let summary = "Tile and distribute Linalg ops with buffer semantics to "
"invocations";
let constructor = "mlir::iree_compiler::createSPIRVTileAndDistributePass()";
}
def SPIRVTileAndVectorizeToCooperativeOps : Pass<
- "iree-spirv-tile-and-vectorize-to-cooperative-ops", "FuncOp"> {
+ "iree-spirv-tile-and-vectorize-to-cooperative-ops", "func::FuncOp"> {
let summary = "Tile Linalg ops with buffer semantics to subgroups and "
"vectorize to vector ops suitable for lowering to SPIR-V "
"cooperative ops";
@@ -336,13 +336,13 @@
"mlir::iree_compiler::createSPIRVTileAndVectorizeToCooperativeOpsPass()";
}
-def SPIRVVectorize : Pass<"iree-spirv-vectorize", "FuncOp"> {
+def SPIRVVectorize : Pass<"iree-spirv-vectorize", "func::FuncOp"> {
let summary = "Vectorize Linalg ops with buffer semantics";
let constructor = "mlir::iree_compiler::createSPIRVVectorizePass()";
}
def SPIRVVectorToCooperativeOps :
- Pass<"iree-spirv-vector-to-cooperative-ops", "FuncOp"> {
+ Pass<"iree-spirv-vector-to-cooperative-ops", "func::FuncOp"> {
let summary = "Convert vector ops to SPIR-V cooperative ops";
let constructor =
"mlir::iree_compiler::createSPIRVVectorToCooperativeOpsPass()";
@@ -355,13 +355,13 @@
}
def SPIRVFuseTensorPadWithConsumer :
- Pass<"iree-spirv-fuse-tensor-pad-with-consumer", "FuncOp"> {
+ Pass<"iree-spirv-fuse-tensor-pad-with-consumer", "func::FuncOp"> {
let summary = "Fuse tensor.pad op into its consumer op's tiled loop nest";
let constructor = "mlir::iree_compiler::createSPIRVFuseTensorPadWithConsumerPass()";
}
def SPIRVCreateFastSlowPath :
- Pass<"iree-spirv-create-fast-slow-path", "FuncOp"> {
+ Pass<"iree-spirv-create-fast-slow-path", "func::FuncOp"> {
let summary = "Create separate fast and slow paths to handle padding";
let constructor = "mlir::iree_compiler::createSPIRVCreateFastSlowPathPass()";
}
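On the C++ side, a pass anchored on "func::FuncOp" in these TableGen definitions corresponds to an OperationPass over func::FuncOp. A minimal sketch, assuming the MLIR pass API at this revision (a hypothetical pass, not one of the passes defined above):
```cpp
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/Pass.h"

// Hypothetical function-scoped pass; only the anchor op type matters here.
struct ExampleFuncOpPass
    : public mlir::PassWrapper<ExampleFuncOpPass,
                               mlir::OperationPass<mlir::func::FuncOp>> {
  llvm::StringRef getArgument() const override { return "example-func-pass"; }
  void runOnOperation() override {
    mlir::func::FuncOp funcOp = getOperation();
    (void)funcOp;
  }
};
```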
diff --git a/iree/compiler/Codegen/SPIRV/SPIRVVectorizeLoadStore.cpp b/iree/compiler/Codegen/SPIRV/SPIRVVectorizeLoadStore.cpp
index d0fb79f..835760a 100644
--- a/iree/compiler/Codegen/SPIRV/SPIRVVectorizeLoadStore.cpp
+++ b/iree/compiler/Codegen/SPIRV/SPIRVVectorizeLoadStore.cpp
@@ -489,7 +489,7 @@
FuncOp funcOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const {
TypeConverter::SignatureConversion signatureConverter(
- funcOp.getType().getNumInputs());
+ funcOp.getFunctionType().getNumInputs());
for (const auto &arg : llvm::enumerate(funcOp.getArguments())) {
if (memrefUsageAnalysis.shouldVectorizeMemRef(arg.value())) {
if (auto memrefType = getVectorizedMemRefType(rewriter, arg.value())) {
diff --git a/iree/compiler/Codegen/SPIRV/test/config_default_linalg_ext_ops.mlir b/iree/compiler/Codegen/SPIRV/test/config_default_linalg_ext_ops.mlir
index 005d010..7a491c2 100644
--- a/iree/compiler/Codegen/SPIRV/test/config_default_linalg_ext_ops.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/config_default_linalg_ext_ops.mlir
@@ -15,7 +15,7 @@
}> {
hal.executable.entry_point @static_1d_sort layout(#executable_layout)
builtin.module {
- builtin.func @static_1d_sort() {
+ func.func @static_1d_sort() {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : !flow.dispatch.tensor<readwrite:1000xi32>
%1 = flow.dispatch.tensor.load %0, offsets = [0], sizes = [1000], strides = [1] : !flow.dispatch.tensor<readwrite:1000xi32> -> tensor<1000xi32>
@@ -60,7 +60,7 @@
}> {
hal.executable.entry_point @static_3d_sort layout(#executable_layout)
builtin.module {
- builtin.func @static_3d_sort() {
+ func.func @static_3d_sort() {
%c64 = arith.constant 64 : index
%c128 = arith.constant 128 : index
%c0 = arith.constant 0 : index
@@ -109,7 +109,7 @@
}> {
hal.executable.entry_point @static_1d_fft_stage2 layout(#executable_layout)
builtin.module {
- builtin.func @static_1d_fft_stage2() {
+ func.func @static_1d_fft_stage2() {
%c0 = arith.constant 0 : index
%c2 = arith.constant 2 : index
%cst = arith.constant dense<[1.000000e+00, 6.12323426E-17]> : tensor<2xf32>
@@ -154,7 +154,7 @@
}> {
hal.executable.entry_point @static_3d_fft_stage3 layout(#executable_layout)
builtin.module {
- builtin.func @static_3d_fft_stage3() {
+ func.func @static_3d_fft_stage3() {
%c0 = arith.constant 0 : index
%c3 = arith.constant 3 : index
%c64 = arith.constant 64 : index
@@ -203,7 +203,7 @@
}> {
hal.executable.entry_point @tensor_insert layout(#executable_layout)
builtin.module {
- builtin.func @tensor_insert() {
+ func.func @tensor_insert() {
%offset_y = hal.interface.constant.load[0] : index
%offset_x = hal.interface.constant.load[1] : index
%source_size_y = hal.interface.constant.load[2] : index
@@ -251,7 +251,7 @@
}> {
hal.executable.entry_point @tensor_extract layout(#executable_layout)
builtin.module {
- builtin.func @tensor_extract() {
+ func.func @tensor_extract() {
%offset_y = hal.interface.constant.load[0] : index
%offset_x = hal.interface.constant.load[1] : index
%source_size_y = hal.interface.constant.load[2] : index
diff --git a/iree/compiler/Codegen/SPIRV/test/config_default_linalg_ops.mlir b/iree/compiler/Codegen/SPIRV/test/config_default_linalg_ops.mlir
index dd29936..a850da3 100644
--- a/iree/compiler/Codegen/SPIRV/test/config_default_linalg_ops.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/config_default_linalg_ops.mlir
@@ -16,7 +16,7 @@
}> {
hal.executable.entry_point @copy_as_generic layout(#executable_layout)
builtin.module {
- builtin.func @copy_as_generic() {
+ func.func @copy_as_generic() {
%c0 = arith.constant 0 : index
%d0 = hal.interface.constant.load[0] : index
%d1 = hal.interface.constant.load[1] : index
@@ -60,7 +60,7 @@
}> {
hal.executable.entry_point @copy layout(#executable_layout)
builtin.module {
- builtin.func @copy() {
+ func.func @copy() {
%c0 = arith.constant 0 : index
%c224 = arith.constant 224 : index
%c3 = arith.constant 3 : index
diff --git a/iree/compiler/Codegen/SPIRV/test/promote_workgroup_memory.mlir b/iree/compiler/Codegen/SPIRV/test/promote_workgroup_memory.mlir
index 4a9b306..c0a88e1 100644
--- a/iree/compiler/Codegen/SPIRV/test/promote_workgroup_memory.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/promote_workgroup_memory.mlir
@@ -1,5 +1,5 @@
// TODO(antiagainst): Fix promotion to workgroup and enable the test.
-// RUN: iree-opt -split-input-file -pass-pipeline='hal.executable(hal.executable.variant(builtin.module(builtin.func(iree-spirv-tile-and-distribute,iree-spirv-vectorize,canonicalize,cse))))' | FileCheck %s
+// RUN: iree-opt -split-input-file -pass-pipeline='hal.executable(hal.executable.variant(builtin.module(func.func(iree-spirv-tile-and-distribute,iree-spirv-vectorize,canonicalize,cse))))' | FileCheck %s
hal.executable private @matmul_promote_workgroup_memory {
hal.interface @io {
diff --git a/iree/compiler/Codegen/SPIRV/test/tile_and_distribute.mlir b/iree/compiler/Codegen/SPIRV/test/tile_and_distribute.mlir
index 530531f..23663d7 100644
--- a/iree/compiler/Codegen/SPIRV/test/tile_and_distribute.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/tile_and_distribute.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -split-input-file -pass-pipeline='hal.executable(hal.executable.variant(builtin.module(builtin.func(iree-spirv-tile-and-distribute))))' %s | FileCheck %s
+// RUN: iree-opt -split-input-file -pass-pipeline='hal.executable(hal.executable.variant(builtin.module(func.func(iree-spirv-tile-and-distribute))))' %s | FileCheck %s
#map0 = affine_map<()[s0] -> (s0 * 8)>
#map1 = affine_map<()[s0, s1] -> (8, s1 - s0 * 8)>
diff --git a/iree/compiler/Codegen/SPIRV/test/tile_and_distribute_scatter.mlir b/iree/compiler/Codegen/SPIRV/test/tile_and_distribute_scatter.mlir
index e657a16..e214fad 100644
--- a/iree/compiler/Codegen/SPIRV/test/tile_and_distribute_scatter.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/tile_and_distribute_scatter.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -split-input-file -pass-pipeline='hal.executable(hal.executable.variant(builtin.module(builtin.func(iree-spirv-tile-and-distribute))))' %s | FileCheck %s
+// RUN: iree-opt -split-input-file -pass-pipeline='hal.executable(hal.executable.variant(builtin.module(func.func(iree-spirv-tile-and-distribute))))' %s | FileCheck %s
#config = #iree_codegen.lowering_config<tile_sizes = [[1, 16], [1, 1]]>
#translation = #iree_codegen.translation_info<SPIRVDistribute, workload_per_wg = [16, 1]>
@@ -16,7 +16,7 @@
workgroup_size = [16 : index, 1 : index, 1 : index]
}
builtin.module {
- builtin.func @static_scatter_update_slice() {
+ func.func @static_scatter_update_slice() {
%c40 = arith.constant 40 : index
%c500 = arith.constant 500 : index
%c0 = arith.constant 0 : index
diff --git a/iree/compiler/Codegen/SPIRV/test/tile_and_distribute_sort.mlir b/iree/compiler/Codegen/SPIRV/test/tile_and_distribute_sort.mlir
index 36a3daf..d9279e6 100644
--- a/iree/compiler/Codegen/SPIRV/test/tile_and_distribute_sort.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/tile_and_distribute_sort.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -split-input-file -pass-pipeline='hal.executable(hal.executable.variant(builtin.module(builtin.func(iree-spirv-tile-and-distribute, cse))))' %s | FileCheck %s
+// RUN: iree-opt -split-input-file -pass-pipeline='hal.executable(hal.executable.variant(builtin.module(func.func(iree-spirv-tile-and-distribute, cse))))' %s | FileCheck %s
#config = #iree_codegen.lowering_config<tile_sizes = [[1, 0, 16], [1, 0, 1]]>
#translation = #iree_codegen.translation_info<SPIRVDistribute, workload_per_wg = [16, 1]>
@@ -15,7 +15,7 @@
workgroup_size = [16 : index, 1 : index, 1 : index]
}
builtin.module {
- builtin.func @static_3d_sort() {
+ func.func @static_3d_sort() {
%c64 = arith.constant 64 : index
%c128 = arith.constant 128 : index
%c0 = arith.constant 0 : index
diff --git a/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_batch_matmul.mlir b/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_batch_matmul.mlir
index aaabd0c..f3f184d 100644
--- a/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_batch_matmul.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_batch_matmul.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -split-input-file -pass-pipeline='hal.executable(hal.executable.variant(iree-set-num-workgroups,builtin.module(builtin.func(iree-spirv-tile,iree-spirv-vectorize))))' -cse %s | FileCheck %s
+// RUN: iree-opt -split-input-file -pass-pipeline='hal.executable(hal.executable.variant(iree-set-num-workgroups,builtin.module(func.func(iree-spirv-tile,iree-spirv-vectorize))))' -cse %s | FileCheck %s
#config = #iree_codegen.lowering_config<tile_sizes = [[1, 8, 64], [1, 8, 4], [0, 0, 0, 4]]>
#translation = #iree_codegen.translation_info<SPIRVVectorize, workload_per_wg = [64, 8, 1]>
diff --git a/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_conv.mlir b/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_conv.mlir
index 631ce5e..245847c 100644
--- a/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_conv.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_conv.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -split-input-file -pass-pipeline='hal.executable(hal.executable.variant(iree-set-num-workgroups,builtin.module(builtin.func(iree-spirv-create-fast-slow-path,iree-spirv-tile,iree-spirv-vectorize))))' %s | FileCheck %s
+// RUN: iree-opt -split-input-file -pass-pipeline='hal.executable(hal.executable.variant(iree-set-num-workgroups,builtin.module(func.func(iree-spirv-create-fast-slow-path,iree-spirv-tile,iree-spirv-vectorize))))' %s | FileCheck %s
#config = #iree_codegen.lowering_config<tile_sizes = [[0, 4, 4, 16], [0, 4, 1, 4], [0, 0, 0, 0, 1, 1, 4]]>
#translation = #iree_codegen.translation_info<SPIRVVectorize, workload_per_wg = [16, 4, 4]>
diff --git a/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_matmul.mlir b/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_matmul.mlir
index 816824b..3d562e2 100644
--- a/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_matmul.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_matmul.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -split-input-file -pass-pipeline='hal.executable(hal.executable.variant(iree-set-num-workgroups,builtin.module(builtin.func(iree-spirv-tile,iree-spirv-vectorize))))' %s | FileCheck %s
+// RUN: iree-opt -split-input-file -pass-pipeline='hal.executable(hal.executable.variant(iree-set-num-workgroups,builtin.module(func.func(iree-spirv-tile,iree-spirv-vectorize))))' %s | FileCheck %s
#config = #iree_codegen.lowering_config<tile_sizes = [[8, 64], [8, 4], [0, 0, 4]]>
#translation = #iree_codegen.translation_info<SPIRVVectorize, workload_per_wg = [64, 8]>
diff --git a/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_to_cooperative_ops.mlir b/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_to_cooperative_ops.mlir
index e770765..287ad33 100644
--- a/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_to_cooperative_ops.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/tile_and_vectorize_to_cooperative_ops.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -split-input-file -pass-pipeline='hal.executable(hal.executable.variant(builtin.module(builtin.func(iree-spirv-tile-and-vectorize-to-cooperative-ops))))' %s | FileCheck %s
+// RUN: iree-opt -split-input-file -pass-pipeline='hal.executable(hal.executable.variant(builtin.module(func.func(iree-spirv-tile-and-vectorize-to-cooperative-ops))))' %s | FileCheck %s
#config = #iree_codegen.lowering_config<tile_sizes = [[16, 16, 16], [16, 16, 16]]>
#translation = #iree_codegen.translation_info<SPIRVVectorizeToCooperativeOps, workload_per_wg = [16, 16]>
diff --git a/iree/compiler/Codegen/SPIRV/test/vector_to_cooperative_matrix.mlir b/iree/compiler/Codegen/SPIRV/test/vector_to_cooperative_matrix.mlir
index 0700af7..301fea3 100644
--- a/iree/compiler/Codegen/SPIRV/test/vector_to_cooperative_matrix.mlir
+++ b/iree/compiler/Codegen/SPIRV/test/vector_to_cooperative_matrix.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -split-input-file -pass-pipeline='hal.executable(hal.executable.variant(builtin.module(builtin.func(iree-spirv-vector-to-cooperative-ops,cse))))' %s | FileCheck %s
+// RUN: iree-opt -split-input-file -pass-pipeline='hal.executable(hal.executable.variant(builtin.module(func.func(iree-spirv-vector-to-cooperative-ops,cse))))' %s | FileCheck %s
#map0 = affine_map<(d0, d1) -> (d0, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
diff --git a/iree/compiler/Codegen/Sandbox/test/fusion_expert.mlir b/iree/compiler/Codegen/Sandbox/test/fusion_expert.mlir
index beb6930..3ad00e8 100644
--- a/iree/compiler/Codegen/Sandbox/test/fusion_expert.mlir
+++ b/iree/compiler/Codegen/Sandbox/test/fusion_expert.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -pass-pipeline="builtin.func(linalg-fuse{tiling-level=0 vectorize}), canonicalize, cse" -split-input-file %s | FileCheck %s
+// RUN: iree-opt -pass-pipeline="func.func(linalg-fuse{tiling-level=0 vectorize}), canonicalize, cse" -split-input-file %s | FileCheck %s
func @matmul_bias_add(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>, %arg2 : tensor<?xf32>) -> tensor<?x?xf32> {
%cst = arith.constant 0.0 : f32
diff --git a/iree/compiler/Codegen/Sandbox/test/single_tiling_expert.mlir b/iree/compiler/Codegen/Sandbox/test/single_tiling_expert.mlir
index db90362..9a54ab8 100644
--- a/iree/compiler/Codegen/Sandbox/test/single_tiling_expert.mlir
+++ b/iree/compiler/Codegen/Sandbox/test/single_tiling_expert.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -pass-pipeline="builtin.func(linalg-single-tiling-expert-driver{tiling-level=0 vectorize})" -split-input-file %s | FileCheck %s
+// RUN: iree-opt -pass-pipeline="func.func(linalg-single-tiling-expert-driver{tiling-level=0 vectorize})" -split-input-file %s | FileCheck %s
func @matmul(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>, %arg2 : tensor<?x?xf32>) -> tensor<?x?xf32> {
%0 = linalg.matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[10, 20, 30]]>}
diff --git a/iree/compiler/Codegen/Sandbox/test/unroll_one_vector_op.mlir b/iree/compiler/Codegen/Sandbox/test/unroll_one_vector_op.mlir
index 43825d1..e015c22 100644
--- a/iree/compiler/Codegen/Sandbox/test/unroll_one_vector_op.mlir
+++ b/iree/compiler/Codegen/Sandbox/test/unroll_one_vector_op.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt %s -pass-pipeline='builtin.func(unroll-one-vector-op{anchor-func=test anchor-op=vector.contract source-shape=4,4,3 target-shape=2,4,3})' | FileCheck %s
+// RUN: iree-opt %s -pass-pipeline='func.func(unroll-one-vector-op{anchor-func=test anchor-op=vector.contract source-shape=4,4,3 target-shape=2,4,3})' | FileCheck %s
#matmul_accesses = [
affine_map<(i, j, k) -> (i, k)>,
diff --git a/iree/compiler/Codegen/Transforms/AffineMinDistributedSCFCanonicalization.cpp b/iree/compiler/Codegen/Transforms/AffineMinDistributedSCFCanonicalization.cpp
index e13b25a..7ce8665 100644
--- a/iree/compiler/Codegen/Transforms/AffineMinDistributedSCFCanonicalization.cpp
+++ b/iree/compiler/Codegen/Transforms/AffineMinDistributedSCFCanonicalization.cpp
@@ -12,6 +12,7 @@
#include "iree/compiler/Codegen/Transforms/Transforms.h"
#include "mlir/Dialect/Affine/Utils.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
@@ -167,7 +168,7 @@
/// individually.
struct AffineMinDistributedSCFCanonicalizationPass
: public PassWrapper<AffineMinDistributedSCFCanonicalizationPass,
- OperationPass<FuncOp>> {
+ OperationPass<func::FuncOp>> {
StringRef getArgument() const override {
return "iree-codegen-affinemin-scf-canonicalization";
}
@@ -178,7 +179,7 @@
}
void runOnOperation() override {
- FuncOp funcOp = getOperation();
+ func::FuncOp funcOp = getOperation();
RewritePatternSet foldPattern(&getContext());
populateAffineMinSCFCanonicalizationPattern(foldPattern);
FrozenRewritePatternSet frozenPatterns(std::move(foldPattern));
diff --git a/iree/compiler/Codegen/Transforms/Transforms.cpp b/iree/compiler/Codegen/Transforms/Transforms.cpp
index 15b258b..a29f1c1 100644
--- a/iree/compiler/Codegen/Transforms/Transforms.cpp
+++ b/iree/compiler/Codegen/Transforms/Transforms.cpp
@@ -30,7 +30,7 @@
} // namespace
LogicalResult defineWorkgroupCountRegion(
- OpBuilder &builder, FuncOp funcOp,
+ OpBuilder &builder, func::FuncOp funcOp,
WorkgroupCountRegionBuilder regionBuilder) {
IREE::HAL::ExecutableEntryPointOp entryPointOp = getEntryPoint(funcOp);
if (!entryPointOp) {
diff --git a/iree/compiler/Codegen/Transforms/Transforms.h b/iree/compiler/Codegen/Transforms/Transforms.h
index cf3fdd2..b25590b 100644
--- a/iree/compiler/Codegen/Transforms/Transforms.h
+++ b/iree/compiler/Codegen/Transforms/Transforms.h
@@ -30,7 +30,7 @@
using WorkgroupCountRegionBuilder = std::function<std::array<Value, 3>(
OpBuilder &b, Location loc, std::array<Value, 3> workload)>;
LogicalResult defineWorkgroupCountRegion(
- OpBuilder &builder, FuncOp funcOp,
+ OpBuilder &builder, func::FuncOp funcOp,
WorkgroupCountRegionBuilder regionBuilder);
/// Insert patterns to perform folding of AffineMinOp by matching the pattern
diff --git a/iree/compiler/Codegen/Utils/Utils.cpp b/iree/compiler/Codegen/Utils/Utils.cpp
index 4651efc..98ad38b 100644
--- a/iree/compiler/Codegen/Utils/Utils.cpp
+++ b/iree/compiler/Codegen/Utils/Utils.cpp
@@ -29,9 +29,9 @@
// Utility functions to get entry points
//===----------------------------------------------------------------------===//
-bool isEntryPoint(FuncOp func) { return func.isPublic(); }
+bool isEntryPoint(func::FuncOp func) { return func.isPublic(); }
-IREE::HAL::ExecutableEntryPointOp getEntryPoint(FuncOp funcOp) {
+IREE::HAL::ExecutableEntryPointOp getEntryPoint(func::FuncOp funcOp) {
auto variantOp = funcOp->getParentOfType<IREE::HAL::ExecutableVariantOp>();
for (auto op : variantOp.getOps<IREE::HAL::ExecutableEntryPointOp>()) {
if (op.sym_name() == funcOp.getName()) {
@@ -436,10 +436,10 @@
}
LogicalResult getFilteredOps(
- FuncOp funcOp, RootOpFilteringFn filteringFn,
+ func::FuncOp funcOp, RootOpFilteringFn filteringFn,
SmallVectorImpl<Operation *> &filteredOps,
SmallVectorImpl<LoopTilingAndDistributionInfo> &tiledLoops) {
- Region &region = funcOp.body();
+ Region &region = funcOp.getBody();
if (!llvm::hasSingleElement(region)) {
return funcOp.emitError("unable dispatch function with multiple blocks");
}
@@ -463,7 +463,7 @@
}
LogicalResult getComputeOps(
- FuncOp funcOp, SmallVectorImpl<Operation *> &computeOps,
+ func::FuncOp funcOp, SmallVectorImpl<Operation *> &computeOps,
SmallVectorImpl<LoopTilingAndDistributionInfo> &tiledLoops) {
if (failed(getFilteredOps(
funcOp,
@@ -477,7 +477,7 @@
}
SmallVector<LoopTilingAndDistributionInfo> getTiledAndDistributedLoopInfo(
- FuncOp funcOp) {
+ func::FuncOp funcOp) {
SmallVector<LoopTilingAndDistributionInfo> info;
funcOp.walk([&](scf::ForOp forOp) {
if (auto tiledLoopInfo = isTiledAndDistributedLoop(forOp)) {
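The body accessor change above in isolation, as a compilable sketch (hypothetical helper, assuming MLIR at this revision):
```cpp
#include "llvm/ADT/STLExtras.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"

// The function body region is reached via getBody() on func::FuncOp; this is
// what replaces the funcOp.body() call removed in the hunk above.
static bool hasSingleBlockBody(mlir::func::FuncOp funcOp) {
  mlir::Region &region = funcOp.getBody();
  return llvm::hasSingleElement(region);
}
```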
diff --git a/iree/compiler/Codegen/Utils/Utils.h b/iree/compiler/Codegen/Utils/Utils.h
index 5110269..e137eb1 100644
--- a/iree/compiler/Codegen/Utils/Utils.h
+++ b/iree/compiler/Codegen/Utils/Utils.h
@@ -11,6 +11,7 @@
#include "iree/compiler/Dialect/HAL/IR/HALOps.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/Triple.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/IR/BuiltinOps.h"
@@ -25,24 +26,24 @@
//===----------------------------------------------------------------------===//
/// Returns true if the given `func` is a kernel dispatch entry point.
-bool isEntryPoint(FuncOp func);
+bool isEntryPoint(func::FuncOp func);
/// Returns a map from function symbol name to corresponding entry point op.
llvm::StringMap<IREE::HAL::ExecutableEntryPointOp> getAllEntryPoints(
ModuleOp module);
/// Returns the entry point op for the `funcOp`. Returns `nullptr` on failure.
-IREE::HAL::ExecutableEntryPointOp getEntryPoint(FuncOp funcOp);
+IREE::HAL::ExecutableEntryPointOp getEntryPoint(func::FuncOp funcOp);
/// Methods to get backend information.
bool isX86(IREE::HAL::ExecutableVariantOp variantOp);
-inline bool isX86(FuncOp entryPointFn) {
+inline bool isX86(func::FuncOp entryPointFn) {
auto variantOp =
entryPointFn->getParentOfType<IREE::HAL::ExecutableVariantOp>();
return isX86(variantOp);
}
bool isRISCV(IREE::HAL::ExecutableVariantOp variantOp);
-inline bool isRISCV(FuncOp entryPointFn) {
+inline bool isRISCV(func::FuncOp entryPointFn) {
auto variantOp =
entryPointFn->getParentOfType<IREE::HAL::ExecutableVariantOp>();
return isRISCV(variantOp);
@@ -50,7 +51,7 @@
inline bool isVMVXBackend(IREE::HAL::ExecutableVariantOp variantOp) {
return variantOp.target().getBackend().getValue() == "vmvx";
}
-inline bool isVMVXBackend(FuncOp entryPointFn) {
+inline bool isVMVXBackend(func::FuncOp entryPointFn) {
auto variantOp =
entryPointFn->getParentOfType<IREE::HAL::ExecutableVariantOp>();
return isVMVXBackend(variantOp);
@@ -122,14 +123,14 @@
/// other cases.
using RootOpFilteringFn = std::function<bool(Operation *)>;
LogicalResult getFilteredOps(
- FuncOp funcOp, RootOpFilteringFn filteringFn,
+ func::FuncOp funcOp, RootOpFilteringFn filteringFn,
SmallVectorImpl<Operation *> &filteredOps,
SmallVectorImpl<LoopTilingAndDistributionInfo> &tiledLoops);
/// Specialization of `getFilteredOps` for filtering `LinalgOp`s and
/// `LinagExtOp`s.
LogicalResult getComputeOps(
- FuncOp funcOp, SmallVectorImpl<Operation *> &computeOps,
+ func::FuncOp funcOp, SmallVectorImpl<Operation *> &computeOps,
SmallVectorImpl<LoopTilingAndDistributionInfo> &tiledLoops);
/// If the given `forOp` is a tiled and distributed loop, returns its tiling and
@@ -139,7 +140,7 @@
/// Collects information about loops matching tiled+distribute pattern.
SmallVector<LoopTilingAndDistributionInfo> getTiledAndDistributedLoopInfo(
- FuncOp funcOp);
+ func::FuncOp funcOp);
Operation *createLinalgCopyOp(OpBuilder &b, Location loc, Value from, Value to,
ArrayRef<NamedAttribute> attributes = {});
diff --git a/iree/compiler/Dialect/Flow/IR/PartitionableLoopsInterface.cpp b/iree/compiler/Dialect/Flow/IR/PartitionableLoopsInterface.cpp
index b6aca9e..1f245bf 100644
--- a/iree/compiler/Dialect/Flow/IR/PartitionableLoopsInterface.cpp
+++ b/iree/compiler/Dialect/Flow/IR/PartitionableLoopsInterface.cpp
@@ -6,6 +6,7 @@
#include "iree/compiler/Dialect/Flow/IR/PartitionableLoopsInterface.h"
+#include "iree-dialects/Dialect/LinalgExt/IR/LinalgExtDialect.h"
#include "iree-dialects/Dialect/LinalgExt/IR/LinalgExtOps.h"
#include "iree-dialects/Dialect/LinalgExt/IR/TiledOpInterface.h"
#include "llvm/ADT/SmallVector.h"
@@ -131,40 +132,43 @@
/// needs to be done on a op-by-op basis since registration is on an op-by-op
/// basis.
template <typename OpTy>
-static void registerInterfaceForLinalgOps(DialectRegistry &registry) {
- registry.addOpInterface<OpTy, LinalgOpPartitionableLoops>();
+static void registerInterfaceForLinalgOps(MLIRContext *ctx) {
+ OpTy::template attachInterface<LinalgOpPartitionableLoops>(*ctx);
}
/// Specializations of the registration method to use a different external model
/// instead of the generic external model for Linalg ops.
template <>
-void registerInterfaceForLinalgOps<linalg::Mmt4DOp>(DialectRegistry &registry) {
- registry.addOpInterface<linalg::Mmt4DOp, Mmt4DOpPartitionableLoops>();
+void registerInterfaceForLinalgOps<linalg::Mmt4DOp>(MLIRContext *ctx) {
+ linalg::Mmt4DOp::attachInterface<Mmt4DOpPartitionableLoops>(*ctx);
}
/// Registers the external models for all Linalg operations.
template <typename OpTy1, typename OpTy2, typename... More>
-static void registerInterfaceForLinalgOps(DialectRegistry &registry) {
- registerInterfaceForLinalgOps<OpTy1>(registry);
- registerInterfaceForLinalgOps<OpTy2, More...>(registry);
+static void registerInterfaceForLinalgOps(MLIRContext *ctx) {
+ registerInterfaceForLinalgOps<OpTy1>(ctx);
+ registerInterfaceForLinalgOps<OpTy2, More...>(ctx);
}
/// Registers the `TiledOpInterfacePartitionableLoops` model for operations.
template <typename OpTy>
-static void registerInterfaceForTiledOpInterfaceOps(DialectRegistry &registry) {
- registry.addOpInterface<OpTy, TiledOpInterfacePartitionableLoops>();
+static void registerInterfaceForTiledOpInterfaceOps(MLIRContext *ctx) {
+ OpTy ::template attachInterface<TiledOpInterfacePartitionableLoops>(*ctx);
}
/// Registers the external models for all TiledOpInterface operations.
template <typename OpTy1, typename OpTy2, typename... More>
-static void registerInterfaceForTiledOpInterfaceOps(DialectRegistry &registry) {
- registerInterfaceForTiledOpInterfaceOps<OpTy1>(registry);
- registerInterfaceForTiledOpInterfaceOps<OpTy2, More...>(registry);
+static void registerInterfaceForTiledOpInterfaceOps(MLIRContext *ctx) {
+ registerInterfaceForTiledOpInterfaceOps<OpTy1>(ctx);
+ registerInterfaceForTiledOpInterfaceOps<OpTy2, More...>(ctx);
}
void registerPartitionableLoopsInterfaceModels(DialectRegistry &registry) {
- // clang-format off
- registerInterfaceForLinalgOps<
+ registry.insert<linalg::LinalgDialect>();
+
+ registry.addExtension(+[](MLIRContext *ctx, linalg::LinalgDialect *dialect) {
+ registerInterfaceForLinalgOps<
+ // clang-format off
// This is copy-pasted from LinalgStructuredOps.cpp.inc. In theory you could
// just include that generated file here, but that cause errors with bazel.
@@ -206,13 +210,22 @@
::mlir::linalg::QuantizedBatchMatmulOp,
::mlir::linalg::QuantizedMatmulOp,
::mlir::linalg::VecmatOp
- >(registry);
- // clang-format on
+ // clang-format on
+ >(ctx);
+ });
- registerInterfaceForTiledOpInterfaceOps<
- LinalgExt::FftOp, LinalgExt::ReverseOp, LinalgExt::ScanOp,
- LinalgExt::ScatterOp, LinalgExt::SortOp, tensor::ExtractSliceOp,
- tensor::InsertSliceOp>(registry);
+ registry.insert<LinalgExt::IREELinalgExtDialect>();
+
+ registry.addExtension(
+ +[](MLIRContext *ctx, LinalgExt::IREELinalgExtDialect *dialect) {
+ registerInterfaceForTiledOpInterfaceOps<
+ LinalgExt::FftOp, LinalgExt::ReverseOp, LinalgExt::ScanOp,
+ LinalgExt::ScatterOp, LinalgExt::SortOp>(ctx);
+ });
+ registry.addExtension(+[](MLIRContext *ctx, tensor::TensorDialect *dialect) {
+ registerInterfaceForTiledOpInterfaceOps<tensor::ExtractSliceOp,
+ tensor::InsertSliceOp>(ctx);
+ });
}
} // namespace Flow
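The helper refactor above, reduced to its shape: the variadic recursion is unchanged, but each specialization now takes an MLIRContext and calls attachInterface directly instead of going through the registry. Sketch with a generic model parameter (illustrative, never instantiated here):
```cpp
#include "mlir/IR/MLIRContext.h"

// Base case: attach one external model to one op type in the given context.
template <typename ModelTy, typename OpTy>
static void attachModel(mlir::MLIRContext *ctx) {
  OpTy::template attachInterface<ModelTy>(*ctx);
}

// Recursive case: peel off op types two at a time.
template <typename ModelTy, typename OpTy1, typename OpTy2, typename... More>
static void attachModel(mlir::MLIRContext *ctx) {
  attachModel<ModelTy, OpTy1>(ctx);
  attachModel<ModelTy, OpTy2, More...>(ctx);
}
```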
diff --git a/iree/compiler/Dialect/Flow/Transforms/ConvertLinalgTensorOps.cpp b/iree/compiler/Dialect/Flow/Transforms/ConvertLinalgTensorOps.cpp
index 0e77a72..738b1b2 100644
--- a/iree/compiler/Dialect/Flow/Transforms/ConvertLinalgTensorOps.cpp
+++ b/iree/compiler/Dialect/Flow/Transforms/ConvertLinalgTensorOps.cpp
@@ -151,8 +151,8 @@
};
} // namespace
-std::unique_ptr<OperationPass<mlir::FuncOp>> createConvertLinalgTensorOpsPass(
- bool runBeforeDispatchRegionFormation) {
+std::unique_ptr<OperationPass<mlir::func::FuncOp>>
+createConvertLinalgTensorOpsPass(bool runBeforeDispatchRegionFormation) {
return std::make_unique<ConvertLinalgTensorOpsPass>(
runBeforeDispatchRegionFormation);
}
diff --git a/iree/compiler/Dialect/Flow/Transforms/DeduplicateExecutables.cpp b/iree/compiler/Dialect/Flow/Transforms/DeduplicateExecutables.cpp
index b33cdab..902b65a 100644
--- a/iree/compiler/Dialect/Flow/Transforms/DeduplicateExecutables.cpp
+++ b/iree/compiler/Dialect/Flow/Transforms/DeduplicateExecutables.cpp
@@ -207,13 +207,15 @@
}
// Walk all functions and ensure equivalent.
- if (!compare_ranges(
- lhsModule.getOps<mlir::FuncOp>(), rhsModule.getOps<mlir::FuncOp>(),
- [](mlir::FuncOp lhs, mlir::FuncOp rhs) {
- if (lhs.getType() != rhs.getType()) return false;
- if (lhs->getAttrs() != rhs->getAttrs()) return false;
- return isStructurallyEquivalentTo(lhs.getRegion(), rhs.getRegion());
- })) {
+ if (!compare_ranges(lhsModule.getOps<mlir::func::FuncOp>(),
+ rhsModule.getOps<mlir::func::FuncOp>(),
+ [](mlir::func::FuncOp lhs, mlir::func::FuncOp rhs) {
+ if (lhs.getFunctionType() != rhs.getFunctionType())
+ return false;
+ if (lhs->getAttrs() != rhs->getAttrs()) return false;
+ return isStructurallyEquivalentTo(lhs.getRegion(),
+ rhs.getRegion());
+ })) {
return false; // dispatch entry mismatch
}
diff --git a/iree/compiler/Dialect/Flow/Transforms/ExpandTensorShapes.cpp b/iree/compiler/Dialect/Flow/Transforms/ExpandTensorShapes.cpp
index a560f70..2c08f14 100644
--- a/iree/compiler/Dialect/Flow/Transforms/ExpandTensorShapes.cpp
+++ b/iree/compiler/Dialect/Flow/Transforms/ExpandTensorShapes.cpp
@@ -321,9 +321,9 @@
// ->
// func @foo(%0: tensor<?xf32>, %d: index) {
// %1 = flow.tensor.tie_shape %0 : tensor<?xf32>{%d}
-static void expandFuncOp(mlir::FuncOp op, ExpandedGlobalMap &globalMap,
+static void expandFuncOp(mlir::func::FuncOp op, ExpandedGlobalMap &globalMap,
IndexSet &indexSet, TensorDimMap &tensorDimMap) {
- auto oldType = op.getType();
+ auto oldType = op.getFunctionType();
auto inputTypes = expandTypes(oldType.getInputs());
auto resultTypes = expandTypes(oldType.getResults());
auto newType = FunctionType::get(op.getContext(), inputTypes, resultTypes);
@@ -482,7 +482,7 @@
expandGlobalStoreOp(storeOp, globalMap, indexSet, tensorDimMap);
} else if (auto initializerOp = dyn_cast<IREE::Util::InitializerOp>(op)) {
expandInitializerOp(initializerOp, globalMap, indexSet, tensorDimMap);
- } else if (auto funcOp = dyn_cast<mlir::FuncOp>(op)) {
+ } else if (auto funcOp = dyn_cast<mlir::func::FuncOp>(op)) {
expandFuncOp(funcOp, globalMap, indexSet, tensorDimMap);
} else if (auto callOp = dyn_cast<mlir::func::CallOp>(op)) {
expandCallOp(callOp, indexSet, tensorDimMap);
diff --git a/iree/compiler/Dialect/Flow/Transforms/ExportBenchmarkFuncs.cpp b/iree/compiler/Dialect/Flow/Transforms/ExportBenchmarkFuncs.cpp
index 0fa5bcd..61504aa 100644
--- a/iree/compiler/Dialect/Flow/Transforms/ExportBenchmarkFuncs.cpp
+++ b/iree/compiler/Dialect/Flow/Transforms/ExportBenchmarkFuncs.cpp
@@ -37,8 +37,8 @@
// Gather the functions we want to wrap for benchmarking and wrap them.
// Since we are inserting new functions as part of this pass we must perform
// the wrapping for only the inputs.
- SmallVector<mlir::FuncOp, 4> entryFuncOps;
- for (auto entryFuncOp : moduleOp.getOps<mlir::FuncOp>()) {
+ SmallVector<mlir::func::FuncOp, 4> entryFuncOps;
+ for (auto entryFuncOp : moduleOp.getOps<mlir::func::FuncOp>()) {
if (entryFuncOp.isPublic()) {
entryFuncOps.push_back(entryFuncOp);
}
@@ -71,14 +71,14 @@
}
LogicalResult createEntryPointBenchmarkFunc(mlir::ModuleOp moduleOp,
- mlir::FuncOp entryFuncOp) {
+ mlir::func::FuncOp entryFuncOp) {
OpBuilder moduleBuilder(&getContext());
moduleBuilder.setInsertionPointAfter(entryFuncOp);
// Create one dummy input variable per input.
Location loc = entryFuncOp.getLoc();
SmallVector<IREE::Util::GlobalOp, 4> dummyInputVariableOps;
- for (auto inputType : entryFuncOp.getType().getInputs()) {
+ for (auto inputType : entryFuncOp.getFunctionType().getInputs()) {
auto dummyVar = createDummyInputVariableOp(loc, inputType, moduleBuilder);
if (!dummyVar) return failure();
dummyInputVariableOps.push_back(dummyVar);
@@ -86,7 +86,7 @@
// Create a `() -> ()` entry point op the benchmark tool can run.
std::string funcName = std::string(entryFuncOp.getName()) + "_benchmark";
- auto funcOp = moduleBuilder.create<mlir::FuncOp>(
+ auto funcOp = moduleBuilder.create<mlir::func::FuncOp>(
loc, funcName, moduleBuilder.getFunctionType({}, {}));
funcOp.setPublic();
funcOp->setAttr("iree.abi.stub", moduleBuilder.getUnitAttr());
diff --git a/iree/compiler/Dialect/Flow/Transforms/OutlineDispatchRegions.cpp b/iree/compiler/Dialect/Flow/Transforms/OutlineDispatchRegions.cpp
index 43d51f2..2a7e7af 100644
--- a/iree/compiler/Dialect/Flow/Transforms/OutlineDispatchRegions.cpp
+++ b/iree/compiler/Dialect/Flow/Transforms/OutlineDispatchRegions.cpp
@@ -30,7 +30,7 @@
// Creates a flow.executable out of a set of functions, pulling in all other
// functions reachable by the provided functions.
static ExecutableOp createExecutable(Location loc, StringRef executableName,
- ArrayRef<mlir::FuncOp> funcOps,
+ ArrayRef<mlir::func::FuncOp> funcOps,
ModuleOp parentModuleOp) {
assert(!funcOps.empty() && "must have at least one entry function");
@@ -86,14 +86,15 @@
}
// Converts a dispatch region body to a free-floating function.
-static mlir::FuncOp createWorkgroupFunc(Location loc, StringRef functionName,
- Region &region) {
+static mlir::func::FuncOp createWorkgroupFunc(Location loc,
+ StringRef functionName,
+ Region &region) {
// Build function type matching the region signature.
auto functionType = FunctionType::get(
region.getContext(), region.getArgumentTypes(), /*results=*/{});
// Clone region into the function body.
- auto funcOp = mlir::FuncOp::create(loc, functionName, functionType);
+ auto funcOp = mlir::func::FuncOp::create(loc, functionName, functionType);
BlockAndValueMapping mapping;
region.cloneInto(&funcOp.getBody(), mapping);
@@ -158,7 +159,7 @@
// Generate a nice name if possible.
std::string opName;
- if (auto funcOp = llvm::dyn_cast<mlir::FuncOp>(operation)) {
+ if (auto funcOp = llvm::dyn_cast<mlir::func::FuncOp>(operation)) {
opName = funcOp.getName().str();
} else if (llvm::isa<IREE::Util::InitializerOp>(operation)) {
opName =
diff --git a/iree/compiler/Dialect/Flow/Transforms/PassDetail.h b/iree/compiler/Dialect/Flow/Transforms/PassDetail.h
index 0c80906..740ad78 100644
--- a/iree/compiler/Dialect/Flow/Transforms/PassDetail.h
+++ b/iree/compiler/Dialect/Flow/Transforms/PassDetail.h
@@ -7,6 +7,7 @@
#ifndef IREE_COMPILER_DIALECT_FLOW_TRANSFORMS_PASS_DETAIL_H_
#define IREE_COMPILER_DIALECT_FLOW_TRANSFORMS_PASS_DETAIL_H_
+#include "mlir/IR/FunctionInterfaces.h"
#include "mlir/Pass/Pass.h"
namespace mlir {
diff --git a/iree/compiler/Dialect/Flow/Transforms/Passes.cpp b/iree/compiler/Dialect/Flow/Transforms/Passes.cpp
index d6417bc..7e08332 100644
--- a/iree/compiler/Dialect/Flow/Transforms/Passes.cpp
+++ b/iree/compiler/Dialect/Flow/Transforms/Passes.cpp
@@ -10,6 +10,7 @@
#include "iree/compiler/Dialect/Util/Transforms/Passes.h"
#include "iree/compiler/Utils/PassUtils.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/MemRef/Transforms/Passes.h"
#include "mlir/Pass/PassOptions.h"
@@ -79,7 +80,7 @@
namespace {
-using FunctionLikeNest = MultiOpNest<FuncOp, IREE::Util::InitializerOp>;
+using FunctionLikeNest = MultiOpNest<func::FuncOp, IREE::Util::InitializerOp>;
// Subset of the overall pass pipeline for optimizing globals and numerics.
// We may ultimately break this out separately so creating a syntactic
diff --git a/iree/compiler/Dialect/Flow/Transforms/PromoteTensorLoads.cpp b/iree/compiler/Dialect/Flow/Transforms/PromoteTensorLoads.cpp
index c67ab91..afa2a64 100644
--- a/iree/compiler/Dialect/Flow/Transforms/PromoteTensorLoads.cpp
+++ b/iree/compiler/Dialect/Flow/Transforms/PromoteTensorLoads.cpp
@@ -91,7 +91,8 @@
}
};
-std::unique_ptr<OperationPass<mlir::FuncOp>> createPromoteTensorLoadsPass() {
+std::unique_ptr<OperationPass<mlir::func::FuncOp>>
+createPromoteTensorLoadsPass() {
return std::make_unique<PromoteTensorLoadsPass>();
}
diff --git a/iree/compiler/Dialect/Flow/Transforms/test/deduplicate_executables.mlir b/iree/compiler/Dialect/Flow/Transforms/test/deduplicate_executables.mlir
index 6d546da..b83c34c 100644
--- a/iree/compiler/Dialect/Flow/Transforms/test/deduplicate_executables.mlir
+++ b/iree/compiler/Dialect/Flow/Transforms/test/deduplicate_executables.mlir
@@ -162,7 +162,7 @@
flow.dispatch.entry @different_types_float_entry
builtin.module {
func @different_types_float_entry(%arg0: tensor<4xf32>) -> tensor<4xi1> {
- %0 = "mhlo.compare"(%arg0, %arg0) {comparison_direction = "EQ"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xi1>
+ %0 = "mhlo.compare"(%arg0, %arg0) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xi1>
return %0 : tensor<4xi1>
}
}
@@ -172,7 +172,7 @@
flow.dispatch.entry @different_types_int_entry
builtin.module {
func @different_types_int_entry(%arg0: tensor<4xi32>) -> tensor<4xi1> {
- %0 = "mhlo.compare"(%arg0, %arg0) {comparison_direction = "EQ"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
+ %0 = "mhlo.compare"(%arg0, %arg0) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
return %0 : tensor<4xi1>
}
}
diff --git a/iree/compiler/Dialect/Flow/Transforms/test/dispatch_linalg_on_tensors.mlir b/iree/compiler/Dialect/Flow/Transforms/test/dispatch_linalg_on_tensors.mlir
index d74946a..59553df 100644
--- a/iree/compiler/Dialect/Flow/Transforms/test/dispatch_linalg_on_tensors.mlir
+++ b/iree/compiler/Dialect/Flow/Transforms/test/dispatch_linalg_on_tensors.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -split-input-file -verify-diagnostics -pass-pipeline="builtin.func(iree-flow-dispatch-linalg-on-tensors-pass, resolve-shaped-type-result-dims, cse, canonicalize, cse)" %s | FileCheck %s
+// RUN: iree-opt -split-input-file -verify-diagnostics -pass-pipeline="func.func(iree-flow-dispatch-linalg-on-tensors-pass, resolve-shaped-type-result-dims, cse, canonicalize, cse)" %s | FileCheck %s
func @tile_matmul_alone(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
%arg2 : tensor<?x?xf32>) -> tensor<?x?xf32> {
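
The RUN-line updates in these tests reflect the new textual anchor for function-nested pipelines: func.func instead of builtin.func. The same string applies when a pipeline is parsed programmatically; a small sketch, where some-func-pass is a placeholder pass name rather than a real IREE pass:

#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"

static mlir::LogicalResult buildPipeline(mlir::PassManager &pm) {
  // Function-anchored pipelines must now name func.func.
  return mlir::parsePassPipeline("func.func(some-func-pass)", pm);
}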
diff --git a/iree/compiler/Dialect/Flow/Transforms/test/dispatch_linalg_on_tensors_fusion.mlir b/iree/compiler/Dialect/Flow/Transforms/test/dispatch_linalg_on_tensors_fusion.mlir
index d24826e..74fc741 100644
--- a/iree/compiler/Dialect/Flow/Transforms/test/dispatch_linalg_on_tensors_fusion.mlir
+++ b/iree/compiler/Dialect/Flow/Transforms/test/dispatch_linalg_on_tensors_fusion.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -split-input-file -verify-diagnostics -pass-pipeline="builtin.func(iree-flow-dispatch-linalg-on-tensors-pass)" -canonicalize -cse %s | FileCheck %s
+// RUN: iree-opt -split-input-file -verify-diagnostics -pass-pipeline="func.func(iree-flow-dispatch-linalg-on-tensors-pass)" -canonicalize -cse %s | FileCheck %s
func @fuse_conv2d_elementwise(%input: tensor<1x225x225x16xf32>, %filter: tensor<3x3x16x32xf32>, %offset: tensor<32xf32>) -> tensor<1x112x112x32xf32> {
%cst = arith.constant 0.000000e+00 : f32
diff --git a/iree/compiler/Dialect/Flow/Transforms/test/export_benchmark_funcs.mlir b/iree/compiler/Dialect/Flow/Transforms/test/export_benchmark_funcs.mlir
index dbc5ff7..9bf29cc 100644
--- a/iree/compiler/Dialect/Flow/Transforms/test/export_benchmark_funcs.mlir
+++ b/iree/compiler/Dialect/Flow/Transforms/test/export_benchmark_funcs.mlir
@@ -22,7 +22,7 @@
func @while(%start: tensor<i32>, %bound: tensor<i32>) -> tensor<i32> {
cf.br ^bb1(%start : tensor<i32>)
^bb1(%0: tensor<i32>):
- %1 = "mhlo.compare"(%0, %bound) {comparison_direction = "LT"} : (tensor<i32>, tensor<i32>) -> tensor<i1>
+ %1 = "mhlo.compare"(%0, %bound) {comparison_direction = #mhlo<"comparison_direction LT">} : (tensor<i32>, tensor<i32>) -> tensor<i1>
%2 = tensor.extract %1[] : tensor<i1>
cf.cond_br %2, ^bb2(%0 : tensor<i32>), ^bb3(%0 : tensor<i32>)
^bb2(%3: tensor<i32>):
diff --git a/iree/compiler/Dialect/Flow/Transforms/test/inject_dispatch_tracing.mlir b/iree/compiler/Dialect/Flow/Transforms/test/inject_dispatch_tracing.mlir
index b029c54..04500aa 100644
--- a/iree/compiler/Dialect/Flow/Transforms/test/inject_dispatch_tracing.mlir
+++ b/iree/compiler/Dialect/Flow/Transforms/test/inject_dispatch_tracing.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -split-input-file -pass-pipeline='builtin.func(iree-flow-inject-dispatch-tracing)' %s | FileCheck %s
+// RUN: iree-opt -split-input-file -pass-pipeline='func.func(iree-flow-inject-dispatch-tracing)' %s | FileCheck %s
// CHECK-LABEL: func @singleDispatch
// CHECK-SAME: (%[[ARG0:.+]]: tensor<4xf32>)
diff --git a/iree/compiler/Dialect/Flow/Transforms/test/strip_signedness.mlir b/iree/compiler/Dialect/Flow/Transforms/test/strip_signedness.mlir
index 80a1bf1..7bd6c3b 100644
--- a/iree/compiler/Dialect/Flow/Transforms/test/strip_signedness.mlir
+++ b/iree/compiler/Dialect/Flow/Transforms/test/strip_signedness.mlir
@@ -1,5 +1,5 @@
-// RUN: iree-opt -split-input-file -pass-pipeline='builtin.func(iree-flow-strip-signedness)' %s | FileCheck %s
+// RUN: iree-opt -split-input-file -pass-pipeline='func.func(iree-flow-strip-signedness)' %s | FileCheck %s
// CHECK-LABEL: @strip_signedness_arg
// CHECK-SAME: tensor<4xi8>
diff --git a/iree/compiler/Dialect/Flow/Transforms/test/verify_input_ir.mlir b/iree/compiler/Dialect/Flow/Transforms/test/verify_input_ir.mlir
index 6f678bd..1400b00 100644
--- a/iree/compiler/Dialect/Flow/Transforms/test/verify_input_ir.mlir
+++ b/iree/compiler/Dialect/Flow/Transforms/test/verify_input_ir.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -pass-pipeline="builtin.func(iree-verify-input-legality)" -verify-diagnostics %s -split-input-file
+// RUN: iree-opt -pass-pipeline="func.func(iree-verify-input-legality)" -verify-diagnostics %s -split-input-file
// expected-error@below {{illegal operations still remain}}
func @check_no_mhlo(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>) -> tensor<?x?xf32> {
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertBufferOps.cpp b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertBufferOps.cpp
index 1b7a323..99112ae 100644
--- a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertBufferOps.cpp
+++ b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertBufferOps.cpp
@@ -25,7 +25,7 @@
LogicalResult matchAndRewrite(
IREE::HAL::BufferLoadOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
- auto importType = importOp.getType();
+ auto importType = importOp.getFunctionType();
auto originalType = op.result().getType();
auto targetType = typeConverter->convertType(op.result().getType());
@@ -105,7 +105,7 @@
LogicalResult matchAndRewrite(
IREE::HAL::BufferStoreOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
- auto importType = importOp.getType();
+ auto importType = importOp.getFunctionType();
auto elementType = op.value().getType();
int32_t validByteWidth =
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertCommandBufferOps.cpp b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertCommandBufferOps.cpp
index 6ded056..7807747 100644
--- a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertCommandBufferOps.cpp
+++ b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertCommandBufferOps.cpp
@@ -29,7 +29,7 @@
LogicalResult matchAndRewrite(
IREE::HAL::CommandBufferFillBufferOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
- auto importType = importOp.getType();
+ auto importType = importOp.getFunctionType();
SmallVector<Value, 8> callOperands = {
adaptor.command_buffer(),
@@ -84,7 +84,7 @@
LogicalResult matchAndRewrite(
IREE::HAL::CommandBufferPushDescriptorSetOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
- auto importType = importOp.getType();
+ auto importType = importOp.getFunctionType();
SmallVector<Value, 8> callOperands = {
adaptor.command_buffer(),
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertExecutableOps.cpp b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertExecutableOps.cpp
index 8752f9f..5094543 100644
--- a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertExecutableOps.cpp
+++ b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertExecutableOps.cpp
@@ -77,7 +77,7 @@
auto executableFormatString = detail::rewriteAttrToOperands(
createOp.getLoc(), executableBinaryOp.formatAttr(),
- importOp.getType().getInput(1), rewriter);
+ importOp.getFunctionType().getInput(1), rewriter);
assert(executableFormatString.hasValue() &&
executableFormatString.getValue().size() == 1);
auto executableRodata =
@@ -103,7 +103,7 @@
};
callOperands.append(adaptor.layouts().begin(), adaptor.layouts().end());
- auto importType = importOp.getType();
+ auto importType = importOp.getFunctionType();
auto callOp = rewriter.replaceOpWithNewOp<IREE::VM::CallVariadicOp>(
createOp, SymbolRefAttr::get(importOp), importType.getResults(),
segmentSizes, importType.getInputs(), callOperands);
diff --git a/iree/compiler/Dialect/HAL/Conversion/StandardToHAL/ConvertStructuralOps.cpp b/iree/compiler/Dialect/HAL/Conversion/StandardToHAL/ConvertStructuralOps.cpp
index e18b1d0..5e1077f 100644
--- a/iree/compiler/Dialect/HAL/Conversion/StandardToHAL/ConvertStructuralOps.cpp
+++ b/iree/compiler/Dialect/HAL/Conversion/StandardToHAL/ConvertStructuralOps.cpp
@@ -23,18 +23,19 @@
namespace iree_compiler {
namespace {
-class FuncOpSignatureConversion : public OpConversionPattern<mlir::FuncOp> {
+class FuncOpSignatureConversion
+ : public OpConversionPattern<mlir::func::FuncOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
- mlir::FuncOp funcOp, OpAdaptor adaptor,
+ mlir::func::FuncOp funcOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
auto &typeConverter = *getTypeConverter();
// Convert the input signature types.
// TODO(benvanik): dynamic shapes by passing in tensor dynamic dims.
- auto originalType = funcOp.getType();
+ auto originalType = funcOp.getFunctionType();
TypeConverter::SignatureConversion newSignature(
originalType.getNumInputs());
for (auto argType : llvm::enumerate(originalType.getInputs())) {
@@ -185,10 +186,11 @@
// We need to rewrite certain types on operands/results so use the default
// dynamic legality checker to force any ops using such types to run through
// our patterns.
- conversionTarget.addDynamicallyLegalOp<mlir::FuncOp>([&](mlir::FuncOp op) {
- return typeConverter.isSignatureLegal(op.getType()) &&
- typeConverter.isLegal(&op.getBody());
- });
+ conversionTarget.addDynamicallyLegalOp<mlir::func::FuncOp>(
+ [&](mlir::func::FuncOp op) {
+ return typeConverter.isSignatureLegal(op.getFunctionType()) &&
+ typeConverter.isLegal(&op.getBody());
+ });
patterns
.insert<FuncOpSignatureConversion, CallOpConversion, ReturnOpConversion,
diff --git a/iree/compiler/Dialect/HAL/Transforms/MaterializeInterfaces.cpp b/iree/compiler/Dialect/HAL/Transforms/MaterializeInterfaces.cpp
index 7d99766..d782458 100644
--- a/iree/compiler/Dialect/HAL/Transforms/MaterializeInterfaces.cpp
+++ b/iree/compiler/Dialect/HAL/Transforms/MaterializeInterfaces.cpp
@@ -98,8 +98,9 @@
//===----------------------------------------------------------------------===//
// Verifies that all types used with the given entry point are supportable.
-static LogicalResult verifyEntryPointTypes(mlir::FuncOp entryFuncOp) {
- for (auto inputType : llvm::enumerate(entryFuncOp.getType().getInputs())) {
+static LogicalResult verifyEntryPointTypes(mlir::func::FuncOp entryFuncOp) {
+ for (auto inputType :
+ llvm::enumerate(entryFuncOp.getFunctionType().getInputs())) {
if (inputType.value().isa<IREE::Stream::BindingType>() ||
inputType.value().isInteger(32)) {
// OK - directly translates to a HAL interface binding.
@@ -131,8 +132,9 @@
}
// Converts the usage of the given primitive |arg| to interface methods.
-static void convertOperandUsage(mlir::FuncOp sourceFuncOp, BlockArgument arg,
- unsigned pushConstantIdx, OpBuilder &builder) {
+static void convertOperandUsage(mlir::func::FuncOp sourceFuncOp,
+ BlockArgument arg, unsigned pushConstantIdx,
+ OpBuilder &builder) {
auto alignmentAttr = sourceFuncOp.getArgAttrOfType<IntegerAttr>(
arg.getArgNumber(), "stream.alignment");
auto valuesAttr = sourceFuncOp.getArgAttrOfType<ArrayAttr>(arg.getArgNumber(),
@@ -145,7 +147,7 @@
// Converts the usage of the given !stream.binding |arg| to interface methods.
static void convertBindingUsage(
- mlir::FuncOp sourceFuncOp, BlockArgument arg,
+ mlir::func::FuncOp sourceFuncOp, BlockArgument arg,
IREE::HAL::DescriptorSetLayoutAttr setLayoutAttr,
IREE::HAL::DescriptorSetBindingAttr bindingAttr) {
if (arg.use_empty()) return; // no-op
@@ -166,8 +168,8 @@
// Clones |sourceFuncOp| and updates its signature to match the |interfaceOp|
// and use the HAL interface access primitives.
-static mlir::FuncOp cloneFuncWithInterface(
- mlir::FuncOp sourceFuncOp, const ExecutableLayout &executableLayout,
+static mlir::func::FuncOp cloneFuncWithInterface(
+ mlir::func::FuncOp sourceFuncOp, const ExecutableLayout &executableLayout,
IREE::HAL::ExecutableLayoutAttr layoutAttr) {
// Clone so that we can do a bunch of unsafe in-place updates.
auto clonedFuncOp = sourceFuncOp.clone();
@@ -231,7 +233,7 @@
sourceExecutableOp.body().getOps<IREE::Stream::ExecutableExportOp>()) {
int ordinal = nextOrdinal++;
auto sourceFuncOp =
- sourceExecutableOp.getInnerModule().lookupSymbol<mlir::FuncOp>(
+ sourceExecutableOp.getInnerModule().lookupSymbol<mlir::func::FuncOp>(
exportOp.function_ref());
if (failed(verifyEntryPointTypes(sourceFuncOp))) return failure();
@@ -296,7 +298,7 @@
LogicalResult matchAndRewrite(IREE::HAL::InterfaceWorkgroupSizeOp sizeOp,
PatternRewriter &rewriter) const override {
// Lookup the entry point matching the parent.
- auto funcOp = sizeOp->getParentOfType<mlir::FuncOp>();
+ auto funcOp = sizeOp->getParentOfType<mlir::func::FuncOp>();
auto variantOp = funcOp->getParentOfType<IREE::HAL::ExecutableVariantOp>();
auto entryPointOp = dyn_cast<IREE::HAL::ExecutableEntryPointOp>(
SymbolTable::lookupSymbolIn(variantOp, funcOp.getName()));
diff --git a/iree/compiler/Dialect/HAL/Transforms/PackDispatchOperands.cpp b/iree/compiler/Dialect/HAL/Transforms/PackDispatchOperands.cpp
index dc03c3c..1de050f 100644
--- a/iree/compiler/Dialect/HAL/Transforms/PackDispatchOperands.cpp
+++ b/iree/compiler/Dialect/HAL/Transforms/PackDispatchOperands.cpp
@@ -134,7 +134,7 @@
// that was applied to dispatch ops above.
//
// This is a mirror of updateDispatchOp; see that for more information.
-static void updateExportFuncOp(mlir::FuncOp funcOp) {
+static void updateExportFuncOp(mlir::func::FuncOp funcOp) {
assert(!funcOp.empty() && "can't have empty exported functions");
auto &entryBlock = funcOp.getBody().front();
auto builder = OpBuilder::atBlockBegin(&entryBlock);
@@ -225,8 +225,8 @@
}
if (newArgTypes.size() != funcOp.getNumArguments()) {
// Changed argument count; update signature.
- funcOp.setType(
- builder.getFunctionType(newArgTypes, funcOp.getType().getResults()));
+ funcOp.setType(builder.getFunctionType(
+ newArgTypes, funcOp.getFunctionType().getResults()));
funcOp.setAllArgAttrs(newArgAttrs);
}
@@ -289,8 +289,8 @@
newArgTypes.push_back(builder.getI32Type());
}
- funcOp.setType(
- builder.getFunctionType(newArgTypes, funcOp.getType().getResults()));
+ funcOp.setType(builder.getFunctionType(
+ newArgTypes, funcOp.getFunctionType().getResults()));
}
//===----------------------------------------------------------------------===//
@@ -319,7 +319,8 @@
// Convert all public function signatures and manipulate the arguments.
for (auto executableOp :
getOperation().getOps<IREE::Stream::ExecutableOp>()) {
- for (auto funcOp : executableOp.getInnerModule().getOps<mlir::FuncOp>()) {
+ for (auto funcOp :
+ executableOp.getInnerModule().getOps<mlir::func::FuncOp>()) {
if (funcOp.isPublic()) {
updateExportFuncOp(funcOp);
}
diff --git a/iree/compiler/Dialect/HAL/Transforms/Passes.cpp b/iree/compiler/Dialect/HAL/Transforms/Passes.cpp
index 13e3edb..34787c7 100644
--- a/iree/compiler/Dialect/HAL/Transforms/Passes.cpp
+++ b/iree/compiler/Dialect/HAL/Transforms/Passes.cpp
@@ -59,7 +59,7 @@
// redundant store-loads are removed.
passManager.addNestedPass<IREE::Util::InitializerOp>(
IREE::Util::createSimplifyGlobalAccessesPass());
- passManager.addNestedPass<mlir::FuncOp>(
+ passManager.addNestedPass<mlir::func::FuncOp>(
IREE::Util::createSimplifyGlobalAccessesPass());
// Cleanup and canonicalization of util.global (and other util ops).
@@ -169,7 +169,8 @@
// better CSE/fold dispatch logic.
passManager.addNestedPass<IREE::Util::InitializerOp>(
createInlineDeviceSwitchesPass());
- passManager.addNestedPass<mlir::FuncOp>(createInlineDeviceSwitchesPass());
+ passManager.addNestedPass<mlir::func::FuncOp>(
+ createInlineDeviceSwitchesPass());
// Memoize device queries such that we don't need to repeatedly ask the same
// information at runtime.
@@ -180,14 +181,15 @@
// HACK: repeat dispatch ops for benchmarks.
if (benchmarkDispatchRepeatCount != 1) {
- passManager.addNestedPass<mlir::FuncOp>(
+ passManager.addNestedPass<mlir::func::FuncOp>(
createBenchmarkBatchDispatchesPass(benchmarkDispatchRepeatCount));
}
// Elide redundant command buffer state ops created during conversion.
passManager.addNestedPass<IREE::Util::InitializerOp>(
createElideRedundantCommandsPass());
- passManager.addNestedPass<mlir::FuncOp>(createElideRedundantCommandsPass());
+ passManager.addNestedPass<mlir::func::FuncOp>(
+ createElideRedundantCommandsPass());
// Fixup workgroup count calculations that may have used the affine dialect.
// Kind of random here but can happen if the benchmarking code does things.
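
Nested pass registration changes in the same way: the anchor type passed to addNestedPass moves from mlir::FuncOp to mlir::func::FuncOp. A sketch under the same assumptions, using the upstream canonicalizer as a stand-in pass and a hypothetical helper name:

#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"

static void addFunctionCleanups(mlir::PassManager &passManager) {
  // Was addNestedPass<mlir::FuncOp>(...).
  passManager.addNestedPass<mlir::func::FuncOp>(
      mlir::createCanonicalizerPass());
}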
diff --git a/iree/compiler/Dialect/HAL/Transforms/Passes.h b/iree/compiler/Dialect/HAL/Transforms/Passes.h
index 8bcda15..3b47c2b 100644
--- a/iree/compiler/Dialect/HAL/Transforms/Passes.h
+++ b/iree/compiler/Dialect/HAL/Transforms/Passes.h
@@ -10,6 +10,7 @@
#include "iree/compiler/Dialect/HAL/IR/HALOps.h"
#include "iree/compiler/Dialect/HAL/Target/TargetBackend.h"
#include "llvm/ADT/StringMap.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
@@ -111,7 +112,7 @@
//===----------------------------------------------------------------------===//
// Performs packing and materializes runtime packing code when required.
-std::unique_ptr<OperationPass<FuncOp>> createPackAllocationsPass();
+std::unique_ptr<OperationPass<func::FuncOp>> createPackAllocationsPass();
// Finds all resource lookups (such as hal.executable.lookup), materializes
// their cache storage and initialization, and rewrites the lookups to
@@ -121,14 +122,14 @@
// Eliminates redundant 'load's of variables within functions with no 'store'.
// TODO(#1124): replace with memory side effects once supported upstream.
-std::unique_ptr<OperationPass<FuncOp>> createCSEVariableLoadsPass();
+std::unique_ptr<OperationPass<func::FuncOp>> createCSEVariableLoadsPass();
// Elides stateful command buffer ops that set redundant state.
std::unique_ptr<OperationPass<void>> createElideRedundantCommandsPass();
// Repeats dispatches `iree-hal-repeat-dispatch-num` times, which is 1 by
// default.
-std::unique_ptr<OperationPass<FuncOp>> createBenchmarkBatchDispatchesPass(
+std::unique_ptr<OperationPass<func::FuncOp>> createBenchmarkBatchDispatchesPass(
unsigned repeatCount);
//===----------------------------------------------------------------------===//
diff --git a/iree/compiler/Dialect/HAL/Transforms/test/elide_redundant_commands.mlir b/iree/compiler/Dialect/HAL/Transforms/test/elide_redundant_commands.mlir
index 3184ed3..68c08e2 100644
--- a/iree/compiler/Dialect/HAL/Transforms/test/elide_redundant_commands.mlir
+++ b/iree/compiler/Dialect/HAL/Transforms/test/elide_redundant_commands.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -split-input-file -pass-pipeline='builtin.func(iree-hal-elide-redundant-commands)' %s | FileCheck %s
+// RUN: iree-opt -split-input-file -pass-pipeline='func.func(iree-hal-elide-redundant-commands)' %s | FileCheck %s
// Tests that redundant barriers are elided but barriers guarding ops are not.
diff --git a/iree/compiler/Dialect/HAL/Utils/InferCustomKernelsTargetInfoFromParent.cpp b/iree/compiler/Dialect/HAL/Utils/InferCustomKernelsTargetInfoFromParent.cpp
index 15d31b3..c72b0b9 100644
--- a/iree/compiler/Dialect/HAL/Utils/InferCustomKernelsTargetInfoFromParent.cpp
+++ b/iree/compiler/Dialect/HAL/Utils/InferCustomKernelsTargetInfoFromParent.cpp
@@ -14,7 +14,7 @@
namespace iree_compiler {
LogicalResult InferCustomKernelsTargetInfoFromParent(
- FuncOp entryPointFn, CustomKernelsTargetInfo &targetInfo) {
+ func::FuncOp entryPointFn, CustomKernelsTargetInfo &targetInfo) {
// Set the out-value to defaults early so that early returns produce
// consistent results and so that we can write simpler code below
// (for loop OR-ing booleans, assuming initial 'false' value).
diff --git a/iree/compiler/Dialect/HAL/Utils/InferCustomKernelsTargetInfoFromParent.h b/iree/compiler/Dialect/HAL/Utils/InferCustomKernelsTargetInfoFromParent.h
index e5f4aff..ecb415c 100644
--- a/iree/compiler/Dialect/HAL/Utils/InferCustomKernelsTargetInfoFromParent.h
+++ b/iree/compiler/Dialect/HAL/Utils/InferCustomKernelsTargetInfoFromParent.h
@@ -12,6 +12,7 @@
#include <cassert>
#include "iree/compiler/Utils/CustomKernelsTargetInfo.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Support/LogicalResult.h"
@@ -19,7 +20,7 @@
namespace iree_compiler {
LogicalResult InferCustomKernelsTargetInfoFromParent(
- FuncOp entryPointFn, CustomKernelsTargetInfo &targetInfo);
+ func::FuncOp entryPointFn, CustomKernelsTargetInfo &targetInfo);
} // namespace iree_compiler
} // namespace mlir
diff --git a/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/ConvertHALToVMVX.cpp b/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/ConvertHALToVMVX.cpp
index c3fd120..632483f 100644
--- a/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/ConvertHALToVMVX.cpp
+++ b/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/ConvertHALToVMVX.cpp
@@ -65,7 +65,7 @@
/// )
LogicalResult updateHALToVMVXEntryFuncOp(FuncOp funcOp,
TypeConverter &typeConverter) {
- auto originalType = funcOp.getType();
+ auto originalType = funcOp.getFunctionType();
if (originalType.getNumInputs() != 0 || originalType.getNumResults() != 0) {
return funcOp.emitError() << "exported functions must have no I/O";
}
@@ -118,7 +118,7 @@
}
// Get the argument to the function corresponding to the workgroup dim.
- auto workgroupDim = op->getParentOfType<mlir::FuncOp>().getArgument(
+ auto workgroupDim = op->getParentOfType<mlir::func::FuncOp>().getArgument(
kEntryArgWorkgroupX + dim);
rewriter.replaceOp(op, workgroupDim);
return success();
@@ -141,7 +141,7 @@
}
// Get the argument to the function corresponding to the workgroup dim.
- auto workgroupDim = op->getParentOfType<mlir::FuncOp>().getArgument(
+ auto workgroupDim = op->getParentOfType<mlir::func::FuncOp>().getArgument(
kEntryArgWorkgroupSizeX + dim);
rewriter.replaceOp(op, workgroupDim);
return success();
@@ -164,7 +164,7 @@
}
// Get the argument to the function corresponding to the workgroup dim.
- auto workgroupDim = op->getParentOfType<mlir::FuncOp>().getArgument(
+ auto workgroupDim = op->getParentOfType<mlir::func::FuncOp>().getArgument(
kEntryArgWorkgroupCountX + dim);
rewriter.replaceOp(op, workgroupDim);
return success();
@@ -181,8 +181,8 @@
IREE::HAL::InterfaceConstantLoadOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
// Find the vmvx.interface argument to the function.
- auto constantsArg =
- op->getParentOfType<mlir::FuncOp>().getArgument(kEntryArgConstants);
+ auto constantsArg = op->getParentOfType<mlir::func::FuncOp>().getArgument(
+ kEntryArgConstants);
assert(constantsArg && "entry point not conforming to requirements");
auto constantType =
constantsArg.getType().cast<MemRefType>().getElementType();
@@ -209,8 +209,8 @@
IREE::HAL::InterfaceBindingSubspanOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
// Find the vmvx.interface argument to the function.
- auto bindingsArg =
- op->getParentOfType<mlir::FuncOp>().getArgument(kEntryArgBindings);
+ auto bindingsArg = op->getParentOfType<mlir::func::FuncOp>().getArgument(
+ kEntryArgBindings);
assert(bindingsArg && bindingsArg.getType().isa<IREE::Util::ListType>() &&
"entry point not conforming to requirements");
diff --git a/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/ConvertHALToVMVX.h b/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/ConvertHALToVMVX.h
index 8cfc8ed..246fb0f 100644
--- a/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/ConvertHALToVMVX.h
+++ b/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/ConvertHALToVMVX.h
@@ -7,6 +7,7 @@
#ifndef IREE_COMPILER_DIALECT_MODULES_VMVX_CONVERSION_HALTOVMVX_CONVERTHALTOVMVX_H_
#define IREE_COMPILER_DIALECT_MODULES_VMVX_CONVERSION_HALTOVMVX_CONVERTHALTOVMVX_H_
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
@@ -15,7 +16,7 @@
// Converts a `() -> ()` function to the calling convention used by VMVX for
// passing in bindings, constants, and workgroup parameters.
-LogicalResult updateHALToVMVXEntryFuncOp(FuncOp funcOp,
+LogicalResult updateHALToVMVXEntryFuncOp(func::FuncOp funcOp,
TypeConverter &typeConverter);
// Populates conversion patterns from the IREE HAL dialect interface to the
diff --git a/iree/compiler/Dialect/Stream/Analysis/ResourceUsage.cpp b/iree/compiler/Dialect/Stream/Analysis/ResourceUsage.cpp
index f67d592..ba6600c 100644
--- a/iree/compiler/Dialect/Stream/Analysis/ResourceUsage.cpp
+++ b/iree/compiler/Dialect/Stream/Analysis/ResourceUsage.cpp
@@ -605,7 +605,7 @@
ResourceUsageAnalysis::ResourceUsageAnalysis(Operation *rootOp)
: explorer(rootOp, TraversalAction::SHALLOW), solver(explorer, allocator) {
explorer.setOpAction<IREE::Util::InitializerOp>(TraversalAction::RECURSE);
- explorer.setOpAction<mlir::FuncOp>(TraversalAction::RECURSE);
+ explorer.setOpAction<mlir::func::FuncOp>(TraversalAction::RECURSE);
explorer.setDialectAction<IREE::Stream::StreamDialect>(
TraversalAction::RECURSE);
// Ignore the contents of executables (linalg goo, etc).
diff --git a/iree/compiler/Dialect/Stream/Conversion/FlowToStream/ConvertFlowToStream.cpp b/iree/compiler/Dialect/Stream/Conversion/FlowToStream/ConvertFlowToStream.cpp
index e1cb883..830327f 100644
--- a/iree/compiler/Dialect/Stream/Conversion/FlowToStream/ConvertFlowToStream.cpp
+++ b/iree/compiler/Dialect/Stream/Conversion/FlowToStream/ConvertFlowToStream.cpp
@@ -391,7 +391,7 @@
// Update the entry point signatures in the module.
// Dispatch tensor arguments become bindings and all others are preserved as
// adaptor. Note that we only touch public (exported) functions.
- for (auto funcOp : moduleOp.getOps<mlir::FuncOp>()) {
+ for (auto funcOp : moduleOp.getOps<mlir::func::FuncOp>()) {
if (!funcOp.isPublic()) continue;
SmallVector<Type> newTypes;
diff --git a/iree/compiler/Dialect/Stream/Conversion/StandardToStream/ConvertStructuralOps.cpp b/iree/compiler/Dialect/Stream/Conversion/StandardToStream/ConvertStructuralOps.cpp
index 30e9024..0359101 100644
--- a/iree/compiler/Dialect/Stream/Conversion/StandardToStream/ConvertStructuralOps.cpp
+++ b/iree/compiler/Dialect/Stream/Conversion/StandardToStream/ConvertStructuralOps.cpp
@@ -22,16 +22,17 @@
namespace iree_compiler {
namespace {
-struct FuncOpSignatureConversion : public OpConversionPattern<mlir::FuncOp> {
+struct FuncOpSignatureConversion
+ : public OpConversionPattern<mlir::func::FuncOp> {
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
- mlir::FuncOp funcOp, OpAdaptor adaptor,
+ mlir::func::FuncOp funcOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
auto &typeConverter = *getTypeConverter();
// Convert the input signature types.
// TODO(benvanik): dynamic shapes by passing in tensor dynamic dims.
- auto originalType = funcOp.getType();
+ auto originalType = funcOp.getFunctionType();
TypeConverter::SignatureConversion newSignature(
originalType.getNumInputs());
for (auto argType : llvm::enumerate(originalType.getInputs())) {
@@ -221,10 +222,11 @@
// dynamic legality checker to force any ops using such types to run through
// our patterns.
- conversionTarget.addDynamicallyLegalOp<mlir::FuncOp>([&](mlir::FuncOp op) {
- return typeConverter.isSignatureLegal(op.getType()) &&
- typeConverter.isLegal(&op.getBody());
- });
+ conversionTarget.addDynamicallyLegalOp<mlir::func::FuncOp>(
+ [&](mlir::func::FuncOp op) {
+ return typeConverter.isSignatureLegal(op.getFunctionType()) &&
+ typeConverter.isLegal(&op.getBody());
+ });
conversionTarget.addDynamicallyLegalOp<mlir::func::CallOp>(
[&](mlir::func::CallOp op) {
return llvm::all_of(
diff --git a/iree/compiler/Dialect/Stream/IR/StreamOps.cpp b/iree/compiler/Dialect/Stream/IR/StreamOps.cpp
index 35576d5..ec5e8a7 100644
--- a/iree/compiler/Dialect/Stream/IR/StreamOps.cpp
+++ b/iree/compiler/Dialect/Stream/IR/StreamOps.cpp
@@ -1748,7 +1748,8 @@
// This is sloppy because the function has interleaved bindings and operands;
// if we had our own op we could just reuse the map we have for operands.
// static
-SmallVector<unsigned> CmdDispatchOp::makeOperandToArgMap(mlir::FuncOp funcOp) {
+SmallVector<unsigned> CmdDispatchOp::makeOperandToArgMap(
+ mlir::func::FuncOp funcOp) {
unsigned operandCount = llvm::count_if(
funcOp.getArgumentTypes(),
[](Type type) { return !type.isa<IREE::Stream::BindingType>(); });
@@ -1765,7 +1766,8 @@
}
// static
-SmallVector<unsigned> CmdDispatchOp::makeResourceToArgMap(mlir::FuncOp funcOp) {
+SmallVector<unsigned> CmdDispatchOp::makeResourceToArgMap(
+ mlir::func::FuncOp funcOp) {
unsigned operandCount = llvm::count_if(
funcOp.getArgumentTypes(),
[](Type type) { return type.isa<IREE::Stream::BindingType>(); });
@@ -2023,11 +2025,11 @@
builder.getStringAttr(sym_name), function_ref);
}
-::mlir::FuncOp ExecutableExportOp::getFunctionRef() {
+::mlir::func::FuncOp ExecutableExportOp::getFunctionRef() {
auto executableOp =
this->getOperation()->getParentOfType<IREE::Stream::ExecutableOp>();
if (!executableOp) return {};
- return executableOp.getInnerModule().lookupSymbol<::mlir::FuncOp>(
+ return executableOp.getInnerModule().lookupSymbol<::mlir::func::FuncOp>(
function_ref());
}
diff --git a/iree/compiler/Dialect/Stream/IR/StreamOps.h b/iree/compiler/Dialect/Stream/IR/StreamOps.h
index f781525..8b17a0f 100644
--- a/iree/compiler/Dialect/Stream/IR/StreamOps.h
+++ b/iree/compiler/Dialect/Stream/IR/StreamOps.h
@@ -15,6 +15,7 @@
#include "iree/compiler/Dialect/Util/IR/UtilOps.h"
#include "iree/compiler/Dialect/Util/IR/UtilTraits.h"
#include "iree/compiler/Dialect/Util/IR/UtilTypes.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
diff --git a/iree/compiler/Dialect/Stream/IR/StreamOps.td b/iree/compiler/Dialect/Stream/IR/StreamOps.td
index a4d5c73..605f575 100644
--- a/iree/compiler/Dialect/Stream/IR/StreamOps.td
+++ b/iree/compiler/Dialect/Stream/IR/StreamOps.td
@@ -2328,9 +2328,9 @@
Value getResultSize(unsigned idx) { return {}; }
// Builds a map of operand index to argument index.
- static SmallVector<unsigned> makeOperandToArgMap(mlir::FuncOp funcOp);
+ static SmallVector<unsigned> makeOperandToArgMap(mlir::func::FuncOp funcOp);
// Builds a map of resource to argument index of the corresponding binding.
- static SmallVector<unsigned> makeResourceToArgMap(mlir::FuncOp funcOp);
+ static SmallVector<unsigned> makeResourceToArgMap(mlir::func::FuncOp funcOp);
}];
let hasVerifier = 1;
@@ -2805,7 +2805,7 @@
];
let extraClassDeclaration = [{
- ::mlir::FuncOp getFunctionRef();
+ ::mlir::func::FuncOp getFunctionRef();
}];
}
diff --git a/iree/compiler/Dialect/Stream/Transforms/AnnotateDispatchArguments.cpp b/iree/compiler/Dialect/Stream/Transforms/AnnotateDispatchArguments.cpp
index 08b65e9..05c3116 100644
--- a/iree/compiler/Dialect/Stream/Transforms/AnnotateDispatchArguments.cpp
+++ b/iree/compiler/Dialect/Stream/Transforms/AnnotateDispatchArguments.cpp
@@ -324,7 +324,7 @@
: explorer(rootOp, TraversalAction::SHALLOW),
solver(explorer, allocator) {
explorer.setOpAction<IREE::Util::InitializerOp>(TraversalAction::RECURSE);
- explorer.setOpAction<mlir::FuncOp>(TraversalAction::RECURSE);
+ explorer.setOpAction<mlir::func::FuncOp>(TraversalAction::RECURSE);
explorer.setDialectAction<IREE::Stream::StreamDialect>(
TraversalAction::RECURSE);
// Ignore the contents of executables (linalg goo, etc).
diff --git a/iree/compiler/Dialect/Stream/Transforms/DumpStatistics.cpp b/iree/compiler/Dialect/Stream/Transforms/DumpStatistics.cpp
index 10e4acc..0584291 100644
--- a/iree/compiler/Dialect/Stream/Transforms/DumpStatistics.cpp
+++ b/iree/compiler/Dialect/Stream/Transforms/DumpStatistics.cpp
@@ -43,7 +43,7 @@
// stream.executable ops mapped by name.
llvm::MapVector<StringRef, IREE::Stream::ExecutableOp> executableOps;
// stream.executable exported function -> dispatches to it.
- llvm::MapVector<mlir::FuncOp, SmallVector<IREE::Stream::CmdDispatchOp>>
+ llvm::MapVector<mlir::func::FuncOp, SmallVector<IREE::Stream::CmdDispatchOp>>
exportDispatchOps;
// TODO(benvanik): resource allocations.
diff --git a/iree/compiler/Dialect/Stream/Transforms/ElideAsyncCopies.cpp b/iree/compiler/Dialect/Stream/Transforms/ElideAsyncCopies.cpp
index 27e09e1..5331b93 100644
--- a/iree/compiler/Dialect/Stream/Transforms/ElideAsyncCopies.cpp
+++ b/iree/compiler/Dialect/Stream/Transforms/ElideAsyncCopies.cpp
@@ -290,7 +290,7 @@
: explorer(rootOp, TraversalAction::SHALLOW),
solver(explorer, allocator) {
explorer.setOpAction<IREE::Util::InitializerOp>(TraversalAction::RECURSE);
- explorer.setOpAction<mlir::FuncOp>(TraversalAction::RECURSE);
+ explorer.setOpAction<mlir::func::FuncOp>(TraversalAction::RECURSE);
explorer.setDialectAction<IREE::Stream::StreamDialect>(
TraversalAction::RECURSE);
// Ignore the contents of executables (linalg goo, etc).
diff --git a/iree/compiler/Dialect/Stream/Transforms/FoldUniformOperands.cpp b/iree/compiler/Dialect/Stream/Transforms/FoldUniformOperands.cpp
index b4ea95f..37e1b26 100644
--- a/iree/compiler/Dialect/Stream/Transforms/FoldUniformOperands.cpp
+++ b/iree/compiler/Dialect/Stream/Transforms/FoldUniformOperands.cpp
@@ -43,7 +43,7 @@
// stream.cmd.dispatch @foo(%0, %1 : index, index)
// + deduped arguments in the executable
static void deduplicateOperands(
- mlir::FuncOp funcOp,
+ mlir::func::FuncOp funcOp,
SmallVector<IREE::Stream::CmdDispatchOp> &dispatchOps) {
auto &entryBlock = funcOp.front();
auto anyDispatchOp = dispatchOps.front();
@@ -115,7 +115,7 @@
}
LLVM_DEBUG({
- llvm::dbgs() << "deduplicateOperands for " << funcOp.sym_name() << "\n";
+ llvm::dbgs() << "deduplicateOperands for " << funcOp.getSymName() << "\n";
llvm::dbgs() << " dead operands: ";
llvm::interleaveComma(deadOperandsMap.set_bits(), llvm::dbgs());
llvm::dbgs() << "\n";
@@ -165,7 +165,7 @@
// stream.cmd.dispatch @foo(%c101 : index)
// + inlined %c1 in the executable
static void inlineUniformConstants(
- mlir::FuncOp funcOp,
+ mlir::func::FuncOp funcOp,
SmallVector<IREE::Stream::CmdDispatchOp> &dispatchOps) {
auto &entryBlock = funcOp.front();
auto anyDispatchOp = dispatchOps.front();
@@ -204,7 +204,8 @@
}
LLVM_DEBUG({
- llvm::dbgs() << "inlineUniformConstants for " << funcOp.sym_name() << "\n";
+ llvm::dbgs() << "inlineUniformConstants for " << funcOp.getSymName()
+ << "\n";
for (unsigned i = 0; i < operandValues.size(); ++i) {
if (!operandValues[i].hasValue()) continue;
llvm::dbgs() << " operand " << i << " = " << operandValues[i].getValue()
diff --git a/iree/compiler/Dialect/Stream/Transforms/FuseDispatchBindings.cpp b/iree/compiler/Dialect/Stream/Transforms/FuseDispatchBindings.cpp
index e55e705..e7fd525 100644
--- a/iree/compiler/Dialect/Stream/Transforms/FuseDispatchBindings.cpp
+++ b/iree/compiler/Dialect/Stream/Transforms/FuseDispatchBindings.cpp
@@ -172,7 +172,7 @@
// Updates an executable function to use the new bindings.
static void updateExecutableSignature(IREE::Stream::ExecutableOp executableOp,
IREE::Stream::ExecutableExportOp exportOp,
- mlir::FuncOp funcOp,
+ mlir::func::FuncOp funcOp,
ArrayRef<Binding> bindings) {
auto &entryBlock = funcOp.front();
diff --git a/iree/compiler/Dialect/Stream/Transforms/Passes.cpp b/iree/compiler/Dialect/Stream/Transforms/Passes.cpp
index 7ecf05a..8cbbcad 100644
--- a/iree/compiler/Dialect/Stream/Transforms/Passes.cpp
+++ b/iree/compiler/Dialect/Stream/Transforms/Passes.cpp
@@ -31,7 +31,7 @@
// redundant store-loads are removed.
passManager.addNestedPass<IREE::Util::InitializerOp>(
IREE::Util::createSimplifyGlobalAccessesPass());
- passManager.addNestedPass<mlir::FuncOp>(
+ passManager.addNestedPass<mlir::func::FuncOp>(
IREE::Util::createSimplifyGlobalAccessesPass());
// Cleanup and canonicalization of util.global (and other util ops).
@@ -109,7 +109,7 @@
// affinity/configuration assigned during placement.
passManager.addNestedPass<IREE::Util::InitializerOp>(
IREE::Stream::createEncodeHostTensorsPass());
- passManager.addNestedPass<mlir::FuncOp>(
+ passManager.addNestedPass<mlir::func::FuncOp>(
IREE::Stream::createEncodeHostTensorsPass());
passManager.addNestedPass<IREE::Stream::ExecutableOp>(
IREE::Stream::createEncodeDeviceTensorsPass());
@@ -127,7 +127,7 @@
// (though it's critical enough that it is not optional).
passManager.addNestedPass<IREE::Util::InitializerOp>(
IREE::Stream::createMaterializeCopyOnWritePass());
- passManager.addNestedPass<mlir::FuncOp>(
+ passManager.addNestedPass<mlir::func::FuncOp>(
IREE::Stream::createMaterializeCopyOnWritePass());
passManager.addPass(IREE::Stream::createElideAsyncCopiesPass());
@@ -144,13 +144,13 @@
// Combine async work into execution regions.
passManager.addNestedPass<IREE::Util::InitializerOp>(
IREE::Stream::createScheduleExecutionPass());
- passManager.addNestedPass<mlir::FuncOp>(
+ passManager.addNestedPass<mlir::func::FuncOp>(
IREE::Stream::createScheduleExecutionPass());
// Group concurrently executable work into waves.
passManager.addNestedPass<IREE::Util::InitializerOp>(
IREE::Stream::createScheduleConcurrencyPass());
- passManager.addNestedPass<mlir::FuncOp>(
+ passManager.addNestedPass<mlir::func::FuncOp>(
IREE::Stream::createScheduleConcurrencyPass());
// Materialize timepoints across the entire module. This simplifies scheduling
@@ -175,7 +175,7 @@
// lifetime allocations.
passManager.addNestedPass<IREE::Util::InitializerOp>(
IREE::Stream::createScheduleAllocationPass());
- passManager.addNestedPass<mlir::FuncOp>(
+ passManager.addNestedPass<mlir::func::FuncOp>(
IREE::Stream::createScheduleAllocationPass());
// TODO(benvanik): passes to convert alloc to alloca and thread through
@@ -187,13 +187,13 @@
// buffers and upload logic.
passManager.addNestedPass<IREE::Util::InitializerOp>(
IREE::Stream::createPackConstantsPass());
- passManager.addNestedPass<mlir::FuncOp>(
+ passManager.addNestedPass<mlir::func::FuncOp>(
IREE::Stream::createPackConstantsPass());
// Pack fused allocations based on lifetime.
passManager.addNestedPass<IREE::Util::InitializerOp>(
IREE::Stream::createPackAllocationsPass());
- passManager.addNestedPass<mlir::FuncOp>(
+ passManager.addNestedPass<mlir::func::FuncOp>(
IREE::Stream::createPackAllocationsPass());
// Layout packed slices to emit the arithmetic required for all resource
@@ -201,7 +201,7 @@
// below.
passManager.addNestedPass<IREE::Util::InitializerOp>(
IREE::Stream::createLayoutSlicesPass());
- passManager.addNestedPass<mlir::FuncOp>(
+ passManager.addNestedPass<mlir::func::FuncOp>(
IREE::Stream::createLayoutSlicesPass());
// Propagate subviews throughout the program to unify resource storage access.
diff --git a/iree/compiler/Dialect/Stream/Transforms/PropagateSubviews.cpp b/iree/compiler/Dialect/Stream/Transforms/PropagateSubviews.cpp
index 24c15fd..8caba29 100644
--- a/iree/compiler/Dialect/Stream/Transforms/PropagateSubviews.cpp
+++ b/iree/compiler/Dialect/Stream/Transforms/PropagateSubviews.cpp
@@ -357,9 +357,9 @@
// ->
// func @foo(%0: !stream.resource, %sz: index, %o: index, %l: index) {
// %1 = stream.resource.subview %0[%o] : {%sz} -> {%l}
-static void expandFuncOp(mlir::FuncOp op, ExpandedGlobalMap &globalMap,
+static void expandFuncOp(mlir::func::FuncOp op, ExpandedGlobalMap &globalMap,
IndexSet &indexSet, SubviewMap &subviewMap) {
- auto oldType = op.getType();
+ auto oldType = op.getFunctionType();
auto inputTypes = expandTypes(oldType.getInputs());
auto resultTypes = expandTypes(oldType.getResults());
auto newType = FunctionType::get(op.getContext(), inputTypes, resultTypes);
@@ -484,7 +484,7 @@
expandGlobalStoreOp(storeOp, globalMap, indexSet, subviewMap);
} else if (auto initializerOp = dyn_cast<IREE::Util::InitializerOp>(op)) {
expandInitializerOp(initializerOp, globalMap, indexSet, subviewMap);
- } else if (auto funcOp = dyn_cast<mlir::FuncOp>(op)) {
+ } else if (auto funcOp = dyn_cast<mlir::func::FuncOp>(op)) {
expandFuncOp(funcOp, globalMap, indexSet, subviewMap);
} else if (auto callOp = dyn_cast<mlir::func::CallOp>(op)) {
expandCallOp(callOp, indexSet, subviewMap);
diff --git a/iree/compiler/Dialect/Stream/Transforms/PropagateTimepoints.cpp b/iree/compiler/Dialect/Stream/Transforms/PropagateTimepoints.cpp
index d2b0e55..78789fd 100644
--- a/iree/compiler/Dialect/Stream/Transforms/PropagateTimepoints.cpp
+++ b/iree/compiler/Dialect/Stream/Transforms/PropagateTimepoints.cpp
@@ -362,9 +362,9 @@
// ->
// func @foo(%t: !stream.timepoint, %0: !stream.resource) {
// %1 = stream.timepoint.await %t, %0
-static void expandFuncOp(mlir::FuncOp op, ExpandedGlobalMap &globalMap,
+static void expandFuncOp(mlir::func::FuncOp op, ExpandedGlobalMap &globalMap,
BlockAndValueMapping &resourceTimepointMap) {
- auto oldType = op.getType();
+ auto oldType = op.getFunctionType();
auto inputTypes = expandTypes(oldType.getInputs());
auto resultTypes = expandTypes(oldType.getResults());
auto newType = FunctionType::get(op.getContext(), inputTypes, resultTypes);
@@ -545,7 +545,7 @@
expandGlobalStoreOp(storeOp, globalMap, resourceTimepointMap);
} else if (auto initializerOp = dyn_cast<IREE::Util::InitializerOp>(op)) {
expandInitializerOp(initializerOp, globalMap, resourceTimepointMap);
- } else if (auto funcOp = dyn_cast<mlir::FuncOp>(op)) {
+ } else if (auto funcOp = dyn_cast<mlir::func::FuncOp>(op)) {
expandFuncOp(funcOp, globalMap, resourceTimepointMap);
} else if (auto callOp = dyn_cast<mlir::func::CallOp>(op)) {
expandCallOp(callOp, resourceTimepointMap);
diff --git a/iree/compiler/Dialect/Stream/Transforms/RefineUsage.cpp b/iree/compiler/Dialect/Stream/Transforms/RefineUsage.cpp
index d05c4cf..3d26f94 100644
--- a/iree/compiler/Dialect/Stream/Transforms/RefineUsage.cpp
+++ b/iree/compiler/Dialect/Stream/Transforms/RefineUsage.cpp
@@ -205,15 +205,15 @@
// Applies usage analysis results to an MLIR function.
// All resource arguments and results, block arguments, and nested operations
// will have their lifetime specified.
-struct ApplyFuncOp : public UsageRefinementPattern<mlir::FuncOp> {
- using UsageRefinementPattern<mlir::FuncOp>::UsageRefinementPattern;
- LogicalResult matchAndRewrite(mlir::FuncOp op,
+struct ApplyFuncOp : public UsageRefinementPattern<mlir::func::FuncOp> {
+ using UsageRefinementPattern<mlir::func::FuncOp>::UsageRefinementPattern;
+ LogicalResult matchAndRewrite(mlir::func::FuncOp op,
PatternRewriter &rewriter) const override {
bool didChange = false;
// Arguments:
SmallVector<Type> newInputs;
- for (auto inputType : llvm::enumerate(op.getType().getInputs())) {
+ for (auto inputType : llvm::enumerate(op.getFunctionType().getInputs())) {
auto oldType = inputType.value().dyn_cast<IREE::Stream::ResourceType>();
if (!oldType) {
newInputs.push_back(inputType.value());
@@ -232,7 +232,7 @@
// Results:
SmallVector<Type> newOutputs;
auto anyReturnOp = *op.getOps<mlir::func::ReturnOp>().begin();
- for (auto outputType : llvm::enumerate(op.getType().getResults())) {
+ for (auto outputType : llvm::enumerate(op.getFunctionType().getResults())) {
auto oldType = outputType.value().dyn_cast<IREE::Stream::ResourceType>();
if (!oldType) {
newOutputs.push_back(outputType.value());
@@ -248,7 +248,7 @@
}
}
auto newFuncType = rewriter.getFunctionType(newInputs, newOutputs);
- if (op.getType() != newFuncType) {
+ if (op.getFunctionType() != newFuncType) {
op.setType(newFuncType);
didChange = true;
}
diff --git a/iree/compiler/Dialect/Stream/Transforms/SpecializeDispatches.cpp b/iree/compiler/Dialect/Stream/Transforms/SpecializeDispatches.cpp
index 75bc1bf..abfcad4 100644
--- a/iree/compiler/Dialect/Stream/Transforms/SpecializeDispatches.cpp
+++ b/iree/compiler/Dialect/Stream/Transforms/SpecializeDispatches.cpp
@@ -55,7 +55,7 @@
// Each dispatch gets a row in the table that can be selected based on the
// dispatch ordinal.
static ConstantTable buildConstantTable(
- mlir::FuncOp funcOp,
+ mlir::func::FuncOp funcOp,
SmallVector<IREE::Stream::CmdDispatchOp> &dispatchOps) {
auto anyDispatchOp = dispatchOps.front();
unsigned operandCount = anyDispatchOp.operands().size();
@@ -161,7 +161,7 @@
//
// TODO(benvanik): maybe a dedicated lookup table op to make further combining
// easier to do in a backend-generic way.
-static void insertConstantTableLookup(mlir::FuncOp funcOp,
+static void insertConstantTableLookup(mlir::func::FuncOp funcOp,
ConstantTable &constantTable) {
auto &entryBlock = funcOp.front();
auto operandToArgMap =
diff --git a/iree/compiler/Dialect/Stream/Transforms/test/fold_globals.mlir b/iree/compiler/Dialect/Stream/Transforms/test/fold_globals.mlir
index b58d5ba..fefaa46 100644
--- a/iree/compiler/Dialect/Stream/Transforms/test/fold_globals.mlir
+++ b/iree/compiler/Dialect/Stream/Transforms/test/fold_globals.mlir
@@ -6,13 +6,13 @@
// CHECK: util.global public mutable @uniformConstants = #stream.timepoint<immediate>
util.global public mutable @uniformConstants : !stream.timepoint
-builtin.func @foo() {
+func.func @foo() {
%timepoint = stream.timepoint.immediate => !stream.timepoint
// CHECK-NOT: util.global.store
util.global.store %timepoint, @uniformConstants : !stream.timepoint
return
}
-builtin.func @bar() {
+func.func @bar() {
%timepoint = stream.timepoint.immediate => !stream.timepoint
// CHECK-NOT: util.global.store
util.global.store %timepoint, @uniformConstants : !stream.timepoint
@@ -23,7 +23,7 @@
// CHECK-NOT: @immutable
util.global private @immutable = #stream.timepoint<immediate> : !stream.timepoint
-builtin.func @foo() -> !stream.timepoint {
+func.func @foo() -> !stream.timepoint {
// CHECK-NOT: util.global.load @immutable
// CHECK: %[[IMMEDIATE:.+]] = stream.timepoint.immediate => !stream.timepoint
%0 = util.global.load @immutable : !stream.timepoint
diff --git a/iree/compiler/Dialect/Stream/Transforms/test/layout_slices.mlir b/iree/compiler/Dialect/Stream/Transforms/test/layout_slices.mlir
index 26db0ee..0b8bb31 100644
--- a/iree/compiler/Dialect/Stream/Transforms/test/layout_slices.mlir
+++ b/iree/compiler/Dialect/Stream/Transforms/test/layout_slices.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -split-input-file -pass-pipeline='builtin.func(iree-stream-layout-slices)' -cse %s | FileCheck %s
+// RUN: iree-opt -split-input-file -pass-pipeline='func.func(iree-stream-layout-slices)' -cse %s | FileCheck %s
#layoutStaticConfig = #stream.resource_config<{
max_allocation_size = 1073741824,
diff --git a/iree/compiler/Dialect/Stream/Transforms/test/materialize_copy_on_write.mlir b/iree/compiler/Dialect/Stream/Transforms/test/materialize_copy_on_write.mlir
index 12e02d8..3242934 100644
--- a/iree/compiler/Dialect/Stream/Transforms/test/materialize_copy_on_write.mlir
+++ b/iree/compiler/Dialect/Stream/Transforms/test/materialize_copy_on_write.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -split-input-file -pass-pipeline='builtin.func(iree-stream-materialize-copy-on-write)' %s | FileCheck %s
+// RUN: iree-opt -split-input-file -pass-pipeline='func.func(iree-stream-materialize-copy-on-write)' %s | FileCheck %s
// Tests that block arguments (including function arguments) are always cloned.
// Until a whole-program analysis runs we don't know their semantics.
diff --git a/iree/compiler/Dialect/Stream/Transforms/test/pack_allocations.mlir b/iree/compiler/Dialect/Stream/Transforms/test/pack_allocations.mlir
index c14d3b9..8e2550d 100644
--- a/iree/compiler/Dialect/Stream/Transforms/test/pack_allocations.mlir
+++ b/iree/compiler/Dialect/Stream/Transforms/test/pack_allocations.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -split-input-file -pass-pipeline='builtin.func(iree-stream-pack-allocations)' %s | FileCheck %s
+// RUN: iree-opt -split-input-file -pass-pipeline='func.func(iree-stream-pack-allocations)' %s | FileCheck %s
// CHECK-LABEL: @packAllocations
// CHECK-SAME: (%[[SIZE_A:.+]]: index, %[[SIZE_B:.+]]: index)
diff --git a/iree/compiler/Dialect/Stream/Transforms/test/pack_constants.mlir b/iree/compiler/Dialect/Stream/Transforms/test/pack_constants.mlir
index 6c91d54..e0a77f9 100644
--- a/iree/compiler/Dialect/Stream/Transforms/test/pack_constants.mlir
+++ b/iree/compiler/Dialect/Stream/Transforms/test/pack_constants.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -split-input-file -pass-pipeline='builtin.func(iree-stream-pack-constants)' %s | FileCheck %s
+// RUN: iree-opt -split-input-file -pass-pipeline='func.func(iree-stream-pack-constants)' %s | FileCheck %s
// This is a high level test of the structure emitted by the pass.
// Subsequent tests focus on individual components.
diff --git a/iree/compiler/Dialect/Stream/Transforms/test/schedule_allocation.mlir b/iree/compiler/Dialect/Stream/Transforms/test/schedule_allocation.mlir
index 00c39f9..32592ee 100644
--- a/iree/compiler/Dialect/Stream/Transforms/test/schedule_allocation.mlir
+++ b/iree/compiler/Dialect/Stream/Transforms/test/schedule_allocation.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -split-input-file -pass-pipeline='builtin.func(iree-stream-schedule-allocation)' %s | FileCheck %s
+// RUN: iree-opt -split-input-file -pass-pipeline='func.func(iree-stream-schedule-allocation)' %s | FileCheck %s
// Tests that async constant ops get extracted into a dedicated constant op
// outside of the execution region. This allows us to handle them in various
diff --git a/iree/compiler/Dialect/Stream/Transforms/test/schedule_concurrency.mlir b/iree/compiler/Dialect/Stream/Transforms/test/schedule_concurrency.mlir
index 794d510..60a9383 100644
--- a/iree/compiler/Dialect/Stream/Transforms/test/schedule_concurrency.mlir
+++ b/iree/compiler/Dialect/Stream/Transforms/test/schedule_concurrency.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -split-input-file -pass-pipeline="builtin.func(iree-stream-schedule-concurrency)" %s | FileCheck %s
+// RUN: iree-opt -split-input-file -pass-pipeline="func.func(iree-stream-schedule-concurrency)" %s | FileCheck %s
// Tests that when favor=min-peak-memory we assume ops are in an order that
// reduces live memory ranges and only optimistically put them in concurrency
diff --git a/iree/compiler/Dialect/Stream/Transforms/test/schedule_execution.mlir b/iree/compiler/Dialect/Stream/Transforms/test/schedule_execution.mlir
index 8524578..02a264b 100644
--- a/iree/compiler/Dialect/Stream/Transforms/test/schedule_execution.mlir
+++ b/iree/compiler/Dialect/Stream/Transforms/test/schedule_execution.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -split-input-file -pass-pipeline="builtin.func(iree-stream-schedule-execution)" %s | FileCheck %s
+// RUN: iree-opt -split-input-file -pass-pipeline="func.func(iree-stream-schedule-execution)" %s | FileCheck %s
// Tests basic partitioning of multiple ops.
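For reference, a minimal C++ sketch of the same anchor rename when a pipeline is built from its textual form; `canonicalize` is a stand-in pass, and IREE's stream passes would need to be registered before their names parse this way:

```cpp
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"

// Sketch only, not part of the patch: "builtin.func(...)" no longer matches
// any operation; the function anchor is now spelled "func.func".
mlir::LogicalResult buildFromText(mlir::PassManager &pm) {
  return mlir::parsePassPipeline("func.func(canonicalize)", pm);
}
```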
diff --git a/iree/compiler/Dialect/Util/IR/UtilDialect.cpp b/iree/compiler/Dialect/Util/IR/UtilDialect.cpp
index ea39e8b..db3fd35 100644
--- a/iree/compiler/Dialect/Util/IR/UtilDialect.cpp
+++ b/iree/compiler/Dialect/Util/IR/UtilDialect.cpp
@@ -16,6 +16,7 @@
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/DialectImplementation.h"
+#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/OpImplementation.h"
@@ -159,14 +160,14 @@
OpTy> {};
template <typename OpTy>
- static void add(DialectRegistry &registry) {
- registry.addOpInterface<OpTy, ExternalModel<OpTy>>();
+ static void add(MLIRContext *ctx) {
+ OpTy::template attachInterface<ExternalModel<OpTy>>(*ctx);
}
template <typename OpTy1, typename OpTy2, typename... More>
- static void add(DialectRegistry &registry) {
- add<OpTy1>(registry);
- add<OpTy2, More...>(registry);
+ static void add(MLIRContext *ctx) {
+ add<OpTy1>(ctx);
+ add<OpTy2, More...>(ctx);
}
};
@@ -176,10 +177,13 @@
// Must ensure that any dependent dialects are registered.
registry.insert<arith::ArithmeticDialect>();
- GenericNumericCastExternalModel::add<
- arith::BitcastOp, arith::ExtFOp, arith::ExtUIOp, arith::ExtSIOp,
- arith::FPToSIOp, arith::FPToUIOp, arith::IndexCastOp, arith::TruncFOp,
- arith::TruncIOp, arith::SIToFPOp, arith::UIToFPOp>(registry);
+ registry.addExtension(+[](MLIRContext *ctx,
+ arith::ArithmeticDialect *dialect) {
+ GenericNumericCastExternalModel::add<
+ arith::BitcastOp, arith::ExtFOp, arith::ExtUIOp, arith::ExtSIOp,
+ arith::FPToSIOp, arith::FPToUIOp, arith::IndexCastOp, arith::TruncFOp,
+ arith::TruncIOp, arith::SIToFPOp, arith::UIToFPOp>(ctx);
+ });
}
} // namespace Util
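For reference, a minimal sketch (not part of the patch) of the registration style used above; `MyCastOpModel` is a hypothetical placeholder for an external model class such as a `GenericNumericCastExternalModel` specialization:

```cpp
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/IR/Dialect.h"

// The extension runs lazily, once ArithmeticDialect is loaded into a context,
// replacing the eager registry.addOpInterface<OpTy, Model>() calls.
void registerMyExternalModels(mlir::DialectRegistry &registry) {
  registry.addExtension(+[](mlir::MLIRContext *ctx,
                            mlir::arith::ArithmeticDialect *dialect) {
    mlir::arith::IndexCastOp::attachInterface<MyCastOpModel>(*ctx);
  });
}
```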
diff --git a/iree/compiler/Dialect/Util/IR/UtilOps.cpp b/iree/compiler/Dialect/Util/IR/UtilOps.cpp
index 45211fd..31e5233 100644
--- a/iree/compiler/Dialect/Util/IR/UtilOps.cpp
+++ b/iree/compiler/Dialect/Util/IR/UtilOps.cpp
@@ -688,15 +688,15 @@
void InitializerOp::build(OpBuilder &builder, OperationState &result,
ArrayRef<NamedAttribute> attrs) {
- result.addAttribute(
- "type", TypeAttr::get(FunctionType::get(builder.getContext(), {}, {})));
+ result.addAttribute("function_type", TypeAttr::get(FunctionType::get(
+ builder.getContext(), {}, {})));
result.addRegion();
result.attributes.append(attrs.begin(), attrs.end());
}
ParseResult InitializerOp::parse(OpAsmParser &parser, OperationState &result) {
- result.addAttribute(
- "type", TypeAttr::get(FunctionType::get(result.getContext(), {}, {})));
+ result.addAttribute("function_type", TypeAttr::get(FunctionType::get(
+ result.getContext(), {}, {})));
if (parser.parseOptionalAttrDictWithKeyword(result.attributes)) {
return failure();
}
@@ -709,7 +709,8 @@
void InitializerOp::print(OpAsmPrinter &p) {
Operation *op = getOperation();
- p.printOptionalAttrDictWithKeyword(op->getAttrs(), /*elidedAttrs=*/{"type"});
+ p.printOptionalAttrDictWithKeyword(op->getAttrs(),
+ /*elidedAttrs=*/{"function_type"});
p << " ";
p.printRegion(body());
}
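A short sketch (not part of the patch) of reading the renamed attribute back generically; `op` can be any op implementing FunctionOpInterface, such as `util.initializer`:

```cpp
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Operation.h"

// "function_type" replaces the old "type" attribute name on function-like ops.
mlir::FunctionType getSignature(mlir::Operation *op) {
  return op->getAttrOfType<mlir::TypeAttr>("function_type")
      .getValue()
      .cast<mlir::FunctionType>();
}
```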
diff --git a/iree/compiler/Dialect/Util/IR/UtilOps.h b/iree/compiler/Dialect/Util/IR/UtilOps.h
index f9c1f0f..0793c49 100644
--- a/iree/compiler/Dialect/Util/IR/UtilOps.h
+++ b/iree/compiler/Dialect/Util/IR/UtilOps.h
@@ -12,9 +12,11 @@
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Dialect.h"
+#include "mlir/IR/FunctionInterfaces.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/IR/SymbolTable.h"
+#include "mlir/Interfaces/CallInterfaces.h"
#include "mlir/Interfaces/ControlFlowInterfaces.h"
#include "mlir/Interfaces/InferTypeOpInterface.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
diff --git a/iree/compiler/Dialect/Util/IR/UtilOps.td b/iree/compiler/Dialect/Util/IR/UtilOps.td
index 08f6782..67a2185 100644
--- a/iree/compiler/Dialect/Util/IR/UtilOps.td
+++ b/iree/compiler/Dialect/Util/IR/UtilOps.td
@@ -377,7 +377,7 @@
}];
let arguments = (ins
- TypeAttr:$type
+ TypeAttr:$function_type
);
let regions = (region AnyRegion:$body);
@@ -395,8 +395,10 @@
Block *addEntryBlock();
Block *addBlock();
- FunctionType getType() {
- return getTypeAttr().getValue().cast<FunctionType>();
+ FunctionType getFunctionType() {
+ return getOperation()->getAttrOfType<TypeAttr>(getTypeAttrName())
+ .getValue()
+ .cast<FunctionType>();
}
/// Returns the argument types of this function.
diff --git a/iree/compiler/Dialect/Util/Transforms/test/combine_initializers.mlir b/iree/compiler/Dialect/Util/Transforms/test/combine_initializers.mlir
index b61a022..53fada5 100644
--- a/iree/compiler/Dialect/Util/Transforms/test/combine_initializers.mlir
+++ b/iree/compiler/Dialect/Util/Transforms/test/combine_initializers.mlir
@@ -2,7 +2,7 @@
// Tests that multiple initializers are combined in their module order.
-builtin.func private @extern() -> index
+func.func private @extern() -> index
// CHECK: util.global private mutable @global0 : index
util.global private mutable @global0 : index
@@ -32,7 +32,7 @@
// CHECK-NEXT: util.initializer.return
// CHECK-LABEL: @orderedCombining
-builtin.func @orderedCombining(%arg0: index) -> (index, index, index) {
+func.func @orderedCombining(%arg0: index) -> (index, index, index) {
util.global.store %arg0, @global0 : index
%value0 = util.global.load @global0 : index
%value1 = util.global.load @global1 : index
diff --git a/iree/compiler/Dialect/Util/Transforms/test/fold_globals.mlir b/iree/compiler/Dialect/Util/Transforms/test/fold_globals.mlir
index fd1441a..c86c8d6 100644
--- a/iree/compiler/Dialect/Util/Transforms/test/fold_globals.mlir
+++ b/iree/compiler/Dialect/Util/Transforms/test/fold_globals.mlir
@@ -2,13 +2,13 @@
// CHECK: util.global public mutable @uniformConstants = 5 : index
util.global public mutable @uniformConstants : index
-builtin.func @foo() {
+func.func @foo() {
%c5 = arith.constant 5 : index
// CHECK-NOT: util.global.store %c5, @uniformConstants : index
util.global.store %c5, @uniformConstants : index
return
}
-builtin.func @bar() {
+func.func @bar() {
%c5 = arith.constant 5 : index
// CHECK-NOT: util.global.store %c5, @uniformConstants : index
util.global.store %c5, @uniformConstants : index
@@ -19,13 +19,13 @@
// CHECK: util.global public mutable @nonuniformConstants : index
util.global public mutable @nonuniformConstants : index
-builtin.func @foo() {
+func.func @foo() {
%c5 = arith.constant 5 : index
// CHECK: util.global.store %c5, @nonuniformConstants : index
util.global.store %c5, @nonuniformConstants : index
return
}
-builtin.func @bar() {
+func.func @bar() {
%c6 = arith.constant 6 : index
// CHECK: util.global.store %c6, @nonuniformConstants : index
util.global.store %c6, @nonuniformConstants : index
@@ -38,7 +38,7 @@
util.global private mutable @chained0 : index
// CHECK-NOT: util.global private mutable @chained1 : index
util.global private mutable @chained1 : index
-builtin.func @foo() -> index {
+func.func @foo() -> index {
// CHECK: %[[VALUE:.+]] = util.global.load @chained0 : index
%0 = util.global.load @chained0 : index
// CHECK-NOT: util.global.store
@@ -53,14 +53,14 @@
util.global public mutable @unchained0 : index
// CHECK: util.global public mutable @unchained1 : index
util.global public mutable @unchained1 : index
-builtin.func @foo() {
+func.func @foo() {
// CHECK: %[[VALUE:.+]] = util.global.load @unchained0 : index
%0 = util.global.load @unchained0 : index
// CHECK: util.global.store %[[VALUE]], @unchained1 : index
util.global.store %0, @unchained1 : index
return
}
-builtin.func @bar(%arg0: index) {
+func.func @bar(%arg0: index) {
// CHECK: util.global.store %arg0, @unchained1 : index
util.global.store %arg0, @unchained1 : index
return
@@ -83,7 +83,7 @@
util.global.store %c6, @immutable1 : index
util.initializer.return
}
-builtin.func @foo(%arg0: index) -> (index, index, index) {
+func.func @foo(%arg0: index) -> (index, index, index) {
// CHECK-DAG: %[[C5:.+]] = arith.constant 5
%0 = util.global.load @immutable0 : index
// CHECK-DAG: %[[C6:.+]] = arith.constant 6
@@ -102,7 +102,7 @@
util.global private mutable @used0 = 5 : index
// CHECK: util.global private mutable @used1 : index
util.global private mutable @used1 : index
-builtin.func @foo(%arg0: index, %arg1: index) -> (index, index) {
+func.func @foo(%arg0: index, %arg1: index) -> (index, index) {
// CHECK: %[[VALUE0:.+]] = util.global.load @used0 : index
%0 = util.global.load @used0 : index
// CHECK: %[[VALUE1:.+]] = util.global.load @used1 : index
@@ -134,7 +134,7 @@
util.global private @dupeCst0 {noinline} = 5 : index
// CHECK-NOT: util.global private @dupeCst1
util.global private @dupeCst1 {noinline} = 5 : index
-builtin.func @foo() -> (index, index) {
+func.func @foo() -> (index, index) {
// CHECK-DAG: %[[VALUE0:.+]] = util.global.load @dupeCst0
%0 = util.global.load @dupeCst0 : index
// CHECK-DAG: %[[VALUE1:.+]] = util.global.load @dupeCst0
@@ -155,7 +155,7 @@
util.global.store %c7, @nondupeCst1 : index
util.initializer.return
}
-builtin.func @foo() -> (index, index) {
+func.func @foo() -> (index, index) {
// CHECK-DAG: %[[C6:.+]] = arith.constant 6 : index
%0 = util.global.load @nondupeCst0 : index
// CHECK-DAG: %[[C7:.+]] = arith.constant 7 : index
diff --git a/iree/compiler/Dialect/Util/Transforms/test/fuse_globals.mlir b/iree/compiler/Dialect/Util/Transforms/test/fuse_globals.mlir
index e3e4767..2fff5b5 100644
--- a/iree/compiler/Dialect/Util/Transforms/test/fuse_globals.mlir
+++ b/iree/compiler/Dialect/Util/Transforms/test/fuse_globals.mlir
@@ -3,7 +3,7 @@
// CHECK: util.global private mutable @fusable0 : index
util.global private mutable @fusable0 : index
util.global private mutable @fusable1 : index
-builtin.func @foo(%arg0: index) -> (index, index) {
+func.func @foo(%arg0: index) -> (index, index) {
// CHECK: util.global.store %arg0, @fusable0
util.global.store %arg0, @fusable0 : index
// CHECK-NOT: util.global.store %arg0, @fusable1
@@ -24,7 +24,7 @@
util.global private mutable @unfusable0 : index
// CHECK: util.global private mutable @unfusable1 : index
util.global private mutable @unfusable1 : index
-builtin.func @foo(%arg0: index) -> (index, index) {
+func.func @foo(%arg0: index) -> (index, index) {
// CHECK: util.global.store %arg0, @unfusable0 : index
util.global.store %arg0, @unfusable0 : index
// CHECK: util.global.store %arg0, @unfusable1 : index
@@ -36,7 +36,7 @@
// CHECK: return %[[VALUE0]], %[[VALUE1]]
return %0, %1 : index, index
}
-builtin.func @bar(%arg0: index) {
+func.func @bar(%arg0: index) {
util.global.store %arg0, @unfusable0 : index
return
}
@@ -54,7 +54,7 @@
util.global private mutable @unfusableInit0 = 5 : index
// CHECK: util.global private mutable @unfusableInit1 = 6 : index
util.global private mutable @unfusableInit1 = 6 : index
-builtin.func @foo(%arg0: index) -> (index, index) {
+func.func @foo(%arg0: index) -> (index, index) {
// CHECK: util.global.store %arg0, @unfusableInit0
util.global.store %arg0, @unfusableInit0 : index
// CHECK: util.global.store %arg0, @unfusableInit1
@@ -73,12 +73,12 @@
util.global private mutable @unfusableDivergent0 : index
// CHECK: util.global private mutable @unfusableDivergent1
util.global private mutable @unfusableDivergent1 : index
-builtin.func @fn_a(%arg0: index) {
+func.func @fn_a(%arg0: index) {
util.global.store %arg0, @unfusableDivergent0 : index
util.global.store %arg0, @unfusableDivergent1 : index
return
}
-builtin.func @fn_b(%arg0: index) {
+func.func @fn_b(%arg0: index) {
util.global.store %arg0, @unfusableDivergent0 : index
return
}
diff --git a/iree/compiler/Dialect/Util/Transforms/test/hoist_into_globals.mlir b/iree/compiler/Dialect/Util/Transforms/test/hoist_into_globals.mlir
index 002c0b6..567890a 100644
--- a/iree/compiler/Dialect/Util/Transforms/test/hoist_into_globals.mlir
+++ b/iree/compiler/Dialect/Util/Transforms/test/hoist_into_globals.mlir
@@ -4,7 +4,7 @@
module @hoist_simple_const_expr {
// CHECK: util.global private @[[HOISTED_SYM:.*]] : i32
// CHECK: func @main
- builtin.func @main() -> (i32) {
+ func.func @main() -> (i32) {
%0 = arith.constant 0 : i32
%1 = arith.constant 1 : i32
// CHECK-NOT: arith.constant
@@ -33,7 +33,7 @@
// CHECK: return %[[VAL]]
// CHECK-NOT: util.initializer
module @do_not_hoist_variable_op {
- builtin.func @main() -> (i32) {
+ func.func @main() -> (i32) {
%0 = arith.constant 0 : i32
%1 = arith.constant 1 : i32
%2 = "iree_unregistered.var_expr"(%0, %1) : (i32, i32) -> i32
@@ -46,7 +46,7 @@
// CHECK-NOT: util.global
// CHECK-NOT: util.initializer
module @do_not_hoist_variable_operands {
- builtin.func @main(%arg0 : i32) -> (i32) {
+ func.func @main(%arg0 : i32) -> (i32) {
%0 = arith.constant 0 : i32
%2 = "iree_unregistered.const_expr"(%0, %arg0) : (i32, i32) -> i32
return %2 : i32
@@ -58,7 +58,7 @@
// CHECK-NOT: util.global
// CHECK-NOT: util.initializer
module @do_not_hoist_sub_byte_aligned_scalar_leaf {
- builtin.func @main() -> (i32) {
+ func.func @main() -> (i32) {
%0 = arith.constant 1 : i1
%2 = "iree_unregistered.var_expr"(%0) : (i1) -> i32
return %2 : i32
@@ -70,7 +70,7 @@
// CHECK-NOT: util.global
// CHECK-NOT: util.initializer
module @do_not_hoist_sub_byte_aligned_tensor_leaf {
- builtin.func @main() -> (i32) {
+ func.func @main() -> (i32) {
%0 = arith.constant dense<true> : tensor<i1>
%2 = "iree_unregistered.var_expr"(%0) : (tensor<i1>) -> i32
return %2 : i32
@@ -83,7 +83,7 @@
// Can hoist a const-expr tree that transitively includes sub-byte aligned
// values.
module @hoist_sub_byte_aligned_scalar_transitive {
- builtin.func @main() -> (i32) {
+ func.func @main() -> (i32) {
%0 = arith.constant 1 : i1
%2 = "iree_unregistered.const_expr"(%0) : (i1) -> i32
return %2 : i32
@@ -96,7 +96,7 @@
// Can hoist a const-expr tree that transitively includes sub-byte aligned
// values.
module @hoist_sub_byte_aligned_tensor_transitive {
- builtin.func @main() -> (i32) {
+ func.func @main() -> (i32) {
%0 = arith.constant dense<true> : tensor<i1>
%2 = "iree_unregistered.const_expr"(%0) : (tensor<i1>) -> i32
return %2 : i32
@@ -113,7 +113,7 @@
util.global private @latent_global : i32
// CHECK: func @main
- builtin.func @main() -> (i32, i32, i32) {
+ func.func @main() -> (i32, i32, i32) {
// CHECK-DAG: %[[LOAD_HOISTED_0:.*]] = util.global.load @[[HOISTED_0]] : i32
// CHECK-DAG: %[[LOAD_HOISTED_1:.*]] = util.global.load @[[HOISTED_1]] : i32
// CHECK-DAG: %[[RESULT:.*]] = "iree_unregistered.var_expr"(%[[LOAD_HOISTED_1]])
@@ -150,7 +150,7 @@
module @hoist_non_leaf_const_expr {
// CHECK: util.global private @[[HOISTED:.*]] : i32
// CHECK: func @main
- builtin.func @main() -> (i32) {
+ func.func @main() -> (i32) {
// CHECK: %[[LOAD_HOISTED:.*]] = util.global.load @[[HOISTED]] : i32
// CHECK: %[[RESULT:.*]] = "iree_unregistered.non_leaf_const_expr"(%hoisted)
// CHECK: return %[[RESULT]]
@@ -176,7 +176,7 @@
module @hoist_implicit_capture {
// CHECK: util.global private @[[HOISTED_SYM:.*]] : i32
// CHECK: func @main
- builtin.func @main() -> (i32) {
+ func.func @main() -> (i32) {
%0 = arith.constant 0 : i32
%1 = arith.constant 1 : i32
// CHECK-NOT: arith.constant
diff --git a/iree/compiler/Dialect/Util/Transforms/test/hoist_into_globals_linalg.mlir b/iree/compiler/Dialect/Util/Transforms/test/hoist_into_globals_linalg.mlir
index 871ac46..ef5ae65 100644
--- a/iree/compiler/Dialect/Util/Transforms/test/hoist_into_globals_linalg.mlir
+++ b/iree/compiler/Dialect/Util/Transforms/test/hoist_into_globals_linalg.mlir
@@ -7,7 +7,7 @@
module @compute_hoisted {
// CHECK: util.global private @[[HOISTED:.*]] : tensor<5x6xf32>
// CHECK: func @main
- builtin.func @main() -> (tensor<5x6xf32>) {
+ func.func @main() -> (tensor<5x6xf32>) {
%cst_0 = arith.constant dense<1.270000e+02> : tensor<f32>
// A non-leaf broadcast.
@@ -42,7 +42,7 @@
module @broadcast_treated_as_leaf {
// CHECK-NOT: util.global
// CHECK: func @main
- builtin.func @main() -> (tensor<5x6xf32>) {
+ func.func @main() -> (tensor<5x6xf32>) {
%cst_0 = arith.constant dense<1.270000e+02> : tensor<f32>
// CHECK: linalg.init_tensor
%0 = linalg.init_tensor [5, 6] : tensor<5x6xf32>
diff --git a/iree/compiler/Dialect/Util/Transforms/test/simplify_global_accesses.mlir b/iree/compiler/Dialect/Util/Transforms/test/simplify_global_accesses.mlir
index bca8c4a..59bed47 100644
--- a/iree/compiler/Dialect/Util/Transforms/test/simplify_global_accesses.mlir
+++ b/iree/compiler/Dialect/Util/Transforms/test/simplify_global_accesses.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -split-input-file -pass-pipeline='builtin.func(iree-util-simplify-global-accesses)' %s | FileCheck %s
+// RUN: iree-opt -split-input-file -pass-pipeline='func.func(iree-util-simplify-global-accesses)' %s | FileCheck %s
util.global private @varA = dense<1> : tensor<2xi32>
util.global private @varB = dense<3> : tensor<2x4xi32>
diff --git a/iree/compiler/Dialect/Util/Transforms/test/strip_debug_ops.mlir b/iree/compiler/Dialect/Util/Transforms/test/strip_debug_ops.mlir
index d4c518c..e6a1245 100644
--- a/iree/compiler/Dialect/Util/Transforms/test/strip_debug_ops.mlir
+++ b/iree/compiler/Dialect/Util/Transforms/test/strip_debug_ops.mlir
@@ -1,4 +1,4 @@
-// RUN: iree-opt -split-input-file -pass-pipeline='builtin.func(iree-util-strip-debug-ops)' %s | FileCheck %s
+// RUN: iree-opt -split-input-file -pass-pipeline='func.func(iree-util-strip-debug-ops)' %s | FileCheck %s
// CHECK-LABEL: @stripAssert
func @stripAssert(%cond: i1) {
diff --git a/iree/compiler/Dialect/VM/Conversion/ImportUtils.h b/iree/compiler/Dialect/VM/Conversion/ImportUtils.h
index bf2e057..cf2440b 100644
--- a/iree/compiler/Dialect/VM/Conversion/ImportUtils.h
+++ b/iree/compiler/Dialect/VM/Conversion/ImportUtils.h
@@ -57,7 +57,7 @@
state.addAttributes(llvm::to_vector<4>(operation->getDialectAttrs()));
state.addAttribute("callee", SymbolRefAttr::get(importOp));
- auto importType = importOp.getType();
+ auto importType = importOp.getFunctionType();
for (auto resultType : operation->getResultTypes()) {
if (failed(typeConverter.convertType(resultType, state.types))) {
return None;
diff --git a/iree/compiler/Dialect/VM/Conversion/StandardToVM/ConvertStandardToVM.cpp b/iree/compiler/Dialect/VM/Conversion/StandardToVM/ConvertStandardToVM.cpp
index 8f07600..aa2c873 100644
--- a/iree/compiler/Dialect/VM/Conversion/StandardToVM/ConvertStandardToVM.cpp
+++ b/iree/compiler/Dialect/VM/Conversion/StandardToVM/ConvertStandardToVM.cpp
@@ -69,7 +69,7 @@
LogicalResult matchAndRewrite(
FuncOp srcOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
- FunctionType srcFuncType = srcOp.getType();
+ FunctionType srcFuncType = srcOp.getFunctionType();
TypeConverter::SignatureConversion signatureConversion(
srcOp.getNumArguments());
diff --git a/iree/compiler/Dialect/VM/Conversion/VMToEmitC/ConvertVMToEmitC.cpp b/iree/compiler/Dialect/VM/Conversion/VMToEmitC/ConvertVMToEmitC.cpp
index f3cc426..bab0054 100644
--- a/iree/compiler/Dialect/VM/Conversion/VMToEmitC/ConvertVMToEmitC.cpp
+++ b/iree/compiler/Dialect/VM/Conversion/VMToEmitC/ConvertVMToEmitC.cpp
@@ -135,7 +135,7 @@
auto moduleOp = funcOp.getOperation()->getParentOfType<IREE::VM::ModuleOp>();
- FunctionType funcType = funcOp.getType();
+ FunctionType funcType = funcOp.getFunctionType();
std::string name =
std::string(moduleOp.getName()) + "_" + std::string(funcOp.getName());
std::string moduleTypeName = (moduleOp.getName() + "_t").str();
@@ -172,7 +172,7 @@
auto newFuncType = mlir::FunctionType::get(
ctx, {inputTypes}, {emitc::OpaqueType::get(ctx, "iree_status_t")});
- auto newFuncOp = builder.create<mlir::FuncOp>(loc, name, newFuncType);
+ auto newFuncOp = builder.create<mlir::func::FuncOp>(loc, name, newFuncType);
newFuncOp.getOperation()->setAttr("emitc.static", UnitAttr::get(ctx));
@@ -214,7 +214,7 @@
vmAnalysis = typeConverter.lookupAnalysis(newFuncOp);
if (failed(vmAnalysis)) {
return funcOp.emitError()
- << "newly created mlir::FuncOp not found in cache.";
+ << "newly created mlir::func::FuncOp not found in cache.";
}
// Add constant ops for local refs
@@ -458,7 +458,8 @@
}
/// Releases refs which are local to the function as well as ref arguments.
-void releaseRefs(OpBuilder &builder, Location location, mlir::FuncOp funcOp,
+void releaseRefs(OpBuilder &builder, Location location,
+ mlir::func::FuncOp funcOp,
IREE::VM::EmitCTypeConverter &typeConverter) {
auto ctx = builder.getContext();
@@ -567,7 +568,7 @@
auto blockBuilder = [&builder, &location,
&typeConverter](emitc::CallOp &callOp) {
Block *block = builder.getBlock();
- mlir::FuncOp funcOp = cast<mlir::FuncOp>(block->getParentOp());
+ mlir::func::FuncOp funcOp = cast<mlir::func::FuncOp>(block->getParentOp());
releaseRefs(builder, location, funcOp, typeConverter);
@@ -589,7 +590,7 @@
auto ctx = builder.getContext();
Block *block = builder.getBlock();
- mlir::FuncOp funcOp = cast<mlir::FuncOp>(block->getParentOp());
+ mlir::func::FuncOp funcOp = cast<mlir::func::FuncOp>(block->getParentOp());
releaseRefs(builder, location, funcOp, typeConverter);
@@ -615,7 +616,7 @@
/// value, i.e. a truthy value branches to the continuation block when
/// `negateCondition` is false.
mlir::func::CallOp failableCall(
- OpBuilder &builder, Location location, mlir::FuncOp &callee,
+ OpBuilder &builder, Location location, mlir::func::FuncOp &callee,
ArrayRef<Value> operands,
const std::function<void(mlir::func::CallOp &)> &failureBlockBuilder,
bool negateCondition = false) {
@@ -665,12 +666,13 @@
}
mlir::func::CallOp returnIfError(OpBuilder &builder, Location location,
- mlir::FuncOp &callee, ArrayRef<Value> operands,
+ mlir::func::FuncOp &callee,
+ ArrayRef<Value> operands,
IREE::VM::EmitCTypeConverter &typeConverter) {
auto blockBuilder = [&builder, &location,
&typeConverter](mlir::func::CallOp &callOp) {
Block *block = builder.getBlock();
- mlir::FuncOp funcOp = cast<mlir::FuncOp>(block->getParentOp());
+ mlir::func::FuncOp funcOp = cast<mlir::func::FuncOp>(block->getParentOp());
releaseRefs(builder, location, funcOp, typeConverter);
@@ -699,8 +701,8 @@
ctx, {emitc::PointerType::get(emitc::OpaqueType::get(ctx, "void"))},
{});
- auto funcOp =
- builder.create<mlir::FuncOp>(loc, moduleName + "_destroy", funcType);
+ auto funcOp = builder.create<mlir::func::FuncOp>(
+ loc, moduleName + "_destroy", funcType);
typeConverter.analysisCache.insert(
std::make_pair(funcOp.getOperation(), VMAnalysis()));
@@ -760,8 +762,8 @@
emitc::OpaqueType::get(ctx, "iree_vm_module_state_t")))},
{emitc::OpaqueType::get(ctx, "iree_status_t")});
- auto funcOp = builder.create<mlir::FuncOp>(loc, moduleName + "_alloc_state",
- funcType);
+ auto funcOp = builder.create<mlir::func::FuncOp>(
+ loc, moduleName + "_alloc_state", funcType);
typeConverter.analysisCache.insert(
std::make_pair(funcOp.getOperation(), VMAnalysis()));
@@ -1012,8 +1014,8 @@
emitc::OpaqueType::get(ctx, "iree_vm_module_state_t"))},
{});
- auto funcOp =
- builder.create<mlir::FuncOp>(loc, moduleName + "_free_state", funcType);
+ auto funcOp = builder.create<mlir::func::FuncOp>(
+ loc, moduleName + "_free_state", funcType);
typeConverter.analysisCache.insert(
std::make_pair(funcOp.getOperation(), VMAnalysis()));
@@ -1125,7 +1127,7 @@
},
{emitc::OpaqueType::get(ctx, "iree_status_t")});
- auto funcOp = builder.create<mlir::FuncOp>(
+ auto funcOp = builder.create<mlir::func::FuncOp>(
loc, moduleName + "_resolve_import", funcType);
typeConverter.analysisCache.insert(
@@ -1207,8 +1209,8 @@
emitc::OpaqueType::get(ctx, "iree_vm_module_t")))},
{emitc::OpaqueType::get(ctx, "iree_status_t")});
- auto funcOp =
- builder.create<mlir::FuncOp>(loc, moduleName + "_create", funcType);
+ auto funcOp = builder.create<mlir::func::FuncOp>(
+ loc, moduleName + "_create", funcType);
typeConverter.analysisCache.insert(
std::make_pair(funcOp.getOperation(), VMAnalysis()));
@@ -1464,16 +1466,16 @@
StringRef funcName;
};
-class FuncOpConversion : public OpConversionPattern<mlir::FuncOp> {
+class FuncOpConversion : public OpConversionPattern<mlir::func::FuncOp> {
public:
- using OpConversionPattern<mlir::FuncOp>::OpConversionPattern;
+ using OpConversionPattern<mlir::func::FuncOp>::OpConversionPattern;
private:
LogicalResult matchAndRewrite(
- mlir::FuncOp funcOp, OpAdaptor adaptor,
+ mlir::func::FuncOp funcOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
TypeConverter::SignatureConversion signatureConverter(
- funcOp.getType().getNumInputs());
+ funcOp.getFunctionType().getNumInputs());
TypeConverter typeConverter;
for (const auto &arg : llvm::enumerate(funcOp.getArguments())) {
Type convertedType =
@@ -1487,7 +1489,7 @@
rewriter.updateRootInPlace(funcOp, [&] {
funcOp.setType(
rewriter.getFunctionType(signatureConverter.getConvertedTypes(),
- funcOp.getType().getResults()));
+ funcOp.getFunctionType().getResults()));
});
return success();
}
@@ -1520,8 +1522,8 @@
rewriter.startRootUpdate(exportOp.getOperation());
- mlir::FuncOp funcOp =
- lookupSymbolRef<mlir::FuncOp>(exportOp.getOperation(), "function_ref");
+ mlir::func::FuncOp funcOp = lookupSymbolRef<mlir::func::FuncOp>(
+ exportOp.getOperation(), "function_ref");
auto vmAnalysis = typeConverter->lookupAnalysis(funcOp);
if (failed(vmAnalysis)) {
@@ -1554,7 +1556,7 @@
ctx, {inputTypes}, {emitc::OpaqueType::get(ctx, "iree_status_t")});
auto newFuncOp =
- rewriter.create<mlir::FuncOp>(loc, newFuncName, newFuncType);
+ rewriter.create<mlir::func::FuncOp>(loc, newFuncName, newFuncType);
VMAnalysis newVmAnalysis;
newVmAnalysis.numRefArguments = numRefArgs;
@@ -1659,7 +1661,7 @@
FailureOr<std::pair<Value, Value>> castModuleAndStateStructs(
ConversionPatternRewriter &rewriter, IREE::VM::ExportOp &exportOp,
- mlir::FuncOp &newFuncOp) const {
+ mlir::func::FuncOp &newFuncOp) const {
auto ctx = exportOp.getContext();
auto loc = exportOp.getLoc();
@@ -1705,14 +1707,14 @@
FailureOr<std::pair<GeneratedStruct, GeneratedStruct>>
typedefArgumentAndResultStructs(ConversionPatternRewriter &rewriter,
IREE::VM::ExportOp &exportOp,
- mlir::FuncOp &newFuncOp) const {
+ mlir::func::FuncOp &newFuncOp) const {
auto loc = exportOp.getLoc();
IREE::VM::EmitCTypeConverter *typeConverter =
this->template getTypeConverter<IREE::VM::EmitCTypeConverter>();
- mlir::FuncOp funcOp =
- lookupSymbolRef<mlir::FuncOp>(exportOp.getOperation(), "function_ref");
+ mlir::func::FuncOp funcOp = lookupSymbolRef<mlir::func::FuncOp>(
+ exportOp.getOperation(), "function_ref");
auto vmAnalysis = typeConverter->lookupAnalysis(funcOp);
if (failed(vmAnalysis)) {
@@ -1802,7 +1804,7 @@
void castArgumentAndResultStructs(ConversionPatternRewriter &rewriter,
IREE::VM::ExportOp &exportOp,
- mlir::FuncOp &newFuncOp,
+ mlir::func::FuncOp &newFuncOp,
GeneratedStruct &argumentStruct,
GeneratedStruct &resultStruct) const {
auto ctx = exportOp.getContext();
@@ -1909,8 +1911,8 @@
IREE::VM::EmitCTypeConverter *typeConverter =
this->template getTypeConverter<IREE::VM::EmitCTypeConverter>();
- mlir::FuncOp funcOp =
- lookupSymbolRef<mlir::FuncOp>(exportOp.getOperation(), "function_ref");
+ mlir::func::FuncOp funcOp = lookupSymbolRef<mlir::func::FuncOp>(
+ exportOp.getOperation(), "function_ref");
auto vmAnalysis = typeConverter->lookupAnalysis(funcOp);
if (failed(vmAnalysis)) {
@@ -1972,8 +1974,8 @@
IREE::VM::EmitCTypeConverter *typeConverter =
this->template getTypeConverter<IREE::VM::EmitCTypeConverter>();
- mlir::FuncOp funcOp =
- lookupSymbolRef<mlir::FuncOp>(exportOp.getOperation(), "function_ref");
+ mlir::func::FuncOp funcOp = lookupSymbolRef<mlir::func::FuncOp>(
+ exportOp.getOperation(), "function_ref");
auto vmAnalysis = typeConverter->lookupAnalysis(funcOp);
if (failed(vmAnalysis)) {
@@ -2038,13 +2040,13 @@
}
if (importOp.isVariadic()) {
- if (failed(createVariadicImportShims(importOp.getType(), importOp,
+ if (failed(createVariadicImportShims(importOp.getFunctionType(), importOp,
rewriter))) {
return failure();
}
} else {
- if (failed(
- createImportShim(importOp.getType(), importOp, -1, rewriter))) {
+ if (failed(createImportShim(importOp.getFunctionType(), importOp, -1,
+ rewriter))) {
return failure();
}
}
@@ -2101,8 +2103,8 @@
<< "Failed to build function type for wrapper";
}
- auto newFuncOp = rewriter.create<mlir::FuncOp>(loc, newFuncName.getValue(),
- newFuncType.getValue());
+ auto newFuncOp = rewriter.create<mlir::func::FuncOp>(
+ loc, newFuncName.getValue(), newFuncType.getValue());
getTypeConverter<IREE::VM::EmitCTypeConverter>()->analysisCache.insert(
std::make_pair(newFuncOp.getOperation(), VMAnalysis{}));
@@ -2115,7 +2117,7 @@
Block *block =
rewriter.createBlock(&newFuncOp.getBody(), newFuncOp.getBody().end());
- for (Type type : newFuncOp.getType().getInputs()) {
+ for (Type type : newFuncOp.getFunctionType().getInputs()) {
block->addArgument(type, loc);
}
@@ -2395,7 +2397,7 @@
}
LogicalResult packArgumentBuffer(ArrayRef<Type> inputTypes,
- mlir::FuncOp &funcOp, Value call,
+ mlir::func::FuncOp &funcOp, Value call,
ConversionPatternRewriter &rewriter,
Location loc) const {
auto ctx = rewriter.getContext();
@@ -2520,7 +2522,7 @@
}
LogicalResult unpackResultBuffer(ArrayRef<Type> resultTypes,
- mlir::FuncOp &funcOp, Value call,
+ mlir::func::FuncOp &funcOp, Value call,
ConversionPatternRewriter &rewriter,
Location loc) const {
auto ctx = rewriter.getContext();
@@ -2785,8 +2787,8 @@
LogicalResult matchAndRewrite(
CallOpTy op, Adaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
- mlir::FuncOp funcOp =
- lookupSymbolRef<mlir::FuncOp>(op.getOperation(), "callee");
+ mlir::func::FuncOp funcOp =
+ lookupSymbolRef<mlir::func::FuncOp>(op.getOperation(), "callee");
IREE::VM::ImportOp importOp =
lookupSymbolRef<IREE::VM::ImportOp>(op.getOperation(), "callee");
@@ -2806,13 +2808,13 @@
LogicalResult rewriteInternalCall(Operation *op, Adaptor adaptor,
ConversionPatternRewriter &rewriter,
- mlir::FuncOp funcOp) const {
+ mlir::func::FuncOp funcOp) const {
auto loc = op->getLoc();
SmallVector<Value, 4> updatedOperands;
SmallVector<Value, 4> resultOperands;
- auto parentFuncOp = op->getParentOfType<mlir::FuncOp>();
+ auto parentFuncOp = op->getParentOfType<mlir::func::FuncOp>();
BlockArgument stackArg = parentFuncOp.getArgument(0);
BlockArgument moduleArg = parentFuncOp.getArgument(1);
@@ -2854,7 +2856,7 @@
int importOrdinal = importOp.ordinal().getValue().getZExtValue();
- auto funcOp = op->getParentOfType<mlir::FuncOp>();
+ auto funcOp = op->getParentOfType<mlir::func::FuncOp>();
BlockArgument stackArg = funcOp.getArgument(0);
BlockArgument stateArg = funcOp.getArgument(2);
@@ -2913,7 +2915,8 @@
if (!funcName.hasValue())
return op->emitError() << "Couldn't build name to imported function";
- auto callee = moduleOp.lookupSymbol<mlir::FuncOp>(funcName.getValue());
+ auto callee =
+ moduleOp.lookupSymbol<mlir::func::FuncOp>(funcName.getValue());
if (callee == nullptr) {
return op->emitError() << "Couldn't find function with name `"
<< funcName.getValue() << "`";
@@ -3086,7 +3089,7 @@
auto loc = cmpOp.getLoc();
auto funcOp =
- cmpOp.getOperation()->template getParentOfType<mlir::FuncOp>();
+ cmpOp.getOperation()->template getParentOfType<mlir::func::FuncOp>();
IREE::VM::EmitCTypeConverter *typeConverter =
this->template getTypeConverter<IREE::VM::EmitCTypeConverter>();
@@ -3161,7 +3164,7 @@
auto ctx = cmpOp.getContext();
auto loc = cmpOp.getLoc();
- auto funcOp = cmpOp.getOperation()->getParentOfType<mlir::FuncOp>();
+ auto funcOp = cmpOp.getOperation()->getParentOfType<mlir::func::FuncOp>();
IREE::VM::EmitCTypeConverter *typeConverter =
this->template getTypeConverter<IREE::VM::EmitCTypeConverter>();
@@ -3296,7 +3299,7 @@
}
auto funcOp =
- constRefRodataOp.getOperation()->getParentOfType<mlir::FuncOp>();
+ constRefRodataOp.getOperation()->getParentOfType<mlir::func::FuncOp>();
BlockArgument stateArg = funcOp.getArgument(2);
auto rodataBuffersPtr = rewriter.create<emitc::CallOp>(
@@ -3393,7 +3396,7 @@
return success();
}
- auto funcOp = op.getOperation()->getParentOfType<mlir::FuncOp>();
+ auto funcOp = op.getOperation()->getParentOfType<mlir::func::FuncOp>();
IREE::VM::EmitCTypeConverter *typeConverter =
this->template getTypeConverter<IREE::VM::EmitCTypeConverter>();
@@ -3522,7 +3525,7 @@
return success();
}
- auto funcOp = op.getOperation()->getParentOfType<mlir::FuncOp>();
+ auto funcOp = op.getOperation()->getParentOfType<mlir::func::FuncOp>();
IREE::VM::EmitCTypeConverter *typeConverter =
getTypeConverter<IREE::VM::EmitCTypeConverter>();
@@ -3632,7 +3635,7 @@
auto ctx = op.getContext();
auto loc = op.getLoc();
- auto funcOp = op.getOperation()->getParentOfType<mlir::FuncOp>();
+ auto funcOp = op.getOperation()->getParentOfType<mlir::func::FuncOp>();
IREE::VM::EmitCTypeConverter *typeConverter =
getTypeConverter<IREE::VM::EmitCTypeConverter>();
@@ -3755,7 +3758,7 @@
passthroughBlock =
rewriter.createBlock(parentRegion, parentRegion->end());
- auto funcOp = op.getOperation()->getParentOfType<mlir::FuncOp>();
+ auto funcOp = op.getOperation()->getParentOfType<mlir::func::FuncOp>();
IREE::VM::EmitCTypeConverter *typeConverter =
getTypeConverter<IREE::VM::EmitCTypeConverter>();
@@ -3776,7 +3779,7 @@
OpBuilder::InsertionGuard guard(rewriter);
failureBlock = rewriter.createBlock(parentRegion, parentRegion->end());
- auto funcOp = op.getOperation()->getParentOfType<mlir::FuncOp>();
+ auto funcOp = op.getOperation()->getParentOfType<mlir::func::FuncOp>();
IREE::VM::EmitCTypeConverter *typeConverter =
getTypeConverter<IREE::VM::EmitCTypeConverter>();
@@ -3889,7 +3892,7 @@
}
auto funcOp =
- loadOp.getOperation()->template getParentOfType<mlir::FuncOp>();
+ loadOp.getOperation()->template getParentOfType<mlir::func::FuncOp>();
BlockArgument stateArg = funcOp.getArgument(2);
auto rwDataPtr = rewriter.create<emitc::CallOp>(
@@ -3955,7 +3958,7 @@
auto globalOrdinal = globalOp.ordinal().getValue().getZExtValue();
- auto funcOp = op->getParentOfType<mlir::FuncOp>();
+ auto funcOp = op->getParentOfType<mlir::func::FuncOp>();
IREE::VM::EmitCTypeConverter *typeConverter =
this->template getTypeConverter<IREE::VM::EmitCTypeConverter>();
@@ -4069,7 +4072,7 @@
}
auto funcOp =
- storeOp.getOperation()->template getParentOfType<mlir::FuncOp>();
+ storeOp.getOperation()->template getParentOfType<mlir::func::FuncOp>();
BlockArgument stateArg = funcOp.getArgument(2);
auto rwDataPtr = rewriter.create<emitc::CallOp>(
@@ -4240,7 +4243,7 @@
/*applicableOperator=*/StringAttr::get(ctx, "&"),
/*operand=*/listOp.getResult());
- auto funcOp = allocOp.getOperation()->getParentOfType<mlir::FuncOp>();
+ auto funcOp = allocOp.getOperation()->getParentOfType<mlir::func::FuncOp>();
IREE::VM::EmitCTypeConverter *typeConverter =
getTypeConverter<IREE::VM::EmitCTypeConverter>();
@@ -4671,7 +4674,7 @@
/*operands=*/ArrayRef<Value>{refOp.getResult()},
/*typeConverter=*/*typeConverter);
- auto funcOp = setOp.getOperation()->getParentOfType<mlir::FuncOp>();
+ auto funcOp = setOp.getOperation()->getParentOfType<mlir::func::FuncOp>();
auto vmAnalysis = typeConverter->lookupAnalysis(funcOp);
if (failed(vmAnalysis)) {
@@ -5096,9 +5099,10 @@
mlir::func::FuncDialect, mlir::arith::ArithmeticDialect,
mlir::math::MathDialect>();
- target.addDynamicallyLegalOp<mlir::FuncOp>([&](mlir::FuncOp op) {
- return typeConverter.isSignatureLegal(op.getType());
- });
+ target.addDynamicallyLegalOp<mlir::func::FuncOp>(
+ [&](mlir::func::FuncOp op) {
+ return typeConverter.isSignatureLegal(op.getFunctionType());
+ });
// Structural ops
target.addLegalOp<IREE::VM::ModuleOp>();
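A compact sketch (not part of the patch) of the two renames this file exercises throughout: the op moved into the `func` namespace and its signature accessor became `getFunctionType()`; `builder`, `loc`, and `someOp` are assumed to exist:

```cpp
#include "mlir/Dialect/Func/IR/FuncOps.h"

// mlir::FuncOp no longer exists; the op now lives in mlir::func.
mlir::func::FuncOp createEmptyWrapper(mlir::OpBuilder &builder,
                                      mlir::Location loc) {
  auto type = builder.getFunctionType(/*inputs=*/{}, /*results=*/{});
  return builder.create<mlir::func::FuncOp>(loc, "wrapper", type);
}

// getType() on function-like ops is replaced by getFunctionType().
mlir::FunctionType enclosingSignature(mlir::Operation *someOp) {
  return someOp->getParentOfType<mlir::func::FuncOp>().getFunctionType();
}
```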
diff --git a/iree/compiler/Dialect/VM/Conversion/VMToEmitC/EmitCTypeConverter.cpp b/iree/compiler/Dialect/VM/Conversion/VMToEmitC/EmitCTypeConverter.cpp
index 7d2dfab..516b532 100644
--- a/iree/compiler/Dialect/VM/Conversion/VMToEmitC/EmitCTypeConverter.cpp
+++ b/iree/compiler/Dialect/VM/Conversion/VMToEmitC/EmitCTypeConverter.cpp
@@ -80,12 +80,12 @@
Optional<Value> EmitCTypeConverter::materializeRef(Value ref) {
assert(ref.getType().isa<IREE::VM::RefType>());
- mlir::FuncOp funcOp;
+ mlir::func::FuncOp funcOp;
if (auto definingOp = ref.getDefiningOp()) {
- funcOp = definingOp->getParentOfType<mlir::FuncOp>();
+ funcOp = definingOp->getParentOfType<mlir::func::FuncOp>();
} else {
Operation *op = ref.cast<BlockArgument>().getOwner()->getParentOp();
- funcOp = cast<mlir::FuncOp>(op);
+ funcOp = cast<mlir::func::FuncOp>(op);
}
auto vmAnalysis = lookupAnalysis(funcOp);
diff --git a/iree/compiler/Dialect/VM/Conversion/VMToEmitC/EmitCTypeConverter.h b/iree/compiler/Dialect/VM/Conversion/VMToEmitC/EmitCTypeConverter.h
index 59bfec0..0856a5b 100644
--- a/iree/compiler/Dialect/VM/Conversion/VMToEmitC/EmitCTypeConverter.h
+++ b/iree/compiler/Dialect/VM/Conversion/VMToEmitC/EmitCTypeConverter.h
@@ -10,6 +10,7 @@
#include "iree/compiler/Dialect/VM/Conversion/VMToEmitC/VMAnalysis.h"
#include "iree/compiler/Dialect/VM/IR/VMTypes.h"
#include "mlir/Dialect/EmitC/IR/EmitC.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Transforms/DialectConversion.h"
namespace mlir {
@@ -21,7 +22,7 @@
public:
EmitCTypeConverter();
FailureOr<std::reference_wrapper<VMAnalysis>> lookupAnalysis(
- mlir::FuncOp &funcOp) {
+ mlir::func::FuncOp &funcOp) {
return lookupAnalysis(funcOp.getOperation());
}
FailureOr<std::reference_wrapper<VMAnalysis>> lookupAnalysis(
diff --git a/iree/compiler/Dialect/VM/Conversion/VMToEmitC/VMAnalysis.h b/iree/compiler/Dialect/VM/Conversion/VMToEmitC/VMAnalysis.h
index 628a7aa..e8f93e4 100644
--- a/iree/compiler/Dialect/VM/Conversion/VMToEmitC/VMAnalysis.h
+++ b/iree/compiler/Dialect/VM/Conversion/VMToEmitC/VMAnalysis.h
@@ -22,7 +22,7 @@
Operation *op = funcOp.getOperation();
registerAllocation = RegisterAllocation(op);
valueLiveness = ValueLiveness(op);
- originalFunctionType = funcOp.getType();
+ originalFunctionType = funcOp.getFunctionType();
}
VMAnalysis(VMAnalysis &&) = default;
diff --git a/iree/compiler/Dialect/VM/IR/VMOps.cpp b/iree/compiler/Dialect/VM/IR/VMOps.cpp
index 9dfa11c..d9a2064 100644
--- a/iree/compiler/Dialect/VM/IR/VMOps.cpp
+++ b/iree/compiler/Dialect/VM/IR/VMOps.cpp
@@ -86,9 +86,7 @@
void FuncOp::print(OpAsmPrinter &p) {
Operation *op = getOperation();
- FunctionType fnType = getType();
- function_interface_impl::printFunctionOp(
- p, op, fnType.getInputs(), /*isVariadic=*/false, fnType.getResults());
+ function_interface_impl::printFunctionOp(p, op, /*isVariadic=*/false);
}
void FuncOp::build(OpBuilder &builder, OperationState &result, StringRef name,
@@ -97,7 +95,7 @@
result.addRegion();
result.addAttribute(SymbolTable::getSymbolAttrName(),
builder.getStringAttr(name));
- result.addAttribute("type", TypeAttr::get(type));
+ result.addAttribute("function_type", TypeAttr::get(type));
result.attributes.append(attrs.begin(), attrs.end());
if (argAttrs.empty()) {
return;
@@ -113,13 +111,14 @@
assert(empty() && "function already has an entry block");
auto *entry = new Block();
push_back(entry);
- SmallVector<Location> locs(getType().getNumInputs(), getLoc());
- entry->addArguments(getType().getInputs(), locs);
+ SmallVector<Location> locs(getFunctionType().getNumInputs(), getLoc());
+ entry->addArguments(getFunctionType().getInputs(), locs);
return entry;
}
LogicalResult FuncOp::verifyType() {
- auto type = getTypeAttr().getValue();
+ auto type =
+ getOperation()->getAttrOfType<TypeAttr>(getTypeAttrName()).getValue();
if (!type.isa<FunctionType>())
return emitOpError("requires '" + getTypeAttrName() +
"' attribute of function type");
@@ -268,7 +267,7 @@
if (auto name = getArgAttrOfType<StringAttr>(i, "vm.name")) {
p << '%' << name.getValue() << " : ";
}
- p.printType(getType().getInput(i));
+ p.printType(getFunctionType().getInput(i));
if (getArgAttrOfType<UnitAttr>(i, "vm.variadic")) {
p << " ...";
}
@@ -279,9 +278,9 @@
p << ")";
if (getResultTypes().size() == 1) {
p << " -> ";
- p.printType(getType().getResult(0));
+ p.printType(getFunctionType().getResult(0));
} else if (getResultTypes().size() > 1) {
- p << " -> (" << getType().getResults() << ")";
+ p << " -> (" << getFunctionType().getResults() << ")";
}
mlir::function_interface_impl::printFunctionAttributes(
p, op, getArgumentTypes().size(), getResultTypes().size(),
@@ -296,7 +295,7 @@
ArrayRef<DictionaryAttr> argAttrs) {
result.addAttribute(SymbolTable::getSymbolAttrName(),
builder.getStringAttr(name));
- result.addAttribute("type", TypeAttr::get(type));
+ result.addAttribute("function_type", TypeAttr::get(type));
result.attributes.append(attrs.begin(), attrs.end());
if (argAttrs.empty()) {
return;
@@ -309,7 +308,8 @@
}
LogicalResult ImportOp::verifyType() {
- auto type = getTypeAttr().getValue();
+ auto type =
+ getOperation()->getAttrOfType<TypeAttr>(getTypeAttrName()).getValue();
if (!type.isa<FunctionType>())
return emitOpError("requires '" + getTypeAttrName() +
"' attribute of function type");
@@ -318,15 +318,15 @@
void InitializerOp::build(OpBuilder &builder, OperationState &result,
ArrayRef<NamedAttribute> attrs) {
- result.addAttribute(
- "type", TypeAttr::get(FunctionType::get(builder.getContext(), {}, {})));
+ result.addAttribute("function_type", TypeAttr::get(FunctionType::get(
+ builder.getContext(), {}, {})));
result.addRegion();
result.attributes.append(attrs.begin(), attrs.end());
}
ParseResult InitializerOp::parse(OpAsmParser &parser, OperationState &result) {
- result.addAttribute(
- "type", TypeAttr::get(FunctionType::get(result.getContext(), {}, {})));
+ result.addAttribute("function_type", TypeAttr::get(FunctionType::get(
+ result.getContext(), {}, {})));
if (parser.parseOptionalAttrDictWithKeyword(result.attributes)) {
return failure();
}
@@ -339,7 +339,8 @@
void InitializerOp::print(OpAsmPrinter &p) {
Operation *op = getOperation();
- p.printOptionalAttrDictWithKeyword(op->getAttrs(), /*elidedAttrs=*/{"type"});
+ p.printOptionalAttrDictWithKeyword(op->getAttrs(),
+ /*elidedAttrs=*/{"function_type"});
p << " ";
p.printRegion(body());
}
@@ -378,13 +379,14 @@
return op->emitOpError()
<< "initializer function " << initializerAttr << " not found";
}
- if (initializer.getType().getNumInputs() != 0 ||
- initializer.getType().getNumResults() != 1 ||
- initializer.getType().getResult(0) != globalType.getValue()) {
+ if (initializer.getFunctionType().getNumInputs() != 0 ||
+ initializer.getFunctionType().getNumResults() != 1 ||
+ initializer.getFunctionType().getResult(0) != globalType.getValue()) {
return op->emitOpError()
<< "initializer type mismatch; global " << globalName << " is "
<< globalType << " but initializer function "
- << initializer.getName() << " is " << initializer.getType();
+ << initializer.getName() << " is "
+ << initializer.getFunctionType();
}
} else if (initialValueAttr) {
// Ensure the value is something we can convert to a const.
diff --git a/iree/compiler/Dialect/VM/IR/VMOps.td b/iree/compiler/Dialect/VM/IR/VMOps.td
index 9c06818..2d21ab8 100644
--- a/iree/compiler/Dialect/VM/IR/VMOps.td
+++ b/iree/compiler/Dialect/VM/IR/VMOps.td
@@ -114,17 +114,17 @@
}
/// Returns the type of this function.
- FunctionType getType() {
+ FunctionType getFunctionType() {
return getOperation()->getAttrOfType<TypeAttr>(getTypeAttrName())
.getValue()
.cast<FunctionType>();
}
/// Returns the argument types of this function.
- ArrayRef<Type> getArgumentTypes() { return getType().getInputs(); }
+ ArrayRef<Type> getArgumentTypes() { return getFunctionType().getInputs(); }
/// Returns the result types of this function.
- ArrayRef<Type> getResultTypes() { return getType().getResults(); }
+ ArrayRef<Type> getResultTypes() { return getFunctionType().getResults(); }
/// Hook for OpTrait::FunctionLike, called after verifying that the 'type'
/// attribute is present. This can check for preconditions of the
@@ -134,7 +134,7 @@
Region *getCallableRegion() { return &body(); }
ArrayRef<Type> getCallableResults() {
assert(!isExternal() && "invalid callable");
- return getType().getResults();
+ return getFunctionType().getResults();
}
/// Adds or overrides a reflection attribute.
@@ -195,7 +195,7 @@
let extraClassDeclaration = [{
/// Returns the type of this function.
- FunctionType getType() {
+ FunctionType getFunctionType() {
return getOperation()->getAttrOfType<TypeAttr>(getTypeAttrName())
.getValue()
.cast<FunctionType>();
@@ -220,10 +220,10 @@
}
/// Returns the argument types of this function.
- ArrayRef<Type> getArgumentTypes() { return getType().getInputs(); }
+ ArrayRef<Type> getArgumentTypes() { return getFunctionType().getInputs(); }
/// Returns the result types of this function.
- ArrayRef<Type> getResultTypes() { return getType().getResults(); }
+ ArrayRef<Type> getResultTypes() { return getFunctionType().getResults(); }
/// Hook for OpTrait::FunctionLike, called after verifying that the 'type'
@@ -233,7 +233,7 @@
Region *getCallableRegion() { return nullptr; }
ArrayRef<Type> getCallableResults() {
- return getType().getResults();
+ return getFunctionType().getResults();
}
}];
}
@@ -252,7 +252,7 @@
}];
let arguments = (ins
- TypeAttr:$type
+ TypeAttr:$function_type
);
let regions = (region AnyRegion:$body);
@@ -270,8 +270,10 @@
Block *addEntryBlock();
Block *addBlock();
- FunctionType getType() {
- return getTypeAttr().getValue().cast<FunctionType>();
+ FunctionType getFunctionType() {
+ return getOperation()->getAttrOfType<TypeAttr>(getTypeAttrName())
+ .getValue()
+ .cast<FunctionType>();
}
/// Returns the argument types of this function.
@@ -3530,7 +3532,7 @@
[{
$_state.addOperands(operands);
$_state.addAttribute("callee", mlir::SymbolRefAttr::get(callee));
- $_state.addTypes(callee.getType().getResults());
+ $_state.addTypes(callee.getFunctionType().getResults());
}]>,
OpBuilder<(ins "FlatSymbolRefAttr":$callee,
"ArrayRef<Type>":$resultTypes, CArg<"ValueRange", "{}">:$operands),
diff --git a/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeModuleTarget.cpp b/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeModuleTarget.cpp
index 0566e5d..08d03a5 100644
--- a/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeModuleTarget.cpp
+++ b/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeModuleTarget.cpp
@@ -481,7 +481,7 @@
// Generate the signature calling convention string based on types.
auto cconv = makeImportCallingConventionString(importOp);
if (!cconv.hasValue()) return {};
- return createFunctionSignatureDef(importOp.getType(), typeTable,
+ return createFunctionSignatureDef(importOp.getFunctionType(), typeTable,
cconv.getValue(), /*reflectionAttrsRef=*/0,
fbb);
}
@@ -514,7 +514,7 @@
fbb, reflectionAttrRefs.data(), reflectionAttrRefs.size());
}
- return createFunctionSignatureDef(funcOp.getType(), typeTable,
+ return createFunctionSignatureDef(funcOp.getFunctionType(), typeTable,
cconv.getValue(), reflectionAttrsRef, fbb);
}
@@ -525,7 +525,7 @@
// Generate the signature calling convention string based on types.
auto cconv = makeCallingConventionString(funcOp);
if (!cconv.hasValue()) return {};
- return createFunctionSignatureDef(funcOp.getType(), typeTable,
+ return createFunctionSignatureDef(funcOp.getFunctionType(), typeTable,
cconv.getValue(), /*reflectionAttrsRef=*/0,
fbb);
}
diff --git a/iree/compiler/Dialect/VM/Target/C/CModuleTarget.cpp b/iree/compiler/Dialect/VM/Target/C/CModuleTarget.cpp
index e94294b..30c0026 100644
--- a/iree/compiler/Dialect/VM/Target/C/CModuleTarget.cpp
+++ b/iree/compiler/Dialect/VM/Target/C/CModuleTarget.cpp
@@ -39,12 +39,13 @@
}
static LogicalResult printFunctionDeclaration(
- mlir::FuncOp funcOp, llvm::raw_ostream &output,
+ mlir::func::FuncOp funcOp, llvm::raw_ostream &output,
mlir::emitc::CppEmitter &emitter) {
Operation *op = funcOp.getOperation();
if (op->hasAttr("emitc.static")) output << "static ";
- if (failed(emitter.emitTypes(funcOp.getLoc(), funcOp.getType().getResults())))
+ if (failed(emitter.emitTypes(funcOp.getLoc(),
+ funcOp.getFunctionType().getResults())))
return failure();
output << " " << funcOp.getName();
@@ -165,7 +166,7 @@
});
for (auto exportOp : exportOps) {
StringRef funcName = exportOp.function_ref();
- auto funcOp = symbolTable.lookup<mlir::FuncOp>(funcName);
+ auto funcOp = symbolTable.lookup<mlir::func::FuncOp>(funcName);
if (!funcOp) {
return exportOp.emitError("Couldn't find referenced FuncOp");
}
@@ -220,7 +221,7 @@
// implementation.
for (auto exportOp : exportOps) {
StringRef funcName = exportOp.function_ref();
- auto funcOp = symbolTable.lookup<mlir::FuncOp>(funcName);
+ auto funcOp = symbolTable.lookup<mlir::func::FuncOp>(funcName);
if (!funcOp) {
return exportOp.emitError("Couldn't find referenced FuncOp");
}
@@ -362,7 +363,7 @@
output << "\n";
mlir::emitc::CppEmitter emitter(output, /*declareVariablesAtTop=*/true);
- for (auto funcOp : moduleOp.getOps<mlir::FuncOp>()) {
+ for (auto funcOp : moduleOp.getOps<mlir::func::FuncOp>()) {
Operation *op = funcOp.getOperation();
if (!op->hasAttr("vm.module.constructor")) continue;
if (failed(printFunctionDeclaration(funcOp, output, emitter)))
@@ -403,7 +404,7 @@
// translate functions
output << "// DECLARE FUNCTIONS\n";
- for (auto funcOp : moduleOp.getOps<mlir::FuncOp>()) {
+ for (auto funcOp : moduleOp.getOps<mlir::func::FuncOp>()) {
Operation *op = funcOp.getOperation();
if (op->hasAttr("vm.module.constructor")) continue;
if (failed(printFunctionDeclaration(funcOp, output, emitter)))
@@ -417,7 +418,7 @@
// TODO(simon-camp): Clean up. We generate calls to a macro that defines a
// struct. As we declare all variables at the start of the function, the
// macro call cannot be inlined into the function.
- if (!isa<mlir::FuncOp, emitc::CallOp>(op)) continue;
+ if (!isa<mlir::func::FuncOp, emitc::CallOp>(op)) continue;
if (op.hasAttr("vm.emit_at_end")) continue;
if (op.hasAttr("emitc.static")) output << "static ";
if (failed(emitter.emitOperation(op,
@@ -433,7 +434,7 @@
}
// Emit code for functions marked with `vm.emit_at_end`.
- for (auto funcOp : moduleOp.getOps<mlir::FuncOp>()) {
+ for (auto funcOp : moduleOp.getOps<mlir::func::FuncOp>()) {
Operation *op = funcOp.getOperation();
if (!op->hasAttr("vm.emit_at_end")) continue;
if (op->hasAttr("emitc.static")) output << "static ";
diff --git a/iree/compiler/Dialect/VM/Target/C/TranslateToCpp.cpp b/iree/compiler/Dialect/VM/Target/C/TranslateToCpp.cpp
index 4814217..b24c8ef 100644
--- a/iree/compiler/Dialect/VM/Target/C/TranslateToCpp.cpp
+++ b/iree/compiler/Dialect/VM/Target/C/TranslateToCpp.cpp
@@ -423,7 +423,7 @@
CppEmitter::Scope scope(emitter);
raw_indented_ostream &os = emitter.ostream();
if (failed(emitter.emitTypes(functionOp.getLoc(),
- functionOp.getType().getResults())))
+ functionOp.getFunctionType().getResults())))
return failure();
os << " " << functionOp.getName();
diff --git a/iree/compiler/Dialect/VM/Transforms/Passes.cpp b/iree/compiler/Dialect/VM/Transforms/Passes.cpp
index 0daeba2..fcb3ae1 100644
--- a/iree/compiler/Dialect/VM/Transforms/Passes.cpp
+++ b/iree/compiler/Dialect/VM/Transforms/Passes.cpp
@@ -12,6 +12,7 @@
#include "iree/compiler/Dialect/VM/IR/VMOps.h"
#include "mlir/Conversion/SCFToControlFlow/SCFToControlFlow.h"
#include "mlir/Dialect/Affine/Passes.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Transforms/Passes.h"
@@ -23,13 +24,14 @@
void buildVMTransformPassPipeline(OpPassManager &passManager,
TargetOptions targetOptions) {
- passManager.addNestedPass<mlir::FuncOp>(createLoopCoalescingPass());
+ passManager.addNestedPass<mlir::func::FuncOp>(createLoopCoalescingPass());
passManager.addNestedPass<IREE::Util::InitializerOp>(
createLoopInvariantCodeMotionPass());
- passManager.addNestedPass<mlir::FuncOp>(createLoopInvariantCodeMotionPass());
+ passManager.addNestedPass<mlir::func::FuncOp>(
+ createLoopInvariantCodeMotionPass());
passManager.addNestedPass<IREE::Util::InitializerOp>(
createConvertSCFToCFPass());
- passManager.addNestedPass<mlir::FuncOp>(createConvertSCFToCFPass());
+ passManager.addNestedPass<mlir::func::FuncOp>(createConvertSCFToCFPass());
passManager.addPass(createCanonicalizerPass());
passManager.addPass(createCSEPass());
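The same anchor change in C++ pipeline construction, shown as a sketch (not part of the patch) using stock upstream passes:

```cpp
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"

// Nested pipelines must now target func::FuncOp (printed as "func.func");
// anchoring on the removed mlir::FuncOp would no longer compile.
void buildExamplePipeline(mlir::OpPassManager &pm) {
  pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());
  pm.addPass(mlir::createCSEPass());
}
```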
diff --git a/iree/compiler/Dialect/VM/Utils/CallingConvention.cpp b/iree/compiler/Dialect/VM/Utils/CallingConvention.cpp
index c375056..4ecbaa9 100644
--- a/iree/compiler/Dialect/VM/Utils/CallingConvention.cpp
+++ b/iree/compiler/Dialect/VM/Utils/CallingConvention.cpp
@@ -76,7 +76,7 @@
Optional<std::string> makeImportCallingConventionString(
IREE::VM::ImportOp importOp) {
- auto functionType = importOp.getType();
+ auto functionType = importOp.getFunctionType();
if (functionType.getNumInputs() == 0 && functionType.getNumResults() == 0) {
return std::string("0v_v"); // Valid but empty.
}
@@ -114,7 +114,7 @@
}
Optional<std::string> makeCallingConventionString(IREE::VM::FuncOp funcOp) {
- auto functionType = funcOp.getType();
+ auto functionType = funcOp.getFunctionType();
if (functionType.getNumInputs() == 0 && functionType.getNumResults() == 0) {
return std::string("0v_v"); // Valid but empty.
}
diff --git a/iree/compiler/InputConversion/Common/IREEImportPublic.cpp b/iree/compiler/InputConversion/Common/IREEImportPublic.cpp
index aac6181..dabf718 100644
--- a/iree/compiler/InputConversion/Common/IREEImportPublic.cpp
+++ b/iree/compiler/InputConversion/Common/IREEImportPublic.cpp
@@ -144,7 +144,7 @@
LogicalResult matchAndRewrite(
FuncOp srcOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
- FunctionType srcFuncType = srcOp.getType();
+ FunctionType srcFuncType = srcOp.getFunctionType();
TypeConverter::SignatureConversion signatureConversion(
srcOp.getNumArguments());
@@ -299,13 +299,13 @@
};
target.addDynamicallyLegalOp<FuncOp>([&](FuncOp funcOp) {
- for (Type type : funcOp.getType().getInputs()) {
+ for (Type type : funcOp.getFunctionType().getInputs()) {
if (isIllegalType(type)) return false;
}
- for (Type type : funcOp.getType().getResults()) {
+ for (Type type : funcOp.getFunctionType().getResults()) {
if (isIllegalType(type)) return false;
}
- for (Block &block : funcOp.body()) {
+ for (Block &block : funcOp.getBody()) {
for (Type type : block.getArgumentTypes()) {
if (isIllegalType(type)) return false;
}
diff --git a/iree/compiler/InputConversion/Common/PassDetail.h b/iree/compiler/InputConversion/Common/PassDetail.h
index 71990b3..67316a7 100644
--- a/iree/compiler/InputConversion/Common/PassDetail.h
+++ b/iree/compiler/InputConversion/Common/PassDetail.h
@@ -7,6 +7,7 @@
#ifndef IREE_COMPILER_INPUTCONVERSION_COMMON_PASSDETAIL_H_
#define IREE_COMPILER_INPUTCONVERSION_COMMON_PASSDETAIL_H_
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/Pass.h"
namespace mlir {
diff --git a/iree/compiler/InputConversion/Common/Passes.h b/iree/compiler/InputConversion/Common/Passes.h
index a78af31..5781a19 100644
--- a/iree/compiler/InputConversion/Common/Passes.h
+++ b/iree/compiler/InputConversion/Common/Passes.h
@@ -7,6 +7,7 @@
#ifndef IREE_COMPILER_INPUTCONVERSION_COMMON_PASSES_H_
#define IREE_COMPILER_INPUTCONVERSION_COMMON_PASSES_H_
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
@@ -25,9 +26,9 @@
// Passes
//===----------------------------------------------------------------------===//
-std::unique_ptr<OperationPass<FuncOp>> createTopLevelSCFToCFGPass();
+std::unique_ptr<OperationPass<func::FuncOp>> createTopLevelSCFToCFGPass();
std::unique_ptr<OperationPass<ModuleOp>> createIREEImportPublicPass();
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
createLinalgQuantizedMatmulToMatmulPass();
//===----------------------------------------------------------------------===//
diff --git a/iree/compiler/InputConversion/Common/test/iree_import_public.mlir b/iree/compiler/InputConversion/Common/test/iree_import_public.mlir
index bc77986..b6d0670 100644
--- a/iree/compiler/InputConversion/Common/test/iree_import_public.mlir
+++ b/iree/compiler/InputConversion/Common/test/iree_import_public.mlir
@@ -3,14 +3,14 @@
// CHECK-LABEL: func @bv_func
// CHECK-SAME: (%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> (!hal.buffer_view, !hal.buffer_view)
// CHECK: return %arg0, %arg1 : !hal.buffer_view, !hal.buffer_view
-builtin.func @bv_func(%arg0 : !iree_input.buffer_view, %arg1 : !iree_input.buffer_view) -> (!iree_input.buffer_view, !iree_input.buffer_view) {
+func.func @bv_func(%arg0 : !iree_input.buffer_view, %arg1 : !iree_input.buffer_view) -> (!iree_input.buffer_view, !iree_input.buffer_view) {
return %arg0, %arg1 : !iree_input.buffer_view, !iree_input.buffer_view
}
// -----
// CHECK-LABEL: func @list_func
// CHECK-SAME: (%arg0: !util.list<?>) -> !util.list<?>
-builtin.func @list_func(%arg0 : !iree_input.list<!iree_input.variant>) -> !iree_input.list<!iree_input.variant> {
+func.func @list_func(%arg0 : !iree_input.list<!iree_input.variant>) -> !iree_input.list<!iree_input.variant> {
return %arg0 : !iree_input.list<!iree_input.variant>
}
@@ -18,7 +18,7 @@
// CHECK-LABEL: func @list_func_retains_iree_abi
// CHECK-SAME: (%arg0: !util.list<?>) -> !util.list<?>
// CHECK-SAME: iree.abi = "FOOBAR"
-builtin.func @list_func_retains_iree_abi(%arg0 : !iree_input.list<!iree_input.variant>) -> !iree_input.list<!iree_input.variant>
+func.func @list_func_retains_iree_abi(%arg0 : !iree_input.list<!iree_input.variant>) -> !iree_input.list<!iree_input.variant>
attributes {iree.abi = "FOOBAR"} {
return %arg0 : !iree_input.list<!iree_input.variant>
}
@@ -26,7 +26,7 @@
// -----
// CHECK-LABEL: func @list_func_call
// CHECK: call @list_func_call(%arg0) : (!util.list<?>) -> !util.list<?>
-builtin.func @list_func_call(%arg0 : !iree_input.list<!iree_input.variant>) -> !iree_input.list<!iree_input.variant> {
+func.func @list_func_call(%arg0 : !iree_input.list<!iree_input.variant>) -> !iree_input.list<!iree_input.variant> {
call @list_func_call(%arg0) : (!iree_input.list<!iree_input.variant>) -> !iree_input.list<!iree_input.variant>
return %arg0 : !iree_input.list<!iree_input.variant>
}
@@ -34,14 +34,14 @@
// -----
// CHECK-LABEL: func @ptr_func
// CHECK-SAME: (%arg0: !util.ptr<!hal.buffer_view>) -> !util.ptr<!hal.buffer_view>
-builtin.func @ptr_func(%arg0 : !iree_input.ptr<!iree_input.buffer_view>) -> !iree_input.ptr<!iree_input.buffer_view> {
+func.func @ptr_func(%arg0 : !iree_input.ptr<!iree_input.buffer_view>) -> !iree_input.ptr<!iree_input.buffer_view> {
return %arg0 : !iree_input.ptr<!iree_input.buffer_view>
}
// -----
// CHECK-LABEL: func @null_op
// CHECK: util.null : !util.variant
-builtin.func @null_op() -> !iree_input.variant {
+func.func @null_op() -> !iree_input.variant {
%0 = iree_input.null : !iree_input.variant
return %0 : !iree_input.variant
}
@@ -49,7 +49,7 @@
// -----
// CHECK-LABEL: func @tensor_to_buffer_view
// CHECK: hal.tensor.export %arg0 : tensor<?x?x3xf32>{%arg1, %arg2} -> !hal.buffer_view
-builtin.func @tensor_to_buffer_view(%arg0 : tensor<?x?x3xf32>, %arg1 : index, %arg2 : index) -> !iree_input.buffer_view {
+func.func @tensor_to_buffer_view(%arg0 : tensor<?x?x3xf32>, %arg1 : index, %arg2 : index) -> !iree_input.buffer_view {
%0 = iree_input.cast.tensor_to_buffer_view %arg0 : tensor<?x?x3xf32>{%arg1, %arg2} -> !iree_input.buffer_view
return %0 : !iree_input.buffer_view
}
@@ -57,7 +57,7 @@
// -----
// CHECK-LABEL: func @tensor_to_buffer_view_static
// CHECK: hal.tensor.export %arg0 : tensor<3xf32> -> !hal.buffer_view
-builtin.func @tensor_to_buffer_view_static(%arg0 : tensor<3xf32>) -> !iree_input.buffer_view {
+func.func @tensor_to_buffer_view_static(%arg0 : tensor<3xf32>) -> !iree_input.buffer_view {
%0 = iree_input.cast.tensor_to_buffer_view %arg0 : tensor<3xf32> -> !iree_input.buffer_view
return %0 : !iree_input.buffer_view
}
@@ -69,7 +69,7 @@
// CHECK: %[[ONE:.*]] = arith.constant 1
// CHECK: %[[D1:.*]] = tensor.dim %arg0, %[[ONE]]
// CHECK: hal.tensor.export %arg0 : tensor<?x?x3xf32>{%[[D0]], %[[D1]]} -> !hal.buffer_view
-builtin.func @tensor_to_buffer_view_implicit_dims(%arg0 : tensor<?x?x3xf32>) -> !iree_input.buffer_view {
+func.func @tensor_to_buffer_view_implicit_dims(%arg0 : tensor<?x?x3xf32>) -> !iree_input.buffer_view {
%0 = iree_input.cast.tensor_to_buffer_view %arg0 : tensor<?x?x3xf32> -> !iree_input.buffer_view
return %0 : !iree_input.buffer_view
}
@@ -77,7 +77,7 @@
// -----
// CHECK-LABEL: func @buffer_view_to_tensor
// CHECK: hal.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3xf32>{%arg1, %arg2}
-builtin.func @buffer_view_to_tensor(%arg0 : !iree_input.buffer_view, %arg1 : index, %arg2 : index) -> tensor<?x?x3xf32> {
+func.func @buffer_view_to_tensor(%arg0 : !iree_input.buffer_view, %arg1 : index, %arg2 : index) -> tensor<?x?x3xf32> {
%0 = iree_input.cast.buffer_view_to_tensor %arg0 : !iree_input.buffer_view -> tensor<?x?x3xf32>{%arg1, %arg2}
return %0 : tensor<?x?x3xf32>
}
@@ -85,7 +85,7 @@
// -----
// CHECK-LABEL: func @buffer_view_to_tensor_static
// CHECK: hal.tensor.import %arg0 : !hal.buffer_view -> tensor<3xf32>
-builtin.func @buffer_view_to_tensor_static(%arg0 : !iree_input.buffer_view) -> tensor<3xf32> {
+func.func @buffer_view_to_tensor_static(%arg0 : !iree_input.buffer_view) -> tensor<3xf32> {
%0 = iree_input.cast.buffer_view_to_tensor %arg0 : !iree_input.buffer_view -> tensor<3xf32>
return %0 : tensor<3xf32>
}
@@ -95,7 +95,7 @@
// CHECK: %[[D0:.*]] = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
// CHECK: %[[D1:.*]] = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
// CHECK: hal.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3xf32>{%[[D0]], %[[D1]]}
-builtin.func @buffer_view_to_tensor_implicit_dims(%arg0 : !iree_input.buffer_view) -> tensor<?x?x3xf32> {
+func.func @buffer_view_to_tensor_implicit_dims(%arg0 : !iree_input.buffer_view) -> tensor<?x?x3xf32> {
%0 = iree_input.cast.buffer_view_to_tensor %arg0 : !iree_input.buffer_view -> tensor<?x?x3xf32>
return %0 : tensor<?x?x3xf32>
}
@@ -103,7 +103,7 @@
// -----
// CHECK-LABEL: func @buffer_view_rank
// CHECK: hal.buffer_view.rank<%arg0 : !hal.buffer_view> : index
-builtin.func @buffer_view_rank(%arg0 : !iree_input.buffer_view) -> index {
+func.func @buffer_view_rank(%arg0 : !iree_input.buffer_view) -> index {
%0 = iree_input.buffer_view.rank %arg0 : index
return %0 : index
}
@@ -111,7 +111,7 @@
// -----
// CHECK-LABEL: func @buffer_view_dim
// CHECK: hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
-builtin.func @buffer_view_dim(%arg0 : !iree_input.buffer_view) -> index {
+func.func @buffer_view_dim(%arg0 : !iree_input.buffer_view) -> index {
%0 = iree_input.buffer_view.dim %arg0, 0 : index
return %0: index
}
@@ -119,7 +119,7 @@
// -----
// CHECK-LABEL: func @list_create
// CHECK: util.list.create %arg0 : !util.list<?>
-builtin.func @list_create(%arg0 : index) -> !iree_input.list<!iree_input.variant> {
+func.func @list_create(%arg0 : index) -> !iree_input.list<!iree_input.variant> {
%0 = iree_input.list.create %arg0 : !iree_input.list<!iree_input.variant>
return %0 : !iree_input.list<!iree_input.variant>
}
@@ -127,7 +127,7 @@
// -----
// CHECK-LABEL: func @list_size
// CHECK: util.list.size %arg0 : !util.list<?>
-builtin.func @list_size(%arg0 : !iree_input.list<!iree_input.variant>) -> index {
+func.func @list_size(%arg0 : !iree_input.list<!iree_input.variant>) -> index {
%0 = iree_input.list.size %arg0 : !iree_input.list<!iree_input.variant>
return %0 : index
}
@@ -135,7 +135,7 @@
// -----
// CHECK-LABEL: func @list_resize
// CHECK: util.list.resize %arg0, %arg1 : !util.list<?>
-builtin.func @list_resize(%arg0 : !iree_input.list<!iree_input.variant>, %arg1 : index) {
+func.func @list_resize(%arg0 : !iree_input.list<!iree_input.variant>, %arg1 : index) {
iree_input.list.resize %arg0, %arg1 : !iree_input.list<!iree_input.variant>
return
}
@@ -143,7 +143,7 @@
// -----
// CHECK-LABEL: func @list_get
// CHECK: util.list.get %arg0[%arg1] : !util.list<?>
-builtin.func @list_get(%arg0 : !iree_input.list<!iree_input.variant>, %arg1 : index) -> !iree_input.variant {
+func.func @list_get(%arg0 : !iree_input.list<!iree_input.variant>, %arg1 : index) -> !iree_input.variant {
%0 = iree_input.list.get %arg0[%arg1] : !iree_input.list<!iree_input.variant> -> !iree_input.variant
return %0 : !iree_input.variant
}
@@ -151,7 +151,7 @@
// -----
// CHECK-LABEL: func @list_set
// CHECK: util.list.set %arg0[%arg1], %arg2 : !util.list<?>
-builtin.func @list_set(%arg0 : !iree_input.list<!iree_input.variant>, %arg1 : index, %arg2 : !iree_input.variant) {
+func.func @list_set(%arg0 : !iree_input.list<!iree_input.variant>, %arg1 : index, %arg2 : !iree_input.variant) {
iree_input.list.set %arg0[%arg1], %arg2 : !iree_input.list<!iree_input.variant>, !iree_input.variant
return
}
@@ -159,7 +159,7 @@
// -----
// CHECK-LABEL: func @tensor_reshape
// CHECK: flow.tensor.reshape %arg0 : tensor<?x?xf32>{%arg1, %arg2} -> tensor<?x?xf32>{%arg2, %arg1}
-builtin.func @tensor_reshape(%arg0 : tensor<?x?xf32>, %arg1 : index, %arg2 : index) -> tensor<?x?xf32> {
+func.func @tensor_reshape(%arg0 : tensor<?x?xf32>, %arg1 : index, %arg2 : index) -> tensor<?x?xf32> {
%0 = iree_input.tensor.reshape %arg0 : tensor<?x?xf32>{%arg1, %arg2} -> tensor<?x?xf32>{%arg2, %arg1}
return %0 : tensor<?x?xf32>
}
@@ -167,7 +167,7 @@
// -----
// CHECK-LABEL: func @tensor_load
// CHECK: flow.tensor.load %arg0[%arg2, %arg3] : tensor<?x3xf32>{%arg1}
-builtin.func @tensor_load(%arg0 : tensor<?x3xf32>, %arg1 : index, %arg2 : index, %arg3 : index) -> f32 {
+func.func @tensor_load(%arg0 : tensor<?x3xf32>, %arg1 : index, %arg2 : index, %arg3 : index) -> f32 {
%0 = iree_input.tensor.load %arg0[%arg2, %arg3] : tensor<?x3xf32>{%arg1}
return %0 : f32
}
@@ -175,7 +175,7 @@
// -----
// CHECK-LABEL: func @tensor_store
// CHECK: flow.tensor.store %arg4, %arg0[%arg2, %arg3] : tensor<?x3xf32>{%arg1}
-builtin.func @tensor_store(%arg0 : tensor<?x3xf32>, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : f32) {
+func.func @tensor_store(%arg0 : tensor<?x3xf32>, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : f32) {
iree_input.tensor.store %arg4, %arg0[%arg2, %arg3] : tensor<?x3xf32>{%arg1}
return
}
@@ -183,7 +183,7 @@
// -----
// CHECK-LABEL: func @tensor_splat
// CHECK: flow.tensor.splat %arg0 : tensor<?x?xf32>{%arg1, %arg2}
-builtin.func @tensor_splat(%arg0 : f32, %arg1 : index, %arg2 : index) -> tensor<?x?xf32> {
+func.func @tensor_splat(%arg0 : f32, %arg1 : index, %arg2 : index) -> tensor<?x?xf32> {
%0 = iree_input.tensor.splat %arg0 : tensor<?x?xf32>{%arg1, %arg2}
return %0 : tensor<?x?xf32>
}
@@ -191,7 +191,7 @@
// -----
// CHECK-LABEL: func @tensor_clone
// CHECK: flow.tensor.clone %arg0 : tensor<?x?xf32>{%arg1, %arg2}
-builtin.func @tensor_clone(%arg0 : tensor<?x?xf32>, %arg1 : index, %arg2 : index) -> tensor<?x?xf32> {
+func.func @tensor_clone(%arg0 : tensor<?x?xf32>, %arg1 : index, %arg2 : index) -> tensor<?x?xf32> {
%0 = iree_input.tensor.clone %arg0 : tensor<?x?xf32>{%arg1, %arg2}
return %0 : tensor<?x?xf32>
}
@@ -199,7 +199,7 @@
// -----
// CHECK-LABEL: func @tensor_slice
// CHECK: flow.tensor.slice %arg0[%arg1 for %arg2] : tensor<?xf32>{%arg3} -> tensor<?xf32>{%arg4}
-builtin.func @tensor_slice(%arg0 : tensor<?xf32>, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : index) -> tensor<?xf32> {
+func.func @tensor_slice(%arg0 : tensor<?xf32>, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : index) -> tensor<?xf32> {
%0 = iree_input.tensor.slice %arg0[%arg1 for %arg2] : tensor<?xf32>{%arg3} -> tensor<?xf32>{%arg4}
return %0 : tensor<?xf32>
}
@@ -207,7 +207,7 @@
// -----
// CHECK-LABEL: func @tensor_update
// CHECK: flow.tensor.update %arg3, %arg0[%arg1] : tensor<?xf32>{%arg2} -> %arg0 as tensor<?xf32>{%arg4}
-builtin.func @tensor_update(%arg0 : tensor<?xf32>, %arg1 : index, %arg2 : index, %arg3 : tensor<?xf32>, %arg4 : index) -> tensor<?xf32> {
+func.func @tensor_update(%arg0 : tensor<?xf32>, %arg1 : index, %arg2 : index, %arg3 : tensor<?xf32>, %arg4 : index) -> tensor<?xf32> {
%0 = iree_input.tensor.update %arg3, %arg0[%arg1] : tensor<?xf32>{%arg2} -> tensor<?xf32>{%arg4}
return %0 : tensor<?xf32>
}
@@ -215,7 +215,7 @@
// -----
// CHECK-LABEL: func @tensor_trace
// CHECK: flow.tensor.trace {key = "FOOBAR"} %arg0, %arg1 : tensor<5xf32>, tensor<3xf32>
-builtin.func @tensor_trace(%arg0 : tensor<5xf32>, %arg1 : tensor<3xf32>) {
+func.func @tensor_trace(%arg0 : tensor<5xf32>, %arg1 : tensor<3xf32>) {
iree_input.tensor.trace "FOOBAR" %arg0, %arg1 : tensor<5xf32>, tensor<3xf32>
return
}
@@ -240,7 +240,7 @@
// CHECK-NEXT: util.initializer.return
// CHECK-NEXT: }
// CHECK: func private @initializer() -> tensor<4xi32>
- builtin.func private @initializer() -> tensor<4xi32>
+ func.func private @initializer() -> tensor<4xi32>
}
// -----
diff --git a/iree/compiler/InputConversion/MHLO/ConvertComplexToReal.cpp b/iree/compiler/InputConversion/MHLO/ConvertComplexToReal.cpp
index 09c2ab4..35bbb18 100644
--- a/iree/compiler/InputConversion/MHLO/ConvertComplexToReal.cpp
+++ b/iree/compiler/InputConversion/MHLO/ConvertComplexToReal.cpp
@@ -227,7 +227,7 @@
ConvertCompareOp(TypeConverter &typeConverter, MLIRContext *context,
mhlo::ComparisonDirection direction)
: OpConversionPattern<CompareOpTy>(typeConverter, context),
- direction(mhlo::stringifyEnum(direction)) {}
+ direction(direction) {}
LogicalResult matchAndRewrite(
CompareOpTy op, typename CompareOpTy::Adaptor adaptor,
@@ -262,7 +262,7 @@
return success();
}
- StringRef direction;
+ mhlo::ComparisonDirection direction;
};
struct ElideComplexPattern : public OpConversionPattern<mhlo::ComplexOp> {
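Note: MHLO attributes that used to be stringly typed now surface as C++ enums, which is why the pattern above stores `mhlo::ComparisonDirection` instead of a stringified `StringRef`. A small hedged sketch of handling the enum directly (the header path and helper name are assumptions for illustration, not code from this patch):

```cpp
#include "mlir-hlo/Dialect/mhlo/IR/hlo_ops.h"  // assumed MHLO header layout

// Branches on the enum values rather than on strings like "EQ"/"NE".
static bool isEqualityDirection(mlir::mhlo::ComparisonDirection direction) {
  return direction == mlir::mhlo::ComparisonDirection::EQ ||
         direction == mlir::mhlo::ComparisonDirection::NE;
}
```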
diff --git a/iree/compiler/InputConversion/MHLO/FlattenTuplesInCFG.cpp b/iree/compiler/InputConversion/MHLO/FlattenTuplesInCFG.cpp
index 23eabe9..5d2bddf 100644
--- a/iree/compiler/InputConversion/MHLO/FlattenTuplesInCFG.cpp
+++ b/iree/compiler/InputConversion/MHLO/FlattenTuplesInCFG.cpp
@@ -288,7 +288,7 @@
std::vector<std::pair<FuncOp, FuncOp>> convertedFunctions;
for (auto oldFunction : module.getOps<FuncOp>()) {
- auto oldFunctionType = oldFunction.getType();
+ auto oldFunctionType = oldFunction.getFunctionType();
llvm::SmallVector<Type, 10> newInputTypes;
untupleTypes(oldFunctionType.getInputs(), &newInputTypes);
diff --git a/iree/compiler/InputConversion/MHLO/LegalizeInputTypes.cpp b/iree/compiler/InputConversion/MHLO/LegalizeInputTypes.cpp
index 04264ae..ac40eca 100644
--- a/iree/compiler/InputConversion/MHLO/LegalizeInputTypes.cpp
+++ b/iree/compiler/InputConversion/MHLO/LegalizeInputTypes.cpp
@@ -181,10 +181,10 @@
return success();
}
-static LogicalResult convertFunc(mlir::FuncOp oldFuncOp,
+static LogicalResult convertFunc(mlir::func::FuncOp oldFuncOp,
FlowTypeConverter &typeConverter,
OpBuilder &moduleBuilder) {
- auto oldType = oldFuncOp.getType();
+ auto oldType = oldFuncOp.getFunctionType();
TypeConverter::SignatureConversion signature(oldType.getNumInputs());
for (unsigned i = 0, e = oldType.getNumInputs(); i != e; ++i) {
if (failed(typeConverter.convertSignatureArg(i, oldType.getInput(i),
@@ -224,7 +224,7 @@
for (auto *oldOp : oldOps) {
OpBuilder moduleBuilder(moduleOp);
moduleBuilder.setInsertionPoint(oldOp);
- if (auto oldFuncOp = dyn_cast<mlir::FuncOp>(oldOp)) {
+ if (auto oldFuncOp = dyn_cast<mlir::func::FuncOp>(oldOp)) {
if (failed(convertFunc(oldFuncOp, typeConverter, moduleBuilder))) {
return signalPassFailure();
}
diff --git a/iree/compiler/InputConversion/MHLO/MHLOToLinalgOnTensors.cpp b/iree/compiler/InputConversion/MHLO/MHLOToLinalgOnTensors.cpp
index ebc4504..3e94508 100644
--- a/iree/compiler/InputConversion/MHLO/MHLOToLinalgOnTensors.cpp
+++ b/iree/compiler/InputConversion/MHLO/MHLOToLinalgOnTensors.cpp
@@ -178,7 +178,7 @@
LogicalResult matchAndRewrite(
mhlo::FftOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
- if (op.fft_type() != "RFFT") {
+ if (op.fft_type() != mhlo::FftType::RFFT) {
return rewriter.notifyMatchFailure(op,
"non RFFT types are supported yet");
}
@@ -222,7 +222,7 @@
LogicalResult matchAndRewrite(
FuncOp srcOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
- FunctionType srcFuncType = srcOp.getType();
+ FunctionType srcFuncType = srcOp.getFunctionType();
TypeConverter::SignatureConversion signatureConversion(
srcOp.getNumArguments());
@@ -349,13 +349,13 @@
// Functions must have legal types.
target.addDynamicallyLegalOp<FuncOp>([&](FuncOp funcOp) {
- for (Type type : funcOp.getType().getInputs()) {
+ for (Type type : funcOp.getFunctionType().getInputs()) {
if (isIllegalType(type)) return false;
}
- for (Type type : funcOp.getType().getResults()) {
+ for (Type type : funcOp.getFunctionType().getResults()) {
if (isIllegalType(type)) return false;
}
- for (Block &block : funcOp.body()) {
+ for (Block &block : funcOp.getBody()) {
for (Type type : block.getArgumentTypes()) {
if (isIllegalType(type)) return false;
}
diff --git a/iree/compiler/InputConversion/MHLO/PassDetail.h b/iree/compiler/InputConversion/MHLO/PassDetail.h
index f7098ac..097b977 100644
--- a/iree/compiler/InputConversion/MHLO/PassDetail.h
+++ b/iree/compiler/InputConversion/MHLO/PassDetail.h
@@ -7,6 +7,7 @@
#ifndef IREE_COMPILER_INPUTCONVERSION_MHLO_PASSDETAIL_H_
#define IREE_COMPILER_INPUTCONVERSION_MHLO_PASSDETAIL_H_
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/Pass.h"
namespace mlir {
diff --git a/iree/compiler/InputConversion/MHLO/Passes.h b/iree/compiler/InputConversion/MHLO/Passes.h
index a027fc3..9c82ece 100644
--- a/iree/compiler/InputConversion/MHLO/Passes.h
+++ b/iree/compiler/InputConversion/MHLO/Passes.h
@@ -7,6 +7,7 @@
#ifndef IREE_COMPILER_INPUTCONVERSION_MHLO_PASSES_H_
#define IREE_COMPILER_INPUTCONVERSION_MHLO_PASSES_H_
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/Pass.h"
namespace mlir {
@@ -52,15 +53,16 @@
std::unique_ptr<OperationPass<ModuleOp>> createLegalizeInputTypesPass();
/// Creates XLA-HLO to Linalg on tensors transformation pass.
-std::unique_ptr<OperationPass<FuncOp>> createMHLOToLinalgOnTensorsPass();
+std::unique_ptr<OperationPass<func::FuncOp>> createMHLOToLinalgOnTensorsPass();
/// Creates XLA-HLO to LinalgExt pass.
-std::unique_ptr<OperationPass<FuncOp>> createConvertMHLOToLinalgExtPass();
+std::unique_ptr<OperationPass<func::FuncOp>> createConvertMHLOToLinalgExtPass();
/// Creates XLA-HLO preprocessing transformation pass. In this pass we should
/// have all mhlo -> mhlo transformations that are shared between all
/// backends.
-std::unique_ptr<OperationPass<FuncOp>> createMHLOToMHLOPreprocessingPass();
+std::unique_ptr<OperationPass<func::FuncOp>>
+createMHLOToMHLOPreprocessingPass();
// Verifies a module being input to the core compiler pipeline only contains
// IR structures that are supported at that level.
@@ -71,7 +73,8 @@
// Test passes
//------------------------------------------------------------------------------
-std::unique_ptr<OperationPass<FuncOp>> createTestMHLOConvertComplexToRealPass();
+std::unique_ptr<OperationPass<func::FuncOp>>
+createTestMHLOConvertComplexToRealPass();
//===----------------------------------------------------------------------===//
// Register all Passes
diff --git a/iree/compiler/InputConversion/MHLO/Passes.td b/iree/compiler/InputConversion/MHLO/Passes.td
index 4cf16ae..3960c0d 100644
--- a/iree/compiler/InputConversion/MHLO/Passes.td
+++ b/iree/compiler/InputConversion/MHLO/Passes.td
@@ -10,13 +10,13 @@
include "mlir/Pass/PassBase.td"
def ConvertMHLOToLinalgOnTensors :
- Pass<"iree-mhlo-to-linalg-on-tensors", "FuncOp"> {
+ Pass<"iree-mhlo-to-linalg-on-tensors", "func::FuncOp"> {
let summary = "Convert from XLA-HLO ops to Linalg ops on tensors";
let constructor = "mlir::iree_compiler::MHLO::createMHLOToLinalgOnTensorsPass()";
}
def ConvertMHLOToLinalgExt
- : Pass<"iree-mhlo-to-linalg-ext", "FuncOp"> {
+ : Pass<"iree-mhlo-to-linalg-ext", "func::FuncOp"> {
let summary =
"Convert from XLA-HLO ops to LinalgExt ops and distribute to Flow ops";
let constructor =
@@ -36,7 +36,7 @@
}
def MHLOToMHLOPreprocessing :
- Pass<"iree-mhlo-to-mhlo-preprocessing", "FuncOp"> {
+ Pass<"iree-mhlo-to-mhlo-preprocessing", "func::FuncOp"> {
let summary = "Apply mhlo to mhlo transformations for some mhlo ops";
let constructor = "mlir::iree_compiler::MHLO::createMHLOToMHLOPreprocessingPass()";
let options = [
@@ -56,7 +56,7 @@
//------------------------------------------------------------------------------
def TestMHLOConvertComplexToReal :
- Pass<"iree-test-mhlo-convert-complex-to-real", "FuncOp"> {
+ Pass<"iree-test-mhlo-convert-complex-to-real", "func::FuncOp"> {
let summary = "Test pass that does an MHLO->MHLO conversion of just complex arithmetic ops.";
let constructor = "mlir::iree_compiler::MHLO::createTestMHLOConvertComplexToRealPass()";
}
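Note: function-level passes are now anchored on `func::FuncOp`, both in the `Passes.td` anchors and in the factory signatures above. A minimal sketch of a pass written against the new anchor, assuming the stock `PassWrapper` helper (the pass itself is hypothetical and not part of this patch):

```cpp
#include <memory>

#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/Pass.h"

namespace {
// Hypothetical pass used only to illustrate the func::FuncOp anchor.
struct ExampleFuncPass
    : public mlir::PassWrapper<ExampleFuncPass,
                               mlir::OperationPass<mlir::func::FuncOp>> {
  llvm::StringRef getArgument() const override { return "example-func-pass"; }
  void runOnOperation() override {
    // The function signature is reached through getFunctionType() now.
    mlir::FunctionType type = getOperation().getFunctionType();
    (void)type;
  }
};
}  // namespace

std::unique_ptr<mlir::OperationPass<mlir::func::FuncOp>> createExampleFuncPass() {
  return std::make_unique<ExampleFuncPass>();
}
```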
diff --git a/iree/compiler/InputConversion/MHLO/test/broadcasting.mlir b/iree/compiler/InputConversion/MHLO/test/broadcasting.mlir
index 8999445..e3b8bc9 100644
--- a/iree/compiler/InputConversion/MHLO/test/broadcasting.mlir
+++ b/iree/compiler/InputConversion/MHLO/test/broadcasting.mlir
@@ -79,7 +79,7 @@
// NOTE: compare is unique because of the element type switch. The pattern
// will fail or the verifier will catch it if wrong.
// CHECK-NOT: mhlo.compare
- %0 = chlo.broadcast_compare %arg0, %arg1 {comparison_direction = "EQ"} : (tensor<?xf32>, tensor<?x?xf32>) -> tensor<?x?xi1>
+ %0 = chlo.broadcast_compare %arg0, %arg1 {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<?xf32>, tensor<?x?xf32>) -> tensor<?x?xi1>
return %0 : tensor<?x?xi1>
}
@@ -296,7 +296,7 @@
// CHECK-LABEL: @compareWithoutBroadcast
func @compareWithoutBroadcast(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xi1> {
// CHECK-NOT: mhlo.compare
- %0 = chlo.broadcast_compare %arg0, %arg1 {comparison_direction = "EQ"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xi1>
+ %0 = chlo.broadcast_compare %arg0, %arg1 {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xi1>
return %0 : tensor<4xi1>
}
diff --git a/iree/compiler/InputConversion/MHLO/test/convert_complex_to_real.mlir b/iree/compiler/InputConversion/MHLO/test/convert_complex_to_real.mlir
index 68ae132..b3c4d4c 100644
--- a/iree/compiler/InputConversion/MHLO/test/convert_complex_to_real.mlir
+++ b/iree/compiler/InputConversion/MHLO/test/convert_complex_to_real.mlir
@@ -123,10 +123,10 @@
%arg2 : tensor<2xf32>, %arg3 : tensor<2xf32>) -> (tensor<2xi1>) {
%lhs = "mhlo.complex"(%arg0, %arg1) : (tensor<2xf32>, tensor<2xf32>) -> (tensor<2xcomplex<f32>>)
%rhs = "mhlo.complex"(%arg2, %arg3) : (tensor<2xf32>, tensor<2xf32>) -> (tensor<2xcomplex<f32>>)
- // CHECK-DAG: %[[OUTR:.+]] = chlo.broadcast_compare %arg0, %arg2 {comparison_direction = "EQ"}
- // CHECK-DAG: %[[OUTI:.+]] = chlo.broadcast_compare %arg1, %arg3 {comparison_direction = "EQ"}
+ // CHECK-DAG: %[[OUTR:.+]] = chlo.broadcast_compare %arg0, %arg2 {comparison_direction = #mhlo<"comparison_direction EQ">}
+ // CHECK-DAG: %[[OUTI:.+]] = chlo.broadcast_compare %arg1, %arg3 {comparison_direction = #mhlo<"comparison_direction EQ">}
// CHECK-DAG: %[[OUT:.+]] = mhlo.and %[[OUTR]], %[[OUTI]]
- %0 = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<2xcomplex<f32>>, tensor<2xcomplex<f32>>) -> tensor<2xi1>
+ %0 = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<2xcomplex<f32>>, tensor<2xcomplex<f32>>) -> tensor<2xi1>
// CHECK: return %[[OUT]]
return %0 : tensor<2xi1>
@@ -137,10 +137,10 @@
%arg2 : tensor<2xf32>, %arg3 : tensor<2xf32>) -> (tensor<2xi1>) {
%lhs = "mhlo.complex"(%arg0, %arg1) : (tensor<2xf32>, tensor<2xf32>) -> (tensor<2xcomplex<f32>>)
%rhs = "mhlo.complex"(%arg2, %arg3) : (tensor<2xf32>, tensor<2xf32>) -> (tensor<2xcomplex<f32>>)
- // CHECK-DAG: %[[OUTR:.+]] = chlo.broadcast_compare %arg0, %arg2 {comparison_direction = "NE"}
- // CHECK-DAG: %[[OUTI:.+]] = chlo.broadcast_compare %arg1, %arg3 {comparison_direction = "NE"}
+ // CHECK-DAG: %[[OUTR:.+]] = chlo.broadcast_compare %arg0, %arg2 {comparison_direction = #mhlo<"comparison_direction NE">}
+ // CHECK-DAG: %[[OUTI:.+]] = chlo.broadcast_compare %arg1, %arg3 {comparison_direction = #mhlo<"comparison_direction NE">}
// CHECK-DAG: %[[OUT:.+]] = mhlo.or %[[OUTR]], %[[OUTI]]
- %0 = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "NE"} : (tensor<2xcomplex<f32>>, tensor<2xcomplex<f32>>) -> tensor<2xi1>
+ %0 = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction NE">} : (tensor<2xcomplex<f32>>, tensor<2xcomplex<f32>>) -> tensor<2xi1>
// CHECK: return %[[OUT]]
return %0 : tensor<2xi1>
diff --git a/iree/compiler/InputConversion/MHLO/test/convert_mhlo_to_linalg_ext.mlir b/iree/compiler/InputConversion/MHLO/test/convert_mhlo_to_linalg_ext.mlir
index bb2349e..8600141 100644
--- a/iree/compiler/InputConversion/MHLO/test/convert_mhlo_to_linalg_ext.mlir
+++ b/iree/compiler/InputConversion/MHLO/test/convert_mhlo_to_linalg_ext.mlir
@@ -5,7 +5,7 @@
func @sort_1d(%arg0: tensor<128xi32>) -> (tensor<128xi32>) {
%0 = "mhlo.sort"(%arg0) ( {
^bb0(%arg2: tensor<i32>, %arg3: tensor<i32>):
- %1 = "mhlo.compare"(%arg2, %arg3) {comparison_direction = "GT"} : (tensor<i32>, tensor<i32>) -> tensor<i1>
+ %1 = "mhlo.compare"(%arg2, %arg3) {comparison_direction = #mhlo<"comparison_direction GT">} : (tensor<i32>, tensor<i32>) -> tensor<i1>
"mhlo.return"(%1) : (tensor<i1>) -> ()
}) {dimension = 0 : i64, is_stable = false} : (tensor<128xi32>) -> (tensor<128xi32>)
return %0 : tensor<128xi32>
@@ -26,7 +26,7 @@
func @sort_1d_ui(%arg0: tensor<128xui32>) -> (tensor<128xui32>) {
%0 = "mhlo.sort"(%arg0) ( {
^bb0(%arg2: tensor<ui32>, %arg3: tensor<ui32>): // no predecessors
- %1 = "mhlo.compare"(%arg2, %arg3) {comparison_direction = "GT"} : (tensor<ui32>, tensor<ui32>) -> tensor<i1>
+ %1 = "mhlo.compare"(%arg2, %arg3) {comparison_direction = #mhlo<"comparison_direction GT">} : (tensor<ui32>, tensor<ui32>) -> tensor<i1>
"mhlo.return"(%1) : (tensor<i1>) -> ()
}) {dimension = 0 : i64, is_stable = false} : (tensor<128xui32>) -> (tensor<128xui32>)
return %0 : tensor<128xui32>
@@ -50,7 +50,7 @@
%0 = mhlo.constant dense<0> : tensor<i32>
%1 = "mhlo.sort"(%arg0) ( {
^bb0(%arg1: tensor<i32>, %arg3: tensor<i32>):
- %2 = "mhlo.compare"(%arg1, %0) {comparison_direction = "LT"} : (tensor<i32>, tensor<i32>) -> tensor<i1>
+ %2 = "mhlo.compare"(%arg1, %0) {comparison_direction = #mhlo<"comparison_direction LT">} : (tensor<i32>, tensor<i32>) -> tensor<i1>
"mhlo.return"(%2) : (tensor<i1>) -> ()
}) {dimension = 1 : i64, is_stable = true} : (tensor<1x10xi32>) -> tensor<1x10xi32>
return %1 : tensor<1x10xi32>
@@ -72,7 +72,7 @@
func @sort_argument_capture(%arg0: tensor<1x10xi32>, %arg1 : tensor<i32>) -> tensor<1x10xi32> {
%1 = "mhlo.sort"(%arg0) ( {
^bb0(%arg2: tensor<i32>, %arg3: tensor<i32>):
- %2 = "mhlo.compare"(%arg2, %arg1) {comparison_direction = "LT"} : (tensor<i32>, tensor<i32>) -> tensor<i1>
+ %2 = "mhlo.compare"(%arg2, %arg1) {comparison_direction = #mhlo<"comparison_direction LT">} : (tensor<i32>, tensor<i32>) -> tensor<i1>
"mhlo.return"(%2) : (tensor<i1>) -> ()
}) {dimension = 1 : i64, is_stable = true} : (tensor<1x10xi32>) -> tensor<1x10xi32>
return %1 : tensor<1x10xi32>
@@ -95,7 +95,7 @@
func @sort_2d(%arg0: tensor<16x32xi32>) -> (tensor<16x32xi32>) {
%0 = "mhlo.sort"(%arg0) ( {
^bb0(%arg2: tensor<i32>, %arg3: tensor<i32>):
- %1 = "mhlo.compare"(%arg2, %arg3) {comparison_direction = "GT"} : (tensor<i32>, tensor<i32>) -> tensor<i1>
+ %1 = "mhlo.compare"(%arg2, %arg3) {comparison_direction = #mhlo<"comparison_direction GT">} : (tensor<i32>, tensor<i32>) -> tensor<i1>
"mhlo.return"(%1) : (tensor<i1>) -> ()
}) {dimension = 0 : i64, is_stable = false} : (tensor<16x32xi32>) -> (tensor<16x32xi32>)
return %0 : tensor<16x32xi32>
@@ -118,7 +118,7 @@
^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>):
%2 = "mhlo.bitcast_convert"(%arg1) : (tensor<f32>) -> tensor<ui32>
%3 = "mhlo.bitcast_convert"(%arg2) : (tensor<f32>) -> tensor<ui32>
- %4 = "mhlo.compare"(%2, %3) {comparison_direction = "LT"} : (tensor<ui32>, tensor<ui32>) -> tensor<i1>
+ %4 = "mhlo.compare"(%2, %3) {comparison_direction = #mhlo<"comparison_direction LT">} : (tensor<ui32>, tensor<ui32>) -> tensor<i1>
"mhlo.return"(%4) : (tensor<i1>) -> ()
}) {dimension = 1 : i64, is_stable = true} : (tensor<1x5xf32>) -> tensor<1x5xf32>
return %1 : tensor<1x5xf32>
@@ -144,7 +144,7 @@
%1 = "mhlo.sort"(%arg0) ( {
^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>):
%2 = "mhlo.bitcast_convert"(%arg1) : (tensor<f32>) -> tensor<ui32>
- %3 = "mhlo.compare"(%2, %ui32) {comparison_direction = "LT"} : (tensor<ui32>, tensor<ui32>) -> tensor<i1>
+ %3 = "mhlo.compare"(%2, %ui32) {comparison_direction = #mhlo<"comparison_direction LT">} : (tensor<ui32>, tensor<ui32>) -> tensor<i1>
"mhlo.return"(%3) : (tensor<i1>) -> ()
}) {dimension = 1 : i64, is_stable = true} : (tensor<1x5xf32>) -> tensor<1x5xf32>
return %1 : tensor<1x5xf32>
@@ -176,7 +176,7 @@
%3 = mhlo.add %2, %arg1 : tensor<complex<f32>>
%4 = "mhlo.real"(%3) : (tensor<complex<f32>>) -> tensor<f32>
%5 = "mhlo.imag"(%3) : (tensor<complex<f32>>) -> tensor<f32>
- %6 = "mhlo.compare"(%4, %5) {comparison_direction = "LT"} : (tensor<f32>, tensor<f32>) -> tensor<i1>
+ %6 = "mhlo.compare"(%4, %5) {comparison_direction = #mhlo<"comparison_direction LT">} : (tensor<f32>, tensor<f32>) -> tensor<i1>
"mhlo.return"(%6) : (tensor<i1>) -> ()
}) {dimension = 1 : i64, is_stable = true} : (tensor<1x5xf32>) -> tensor<1x5xf32>
return %1 : tensor<1x5xf32>
@@ -200,7 +200,7 @@
func @topk(%arg0: tensor<128xi32>, %arg1: tensor<128xi32>) -> (tensor<128xi32>) {
%0:2 = "mhlo.sort"(%arg0, %arg1) ( {
^bb0(%arg2: tensor<i32>, %arg3: tensor<i32>, %arg4: tensor<i32>, %arg5: tensor<i32>):
- %1 = "mhlo.compare"(%arg2, %arg3) {comparison_direction = "GT"} : (tensor<i32>, tensor<i32>) -> tensor<i1>
+ %1 = "mhlo.compare"(%arg2, %arg3) {comparison_direction = #mhlo<"comparison_direction GT">} : (tensor<i32>, tensor<i32>) -> tensor<i1>
"mhlo.return"(%1) : (tensor<i1>) -> ()
}) {dimension = 0 : i64, is_stable = false} : (tensor<128xi32>, tensor<128xi32>) -> (tensor<128xi32>, tensor<128xi32>)
return %0#0 : tensor<128xi32>
@@ -413,7 +413,7 @@
func @rfft_1d(%input: tensor<8xf32>) -> (tensor<5xf32>, tensor<5xf32>) {
%0 = "mhlo.fft"(%input) {
- fft_length = dense<8> : tensor<1xi64>, fft_type = "RFFT"
+ fft_length = dense<8> : tensor<1xi64>, fft_type = #mhlo<"fft_type RFFT">
} : (tensor<8xf32>) -> tensor<5xcomplex<f32>>
%1 = "mhlo.real"(%0) : (tensor<5xcomplex<f32>>) -> tensor<5xf32>
%2 = "mhlo.imag"(%0) : (tensor<5xcomplex<f32>>) -> tensor<5xf32>
@@ -460,7 +460,7 @@
func @rfft_2d(%input: tensor<4x8xf32>) -> (tensor<4x5xf32>, tensor<4x5xf32>) {
%0 = "mhlo.fft"(%input) {
- fft_length = dense<8> : tensor<1xi64>, fft_type = "RFFT"
+ fft_length = dense<8> : tensor<1xi64>, fft_type = #mhlo<"fft_type RFFT">
} : (tensor<4x8xf32>) -> tensor<4x5xcomplex<f32>>
%1 = "mhlo.real"(%0) : (tensor<4x5xcomplex<f32>>) -> tensor<4x5xf32>
%2 = "mhlo.imag"(%0) : (tensor<4x5xcomplex<f32>>) -> tensor<4x5xf32>
diff --git a/iree/compiler/InputConversion/MHLO/test/fft.mlir b/iree/compiler/InputConversion/MHLO/test/fft.mlir
index 367269b..a09bee1 100644
--- a/iree/compiler/InputConversion/MHLO/test/fft.mlir
+++ b/iree/compiler/InputConversion/MHLO/test/fft.mlir
@@ -2,7 +2,7 @@
func @rfft_1d(%input: tensor<32xf32>) -> (tensor<17xf32>, tensor<17xf32>) {
%0 = "mhlo.fft"(%input) {
- fft_length = dense<32> : tensor<1xi64>, fft_type = "RFFT"
+ fft_length = dense<32> : tensor<1xi64>, fft_type = #mhlo<"fft_type RFFT">
} : (tensor<32xf32>) -> tensor<17xcomplex<f32>>
%1 = "mhlo.real"(%0) : (tensor<17xcomplex<f32>>) -> tensor<17xf32>
%2 = "mhlo.imag"(%0) : (tensor<17xcomplex<f32>>) -> tensor<17xf32>
@@ -33,7 +33,7 @@
func @rfft_2d(%input: tensor<1x32xf32>) -> (tensor<1x17xf32>, tensor<1x17xf32>) {
%0 = "mhlo.fft"(%input) {
- fft_length = dense<32> : tensor<1xi64>, fft_type = "RFFT"
+ fft_length = dense<32> : tensor<1xi64>, fft_type = #mhlo<"fft_type RFFT">
} : (tensor<1x32xf32>) -> tensor<1x17xcomplex<f32>>
%1 = "mhlo.real"(%0) : (tensor<1x17xcomplex<f32>>) -> tensor<1x17xf32>
%2 = "mhlo.imag"(%0) : (tensor<1x17xcomplex<f32>>) -> tensor<1x17xf32>
diff --git a/iree/compiler/InputConversion/MHLO/test/legalize_input_types.mlir b/iree/compiler/InputConversion/MHLO/test/legalize_input_types.mlir
index ed95270..13c3c1a 100644
--- a/iree/compiler/InputConversion/MHLO/test/legalize_input_types.mlir
+++ b/iree/compiler/InputConversion/MHLO/test/legalize_input_types.mlir
@@ -88,7 +88,7 @@
}
// -----
-// expected-error@+1 {{'builtin.func' op unable to legalize type of input 0}}
+// expected-error@+1 {{'func.func' op unable to legalize type of input 0}}
func @tensorUnrankedArg(%arg0 : tensor<*xi64>) -> tensor<*xi64> {
return %arg0 : tensor<*xi64>
}
@@ -106,14 +106,14 @@
// CHECK-LABEL: func @compareI64
// CHECK-SAME: (%arg0: tensor<i32>, %arg1: tensor<i32>) -> (i1, tensor<i32>)
func @compareI64(%arg0 : tensor<i64>, %arg1 : tensor<i64>) -> (i1, tensor<i64>) {
- // CHECK-NEXT: %0 = "mhlo.compare"(%arg0, %arg1) {comparison_direction = "LT"} : (tensor<i32>, tensor<i32>) -> tensor<i1>
+ // CHECK-NEXT: %0 = "mhlo.compare"(%arg0, %arg1) {comparison_direction = #mhlo<"comparison_direction LT">} : (tensor<i32>, tensor<i32>) -> tensor<i1>
// CHECK-NEXT: %1 = tensor.extract %0[] : tensor<i1>
// CHECK-NEXT: cf.cond_br %1, ^bb1(%1, %arg0 : i1, tensor<i32>), ^bb2(%1, %arg1 : i1, tensor<i32>)
// CHECK-NEXT: ^bb1(%2: i1, %3: tensor<i32>): // pred: ^bb0
// CHECK-NEXT: return %2, %3 : i1, tensor<i32>
// CHECK-NEXT: ^bb2(%4: i1, %5: tensor<i32>): // pred: ^bb0
// CHECK-NEXT: return %4, %5 : i1, tensor<i32>
- %0 = "mhlo.compare"(%arg0, %arg1) {comparison_direction = "LT"} : (tensor<i64>, tensor<i64>) -> tensor<i1>
+ %0 = "mhlo.compare"(%arg0, %arg1) {comparison_direction = #mhlo<"comparison_direction LT">} : (tensor<i64>, tensor<i64>) -> tensor<i1>
%1 = tensor.extract %0[] : tensor<i1>
cf.cond_br %1, ^bb1(%1, %arg0 : i1, tensor<i64>), ^bb2(%1, %arg1 : i1, tensor<i64>)
^bb1(%2 : i1, %3 : tensor<i64>):
@@ -161,7 +161,7 @@
// CHECK: util.global.load @[[VAR]]
// CHECK: util.global.store %{{.+}}, @[[VAR]]
util.global mutable @readwritevar = dense<0> : tensor<i64>
-builtin.func @foo(%arg0 : tensor<i64>) {
+func.func @foo(%arg0 : tensor<i64>) {
%0 = util.global.load @readwritevar : tensor<i64>
%1 = chlo.broadcast_add %0, %arg0 : (tensor<i64>, tensor<i64>) -> tensor<i64>
util.global.store %1, @readwritevar : tensor<i64>
diff --git a/iree/compiler/InputConversion/MHLO/test/mhlo_to_mhlo_preprocessing_canoncalize_dot_general.mlir b/iree/compiler/InputConversion/MHLO/test/mhlo_to_mhlo_preprocessing_canoncalize_dot_general.mlir
index fc51c87..6203cae 100644
--- a/iree/compiler/InputConversion/MHLO/test/mhlo_to_mhlo_preprocessing_canoncalize_dot_general.mlir
+++ b/iree/compiler/InputConversion/MHLO/test/mhlo_to_mhlo_preprocessing_canoncalize_dot_general.mlir
@@ -8,7 +8,7 @@
rhs_batching_dimensions = [],
rhs_contracting_dimensions = [0, 1],
>,
- precision_config = ["DEFAULT", "DEFAULT"]
+ precision_config = [#mhlo<"precision DEFAULT">, #mhlo<"precision DEFAULT">]
} : (tensor<1x32x128x4xf32>, tensor<128x4x8x64xf32>) -> tensor<1x32x8x64xf32>
return %0 : tensor<1x32x8x64xf32>
}
@@ -30,7 +30,7 @@
rhs_batching_dimensions = [0, 1],
rhs_contracting_dimensions = [2],
>,
- precision_config = ["DEFAULT", "DEFAULT"]
+ precision_config = [#mhlo<"precision DEFAULT">, #mhlo<"precision DEFAULT">]
} : (tensor<1x8x32x64xf32>, tensor<1x8x64x32xf32>) -> tensor<1x8x32x32xf32>
return %0 : tensor<1x8x32x32xf32>
}
@@ -51,7 +51,7 @@
rhs_batching_dimensions = [0, 1],
rhs_contracting_dimensions = [2],
>,
- precision_config = ["DEFAULT", "DEFAULT"]
+ precision_config = [#mhlo<"precision DEFAULT">, #mhlo<"precision DEFAULT">]
} : (tensor<1x8x64x32xf32>, tensor<1x8x64x32xf32>) -> tensor<1x8x32x32xf32>
return %0 : tensor<1x8x32x32xf32>
}
@@ -72,7 +72,7 @@
rhs_batching_dimensions = [0, 1],
rhs_contracting_dimensions = [3],
>,
- precision_config = ["DEFAULT", "DEFAULT"]
+ precision_config = [#mhlo<"precision DEFAULT">, #mhlo<"precision DEFAULT">]
} : (tensor<1x8x32x64xf32>, tensor<1x8x32x64xf32>) -> tensor<1x8x32x32xf32>
return %0 : tensor<1x8x32x32xf32>
}
@@ -95,7 +95,7 @@
rhs_batching_dimensions = [0, 1],
rhs_contracting_dimensions = [3],
>,
- precision_config = ["DEFAULT", "DEFAULT"]
+ precision_config = [#mhlo<"precision DEFAULT">, #mhlo<"precision DEFAULT">]
} : (tensor<1x8x64x32xf32>, tensor<1x8x32x64xf32>) -> tensor<1x8x32x32xf32>
return %0 : tensor<1x8x32x32xf32>
}
@@ -118,7 +118,7 @@
rhs_batching_dimensions = [0, 2],
rhs_contracting_dimensions = [3],
>,
- precision_config = ["DEFAULT", "DEFAULT"]
+ precision_config = [#mhlo<"precision DEFAULT">, #mhlo<"precision DEFAULT">]
} : (tensor<1x1x8x64xf32>, tensor<1x512x8x64xf32>) -> tensor<1x8x1x512xf32>
return %0 : tensor<1x8x1x512xf32>
}
diff --git a/iree/test/e2e/models/collatz.mlir b/iree/test/e2e/models/collatz.mlir
index 5de052d..12b7394 100644
--- a/iree/test/e2e/models/collatz.mlir
+++ b/iree/test/e2e/models/collatz.mlir
@@ -10,20 +10,20 @@
%3 = mhlo.constant dense<0.0> : tensor<f32>
cf.br ^bb1(%3, %arg0 : tensor<f32>, tensor<f32>)
^bb1(%4: tensor<f32>, %5: tensor<f32>):
- %6 = "mhlo.compare"(%5, %0) {comparison_direction = "GT"} : (tensor<f32>, tensor<f32>) -> tensor<i1>
+ %6 = "mhlo.compare"(%5, %0) {comparison_direction = #mhlo<"comparison_direction GT">} : (tensor<f32>, tensor<f32>) -> tensor<i1>
%7 = tensor.extract %6[] : tensor<i1>
cf.cond_br %7, ^bb2(%4, %5 : tensor<f32>, tensor<f32>), ^bb6(%4 : tensor<f32>)
^bb2(%8: tensor<f32>, %9: tensor<f32>):
%10 = mhlo.add %8, %0 : tensor<f32>
%11 = mhlo.remainder %9, %2 : tensor<f32>
- %12 = "mhlo.compare"(%11, %3) {comparison_direction = "NE"} : (tensor<f32>, tensor<f32>) -> tensor<i1>
- %13 = "mhlo.compare"(%2, %3) {comparison_direction = "LT"} : (tensor<f32>, tensor<f32>) -> tensor<i1>
- %14 = "mhlo.compare"(%11, %3) {comparison_direction = "LT"} : (tensor<f32>, tensor<f32>) -> tensor<i1>
- %15 = "mhlo.compare"(%13, %14) {comparison_direction = "NE"} : (tensor<i1>, tensor<i1>) -> tensor<i1>
+ %12 = "mhlo.compare"(%11, %3) {comparison_direction = #mhlo<"comparison_direction NE">} : (tensor<f32>, tensor<f32>) -> tensor<i1>
+ %13 = "mhlo.compare"(%2, %3) {comparison_direction = #mhlo<"comparison_direction LT">} : (tensor<f32>, tensor<f32>) -> tensor<i1>
+ %14 = "mhlo.compare"(%11, %3) {comparison_direction = #mhlo<"comparison_direction LT">} : (tensor<f32>, tensor<f32>) -> tensor<i1>
+ %15 = "mhlo.compare"(%13, %14) {comparison_direction = #mhlo<"comparison_direction NE">} : (tensor<i1>, tensor<i1>) -> tensor<i1>
%16 = mhlo.and %12, %15 : tensor<i1>
%17 = mhlo.add %11, %2 : tensor<f32>
%18 = "mhlo.select"(%16, %17, %11) : (tensor<i1>, tensor<f32>, tensor<f32>) -> tensor<f32>
- %19 = "mhlo.compare"(%18, %3) {comparison_direction = "GT"} : (tensor<f32>, tensor<f32>) -> tensor<i1>
+ %19 = "mhlo.compare"(%18, %3) {comparison_direction = #mhlo<"comparison_direction GT">} : (tensor<f32>, tensor<f32>) -> tensor<i1>
%20 = tensor.extract %19[] : tensor<i1>
cf.cond_br %20, ^bb3, ^bb4
^bb3: // pred: ^bb2
diff --git a/iree/test/e2e/models/fragment_000.mlir b/iree/test/e2e/models/fragment_000.mlir
index b39037a..f941e5f 100644
--- a/iree/test/e2e/models/fragment_000.mlir
+++ b/iree/test/e2e/models/fragment_000.mlir
@@ -13,7 +13,7 @@
%6 = "mhlo.broadcast_in_dim"(%2) {broadcast_dimensions = dense<[]> : tensor<0xi64>, name = "broadcast.9"} : (tensor<f32>) -> tensor<5x1x5xf32>
%7 = mhlo.multiply %5, %6 : tensor<5x1x5xf32>
%8 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<[]> : tensor<0xi64>, name = "broadcast.47"} : (tensor<f32>) -> tensor<5x1x5xf32>
- %9 = "mhlo.compare"(%7, %8) {comparison_direction = "GT"} : (tensor<5x1x5xf32>, tensor<5x1x5xf32>) -> tensor<5x1x5xi1>
+ %9 = "mhlo.compare"(%7, %8) {comparison_direction = #mhlo<"comparison_direction GT">} : (tensor<5x1x5xf32>, tensor<5x1x5xf32>) -> tensor<5x1x5xi1>
%10 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<[]> : tensor<0xi64>, name = "broadcast.11"} : (tensor<f32>) -> tensor<5x1x5xf32>
%11 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<[]> : tensor<0xi64>, name = "broadcast.67"} : (tensor<f32>) -> tensor<5x5xf32>
%12 = "mhlo.broadcast_in_dim"(%4) {broadcast_dimensions = dense<1> : tensor<1xi64>, name = "broadcast.64"} : (tensor<5xf32>) -> tensor<5x5xf32>
diff --git a/iree/test/e2e/models/fullyconnected.mlir b/iree/test/e2e/models/fullyconnected.mlir
index 36b1376..b3ba4c8 100644
--- a/iree/test/e2e/models/fullyconnected.mlir
+++ b/iree/test/e2e/models/fullyconnected.mlir
@@ -13,7 +13,7 @@
%6 = mhlo.multiply %4, %5 {name = "multiply.57"} : tensor<5x1x5xf32>
%cst_0 = arith.constant {name = "constant.58"} dense<0.000000e+00> : tensor<f32>
%7 = "mhlo.broadcast_in_dim"(%cst_0) {broadcast_dimensions = dense<[]> : tensor<0xi64>, name = "broadcast.59"} : (tensor<f32>) -> tensor<5x1x5xf32>
- %8 = "mhlo.compare"(%6, %7) {comparison_direction = "GT", name = "compare.60"} : (tensor<5x1x5xf32>, tensor<5x1x5xf32>) -> tensor<5x1x5xi1>
+ %8 = "mhlo.compare"(%6, %7) {comparison_direction = #mhlo<"comparison_direction GT">, name = "compare.60"} : (tensor<5x1x5xf32>, tensor<5x1x5xf32>) -> tensor<5x1x5xi1>
%cst_1 = arith.constant {name = "constant.24"} dense<0.000000e+00> : tensor<f32>
%9 = "mhlo.broadcast_in_dim"(%cst_1) {broadcast_dimensions = dense<[]> : tensor<0xi64>, name = "broadcast.25"} : (tensor<f32>) -> tensor<5x1x5xf32>
%cst_2 = arith.constant {name = "constant.90"} dense<0.000000e+00> : tensor<f32>
@@ -25,7 +25,7 @@
%14 = mhlo.multiply %12, %13 {name = "multiply.51"} : tensor<5x1x5xf32>
%cst_4 = arith.constant {name = "constant.52"} dense<0.000000e+00> : tensor<f32>
%15 = "mhlo.broadcast_in_dim"(%cst_4) {broadcast_dimensions = dense<[]> : tensor<0xi64>, name = "broadcast.53"} : (tensor<f32>) -> tensor<5x1x5xf32>
- %16 = "mhlo.compare"(%14, %15) {comparison_direction = "GT", name = "compare.54"} : (tensor<5x1x5xf32>, tensor<5x1x5xf32>) -> tensor<5x1x5xi1>
+ %16 = "mhlo.compare"(%14, %15) {comparison_direction = #mhlo<"comparison_direction GT">, name = "compare.54"} : (tensor<5x1x5xf32>, tensor<5x1x5xf32>) -> tensor<5x1x5xi1>
%cst_5 = arith.constant {name = "constant.17"} dense<0.000000e+00> : tensor<f32>
%17 = "mhlo.broadcast_in_dim"(%cst_5) {broadcast_dimensions = dense<[]> : tensor<0xi64>, name = "broadcast.18"} : (tensor<f32>) -> tensor<5x1x5xf32>
%cst_6 = arith.constant {name = "constant.78"} dense<0.000000e+00> : tensor<f32>
@@ -37,7 +37,7 @@
%22 = mhlo.multiply %20, %21 {name = "multiply.45"} : tensor<5x1x5xf32>
%cst_8 = arith.constant {name = "constant.46"} dense<0.000000e+00> : tensor<f32>
%23 = "mhlo.broadcast_in_dim"(%cst_8) {broadcast_dimensions = dense<[]> : tensor<0xi64>, name = "broadcast.47"} : (tensor<f32>) -> tensor<5x1x5xf32>
- %24 = "mhlo.compare"(%22, %23) {comparison_direction = "GT", name = "compare.48"} : (tensor<5x1x5xf32>, tensor<5x1x5xf32>) -> tensor<5x1x5xi1>
+ %24 = "mhlo.compare"(%22, %23) {comparison_direction = #mhlo<"comparison_direction GT">, name = "compare.48"} : (tensor<5x1x5xf32>, tensor<5x1x5xf32>) -> tensor<5x1x5xi1>
%cst_9 = arith.constant {name = "constant.10"} dense<0.000000e+00> : tensor<f32>
%25 = "mhlo.broadcast_in_dim"(%cst_9) {broadcast_dimensions = dense<[]> : tensor<0xi64>, name = "broadcast.11"} : (tensor<f32>) -> tensor<5x1x5xf32>
%cst_10 = arith.constant {name = "constant.66"} dense<0.000000e+00> : tensor<f32>
@@ -48,7 +48,7 @@
%30 = "mhlo.transpose"(%29) {name = "transpose.39", permutation = dense<[1, 0, 2]> : tensor<3xi64>} : (tensor<1x5x3xf32>) -> tensor<5x1x3xf32>
%31 = "mhlo.reshape"(%30) {name = "reshape.40"} : (tensor<5x1x3xf32>) -> tensor<5x3xf32>
%cst_11 = arith.constant {name = "constant.61"} dense<[[0.706495285, -0.567672312, 0.483717591, 0.522725761, 0.7563259], [-0.0899272263, -0.283501834, -0.350822538, -0.351515919, -0.337136656], [-0.451804549, 0.372324884, -0.620518147, 0.235451385, 0.851095855]]> : tensor<3x5xf32>
- %32 = "mhlo.dot"(%31, %cst_11) {name = "dot.62", precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<5x3xf32>, tensor<3x5xf32>) -> tensor<5x5xf32>
+ %32 = "mhlo.dot"(%31, %cst_11) {name = "dot.62", precision_config = [#mhlo<"precision DEFAULT">, #mhlo<"precision DEFAULT">]} : (tensor<5x3xf32>, tensor<3x5xf32>) -> tensor<5x5xf32>
%cst_12 = arith.constant {name = "constant.63"} dense<[0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00]> : tensor<5xf32>
%33 = "mhlo.broadcast_in_dim"(%cst_12) {broadcast_dimensions = dense<[1]> : tensor<1xi64>, name = "broadcast.64"} : (tensor<5xf32>) -> tensor<5x5xf32>
%34 = mhlo.add %32, %33 {name = "add.65"} : tensor<5x5xf32>
@@ -58,7 +58,7 @@
%38 = "mhlo.copy"(%37) {name = "copy.4"} : (tensor<5x1x5xf32>) -> tensor<5x1x5xf32>
%39 = "mhlo.reshape"(%38) {name = "reshape.72"} : (tensor<5x1x5xf32>) -> tensor<5x5xf32>
%cst_13 = arith.constant {name = "constant.73"} dense<[[-0.0118641369, -3.785000e-02, 0.489048243, 0.321015775, -0.702280283], [-0.280262798, -0.724645615, -0.00332254497, 0.392334729, 0.619746447], [-0.113318317, -0.180415511, -0.146743968, 0.250408649, -0.442881733], [0.115600757, 0.703136146, -0.00812680274, -0.225454301, -0.0835619792], [-0.136745885, -6.298570e-01, 0.43629986, -0.689790308, 0.230725273]]> : tensor<5x5xf32>
- %40 = "mhlo.dot"(%39, %cst_13) {name = "dot.74", precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<5x5xf32>, tensor<5x5xf32>) -> tensor<5x5xf32>
+ %40 = "mhlo.dot"(%39, %cst_13) {name = "dot.74", precision_config = [#mhlo<"precision DEFAULT">, #mhlo<"precision DEFAULT">]} : (tensor<5x5xf32>, tensor<5x5xf32>) -> tensor<5x5xf32>
%cst_14 = arith.constant {name = "constant.75"} dense<[0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00]> : tensor<5xf32>
%41 = "mhlo.broadcast_in_dim"(%cst_14) {broadcast_dimensions = dense<[1]> : tensor<1xi64>, name = "broadcast.76"} : (tensor<5xf32>) -> tensor<5x5xf32>
%42 = mhlo.add %40, %41 {name = "add.77"} : tensor<5x5xf32>
@@ -68,7 +68,7 @@
%46 = "mhlo.copy"(%45) {name = "copy.5"} : (tensor<5x1x5xf32>) -> tensor<5x1x5xf32>
%47 = "mhlo.reshape"(%46) {name = "reshape.84"} : (tensor<5x1x5xf32>) -> tensor<5x5xf32>
%cst_15 = arith.constant {name = "constant.85"} dense<[[-0.136191264, -0.0401721969, 0.38497138, -5.850760e-01, 0.370910525], [-0.391011149, 0.0266356133, 0.309115469, -0.205079094, -0.559861302], [0.497760415, 0.689488232, 0.0759292394, -0.33134672, -0.237128958], [-0.53243047, 0.476418108, -0.371978909, 0.283265263, 0.63842845], [0.101761498, -0.218626946, 0.475128263, 0.042601984, 0.0988005772]]> : tensor<5x5xf32>
- %48 = "mhlo.dot"(%47, %cst_15) {name = "dot.86", precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<5x5xf32>, tensor<5x5xf32>) -> tensor<5x5xf32>
+ %48 = "mhlo.dot"(%47, %cst_15) {name = "dot.86", precision_config = [#mhlo<"precision DEFAULT">, #mhlo<"precision DEFAULT">]} : (tensor<5x5xf32>, tensor<5x5xf32>) -> tensor<5x5xf32>
%cst_16 = arith.constant {name = "constant.87"} dense<[0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00]> : tensor<5xf32>
%49 = "mhlo.broadcast_in_dim"(%cst_16) {broadcast_dimensions = dense<[1]> : tensor<1xi64>, name = "broadcast.88"} : (tensor<5xf32>) -> tensor<5x5xf32>
%50 = mhlo.add %48, %49 {name = "add.89"} : tensor<5x5xf32>
diff --git a/iree/test/e2e/models/unidirectional_lstm.mlir b/iree/test/e2e/models/unidirectional_lstm.mlir
index 45ac605..bae3859 100644
--- a/iree/test/e2e/models/unidirectional_lstm.mlir
+++ b/iree/test/e2e/models/unidirectional_lstm.mlir
@@ -13,7 +13,7 @@
// some calls from @main and the call graphs of the removed callees.
func private @ForwardLoopCond_gFAnjWGSoLs__.167(%arg0: tensor<i64>, %arg1: tensor<i64>, %arg2: tensor<40xf32>, %arg3: tensor<i64>, %arg4: tensor<74x40xf32>, %arg5: tensor<i64>, %arg6: tensor<1x10xf32>, %arg7: tensor<1x10xf32>, %arg8: tensor<5x1x64xf32>, %arg9: tensor<5x1x1xf32>, %arg10: tensor<5x1x1xf32>, %arg11: tensor<5xi64>, %arg12: tensor<5x1x10xf32>, %arg13: tensor<5x1x10xf32>) -> tensor<i1> {
- %0 = "mhlo.compare"(%arg0, %arg1) {comparison_direction = "LT"} : (tensor<i64>, tensor<i64>) -> tensor<i1>
+ %0 = "mhlo.compare"(%arg0, %arg1) {comparison_direction = #mhlo<"comparison_direction LT">} : (tensor<i64>, tensor<i64>) -> tensor<i1>
return %0 : tensor<i1>
}
func private @Forward_o16DF3vQKaI__disable_call_shape_inference_true_.189(%arg0: tensor<1x10xf32>, %arg1: tensor<1x10xf32>, %arg2: tensor<5x1x64xf32>, %arg3: tensor<5x1x1xf32>, %arg4: tensor<5x1x1xf32>) -> (tensor<i64>, tensor<5xi64>, tensor<5x1x10xf32>, tensor<5x1x10xf32>, tensor<i64>, tensor<1x10xf32>, tensor<1x10xf32>) {
@@ -39,7 +39,7 @@
%115 = mhlo.minimum %arg5, %arg6 : tensor<f32>
"mhlo.return"(%115) : (tensor<f32>) -> ()
}) {dimensions = dense<[1, 2]> : tensor<2xi64>} : (tensor<5x1x1xf32>, tensor<f32>) -> tensor<5xf32>
- %10 = "mhlo.compare"(%9, %0) {comparison_direction = "EQ"} : (tensor<5xf32>, tensor<5xf32>) -> tensor<5xi1>
+ %10 = "mhlo.compare"(%9, %0) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<5xf32>, tensor<5xf32>) -> tensor<5xi1>
%11 = "mhlo.convert"(%10) : (tensor<5xi1>) -> tensor<5xi32>
%12 = mhlo.multiply %11, %cst_0 : tensor<5xi32>
%13 = "mhlo.reduce"(%12, %cst_1) ( {
@@ -48,9 +48,9 @@
"mhlo.return"(%115) : (tensor<i32>) -> ()
}) {dimensions = dense<0> : tensor<1xi64>} : (tensor<5xi32>, tensor<i32>) -> tensor<i32>
%14 = mhlo.subtract %cst_2, %13 : tensor<i32>
- %15 = "mhlo.compare"(%14, %cst_2) {comparison_direction = "EQ"} : (tensor<i32>, tensor<i32>) -> tensor<i1>
+ %15 = "mhlo.compare"(%14, %cst_2) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<i32>, tensor<i32>) -> tensor<i1>
%16 = "mhlo.reverse"(%9) {dimensions = dense<0> : tensor<1xi64>} : (tensor<5xf32>) -> tensor<5xf32>
- %17 = "mhlo.compare"(%16, %0) {comparison_direction = "EQ"} : (tensor<5xf32>, tensor<5xf32>) -> tensor<5xi1>
+ %17 = "mhlo.compare"(%16, %0) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<5xf32>, tensor<5xf32>) -> tensor<5xi1>
%18 = "mhlo.convert"(%17) : (tensor<5xi1>) -> tensor<5xi32>
%19 = mhlo.multiply %18, %cst_0 : tensor<5xi32>
%20 = "mhlo.reduce"(%19, %cst_1) ( {
@@ -82,7 +82,7 @@
%58 = "mhlo.reshape"(%57) : (tensor<1x1xf32>) -> tensor<1xf32>
%59 = "mhlo.broadcast_in_dim"(%58) {broadcast_dimensions = dense<0> : tensor<1xi64>} : (tensor<1xf32>) -> tensor<1x10xf32>
%60 = mhlo.multiply %59, %6 : tensor<1x10xf32>
- %61 = "mhlo.compare"(%60, %7) {comparison_direction = "GT"} : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xi1>
+ %61 = "mhlo.compare"(%60, %7) {comparison_direction = #mhlo<"comparison_direction GT">} : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xi1>
%62 = "mhlo.gather"(%50, %42) {
dimension_numbers = #mhlo.gather<
collapsed_slice_dims = [0],
@@ -93,7 +93,7 @@
slice_sizes = dense<[1, 1, 64]> : tensor<3xi64>
} : (tensor<5x1x64xf32>, tensor<i64>) -> tensor<1x64xf32>
%63 = "mhlo.concatenate"(%62, %49) {dimension = 1 : i64} : (tensor<1x64xf32>, tensor<1x10xf32>) -> tensor<1x74xf32>
- %64 = "mhlo.dot"(%63, %46) {precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x74xf32>, tensor<74x40xf32>) -> tensor<1x40xf32>
+ %64 = "mhlo.dot"(%63, %46) {precision_config = [#mhlo<"precision DEFAULT">, #mhlo<"precision DEFAULT">]} : (tensor<1x74xf32>, tensor<74x40xf32>) -> tensor<1x40xf32>
%65 = "mhlo.reshape"(%44) : (tensor<40xf32>) -> tensor<1x40xf32>
%66 = mhlo.add %64, %65 : tensor<1x40xf32>
%67 = "mhlo.slice"(%66) {limit_indices = dense<[1, 30]> : tensor<2xi64>, start_indices = dense<[0, 20]> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} : (tensor<1x40xf32>) -> tensor<1x10xf32>
@@ -117,7 +117,7 @@
%85 = "mhlo.reshape"(%57) : (tensor<1x1xf32>) -> tensor<1xf32>
%86 = "mhlo.broadcast_in_dim"(%85) {broadcast_dimensions = dense<0> : tensor<1xi64>} : (tensor<1xf32>) -> tensor<1x10xf32>
%87 = mhlo.multiply %86, %6 : tensor<1x10xf32>
- %88 = "mhlo.compare"(%87, %7) {comparison_direction = "GT"} : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xi1>
+ %88 = "mhlo.compare"(%87, %7) {comparison_direction = #mhlo<"comparison_direction GT">} : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xi1>
%89 = "mhlo.slice"(%66) {limit_indices = dense<[1, 40]> : tensor<2xi64>, start_indices = dense<[0, 30]> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} : (tensor<1x40xf32>) -> tensor<1x10xf32>
%90 = mhlo.multiply %89, %8 : tensor<1x10xf32>
%91 = "mhlo.tanh"(%90) : (tensor<1x10xf32>) -> tensor<1x10xf32>
diff --git a/iree/test/e2e/regression/dynamic_compare_and_select.mlir b/iree/test/e2e/regression/dynamic_compare_and_select.mlir
index f1ac3a5..01a483b 100644
--- a/iree/test/e2e/regression/dynamic_compare_and_select.mlir
+++ b/iree/test/e2e/regression/dynamic_compare_and_select.mlir
@@ -6,7 +6,7 @@
// CHECK: 10xi32=9 8 7 6 5 4 3 2 1 0
func @main(%arg0: tensor<?xi32>, %arg1: tensor<?xi32>, %arg2: tensor<?xi32>, %arg3: tensor<?xi32>) -> tensor<?xi32> {
- %1 = "mhlo.compare"(%arg0, %arg1) {comparison_direction = "LT"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi1>
+ %1 = "mhlo.compare"(%arg0, %arg1) {comparison_direction = #mhlo<"comparison_direction LT">} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi1>
%2 = "mhlo.select"(%1, %arg2, %arg3) : (tensor<?xi1>, tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
return %2 : tensor<?xi32>
}
diff --git a/iree/test/e2e/vulkan_specific/compare.mlir b/iree/test/e2e/vulkan_specific/compare.mlir
index 0f171be..12c45e5 100644
--- a/iree/test/e2e/vulkan_specific/compare.mlir
+++ b/iree/test/e2e/vulkan_specific/compare.mlir
@@ -1,7 +1,7 @@
func @compare_tensor() {
%lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32>
%rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
%c0 = util.unfoldable_constant dense<0> : tensor<4xi8>
%c1 = util.unfoldable_constant dense<1> : tensor<4xi8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8>
@@ -12,7 +12,7 @@
func @compare_scalar() {
%lhs = util.unfoldable_constant dense<1> : tensor<i32>
%rhs = util.unfoldable_constant dense<5> : tensor<i32>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<i32>, tensor<i32>) -> tensor<i1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<i32>, tensor<i32>) -> tensor<i1>
%c0 = util.unfoldable_constant dense<0> : tensor<i8>
%c1 = util.unfoldable_constant dense<1> : tensor<i8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8>
@@ -23,7 +23,7 @@
func @compare_i8() {
%lhs = util.unfoldable_constant dense<1> : tensor<i8>
%rhs = util.unfoldable_constant dense<5> : tensor<i8>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<i8>, tensor<i8>) -> tensor<i1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<i8>, tensor<i8>) -> tensor<i1>
%c0 = util.unfoldable_constant dense<0> : tensor<i8>
%c1 = util.unfoldable_constant dense<1> : tensor<i8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8>
@@ -34,7 +34,7 @@
func @compare_i16() {
%lhs = util.unfoldable_constant dense<1> : tensor<i16>
%rhs = util.unfoldable_constant dense<5> : tensor<i16>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<i16>, tensor<i16>) -> tensor<i1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<i16>, tensor<i16>) -> tensor<i1>
%c0 = util.unfoldable_constant dense<0> : tensor<i8>
%c1 = util.unfoldable_constant dense<1> : tensor<i8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8>
@@ -45,7 +45,7 @@
func @compare_i32() {
%lhs = util.unfoldable_constant dense<1> : tensor<i32>
%rhs = util.unfoldable_constant dense<5> : tensor<i32>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<i32>, tensor<i32>) -> tensor<i1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<i32>, tensor<i32>) -> tensor<i1>
%c0 = util.unfoldable_constant dense<0> : tensor<i8>
%c1 = util.unfoldable_constant dense<1> : tensor<i8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8>
@@ -56,7 +56,7 @@
func @compare_i64() {
%lhs = util.unfoldable_constant dense<1> : tensor<i64>
%rhs = util.unfoldable_constant dense<5> : tensor<i64>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<i64>, tensor<i64>) -> tensor<i1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<i64>, tensor<i64>) -> tensor<i1>
%c0 = util.unfoldable_constant dense<0> : tensor<i8>
%c1 = util.unfoldable_constant dense<1> : tensor<i8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8>
@@ -67,7 +67,7 @@
func @compare_f32() {
%lhs = util.unfoldable_constant dense<1.0> : tensor<f32>
%rhs = util.unfoldable_constant dense<5.0> : tensor<f32>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<f32>, tensor<f32>) -> tensor<i1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<f32>, tensor<f32>) -> tensor<i1>
%c0 = util.unfoldable_constant dense<0> : tensor<i8>
%c1 = util.unfoldable_constant dense<1> : tensor<i8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8>
@@ -78,7 +78,7 @@
func @compare_f64() {
%lhs = util.unfoldable_constant dense<1.0> : tensor<f64>
%rhs = util.unfoldable_constant dense<5.0> : tensor<f64>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<f64>, tensor<f64>) -> tensor<i1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<f64>, tensor<f64>) -> tensor<i1>
%c0 = util.unfoldable_constant dense<0> : tensor<i8>
%c1 = util.unfoldable_constant dense<1> : tensor<i8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8>
@@ -89,7 +89,7 @@
func @compare_tensor_odd_length() {
%lhs = util.unfoldable_constant dense<[1, 2, 7]> : tensor<3xi32>
%rhs = util.unfoldable_constant dense<[5, 2, 3]> : tensor<3xi32>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<3xi32>, tensor<3xi32>) -> tensor<3xi1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<3xi32>, tensor<3xi32>) -> tensor<3xi1>
%c0 = util.unfoldable_constant dense<0> : tensor<3xi8>
%c1 = util.unfoldable_constant dense<1> : tensor<3xi8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<3xi1>, tensor<3xi8>, tensor<3xi8>) -> tensor<3xi8>
@@ -100,7 +100,7 @@
func @compare_eq() {
%lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32>
%rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
%c0 = util.unfoldable_constant dense<0> : tensor<4xi8>
%c1 = util.unfoldable_constant dense<1> : tensor<4xi8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8>
@@ -111,7 +111,7 @@
func @compare_ne() {
%lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32>
%rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "NE"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction NE">} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
%c0 = util.unfoldable_constant dense<0> : tensor<4xi8>
%c1 = util.unfoldable_constant dense<1> : tensor<4xi8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8>
@@ -122,7 +122,7 @@
func @compare_lt() {
%lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32>
%rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "LT"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction LT">} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
%c0 = util.unfoldable_constant dense<0> : tensor<4xi8>
%c1 = util.unfoldable_constant dense<1> : tensor<4xi8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8>
@@ -133,7 +133,7 @@
func @compare_le() {
%lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32>
%rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "LE"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction LE">} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
%c0 = util.unfoldable_constant dense<0> : tensor<4xi8>
%c1 = util.unfoldable_constant dense<1> : tensor<4xi8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8>
@@ -144,7 +144,7 @@
func @compare_gt() {
%lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32>
%rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "GT"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction GT">} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
%c0 = util.unfoldable_constant dense<0> : tensor<4xi8>
%c1 = util.unfoldable_constant dense<1> : tensor<4xi8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8>
@@ -155,7 +155,7 @@
func @compare_ge() {
%lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32>
%rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "GE"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction GE">} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
%c0 = util.unfoldable_constant dense<0> : tensor<4xi8>
%c1 = util.unfoldable_constant dense<1> : tensor<4xi8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8>
diff --git a/iree/test/e2e/vulkan_specific/dot_general.mlir b/iree/test/e2e/vulkan_specific/dot_general.mlir
index af2b39c..060af86 100644
--- a/iree/test/e2e/vulkan_specific/dot_general.mlir
+++ b/iree/test/e2e/vulkan_specific/dot_general.mlir
@@ -11,7 +11,7 @@
rhs_batching_dimensions = [0],
rhs_contracting_dimensions = [1],
>,
- precision_config = ["DEFAULT", "DEFAULT"]
+ precision_config = [#mhlo<"precision DEFAULT">, #mhlo<"precision DEFAULT">]
} : (tensor<1x2x3xf32>, tensor<1x3x4xf32>) -> tensor<1x2x4xf32>
check.expect_almost_eq_const(%res, dense<[[[0.6, 1.2, 1.8, 2.4],[1.5, 3.0, 4.5, 6.0]]]> : tensor<1x2x4xf32>) : tensor<1x2x4xf32>
return
@@ -36,7 +36,7 @@
rhs_batching_dimensions = [0],
rhs_contracting_dimensions = [1],
>,
- precision_config = ["DEFAULT", "DEFAULT"]
+ precision_config = [#mhlo<"precision DEFAULT">, #mhlo<"precision DEFAULT">]
} : (tensor<2x2x3xf32>, tensor<2x3x4xf32>) -> tensor<2x2x4xf32>
check.expect_almost_eq_const(%res, dense<[
[
@@ -58,7 +58,7 @@
rhs_batching_dimensions = [0],
rhs_contracting_dimensions = [1],
>,
- precision_config = ["DEFAULT", "DEFAULT"]
+ precision_config = [#mhlo<"precision DEFAULT">, #mhlo<"precision DEFAULT">]
} : (tensor<4x32x1024xf32>, tensor<4x1024x64xf32>) -> tensor<4x32x64xf32>
check.expect_almost_eq_const(%res, dense<409.596> : tensor<4x32x64xf32>) : tensor<4x32x64xf32>
return
diff --git a/iree/test/e2e/xla_ops/compare.mlir b/iree/test/e2e/xla_ops/compare.mlir
index 0f171be..12c45e5 100644
--- a/iree/test/e2e/xla_ops/compare.mlir
+++ b/iree/test/e2e/xla_ops/compare.mlir
@@ -1,7 +1,7 @@
func @compare_tensor() {
%lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32>
%rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
%c0 = util.unfoldable_constant dense<0> : tensor<4xi8>
%c1 = util.unfoldable_constant dense<1> : tensor<4xi8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8>
@@ -12,7 +12,7 @@
func @compare_scalar() {
%lhs = util.unfoldable_constant dense<1> : tensor<i32>
%rhs = util.unfoldable_constant dense<5> : tensor<i32>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<i32>, tensor<i32>) -> tensor<i1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<i32>, tensor<i32>) -> tensor<i1>
%c0 = util.unfoldable_constant dense<0> : tensor<i8>
%c1 = util.unfoldable_constant dense<1> : tensor<i8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8>
@@ -23,7 +23,7 @@
func @compare_i8() {
%lhs = util.unfoldable_constant dense<1> : tensor<i8>
%rhs = util.unfoldable_constant dense<5> : tensor<i8>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<i8>, tensor<i8>) -> tensor<i1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<i8>, tensor<i8>) -> tensor<i1>
%c0 = util.unfoldable_constant dense<0> : tensor<i8>
%c1 = util.unfoldable_constant dense<1> : tensor<i8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8>
@@ -34,7 +34,7 @@
func @compare_i16() {
%lhs = util.unfoldable_constant dense<1> : tensor<i16>
%rhs = util.unfoldable_constant dense<5> : tensor<i16>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<i16>, tensor<i16>) -> tensor<i1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<i16>, tensor<i16>) -> tensor<i1>
%c0 = util.unfoldable_constant dense<0> : tensor<i8>
%c1 = util.unfoldable_constant dense<1> : tensor<i8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8>
@@ -45,7 +45,7 @@
func @compare_i32() {
%lhs = util.unfoldable_constant dense<1> : tensor<i32>
%rhs = util.unfoldable_constant dense<5> : tensor<i32>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<i32>, tensor<i32>) -> tensor<i1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<i32>, tensor<i32>) -> tensor<i1>
%c0 = util.unfoldable_constant dense<0> : tensor<i8>
%c1 = util.unfoldable_constant dense<1> : tensor<i8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8>
@@ -56,7 +56,7 @@
func @compare_i64() {
%lhs = util.unfoldable_constant dense<1> : tensor<i64>
%rhs = util.unfoldable_constant dense<5> : tensor<i64>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<i64>, tensor<i64>) -> tensor<i1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<i64>, tensor<i64>) -> tensor<i1>
%c0 = util.unfoldable_constant dense<0> : tensor<i8>
%c1 = util.unfoldable_constant dense<1> : tensor<i8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8>
@@ -67,7 +67,7 @@
func @compare_f32() {
%lhs = util.unfoldable_constant dense<1.0> : tensor<f32>
%rhs = util.unfoldable_constant dense<5.0> : tensor<f32>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<f32>, tensor<f32>) -> tensor<i1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<f32>, tensor<f32>) -> tensor<i1>
%c0 = util.unfoldable_constant dense<0> : tensor<i8>
%c1 = util.unfoldable_constant dense<1> : tensor<i8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8>
@@ -78,7 +78,7 @@
func @compare_f64() {
%lhs = util.unfoldable_constant dense<1.0> : tensor<f64>
%rhs = util.unfoldable_constant dense<5.0> : tensor<f64>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<f64>, tensor<f64>) -> tensor<i1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<f64>, tensor<f64>) -> tensor<i1>
%c0 = util.unfoldable_constant dense<0> : tensor<i8>
%c1 = util.unfoldable_constant dense<1> : tensor<i8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<i1>, tensor<i8>, tensor<i8>) -> tensor<i8>
@@ -89,7 +89,7 @@
func @compare_tensor_odd_length() {
%lhs = util.unfoldable_constant dense<[1, 2, 7]> : tensor<3xi32>
%rhs = util.unfoldable_constant dense<[5, 2, 3]> : tensor<3xi32>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<3xi32>, tensor<3xi32>) -> tensor<3xi1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<3xi32>, tensor<3xi32>) -> tensor<3xi1>
%c0 = util.unfoldable_constant dense<0> : tensor<3xi8>
%c1 = util.unfoldable_constant dense<1> : tensor<3xi8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<3xi1>, tensor<3xi8>, tensor<3xi8>) -> tensor<3xi8>
@@ -100,7 +100,7 @@
func @compare_eq() {
%lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32>
%rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "EQ"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
%c0 = util.unfoldable_constant dense<0> : tensor<4xi8>
%c1 = util.unfoldable_constant dense<1> : tensor<4xi8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8>
@@ -111,7 +111,7 @@
func @compare_ne() {
%lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32>
%rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "NE"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction NE">} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
%c0 = util.unfoldable_constant dense<0> : tensor<4xi8>
%c1 = util.unfoldable_constant dense<1> : tensor<4xi8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8>
@@ -122,7 +122,7 @@
func @compare_lt() {
%lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32>
%rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "LT"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction LT">} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
%c0 = util.unfoldable_constant dense<0> : tensor<4xi8>
%c1 = util.unfoldable_constant dense<1> : tensor<4xi8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8>
@@ -133,7 +133,7 @@
func @compare_le() {
%lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32>
%rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "LE"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction LE">} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
%c0 = util.unfoldable_constant dense<0> : tensor<4xi8>
%c1 = util.unfoldable_constant dense<1> : tensor<4xi8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8>
@@ -144,7 +144,7 @@
func @compare_gt() {
%lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32>
%rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "GT"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction GT">} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
%c0 = util.unfoldable_constant dense<0> : tensor<4xi8>
%c1 = util.unfoldable_constant dense<1> : tensor<4xi8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8>
@@ -155,7 +155,7 @@
func @compare_ge() {
%lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32>
%rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32>
- %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = "GE"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
+ %result = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<"comparison_direction GE">} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
%c0 = util.unfoldable_constant dense<0> : tensor<4xi8>
%c1 = util.unfoldable_constant dense<1> : tensor<4xi8>
%output = "mhlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8>
diff --git a/iree/test/e2e/xla_ops/dot_general.mlir b/iree/test/e2e/xla_ops/dot_general.mlir
index d56b98a..297aabb 100644
--- a/iree/test/e2e/xla_ops/dot_general.mlir
+++ b/iree/test/e2e/xla_ops/dot_general.mlir
@@ -8,7 +8,7 @@
rhs_batching_dimensions = [],
rhs_contracting_dimensions = [0],
>,
- precision_config = ["DEFAULT", "DEFAULT"]
+ precision_config = [#mhlo<"precision DEFAULT">, #mhlo<"precision DEFAULT">]
} : (tensor<1x1x2xf32>, tensor<2x3xf32>) -> tensor<1x1x3xf32>
check.expect_almost_eq_const(%res, dense<[[[0.23, 0.31, 0.39]]]> : tensor<1x1x3xf32>) : tensor<1x1x3xf32>
return
@@ -24,7 +24,7 @@
rhs_batching_dimensions = [],
rhs_contracting_dimensions = [2],
>,
- precision_config = ["DEFAULT", "DEFAULT"]
+ precision_config = [#mhlo<"precision DEFAULT">, #mhlo<"precision DEFAULT">]
} : (tensor<2x3xf32>, tensor<1x1x2xf32>) -> tensor<3x1x1xf32>
check.expect_almost_eq_const(%res, dense<[[[0.23]],[[0.31]],[[0.39]]]> : tensor<3x1x1xf32>) : tensor<3x1x1xf32>
return
@@ -43,7 +43,7 @@
rhs_batching_dimensions = [0],
rhs_contracting_dimensions = [1],
>,
- precision_config = ["DEFAULT", "DEFAULT"]
+ precision_config = [#mhlo<"precision DEFAULT">, #mhlo<"precision DEFAULT">]
} : (tensor<1x2x3xf32>, tensor<1x3x4xf32>) -> tensor<1x2x4xf32>
check.expect_almost_eq_const(%res, dense<[[[0.6, 1.2, 1.8, 2.4],[1.5, 3.0, 4.5, 6.0]]]> : tensor<1x2x4xf32>) : tensor<1x2x4xf32>
return
@@ -98,7 +98,7 @@
rhs_batching_dimensions = [0],
rhs_contracting_dimensions = [1],
>,
- precision_config = ["DEFAULT", "DEFAULT"]
+ precision_config = [#mhlo<"precision DEFAULT">, #mhlo<"precision DEFAULT">]
} : (tensor<2x2x3xf32>, tensor<2x3x4xf32>) -> tensor<2x2x4xf32>
check.expect_almost_eq_const(%res, dense<[
[
@@ -120,7 +120,7 @@
rhs_batching_dimensions = [0],
rhs_contracting_dimensions = [1],
>,
- precision_config = ["DEFAULT", "DEFAULT"]
+ precision_config = [#mhlo<"precision DEFAULT">, #mhlo<"precision DEFAULT">]
} : (tensor<4x32x1024xf32>, tensor<4x1024x64xf32>) -> tensor<4x32x64xf32>
check.expect_almost_eq_const(%res, dense<409.596> : tensor<4x32x64xf32>) : tensor<4x32x64xf32>
return
@@ -136,7 +136,7 @@
rhs_batching_dimensions = [0],
rhs_contracting_dimensions = [1],
>,
- precision_config = ["DEFAULT", "DEFAULT"]
+ precision_config = [#mhlo<"precision DEFAULT">, #mhlo<"precision DEFAULT">]
} : (tensor<4x32x1024xf32>, tensor<4x1024x64xf32>) -> tensor<4x32x64xf32>
check.expect_almost_eq_const(%res, dense<409.596> : tensor<4x32x64xf32>) : tensor<4x32x64xf32>
return
@@ -157,7 +157,7 @@
lhs_contracting_dimensions = [3],
rhs_contracting_dimensions = [2]
>,
- precision_config = ["DEFAULT", "DEFAULT"]
+ precision_config = [#mhlo<"precision DEFAULT">, #mhlo<"precision DEFAULT">]
} : (tensor<2x3x2x1xf32>, tensor<2x2x1xf32>) -> tensor<2x2x3x2xf32>
check.expect_almost_eq_const(%res, dense<[
[
diff --git a/iree/test/e2e/xla_ops/fft.mlir b/iree/test/e2e/xla_ops/fft.mlir
index 92d690a..01fc2b6 100644
--- a/iree/test/e2e/xla_ops/fft.mlir
+++ b/iree/test/e2e/xla_ops/fft.mlir
@@ -7,7 +7,7 @@
// 3.5, -4.5, 0.0, 9.0, 1.0, 4.5, -0.3, 10.0, -1.0, 5.5, 0.3, 299.0, 3.5,
// -0.777, 2.0, 1.7, 3.5, -4.5, 0.0]> : tensor<32xf32>
// %0 = "mhlo.fft"(%input) {
-// fft_length = dense<32> : tensor<i64>, fft_type = "RFFT"
+// fft_length = dense<32> : tensor<i64>, fft_type = #mhlo<"fft_type RFFT">
// } : (tensor<32xf32>) -> tensor<17xcomplex<f32>>
// %1 = "mhlo.real"(%0) : (tensor<17xcomplex<f32>>) -> tensor<17xf32>
// %2 = "mhlo.imag"(%0) : (tensor<17xcomplex<f32>>) -> tensor<17xf32>
@@ -22,7 +22,7 @@
3.5, -4.5, 0.0, 9.0, 1.0, 4.5, -0.3, 10.0, -1.0, 5.5, 0.3, 299.0, 3.5,
-0.777, 2.0, 1.7, 3.5, -4.5, 0.0]]> : tensor<1x32xf32>
%0 = "mhlo.fft"(%input) {
- fft_length = dense<32> : tensor<1xi64>, fft_type = "RFFT"
+ fft_length = dense<32> : tensor<1xi64>, fft_type = #mhlo<"fft_type RFFT">
} : (tensor<1x32xf32>) -> tensor<1x17xcomplex<f32>>
%1 = "mhlo.real"(%0) : (tensor<1x17xcomplex<f32>>) -> tensor<1x17xf32>
%2 = "mhlo.imag"(%0) : (tensor<1x17xcomplex<f32>>) -> tensor<1x17xf32>
diff --git a/iree/test/e2e/xla_ops/reduce.mlir b/iree/test/e2e/xla_ops/reduce.mlir
index f0b9e53..891ad93 100644
--- a/iree/test/e2e/xla_ops/reduce.mlir
+++ b/iree/test/e2e/xla_ops/reduce.mlir
@@ -296,9 +296,9 @@
%arg1 = util.unfoldable_constant dense<[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13], [14, 15], [16, 17]]> : tensor<9x2xi32>
%res0, %res1 = "mhlo.reduce"(%arg0, %arg1, %cst0, %cst1) ( {
^bb0(%arg2: tensor<i32>, %arg3: tensor<i32>, %arg4: tensor<i32>, %arg5: tensor<i32>): // no predecessors
- %0 = "mhlo.compare"(%arg2, %arg4) {comparison_direction = "GE"} : (tensor<i32>, tensor<i32>) -> tensor<i1>
+ %0 = "mhlo.compare"(%arg2, %arg4) {comparison_direction = #mhlo<"comparison_direction GE">} : (tensor<i32>, tensor<i32>) -> tensor<i1>
%1 = "mhlo.select"(%0, %arg2, %arg4) : (tensor<i1>, tensor<i32>, tensor<i32>) -> tensor<i32>
- %2 = "mhlo.compare"(%arg2, %arg4) {comparison_direction = "EQ"} : (tensor<i32>, tensor<i32>) -> tensor<i1>
+ %2 = "mhlo.compare"(%arg2, %arg4) {comparison_direction = #mhlo<"comparison_direction EQ">} : (tensor<i32>, tensor<i32>) -> tensor<i1>
%3 = mhlo.minimum %arg3, %arg5 : tensor<i32>
%4 = "mhlo.select"(%0, %arg3, %arg5) : (tensor<i1>, tensor<i32>, tensor<i32>) -> tensor<i32>
%5 = "mhlo.select"(%2, %3, %4) : (tensor<i1>, tensor<i32>, tensor<i32>) -> tensor<i32>
diff --git a/iree/test/e2e/xla_ops/select.mlir b/iree/test/e2e/xla_ops/select.mlir
index d2cff05..2819987 100644
--- a/iree/test/e2e/xla_ops/select.mlir
+++ b/iree/test/e2e/xla_ops/select.mlir
@@ -2,7 +2,7 @@
// TODO(b/132205704) support i1 in constants and function signatures.
%input = util.unfoldable_constant dense<[1, 0, 1, 0]> : tensor<4xi32>
%zeros = util.unfoldable_constant dense<0> : tensor<4xi32>
- %cond = "mhlo.compare"(%input, %zeros) {comparison_direction = "GT"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
+ %cond = "mhlo.compare"(%input, %zeros) {comparison_direction = #mhlo<"comparison_direction GT">} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
%lhs = util.unfoldable_constant dense<[1, 2, 3, 4]> : tensor<4xi32>
%rhs = util.unfoldable_constant dense<[5, 6, 7, 8]> : tensor<4xi32>
%result = "mhlo.select"(%cond, %lhs, %rhs) : (tensor<4xi1>, tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32>
diff --git a/iree/test/e2e/xla_ops/sort.mlir b/iree/test/e2e/xla_ops/sort.mlir
index 58813ea..b265682 100644
--- a/iree/test/e2e/xla_ops/sort.mlir
+++ b/iree/test/e2e/xla_ops/sort.mlir
@@ -3,7 +3,7 @@
%sort = "mhlo.sort"(%input) ( {
^bb0(%arg1: tensor<i32>, %arg2: tensor<i32>): // no predecessors
- %compare = "mhlo.compare"(%arg1, %arg2) {comparison_direction = "LT"} : (tensor<i32>, tensor<i32>) -> tensor<i1>
+ %compare = "mhlo.compare"(%arg1, %arg2) {comparison_direction = #mhlo<"comparison_direction LT">} : (tensor<i32>, tensor<i32>) -> tensor<i1>
"mhlo.return"(%compare) : (tensor<i1>) -> ()
}) {dimension = 0 : i64, is_stable = false} : (tensor<4xi32>) -> tensor<4xi32>
@@ -17,7 +17,7 @@
%sort = "mhlo.sort"(%input) ( {
^bb0(%arg1: tensor<i32>, %arg2: tensor<i32>): // no predecessors
- %compare = "mhlo.compare"(%arg1, %arg2) {comparison_direction = "LT"} : (tensor<i32>, tensor<i32>) -> tensor<i1>
+ %compare = "mhlo.compare"(%arg1, %arg2) {comparison_direction = #mhlo<"comparison_direction LT">} : (tensor<i32>, tensor<i32>) -> tensor<i1>
"mhlo.return"(%compare) : (tensor<i1>) -> ()
}) {dimension = 1 : i64, is_stable = false} : (tensor<2x4xi32>) -> tensor<2x4xi32>
@@ -31,7 +31,7 @@
%sort = "mhlo.sort"(%input) ( {
^bb0(%arg1: tensor<i32>, %arg2: tensor<i32>): // no predecessors
- %compare = "mhlo.compare"(%arg1, %arg2) {comparison_direction = "LT"} : (tensor<i32>, tensor<i32>) -> tensor<i1>
+ %compare = "mhlo.compare"(%arg1, %arg2) {comparison_direction = #mhlo<"comparison_direction LT">} : (tensor<i32>, tensor<i32>) -> tensor<i1>
"mhlo.return"(%compare) : (tensor<i1>) -> ()
}) {dimension = 2 : i64, is_stable = false} : (tensor<1x2x4xi32>) -> tensor<1x2x4xi32>
@@ -44,7 +44,7 @@
%sort = "mhlo.sort"(%input) ( {
^bb0(%arg1: tensor<i32>, %arg2: tensor<i32>): // no predecessors
- %compare = "mhlo.compare"(%arg1, %arg2) {comparison_direction = "GT"} : (tensor<i32>, tensor<i32>) -> tensor<i1>
+ %compare = "mhlo.compare"(%arg1, %arg2) {comparison_direction = #mhlo<"comparison_direction GT">} : (tensor<i32>, tensor<i32>) -> tensor<i1>
"mhlo.return"(%compare) : (tensor<i1>) -> ()
}) {dimension = 0 : i64, is_stable = false} : (tensor<4xi32>) -> tensor<4xi32>
diff --git a/iree/test/e2e/xla_ops/while.mlir b/iree/test/e2e/xla_ops/while.mlir
index 00d02e0..480087f 100644
--- a/iree/test/e2e/xla_ops/while.mlir
+++ b/iree/test/e2e/xla_ops/while.mlir
@@ -5,7 +5,7 @@
%cst_1 = arith.constant dense<4> : tensor<i32>
cf.br ^bb1(%start : tensor<i32>)
^bb1(%2: tensor<i32>):
- %3 = "mhlo.compare"(%2, %bound) {comparison_direction = "LT"} : (tensor<i32>, tensor<i32>) -> tensor<i1>
+ %3 = "mhlo.compare"(%2, %bound) {comparison_direction = #mhlo<"comparison_direction LT">} : (tensor<i32>, tensor<i32>) -> tensor<i1>
%4 = tensor.extract %3[] : tensor<i1>
cf.cond_br %4, ^bb2(%2 : tensor<i32>), ^bb3(%2 : tensor<i32>)
^bb2(%5: tensor<i32>):
diff --git a/llvm-external-projects/iree-dialects/BUILD b/llvm-external-projects/iree-dialects/BUILD
index 3f47bd6..3280a69 100644
--- a/llvm-external-projects/iree-dialects/BUILD
+++ b/llvm-external-projects/iree-dialects/BUILD
@@ -631,6 +631,7 @@
"@llvm-project//mlir:ArithmeticDialect",
"@llvm-project//mlir:BufferizationDialect",
"@llvm-project//mlir:BufferizationTransforms",
+ "@llvm-project//mlir:FuncDialect",
"@llvm-project//mlir:LinalgOps",
"@llvm-project//mlir:LLVMDialect",
"@llvm-project//mlir:PDLDialect",
diff --git a/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/Passes/PassDetail.h b/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/Passes/PassDetail.h
index 3f3fe9b..417a582 100644
--- a/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/Passes/PassDetail.h
+++ b/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/Passes/PassDetail.h
@@ -7,6 +7,7 @@
#ifndef IREE_DIALECTS_DIALECT_LINALGEXT_TRANSFORMS_PASS_DETAIL_H_
#define IREE_DIALECTS_DIALECT_LINALGEXT_TRANSFORMS_PASS_DETAIL_H_
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/Pass.h"
namespace mlir {
diff --git a/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/Passes/Passes.h b/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/Passes/Passes.h
index febec87..98e37ac 100644
--- a/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/Passes/Passes.h
+++ b/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/Passes/Passes.h
@@ -7,6 +7,7 @@
#ifndef IREE_DIALECTS_DIALECT_LINALGEXT_TRANSFORMS_PASSES_H_
#define IREE_DIALECTS_DIALECT_LINALGEXT_TRANSFORMS_PASSES_H_
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/Pass.h"
namespace mlir {
@@ -14,9 +15,9 @@
namespace IREE {
namespace LinalgExt {
-std::unique_ptr<OperationPass<FuncOp>> createTiledOpInterfaceTilingPass();
+std::unique_ptr<OperationPass<func::FuncOp>> createTiledOpInterfaceTilingPass();
-std::unique_ptr<OperationPass<FuncOp>> createLinalgExtToLoopsPass();
+std::unique_ptr<OperationPass<func::FuncOp>> createLinalgExtToLoopsPass();
std::unique_ptr<OperationPass<>> createPadContractionToBlockSizePass();
diff --git a/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/Passes/Passes.td b/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/Passes/Passes.td
index 54a0484..95ae571 100644
--- a/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/Passes/Passes.td
+++ b/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/Passes/Passes.td
@@ -10,13 +10,13 @@
include "mlir/Pass/PassBase.td"
def LinalgExtToLoops :
- Pass<"iree-linalg-ext-to-loops", "FuncOp"> {
+ Pass<"iree-linalg-ext-to-loops", "func::FuncOp"> {
let summary = "Convert LinalgExt ops to loops and Linalg ops.";
let constructor = "mlir::iree_compiler::IREE::LinalgExt::createLinalgExtToLoopsPass()";
}
def TiledOpInterfaceTiling :
- Pass<"iree-linalg-ext-tile", "FuncOp"> {
+ Pass<"iree-linalg-ext-tile", "func::FuncOp"> {
let summary = "Test pass for tiling using TiledOpInterface";
let constructor = "mlir::iree_compiler::IREE::LinalgExt::createTiledOpInterfaceTilingPass()";
}
diff --git a/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgTransform/LinalgTransformOps.h b/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgTransform/LinalgTransformOps.h
index 6eda492..7683ae0 100644
--- a/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgTransform/LinalgTransformOps.h
+++ b/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgTransform/LinalgTransformOps.h
@@ -11,6 +11,7 @@
#include "iree-dialects/Dialect/LinalgExt/IR/LinalgExtOps.h"
#include "iree-dialects/Dialect/LinalgTransform/TrackingListener.h"
#include "iree-dialects/Dialect/LinalgTransform/TransformOpInterface.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/PDL/IR/PDLTypes.h"
#include "mlir/IR/BuiltinAttributes.h"
diff --git a/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/PyDM/IR/PyDMOps.h b/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/PyDM/IR/PyDMOps.h
index 905f3a3..c038c0b 100644
--- a/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/PyDM/IR/PyDMOps.h
+++ b/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/PyDM/IR/PyDMOps.h
@@ -12,10 +12,12 @@
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Dialect.h"
+#include "mlir/IR/FunctionInterfaces.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/SymbolTable.h"
+#include "mlir/Interfaces/CallInterfaces.h"
#include "mlir/Interfaces/ControlFlowInterfaces.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
diff --git a/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/PyDM/IR/PyDMOps.td b/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/PyDM/IR/PyDMOps.td
index bc5b181..9b4aca3 100644
--- a/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/PyDM/IR/PyDMOps.td
+++ b/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/PyDM/IR/PyDMOps.td
@@ -124,7 +124,7 @@
}];
let arguments = (ins SymbolNameAttr:$sym_name,
- TypeAttr:$type,
+ TypeAttr:$function_type,
OptionalAttr<StrArrayAttr>:$arg_names,
OptionalAttr<StrArrayAttr>:$free_vars,
OptionalAttr<StrArrayAttr>:$cell_vars,
@@ -143,21 +143,21 @@
}
/// Returns the type of this function.
- FunctionType getType() {
+ FunctionType getFunctionType() {
return getOperation()->getAttrOfType<TypeAttr>(getTypeAttrName())
.getValue()
.cast<FunctionType>();
}
/// Returns the argument types of this function.
- ArrayRef<Type> getArgumentTypes() { return getType().getInputs(); }
+ ArrayRef<Type> getArgumentTypes() { return getFunctionType().getInputs(); }
/// Returns the result types of this function.
- ArrayRef<Type> getResultTypes() { return getType().getResults(); }
+ ArrayRef<Type> getResultTypes() { return getFunctionType().getResults(); }
/// Returns the python return type of the function (second return type).
Type getPyReturnType() {
- return getType().getResult(1);
+ return getFunctionType().getResult(1);
}
/// Hook for Trait::FunctionLike, called after verifying that the 'type'
@@ -167,7 +167,7 @@
Region *getCallableRegion() { return &body(); }
ArrayRef<Type> getCallableResults() {
- return getType().getResults();
+ return getFunctionType().getResults();
}
/// Defines SymbolOpInterface::isDeclaration().
@@ -177,8 +177,8 @@
}];
let builders = [
- OpBuilder<(ins "StringAttr":$name, "FunctionType":$type), [{
- build($_builder, $_state, name, TypeAttr::get(type),
+ OpBuilder<(ins "StringAttr":$name, "FunctionType":$function_type), [{
+ build($_builder, $_state, name, TypeAttr::get(function_type),
nullptr, nullptr, nullptr, nullptr);
}]>
];
diff --git a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/IR/TiledOpInterface.cpp b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/IR/TiledOpInterface.cpp
index e509489..604e498 100644
--- a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/IR/TiledOpInterface.cpp
+++ b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/IR/TiledOpInterface.cpp
@@ -305,9 +305,12 @@
DialectRegistry &registry) {
LLVM_DEBUG(
{ llvm::dbgs() << "Adding external models of tiled op interface\n"; });
- registry
- .addOpInterface<tensor::ExtractSliceOp, ExtractSliceTiledOpInterface>();
- registry.addOpInterface<tensor::InsertSliceOp, InsertSliceTiledOpInterface>();
+
+ registry.addExtension(+[](MLIRContext *ctx, tensor::TensorDialect *dialect) {
+ tensor::ExtractSliceOp::attachInterface<ExtractSliceTiledOpInterface>(*ctx);
+ tensor::InsertSliceOp::attachInterface<InsertSliceTiledOpInterface>(*ctx);
+ });
+
// TODO(ravishankarm): Needs custom PadTiledOpInterface or equiv.
// registry.addOpInterface<tensor::PadOp,
// ForwardToTilingInterface<tensor::PadOp>>();
diff --git a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Transforms/LinalgExtBufferization.cpp b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Transforms/LinalgExtBufferization.cpp
index 2b8f8ec..9b3c2fb 100644
--- a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Transforms/LinalgExtBufferization.cpp
+++ b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Transforms/LinalgExtBufferization.cpp
@@ -6,11 +6,11 @@
#include "iree-dialects/Dialect/LinalgExt/LinalgExtBufferization.h"
-#include <mlir/IR/BuiltinOps.h>
-
+#include "iree-dialects/Dialect/LinalgExt/IR/LinalgExtDialect.h"
#include "iree-dialects/Dialect/LinalgExt/IR/LinalgExtOps.h"
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
+#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/PatternMatch.h"
using namespace mlir;
@@ -22,7 +22,6 @@
using bufferization::BufferRelation;
using bufferization::getMemRefType;
using bufferization::replaceOpWithBufferizedValues;
-using bufferization::replaceOpWithNewBufferizedOp;
using tensor::ExtractSliceOp;
/// Return the destinations that an InParallelOp is inserting into. One per
@@ -341,9 +340,12 @@
void mlir::iree_compiler::IREE::LinalgExt::
registerBufferizableOpInterfaceExternalModels(DialectRegistry &registry) {
- registry.addOpInterface<InParallelOp, InParallelOpInterface>();
- registry
- .addOpInterface<PerformConcurrentlyOp, PerformConcurrentlyOpInterface>();
- registry
- .addOpInterface<ParallelInsertSliceOp, ParallelInsertSliceOpInterface>();
+ registry.addExtension(
+ +[](MLIRContext *ctx, LinalgExt::IREELinalgExtDialect *dialect) {
+ InParallelOp::attachInterface<InParallelOpInterface>(*ctx);
+ PerformConcurrentlyOp::attachInterface<PerformConcurrentlyOpInterface>(
+ *ctx);
+ ParallelInsertSliceOp::attachInterface<ParallelInsertSliceOpInterface>(
+ *ctx);
+ });
}
diff --git a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Transforms/TilingExternalModels.cpp b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Transforms/TilingExternalModels.cpp
index 174d4ff..0758d7f 100644
--- a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Transforms/TilingExternalModels.cpp
+++ b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Transforms/TilingExternalModels.cpp
@@ -157,23 +157,25 @@
} // namespace
template <typename OpType>
-void registerOne(DialectRegistry &registry) {
- registry.addOpInterface<OpType, LinalgOpTilingInterface<OpType>>();
+void registerOne(MLIRContext *ctx) {
+ OpType::template attachInterface<LinalgOpTilingInterface<OpType>>(*ctx);
}
/// Variadic helper function.
template <typename... OpTypes>
-void registerAll(DialectRegistry &registry) {
+void registerAll(MLIRContext *ctx) {
// FIXME: In c++17 this can be simplified by using 'fold expressions'.
- (void)std::initializer_list<int>{0, (registerOne<OpTypes>(registry), 0)...};
+ (void)std::initializer_list<int>{0, (registerOne<OpTypes>(ctx), 0)...};
}
#define GET_OP_LIST
void mlir::iree_compiler::IREE::LinalgExt::
registerTilingInterfaceExternalModels(DialectRegistry &registry) {
- registerOne<linalg::GenericOp>(registry);
- registerAll<
+ registry.addExtension(+[](MLIRContext *ctx, linalg::LinalgDialect *dialect) {
+ registerOne<linalg::GenericOp>(ctx);
+ registerAll<
#include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
- >(registry);
+ >(ctx);
+ });
}
diff --git a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgTransform/IR/LinalgTransformOps.cpp b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgTransform/IR/LinalgTransformOps.cpp
index e28dcd3..03d39ac 100644
--- a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgTransform/IR/LinalgTransformOps.cpp
+++ b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgTransform/IR/LinalgTransformOps.cpp
@@ -31,6 +31,7 @@
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/Transforms/Bufferize.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/Linalg/ComprehensiveBufferize/ModuleBufferization.h"
#include "mlir/Dialect/Linalg/Passes.h"
@@ -581,7 +582,7 @@
// Perform buffer-level hoistings.
state.getTopLevel()->walk(
- [&](FuncOp funcOp) { hoistRedundantVectorTransfers(funcOp); });
+ [&](func::FuncOp funcOp) { hoistRedundantVectorTransfers(funcOp); });
return success();
}
@@ -597,8 +598,8 @@
// the end. Keep module-level for now.
PassManager pm(getContext());
- pm.addNestedPass<FuncOp>(createConvertVectorToSCFPass());
- pm.addNestedPass<FuncOp>(createConvertLinalgToLoopsPass());
+ pm.addNestedPass<func::FuncOp>(createConvertVectorToSCFPass());
+ pm.addNestedPass<func::FuncOp>(createConvertLinalgToLoopsPass());
if (enable_async()) {
pm.addPass(createAsyncToAsyncRuntimePass());
pm.addPass(createAsyncRuntimeRefCountingPass());
@@ -618,7 +619,7 @@
.enableAMX(enable_amx())
.enableX86Vector(enable_x86vector())));
// clang-format on
- pm.addNestedPass<FuncOp>(createConvertMathToLLVMPass());
+ pm.addNestedPass<func::FuncOp>(createConvertMathToLLVMPass());
pm.addPass(createMemRefToLLVMPass());
if (enable_async())
pm.addPass(createConvertAsyncToLLVMPass());
@@ -631,7 +632,9 @@
// FIXME: this is a terrible hack!
state.getTopLevel()->walk([](LLVM::LLVMFuncOp funcOp) {
for (int64_t i = 0; i < funcOp.getNumArguments(); ++i) {
- if (!funcOp.getType().getParamType(i).isa<LLVM::LLVMPointerType>())
+ if (!funcOp.getFunctionType()
+ .getParamType(i)
+ .isa<LLVM::LLVMPointerType>())
continue;
funcOp.setArgAttr(i, "llvm.noalias", UnitAttr::get(funcOp.getContext()));
}
@@ -760,15 +763,15 @@
return executeRegionOp;
}
-static FailureOr<FuncOp> outlineLoop(scf::ForOp loop, StringRef funcName,
- transform::TransformState &state) {
+static FailureOr<func::FuncOp> outlineLoop(scf::ForOp loop, StringRef funcName,
+ transform::TransformState &state) {
PatternRewriterListener rewriter(loop->getContext());
auto &listener = state.getExtension<TrackingListener>();
rewriter.addListener(&listener);
Location loc = loop.getLoc();
scf::ExecuteRegionOp exec = outlineInExecuteRegion(rewriter, loop);
assert(exec && "failed to produce execute_region");
- FailureOr<FuncOp> outlined =
+ FailureOr<func::FuncOp> outlined =
outlineSingleBlockRegion(rewriter, loc, exec.getRegion(), funcName);
if (failed(listener.checkErrorState()))
return failure();
@@ -781,7 +784,7 @@
SmallVector<Operation *> resultVector;
auto res =
applyTransformToEach(state.getPayloadOps(target()), resultVector,
- [&](scf::ForOp loop) -> FailureOr<FuncOp> {
+ [&](scf::ForOp loop) -> FailureOr<func::FuncOp> {
return outlineLoop(loop, func_name(), state);
});
if (failed(res))
diff --git a/llvm-external-projects/iree-dialects/lib/Dialect/PyDM/IR/PyDMOps.cpp b/llvm-external-projects/iree-dialects/lib/Dialect/PyDM/IR/PyDMOps.cpp
index b281874..f4ad03a 100644
--- a/llvm-external-projects/iree-dialects/lib/Dialect/PyDM/IR/PyDMOps.cpp
+++ b/llvm-external-projects/iree-dialects/lib/Dialect/PyDM/IR/PyDMOps.cpp
@@ -567,9 +567,7 @@
}
void PyFuncOp::print(OpAsmPrinter &p) {
- FunctionType fnType = getType();
- function_interface_impl::printFunctionOp(
- p, *this, fnType.getInputs(), /*isVariadic=*/false, fnType.getResults());
+ function_interface_impl::printFunctionOp(p, *this, /*isVariadic=*/false);
}
//===----------------------------------------------------------------------===//
@@ -764,7 +762,7 @@
<< "' does not reference a valid function";
// Verify that the operand and result types match the callee.
- auto fnType = fn.getType();
+ auto fnType = fn.getFunctionType();
if (fnType.getNumInputs() != getNumOperands())
return emitOpError("incorrect number of operands for callee");
diff --git a/llvm-external-projects/iree-dialects/lib/Dialect/PyDM/Transforms/Optimize/FixateWeakNumeric.cpp b/llvm-external-projects/iree-dialects/lib/Dialect/PyDM/Transforms/Optimize/FixateWeakNumeric.cpp
index b6e5b51..5d4be33 100644
--- a/llvm-external-projects/iree-dialects/lib/Dialect/PyDM/Transforms/Optimize/FixateWeakNumeric.cpp
+++ b/llvm-external-projects/iree-dialects/lib/Dialect/PyDM/Transforms/Optimize/FixateWeakNumeric.cpp
@@ -41,7 +41,7 @@
// Special cases for operations.
if (auto funcOp = llvm::dyn_cast<PYDM::FuncOp>(op)) {
- FunctionType existingFt = funcOp.getType();
+ FunctionType existingFt = funcOp.getFunctionType();
FunctionType newFt = convertFunctionType(existingFt);
if (newFt != existingFt) {
funcOp.setType(newFt);
diff --git a/llvm-external-projects/iree-dialects/lib/Dialect/PyDM/Transforms/ToIREE/LoweringPatterns.cpp b/llvm-external-projects/iree-dialects/lib/Dialect/PyDM/Transforms/ToIREE/LoweringPatterns.cpp
index 9d92711..2e14e37 100644
--- a/llvm-external-projects/iree-dialects/lib/Dialect/PyDM/Transforms/ToIREE/LoweringPatterns.cpp
+++ b/llvm-external-projects/iree-dialects/lib/Dialect/PyDM/Transforms/ToIREE/LoweringPatterns.cpp
@@ -637,7 +637,7 @@
LogicalResult
matchAndRewrite(PYDM::FuncOp srcOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
- FunctionType srcFuncType = srcOp.getType();
+ FunctionType srcFuncType = srcOp.getFunctionType();
TypeConverter::SignatureConversion signatureConversion(
srcOp.getNumArguments());
@@ -839,7 +839,7 @@
auto parentFunc = srcOp->getParentOfType<mlir::FuncOp>();
if (!parentFunc)
return rewriter.notifyMatchFailure(srcOp, "not contained by a func");
- Type convertedReturnType = parentFunc.getType().getResult(1);
+ Type convertedReturnType = parentFunc.getFunctionType().getResult(1);
// Split the entry block.
Block *entryBlock = rewriter.getInsertionBlock();
diff --git a/llvm-external-projects/iree-dialects/python/iree/compiler/dialects/_iree_pydm_ops_ext.py b/llvm-external-projects/iree-dialects/python/iree/compiler/dialects/_iree_pydm_ops_ext.py
index 5155972..7fc8653 100644
--- a/llvm-external-projects/iree-dialects/python/iree/compiler/dialects/_iree_pydm_ops_ext.py
+++ b/llvm-external-projects/iree-dialects/python/iree/compiler/dialects/_iree_pydm_ops_ext.py
@@ -18,7 +18,7 @@
@property
def type(self):
- return ir.FunctionType(ir.TypeAttr(self.attributes["type"]).value)
+ return ir.FunctionType(ir.TypeAttr(self.attributes["function_type"]).value)
@property
def py_return_type(self) -> ir.Type:
diff --git a/llvm-external-projects/iree-dialects/python/iree/compiler/dialects/iree_pydm/importer/importer.py b/llvm-external-projects/iree-dialects/python/iree/compiler/dialects/iree_pydm/importer/importer.py
index 68b1227..fae6591 100644
--- a/llvm-external-projects/iree-dialects/python/iree/compiler/dialects/iree_pydm/importer/importer.py
+++ b/llvm-external-projects/iree-dialects/python/iree/compiler/dialects/iree_pydm/importer/importer.py
@@ -85,7 +85,7 @@
context=ic.context)
f_op = d.FuncOp(
ir.StringAttr.get(symbol),
- type=ir.TypeAttr.get(ir_f_type),
+ function_type=ir.TypeAttr.get(ir_f_type),
arg_names=f_arg_names,
free_vars=f_var_names,
cell_vars=ir.ArrayAttr.get([]),
diff --git a/llvm-external-projects/iree-dialects/test/Transforms/test-listener-cse.mlir b/llvm-external-projects/iree-dialects/test/Transforms/test-listener-cse.mlir
index 13d2994..ebfc348 100644
--- a/llvm-external-projects/iree-dialects/test/Transforms/test-listener-cse.mlir
+++ b/llvm-external-projects/iree-dialects/test/Transforms/test-listener-cse.mlir
@@ -233,7 +233,7 @@
%0 = arith.constant 1 : i32
// CHECK-NEXT: @nested_func
- builtin.func @nested_func() {
+ func.func @nested_func() {
// CHECK-NEXT: arith.constant 1
%foo = arith.constant 1 : i32
"foo.yield"(%foo) : (i32) -> ()
diff --git a/third_party/llvm-project b/third_party/llvm-project
index e9c9ee9..4bbcdb1 160000
--- a/third_party/llvm-project
+++ b/third_party/llvm-project
@@ -1 +1 @@
-Subproject commit e9c9ee9fe694067ee96643d05d6ac378349386bb
+Subproject commit 4bbcdb1bf868b9288329fa501d498761abcaa92c
diff --git a/third_party/mlir-hlo b/third_party/mlir-hlo
index 57288f1..467cd37 160000
--- a/third_party/mlir-hlo
+++ b/third_party/mlir-hlo
@@ -1 +1 @@
-Subproject commit 57288f12595a2ee0488806672a42da59b1e56e13
+Subproject commit 467cd37703dc0c4195ce6351617ef320bb60e927