Integrate llvm-project at 72142fbac496 and bump dependencies. (#10322)
* Reset third_party/llvm-project:
72142fbac496a66769e16c80b076216d7c449ab2 (2022-09-07 16:43:22 +0200):
[clangd] Fix hover crashing on integral or enumeral casts
* mlir-hlo: 3fe280545e6c24020fd2786a85896a48d0df9d97
* tensorflow: dc72187bcc9336557a6a0420b9bd2b31dfed612b
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/SPIRVTile.cpp b/compiler/src/iree/compiler/Codegen/SPIRV/SPIRVTile.cpp
index d877182..0595d82 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/SPIRVTile.cpp
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/SPIRVTile.cpp
@@ -126,7 +126,7 @@
// affine_map<(d0, s0) -> (d0 - s0 + 4)>(%v, %v).
// Due to the restrictions over dimensions and symbols, the above won't
// simplify. Try to change dimensions for symbols for such cases.
- if (!cstExpr && llvm::is_splat(valueSizes)) {
+ if (!cstExpr && llvm::all_equal(valueSizes)) {
int numDims = map.getNumDims();
int numSyms = map.getNumSymbols();
DenseMap<AffineExpr, AffineExpr> dimToSymMap;
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/SPIRVVectorToCooperativeOps.cpp b/compiler/src/iree/compiler/Codegen/SPIRV/SPIRVVectorToCooperativeOps.cpp
index 0ebfd29..6b51c7a 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/SPIRVVectorToCooperativeOps.cpp
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/SPIRVVectorToCooperativeOps.cpp
@@ -90,7 +90,7 @@
Value bufferPtr = spirv::getElementPtr(
*getTypeConverter<SPIRVTypeConverter>(), memrefType,
adaptor.getSource(), adaptor.getIndices(), loc, rewriter);
- rewriter.replaceOpWithNewOp<spirv::CooperativeMatrixLoadNVOp>(
+ rewriter.replaceOpWithNewOp<spirv::NVCooperativeMatrixLoadOp>(
op, matType, bufferPtr, strideValue, coloumnMajor,
spirv::MemoryAccessAttr());
return success();
@@ -102,7 +102,7 @@
Value bufferPtr = spirv::getElementPtr(
*getTypeConverter<SPIRVTypeConverter>(), memrefType,
adaptor.getSource(), adaptor.getIndices(), loc, rewriter);
- rewriter.create<spirv::CooperativeMatrixStoreNVOp>(
+ rewriter.create<spirv::NVCooperativeMatrixStoreOp>(
loc, bufferPtr, adaptor.getVector(), strideValue, coloumnMajor,
spirv::MemoryAccessAttr());
rewriter.eraseOp(op);
@@ -136,7 +136,7 @@
// by this point. Transpose can be handled by load/store operations.
if (!isRowMajorMatmul(contractOp.getIndexingMapsAttr())) return failure();
- rewriter.replaceOpWithNewOp<spirv::CooperativeMatrixMulAddNVOp>(
+ rewriter.replaceOpWithNewOp<spirv::NVCooperativeMatrixMulAddOp>(
contractOp, operands.getAcc().getType(), operands.getLhs(),
operands.getRhs(), operands.getAcc());
return success();
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/test/pipeline_matmul_cooperative_ops.mlir b/compiler/src/iree/compiler/Codegen/SPIRV/test/pipeline_matmul_cooperative_ops.mlir
index 77082fc..bf8ddf2 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/test/pipeline_matmul_cooperative_ops.mlir
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/test/pipeline_matmul_cooperative_ops.mlir
@@ -78,11 +78,11 @@
// CHECK-DAG: %[[COL_MAJOR:.+]] = spv.Constant false
// CHECK-DAG: %[[C128:.+]] = spv.Constant 128 : i32
// CHECK-DAG: %[[C1024:.+]] = spv.Constant 1024 : i32
-// CHECK-COUNT-8: spv.CooperativeMatrixLoadNV %{{.+}}, %[[C128]], %[[COL_MAJOR]]
-// CHECK-COUNT-8: spv.CooperativeMatrixLoadNV %{{.+}}, %[[C1024]], %[[COL_MAJOR]]
-// CHECK-COUNT-8: spv.CooperativeMatrixMulAddNV
-// CHECK: %[[ELEMENTWISE1:.+]] = spv.CooperativeMatrixLoadNV %{{.+}}, %[[C1024]], %[[COL_MAJOR]]
-// CHECK: %[[ELEMENTWISE2:.+]] = spv.CooperativeMatrixLoadNV %{{.+}}, %[[C1024]], %[[COL_MAJOR]]
+// CHECK-COUNT-8: spv.NV.CooperativeMatrixLoad %{{.+}}, %[[C128]], %[[COL_MAJOR]]
+// CHECK-COUNT-8: spv.NV.CooperativeMatrixLoad %{{.+}}, %[[C1024]], %[[COL_MAJOR]]
+// CHECK-COUNT-8: spv.NV.CooperativeMatrixMulAdd
+// CHECK: %[[ELEMENTWISE1:.+]] = spv.NV.CooperativeMatrixLoad %{{.+}}, %[[C1024]], %[[COL_MAJOR]]
+// CHECK: %[[ELEMENTWISE2:.+]] = spv.NV.CooperativeMatrixLoad %{{.+}}, %[[C1024]], %[[COL_MAJOR]]
// CHECK: %[[DIV:.+]] = spv.FDiv %{{.+}}, %[[ELEMENTWISE1]] : !spv.coopmatrix<16x16xf16, Subgroup>
// CHECK: %[[SUB:.+]] = spv.FSub %[[DIV]], %[[ELEMENTWISE2]] : !spv.coopmatrix<16x16xf16, Subgroup>
-// CHECK: spv.CooperativeMatrixStoreNV %{{.+}}, %[[SUB]], %[[C1024]], %[[COL_MAJOR]]
+// CHECK: spv.NV.CooperativeMatrixStore %{{.+}}, %[[SUB]], %[[C1024]], %[[COL_MAJOR]]
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/test/vector_to_cooperative_matrix.mlir b/compiler/src/iree/compiler/Codegen/SPIRV/test/vector_to_cooperative_matrix.mlir
index 914333f..e7ee378 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/test/vector_to_cooperative_matrix.mlir
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/test/vector_to_cooperative_matrix.mlir
@@ -24,18 +24,18 @@
// CHECK: %[[C32:.+]] = spv.Constant 32 : i32
// CHECK: %[[COL_MAJOR:.+]] = spv.Constant false
// CHECK: %[[ADDR0:.+]] = spv.AccessChain %[[ARG0_CAST]]
- // CHECK: %[[A:.+]] = spv.CooperativeMatrixLoadNV %[[ADDR0]], %[[C32]], %[[COL_MAJOR]]
+ // CHECK: %[[A:.+]] = spv.NV.CooperativeMatrixLoad %[[ADDR0]], %[[C32]], %[[COL_MAJOR]]
%0 = vector.transfer_read %arg0[%c0, %c0], %cst_i8 : memref<8x32xi8>, vector<8x32xi8>
// CHECK: %[[C8:.+]] = spv.Constant 8 : i32
// CHECK: %[[ADDR1:.+]] = spv.AccessChain %[[ARG1_CAST]]
- // CHECK: %[[B:.+]] = spv.CooperativeMatrixLoadNV %[[ADDR1]], %[[C8]], %[[COL_MAJOR]]
+ // CHECK: %[[B:.+]] = spv.NV.CooperativeMatrixLoad %[[ADDR1]], %[[C8]], %[[COL_MAJOR]]
%1 = vector.transfer_read %arg1[%c0, %c0], %cst_i8 : memref<32x8xi8>, vector<32x8xi8>
// CHECK: %[[ADDR2:.+]] = spv.AccessChain %[[ARG2_CAST]]
- // CHECK: %[[C:.+]] = spv.CooperativeMatrixLoadNV %[[ADDR2]], %[[C8]], %[[COL_MAJOR]]
+ // CHECK: %[[C:.+]] = spv.NV.CooperativeMatrixLoad %[[ADDR2]], %[[C8]], %[[COL_MAJOR]]
%2 = vector.transfer_read %arg2[%c0, %c0], %cst : memref<8x8xi32>, vector<8x8xi32>
- // CHECK: %[[R:.+]] = spv.CooperativeMatrixMulAddNV %[[A]], %[[B]], %[[C]]
+ // CHECK: %[[R:.+]] = spv.NV.CooperativeMatrixMulAdd %[[A]], %[[B]], %[[C]]
%3 = vector.contract {indexing_maps = [#map1, #map2, #map3], iterator_types = ["parallel", "parallel", "reduction"]} %0, %1, %2 : vector<8x32xi8>, vector<32x8xi8> into vector<8x8xi32>
- // CHECK: spv.CooperativeMatrixStoreNV %[[ADDR2]], %[[R]], %[[C8]], %[[COL_MAJOR]]
+ // CHECK: spv.NV.CooperativeMatrixStore %[[ADDR2]], %[[R]], %[[C8]], %[[COL_MAJOR]]
vector.transfer_write %3, %arg2[%c0, %c0] : vector<8x8xi32>, memref<8x8xi32>
return
}
@@ -64,25 +64,25 @@
%c0 = arith.constant 0 : index
%c0_i32 = arith.constant 0 : i32
%c0_i8 = arith.constant 0 : i8
- // CHECK: %[[C:.+]] = spv.CooperativeMatrixLoadNV
+ // CHECK: %[[C:.+]] = spv.NV.CooperativeMatrixLoad
%4 = vector.transfer_read %arg2[%c0, %c0], %c0_i32 {in_bounds = [true, true]} : memref<4096x4096xi32>, vector<16x16xi32>
// CHECK: %[[INIT:.+]] = builtin.unrealized_conversion_cast %[[C]] : !spv.coopmatrix<16x16xi32, Subgroup> to vector<16x16xi32>
// CHECK: %[[LOOP:.+]] = scf.for
// CHECK-SAME: iter_args(%[[ARG:.+]] = %[[INIT]])
// CHECK: %[[C1:.+]] = builtin.unrealized_conversion_cast %[[ARG]] : vector<16x16xi32> to !spv.coopmatrix<16x16xi32, Subgroup>
%5 = scf.for %arg3 = %c0 to %c4096 step %c32 iter_args(%arg4 = %4) -> (vector<16x16xi32>) {
- // CHECK: %[[A:.+]] = spv.CooperativeMatrixLoadNV
+ // CHECK: %[[A:.+]] = spv.NV.CooperativeMatrixLoad
%6 = vector.transfer_read %arg0[%c0, %arg3], %c0_i8 {in_bounds = [true, true]} : memref<4096x4096xi8>, vector<16x32xi8>
- // CHECK: %[[B:.+]] = spv.CooperativeMatrixLoadNV
+ // CHECK: %[[B:.+]] = spv.NV.CooperativeMatrixLoad
%7 = vector.transfer_read %arg1[%arg3, %c0], %c0_i8 {in_bounds = [true, true]} : memref<4096x4096xi8>, vector<32x16xi8>
- // CHECK: %[[R:.+]] = spv.CooperativeMatrixMulAddNV %[[A]], %[[B]], %[[C1]]
+ // CHECK: %[[R:.+]] = spv.NV.CooperativeMatrixMulAdd %[[A]], %[[B]], %[[C1]]
%8 = vector.contract {indexing_maps = [#map1, #map2, #map3], iterator_types = ["parallel", "parallel", "reduction"]} %6, %7, %arg4 : vector<16x32xi8>, vector<32x16xi8> into vector<16x16xi32>
// CHECK: %[[YIELD:.+]] = builtin.unrealized_conversion_cast %[[R]] : !spv.coopmatrix<16x16xi32, Subgroup> to vector<16x16xi32>
// CHECK: scf.yield %[[YIELD]]
scf.yield %8 : vector<16x16xi32>
}
// CHECK: %[[ACCv:.+]] = builtin.unrealized_conversion_cast %[[LOOP]] : vector<16x16xi32> to !spv.coopmatrix<16x16xi32, Subgroup>
- // CHECK: spv.CooperativeMatrixStoreNV %{{.*}}, %[[ACCv]], %{{.*}}, %{{.*}}
+ // CHECK: spv.NV.CooperativeMatrixStore %{{.*}}, %[[ACCv]], %{{.*}}, %{{.*}}
vector.transfer_write %5, %arg2[%c0, %c0] : vector<16x16xi32>, memref<4096x4096xi32>
return
}
@@ -109,19 +109,19 @@
%c4096 = arith.constant 4096 : index
%c0 = arith.constant 0 : index
%cst = arith.constant dense<0> : vector<4xi32>
- // CHECK: %[[C:.+]] = spv.CooperativeMatrixLoadNV
+ // CHECK: %[[C:.+]] = spv.NV.CooperativeMatrixLoad
%4 = vector.transfer_read %arg2[%c0, %c0], %cst : memref<4096x1024xvector<4xi32>>, vector<16x16xi32>
// CHECK: scf.for
%5 = scf.for %arg3 = %c0 to %c4096 step %c32 iter_args(%arg4 = %4) -> (vector<16x16xi32>) {
- // CHECK: %[[A:.+]] = spv.CooperativeMatrixLoadNV
+ // CHECK: %[[A:.+]] = spv.NV.CooperativeMatrixLoad
%6 = vector.transfer_read %arg0[%c0, %arg3], %cst : memref<4096x256xvector<4xi32>>, vector<16x32xi8>
- // CHECK: %[[B:.+]] = spv.CooperativeMatrixLoadNV
+ // CHECK: %[[B:.+]] = spv.NV.CooperativeMatrixLoad
%7 = vector.transfer_read %arg1[%arg3, %c0], %cst : memref<4096x256xvector<4xi32>>, vector<32x16xi8>
- // CHECK: %[[R:.+]] = spv.CooperativeMatrixMulAddNV %[[A]], %[[B]], %{{.*}}
+ // CHECK: %[[R:.+]] = spv.NV.CooperativeMatrixMulAdd %[[A]], %[[B]], %{{.*}}
%8 = vector.contract {indexing_maps = [#map1, #map2, #map3], iterator_types = ["parallel", "parallel", "reduction"]} %6, %7, %arg4 : vector<16x32xi8>, vector<32x16xi8> into vector<16x16xi32>
scf.yield %8 : vector<16x16xi32>
}
- // CHECK: spv.CooperativeMatrixStoreNV
+ // CHECK: spv.NV.CooperativeMatrixStore
vector.transfer_write %5, %arg2[%c0, %c0] : vector<16x16xi32>, memref<4096x1024xvector<4xi32>>
return
}
diff --git a/compiler/src/iree/compiler/Dialect/HAL/Target/LLVM/LLVMIRPasses.cpp b/compiler/src/iree/compiler/Dialect/HAL/Target/LLVM/LLVMIRPasses.cpp
index 41b563c..9c83861 100644
--- a/compiler/src/iree/compiler/Dialect/HAL/Target/LLVM/LLVMIRPasses.cpp
+++ b/compiler/src/iree/compiler/Dialect/HAL/Target/LLVM/LLVMIRPasses.cpp
@@ -77,7 +77,7 @@
llvm::AddressSanitizerOptions Opts;
bool moduleUseAfterScope = false;
bool useOdrIndicator = false;
- modulePassManager.addPass(llvm::ModuleAddressSanitizerPass(
+ modulePassManager.addPass(llvm::AddressSanitizerPass(
Opts, moduleUseAfterScope, useOdrIndicator));
});
} break;
diff --git a/compiler/src/iree/compiler/Dialect/HAL/Transforms/ElideRedundantCommands.cpp b/compiler/src/iree/compiler/Dialect/HAL/Transforms/ElideRedundantCommands.cpp
index 2317f66..9589782 100644
--- a/compiler/src/iree/compiler/Dialect/HAL/Transforms/ElideRedundantCommands.cpp
+++ b/compiler/src/iree/compiler/Dialect/HAL/Transforms/ElideRedundantCommands.cpp
@@ -87,6 +87,15 @@
} // namespace
+// XXX(#10329): The behavior of bitEnumContains was changed in
+// https://github.com/llvm/llvm-project/commit/839b436c93604e042f74050cf2adadd75f30e898
+// This is a workaround to keep the same behavior.
+inline static bool legacyBitEnumContains(
+ IREE::HAL::ExecutionStageBitfield bits,
+ IREE::HAL::ExecutionStageBitfield bit) {
+ return (static_cast<uint32_t>(bits) & static_cast<uint32_t>(bit)) != 0;
+}
+
static void processOp(IREE::HAL::CommandBufferExecutionBarrierOp op,
CommandBufferState &state) {
if (state.previousFullBarrier) {
@@ -98,14 +107,14 @@
// See if this is a full barrier. These are all we emit today so this simple
// analysis can remain simple by pattern matching.
- if (bitEnumContains(op.getSourceStageMask(),
- IREE::HAL::ExecutionStageBitfield::CommandRetire |
- IREE::HAL::ExecutionStageBitfield::Transfer |
- IREE::HAL::ExecutionStageBitfield::Dispatch) &&
- bitEnumContains(op.getTargetStageMask(),
- IREE::HAL::ExecutionStageBitfield::CommandRetire |
- IREE::HAL::ExecutionStageBitfield::Transfer |
- IREE::HAL::ExecutionStageBitfield::Dispatch)) {
+ if (legacyBitEnumContains(op.getSourceStageMask(),
+ IREE::HAL::ExecutionStageBitfield::CommandRetire |
+ IREE::HAL::ExecutionStageBitfield::Transfer |
+ IREE::HAL::ExecutionStageBitfield::Dispatch) &&
+ legacyBitEnumContains(op.getTargetStageMask(),
+ IREE::HAL::ExecutionStageBitfield::CommandRetire |
+ IREE::HAL::ExecutionStageBitfield::Transfer |
+ IREE::HAL::ExecutionStageBitfield::Dispatch)) {
state.previousFullBarrier = op;
} else {
state.previousFullBarrier = {};
diff --git a/compiler/src/iree/compiler/Dialect/Modules/Check/IR/CheckOps.cpp b/compiler/src/iree/compiler/Dialect/Modules/Check/IR/CheckOps.cpp
index 7184f8d..80e8bd5 100644
--- a/compiler/src/iree/compiler/Dialect/Modules/Check/IR/CheckOps.cpp
+++ b/compiler/src/iree/compiler/Dialect/Modules/Check/IR/CheckOps.cpp
@@ -23,8 +23,8 @@
using OpRewritePattern<SrcOp>::OpRewritePattern;
LogicalResult matchAndRewrite(SrcOp op,
PatternRewriter &rewriter) const override {
- auto rhs = rewriter.create<arith::ConstantOp>(op.getLoc(), op.value());
- rewriter.replaceOpWithNewOp<DstOp>(op, op.lhs(), rhs);
+ auto rhs = rewriter.create<arith::ConstantOp>(op.getLoc(), op.getValue());
+ rewriter.replaceOpWithNewOp<DstOp>(op, op.getLhs(), rhs);
return success();
}
};
diff --git a/compiler/src/iree/compiler/InputConversion/Common/IREEImportPublic.cpp b/compiler/src/iree/compiler/InputConversion/Common/IREEImportPublic.cpp
index f376db3..6933066 100644
--- a/compiler/src/iree/compiler/InputConversion/Common/IREEImportPublic.cpp
+++ b/compiler/src/iree/compiler/InputConversion/Common/IREEImportPublic.cpp
@@ -90,21 +90,22 @@
LogicalResult matchAndRewrite(
IREE::Input::BufferViewToTensorOp srcOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
- TensorType resultType = typeConverter->convertType(srcOp.target().getType())
- .dyn_cast_or_null<TensorType>();
+ TensorType resultType =
+ typeConverter->convertType(srcOp.getTarget().getType())
+ .dyn_cast_or_null<TensorType>();
if (!resultType) return failure();
- if (adaptor.target_dims().empty() && !resultType.hasStaticShape()) {
+ if (adaptor.getTargetDims().empty() && !resultType.hasStaticShape()) {
// For the input dialect, we allow ops that don't have their dims
// specified and we reify them here with the specific builder that does
// the work.
- rewriter.replaceOpWithNewOp<IREE::HAL::TensorImportOp>(srcOp, resultType,
- adaptor.source());
+ rewriter.replaceOpWithNewOp<IREE::HAL::TensorImportOp>(
+ srcOp, resultType, adaptor.getSource());
} else {
// Dynamic dims explicitly provided (or wrong, in which case the verifier
// will get it).
rewriter.replaceOpWithNewOp<IREE::HAL::TensorImportOp>(
- srcOp, resultType, adaptor.source(), TypeAttr::get(resultType),
- adaptor.target_dims());
+ srcOp, resultType, adaptor.getSource(), TypeAttr::get(resultType),
+ adaptor.getTargetDims());
}
return success();
}
@@ -117,21 +118,21 @@
LogicalResult matchAndRewrite(
IREE::Input::TensorToBufferViewOp srcOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
- Type resultType = typeConverter->convertType(srcOp.target().getType());
- TensorType sourceType = adaptor.source().getType().dyn_cast<TensorType>();
+ Type resultType = typeConverter->convertType(srcOp.getTarget().getType());
+ auto sourceType = adaptor.getSource().getType().dyn_cast<TensorType>();
if (!resultType || !sourceType) return failure();
- if (adaptor.source_dims().empty() && !sourceType.hasStaticShape()) {
+ if (adaptor.getSourceDims().empty() && !sourceType.hasStaticShape()) {
// For the input dialect, we allow ops that don't have their dims
// specified and we reify them here with the specific builder that does
// the work.
- rewriter.replaceOpWithNewOp<IREE::HAL::TensorExportOp>(srcOp, resultType,
- adaptor.source());
+ rewriter.replaceOpWithNewOp<IREE::HAL::TensorExportOp>(
+ srcOp, resultType, adaptor.getSource());
} else {
// Dynamic dims explicitly provided (or wrong, in which case the verifier
// will get it).
rewriter.replaceOpWithNewOp<IREE::HAL::TensorExportOp>(
- srcOp, resultType, adaptor.source(),
- TypeAttr::get(adaptor.source().getType()), adaptor.source_dims(),
+ srcOp, resultType, adaptor.getSource(),
+ TypeAttr::get(adaptor.getSource().getType()), adaptor.getSourceDims(),
/*target_storage=*/nullptr);
}
return success();
@@ -201,19 +202,19 @@
LogicalResult matchAndRewrite(
IREE::Input::GlobalOp srcOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
- Type newType = typeConverter->convertType(srcOp.type());
+ Type newType = typeConverter->convertType(srcOp.getType());
if (!newType) return failure();
auto globalOp = rewriter.replaceOpWithNewOp<IREE::Util::GlobalOp>(
- srcOp, srcOp.getName(), srcOp.is_mutable(), newType,
- srcOp.initial_value());
+ srcOp, srcOp.getName(), srcOp.getIsMutable(), newType,
+ srcOp.getInitialValue());
globalOp.setVisibility(srcOp.getVisibility());
- if (srcOp.initializer().has_value()) {
+ if (srcOp.getInitializer().has_value()) {
auto initializerOp =
rewriter.create<IREE::Util::InitializerOp>(srcOp.getLoc());
auto ip = rewriter.saveInsertionPoint();
rewriter.setInsertionPointToStart(initializerOp.addEntryBlock());
auto callOp = rewriter.create<mlir::func::CallOp>(
- srcOp.getLoc(), srcOp.initializerAttr(), TypeRange{newType});
+ srcOp.getLoc(), srcOp.getInitializerAttr(), TypeRange{newType});
rewriter.create<IREE::Util::GlobalStoreOp>(
srcOp.getLoc(), callOp.getResult(0), srcOp.getName());
rewriter.create<IREE::Util::InitializerReturnOp>(srcOp.getLoc());
diff --git a/compiler/src/iree/compiler/InputConversion/MHLO/ConvertMHLOToLinalgExt.cpp b/compiler/src/iree/compiler/InputConversion/MHLO/ConvertMHLOToLinalgExt.cpp
index 0b55d7e..b840a04 100644
--- a/compiler/src/iree/compiler/InputConversion/MHLO/ConvertMHLOToLinalgExt.cpp
+++ b/compiler/src/iree/compiler/InputConversion/MHLO/ConvertMHLOToLinalgExt.cpp
@@ -165,9 +165,9 @@
loc, resultTypes,
/*inputs=*/ValueRange{}, adaptor.getOperands(),
mhloSortOp.dimensionAttr());
- rewriter.inlineRegionBefore(mhloSortOp.comparator(), sortOp.region(),
- sortOp.region().begin());
- Region ®ion = sortOp.region();
+ rewriter.inlineRegionBefore(mhloSortOp.comparator(), sortOp.getRegion(),
+ sortOp.getRegion().begin());
+ Region ®ion = sortOp.getRegion();
Block &block = region.front();
TypeConverter::SignatureConversion signature_converter(
block.getNumArguments());
@@ -333,9 +333,9 @@
op.getLoc(), op->getResultTypes(), ValueRange{updates, indices},
ValueRange{original}, op.unique_indices());
- rewriter.inlineRegionBefore(op.update_computation(), scatterOp.region(),
- scatterOp.region().begin());
- Region ®ion = scatterOp.region();
+ rewriter.inlineRegionBefore(op.update_computation(), scatterOp.getRegion(),
+ scatterOp.getRegion().begin());
+ Region ®ion = scatterOp.getRegion();
TypeConverter::SignatureConversion signatureConverter(2);
Type argType = getElementTypeOrSelf(original.getType());
// mhlo.scatter ops takes:
@@ -585,7 +585,8 @@
// Define the region of TopK with a GT comparison
SmallVector<Type> types(2, valueElementType);
SmallVector<Location> locations(2, loc);
- Block *block = rewriter.createBlock(&topkOp.region(), {}, types, locations);
+ Block *block =
+ rewriter.createBlock(&topkOp.getRegion(), {}, types, locations);
{
OpBuilder::InsertionGuard guard(rewriter);
rewriter.setInsertionPointToStart(block);
diff --git a/integrations/tensorflow/WORKSPACE b/integrations/tensorflow/WORKSPACE
index 3b0eccd..5b843f24 100644
--- a/integrations/tensorflow/WORKSPACE
+++ b/integrations/tensorflow/WORKSPACE
@@ -7,7 +7,7 @@
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
-TENSORFLOW_COMMIT = "5ceb98acd49ba18cc49001e30670074a604378dd"
+TENSORFLOW_COMMIT = "dc72187bcc9336557a6a0420b9bd2b31dfed612b"
git_repository(
name = "org_tensorflow",
diff --git a/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgExt/IR/LinalgExtInterfaces.td b/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgExt/IR/LinalgExtInterfaces.td
index cc54399..8f85ad6 100644
--- a/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgExt/IR/LinalgExtInterfaces.td
+++ b/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgExt/IR/LinalgExtInterfaces.td
@@ -22,7 +22,7 @@
Return the input shape operands.
}],
/*retTy=*/"ValueRange",
- /*methodName=*/"inputs",
+ /*methodName=*/"getInputs",
/*args=*/(ins)
>,
// These special methods rely on `inputs` and `outputs` being defined by
@@ -36,7 +36,7 @@
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return $_op.inputs().size();
+ return $_op.getInputs().size();
}]
>,
// `outputs` must be defined by each op that wants to implement the
@@ -46,7 +46,7 @@
Return the output shape operands.
}],
/*retTy=*/"ValueRange",
- /*methodName=*/"outputs",
+ /*methodName=*/"getOutputs",
/*args=*/(ins)
>,
InterfaceMethod<
@@ -58,7 +58,7 @@
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return $_op.outputs().size();
+ return $_op.getOutputs().size();
}]
>,
InterfaceMethod<
diff --git a/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgExt/IR/LinalgExtOps.td b/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgExt/IR/LinalgExtOps.td
index 63b53d3..21dfdfe 100644
--- a/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgExt/IR/LinalgExtOps.td
+++ b/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgExt/IR/LinalgExtOps.td
@@ -36,7 +36,7 @@
let hasCustomAssemblyFormat = 1;
code extraLinalgExtOpClassDeclaration = [{
SmallVector<Value> getDestinationOperands(OpBuilder &b) {
- SmallVector<Value> dest(outputs().begin(), outputs().end());
+ SmallVector<Value> dest(getOutputs().begin(), getOutputs().end());
return dest;
}
@@ -180,7 +180,7 @@
}];
let extraClassDeclaration = extraLinalgExtOpClassDeclaration # [{
Value operand(int index) {
- return outputs()[index];
+ return getOutputs()[index];
}
ShapedType getOperandType(int index) {
return operand(index).getType().cast<ShapedType>();
@@ -226,9 +226,9 @@
(`:` type($results)^)?
}];
let extraClassDeclaration = extraLinalgExtOpClassDeclaration # [{
- Value getStage() { return inputs()[0]; }
- Value getReal() { return outputs()[0]; }
- Value getImag() { return outputs()[1]; }
+ Value getStage() { return getInputs()[0]; }
+ Value getReal() { return getOutputs()[0]; }
+ Value getImag() { return getOutputs()[1]; }
bool hasCoeff() { return getNumInputs() > 1; }
void generateScalarImplWithoutCoeffBuf(
OpBuilder & b, Location loc, ArrayRef<Value> operands, Value wholeSize);
@@ -236,11 +236,11 @@
ArrayRef<Value> operands);
Value getRealCoeff() {
if (!hasCoeff()) return Value();
- return inputs()[1];
+ return getInputs()[1];
}
Value getImagCoeff() {
if (!hasCoeff()) return Value();
- return inputs()[2];
+ return getInputs()[2];
}
ShapedType getOperandType() {
return getReal().getType().cast<ShapedType>();
@@ -356,7 +356,7 @@
}
SmallVector<int64_t> dims() {
SmallVector<int64_t> ret;
- for (const APInt& elem : dimensions()) {
+ for (const APInt& elem : getDimensions()) {
ret.push_back(elem.getLimitedValue());
}
return ret;
@@ -434,7 +434,7 @@
}
}];
}
-
+
//===----------------------------------------------------------------------===//
// Pure ops
//===----------------------------------------------------------------------===//
diff --git a/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgTransform/ScopedTransform.h b/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgTransform/ScopedTransform.h
index 8bb66b3..a0594fe 100644
--- a/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgTransform/ScopedTransform.h
+++ b/integrations/tensorflow/iree-dialects/include/iree-dialects/Dialect/LinalgTransform/ScopedTransform.h
@@ -18,7 +18,7 @@
template <typename TransformT>
auto scoped(Operation *target, TransformT &&transform) {
auto scope = wrapInScope(target);
- Operation &op = *scope.body().front().begin();
+ Operation &op = *scope.getBody().front().begin();
auto result = transform(scope, &op);
if (failed(unwrapScope(scope)) || failed(result))
return decltype(result)(failure());
diff --git a/integrations/tensorflow/iree-dialects/lib/Dialect/Input/InputOps.cpp b/integrations/tensorflow/iree-dialects/lib/Dialect/Input/InputOps.cpp
index 227c6b0..7558ee5 100644
--- a/integrations/tensorflow/iree-dialects/lib/Dialect/Input/InputOps.cpp
+++ b/integrations/tensorflow/iree-dialects/lib/Dialect/Input/InputOps.cpp
@@ -138,21 +138,21 @@
LogicalResult
GlobalLoadOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
auto globalOp =
- symbolTable.lookupNearestSymbolFrom<GlobalOp>(*this, globalAttr());
+ symbolTable.lookupNearestSymbolFrom<GlobalOp>(*this, getGlobalAttr());
if (!globalOp) {
- return emitOpError() << "undefined global: " << global();
+ return emitOpError() << "undefined global: " << getGlobal();
}
auto loadType = getResult().getType();
- if (!isGlobalTypeCompatible(globalOp.type(), loadType)) {
- return emitOpError() << "global type mismatch; global " << global()
- << " is " << globalOp.type() << " but load is "
+ if (!isGlobalTypeCompatible(globalOp.getType(), loadType)) {
+ return emitOpError() << "global type mismatch; global " << getGlobal()
+ << " is " << globalOp.getType() << " but load is "
<< loadType;
}
return success();
}
LogicalResult GlobalLoadIndirectOp::verify() {
- auto globalType = global().getType().cast<PtrType>().getTargetType();
+ auto globalType = getGlobal().getType().cast<PtrType>().getTargetType();
auto loadType = getResult().getType();
if (!isGlobalTypeCompatible(globalType, loadType)) {
return emitOpError() << "global type mismatch; global pointer is "
@@ -164,22 +164,22 @@
LogicalResult
GlobalStoreOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
auto globalOp =
- symbolTable.lookupNearestSymbolFrom<GlobalOp>(*this, globalAttr());
+ symbolTable.lookupNearestSymbolFrom<GlobalOp>(*this, getGlobalAttr());
if (!globalOp) {
- return emitOpError() << "undefined global: " << global();
+ return emitOpError() << "undefined global: " << getGlobal();
}
- auto storeType = value().getType();
- if (!isGlobalTypeCompatible(globalOp.type(), storeType)) {
- return emitOpError() << "global type mismatch; global " << global()
- << " is " << globalOp.type() << " but store is "
+ auto storeType = getValue().getType();
+ if (!isGlobalTypeCompatible(globalOp.getType(), storeType)) {
+ return emitOpError() << "global type mismatch; global " << getGlobal()
+ << " is " << globalOp.getType() << " but store is "
<< storeType;
}
return success();
}
LogicalResult GlobalStoreIndirectOp::verify() {
- auto globalType = global().getType().cast<PtrType>().getTargetType();
- auto storeType = value().getType();
+ auto globalType = getGlobal().getType().cast<PtrType>().getTargetType();
+ auto storeType = getValue().getType();
if (!isGlobalTypeCompatible(globalType, storeType)) {
return emitOpError() << "global type mismatch; global pointer is "
<< globalType << " but store is " << storeType;
diff --git a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtInterfaces.cpp b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtInterfaces.cpp
index 899c4fc..0478492 100644
--- a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtInterfaces.cpp
+++ b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtInterfaces.cpp
@@ -30,12 +30,12 @@
"expected inputs and outputs to be RankedTensorType or scalar");
}
- if (op->getNumResults() != linalgExtOp.outputs().size()) {
+ if (op->getNumResults() != linalgExtOp.getNumOutputs()) {
return linalgExtOp.emitOpError(
"expected number of outputs to be same as the number of results");
}
for (auto en : llvm::enumerate(op->getResultTypes())) {
- Type outputType = linalgExtOp.outputs()[en.index()].getType();
+ Type outputType = linalgExtOp.getOutputs()[en.index()].getType();
if (en.value() != outputType) {
return linalgExtOp.emitOpError("expected type of `outs` operand #")
<< en.index() << " " << outputType
@@ -68,7 +68,7 @@
LogicalResult LinalgExtOp::reifyResultShapes(
OpBuilder &b, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
Operation *op = getOperation();
- for (auto output : outputs()) {
+ for (auto output : getOutputs()) {
SmallVector<Value> dims;
Type outputType = output.getType();
if (auto rankedTensorType = outputType.dyn_cast<RankedTensorType>()) {
diff --git a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtOps.cpp b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtOps.cpp
index 2f670cb..3b1ca13 100644
--- a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtOps.cpp
+++ b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtOps.cpp
@@ -112,10 +112,10 @@
//===----------------------------------------------------------------------===//
LogicalResult ScatterOp::verify() {
Operation *op = getOperation();
- if (inputs().size() != 2) {
+ if (getInputs().size() != 2) {
return op->emitOpError("expected two input operands");
}
- if (outputs().size() != 1) {
+ if (getOutputs().size() != 1) {
return op->emitOpError("expected one output operand");
}
auto checkDimensionsMatch = [&](ShapedType t1, ShapedType t2, unsigned dim) {
@@ -188,7 +188,7 @@
}
}
- Region ®ion = this->region();
+ Region ®ion = this->getRegion();
Block *body = ®ion.front();
if (body->getNumArguments() != 2) {
return op->emitOpError("expected region to have two arguments");
@@ -228,7 +228,7 @@
SmallVector<StringRef> ScatterOp::getLoopIteratorTypes() {
SmallVector<StringRef> iteratorTypes(getUpdateType().getRank(),
getParallelIteratorTypeName());
- if (!unique_indices()) {
+ if (!getUniqueIndices()) {
iteratorTypes[0] = getReductionIteratorTypeName();
}
return iteratorTypes;
@@ -246,10 +246,11 @@
return ranges;
}
-SmallVector<Operation *> ScatterOp::getTiledImplementation(
- OpBuilder &builder, ValueRange outputs, ArrayRef<OpFoldResult> offsets,
- ArrayRef<OpFoldResult> sizes, bool /*tileDestOperands*/) {
- assert(outputs.size() >= 1 && offsets.size() >= 1 && sizes.size() >= 1);
+SmallVector<Operation *>
+ScatterOp::getTiledImplementation(OpBuilder &builder,
+ ArrayRef<OpFoldResult> offsets,
+ ArrayRef<OpFoldResult> sizes) {
+ assert(offsets.size() >= 1 && sizes.size() >= 1);
Location loc = getLoc();
auto zeroAttr = builder.getI64IntegerAttr(0);
auto oneAttr = builder.getI64IntegerAttr(1);
@@ -283,7 +284,7 @@
}
auto originalRank = getOriginalType().getRank();
SmallVector<OpFoldResult> originalStrides(originalRank, oneAttr);
- Value tiledOriginal = getSlice(builder, loc, outputs[0], originalOffsets,
+ Value tiledOriginal = getSlice(builder, loc, original(), originalOffsets,
originalSizes, originalStrides);
assert(tiledOriginal && "failed to get slice of original tensor");
@@ -354,7 +355,7 @@
Value init = b.create<memref::LoadOp>(loc, original(), starts);
BlockAndValueMapping bvm;
- Block &block = region().front();
+ Block &block = getRegion().front();
bvm.map(block.getArgument(0), update);
bvm.map(block.getArgument(1), init);
for (auto &blockOp : block.without_terminator()) {
@@ -381,7 +382,7 @@
return op->emitOpError("expected at least one `outs` operand");
}
- Block &block = region().front();
+ Block &block = getRegion().front();
size_t numOutputs = getNumOutputs();
if (block.getNumArguments() != 2 * numOutputs) {
return op->emitOpError("region block should have ")
@@ -389,13 +390,13 @@
}
int64_t rank = getOperandRank();
- int sortDim = dimension();
+ int sortDim = getDimension();
if (sortDim < 0 || sortDim >= rank) {
return op->emitOpError("dimension must be within (0, ") << rank << "]";
}
ArrayRef<int64_t> shape = getOperandShape();
- for (auto indexedOperand : llvm::enumerate(outputs())) {
+ for (auto indexedOperand : llvm::enumerate(getOutputs())) {
int index = indexedOperand.index();
auto operandType = getOperandType(index);
if (operandType.getRank() != rank) {
@@ -433,7 +434,7 @@
// All loops except the dimension to sort along are parallel.
SmallVector<StringRef> iteratorTypes(getOperandRank(),
getParallelIteratorTypeName());
- iteratorTypes[dimension()] = getReductionIteratorTypeName();
+ iteratorTypes[getDimension()] = getReductionIteratorTypeName();
return iteratorTypes;
}
@@ -452,18 +453,18 @@
return loopBounds;
}
-SmallVector<Operation *> SortOp::getTiledImplementation(
- OpBuilder &builder, ValueRange outputs, ArrayRef<OpFoldResult> offsets,
- ArrayRef<OpFoldResult> sizes, bool /*tileDestOperands*/) {
- assert(outputs.size() == this->outputs().size());
+SmallVector<Operation *>
+SortOp::getTiledImplementation(OpBuilder &builder,
+ ArrayRef<OpFoldResult> offsets,
+ ArrayRef<OpFoldResult> sizes) {
int64_t rank = getOperandRank();
assert(offsets.size() == static_cast<size_t>(rank) &&
sizes.size() == static_cast<size_t>(rank));
auto oneAttr = builder.getI64IntegerAttr(1);
SmallVector<OpFoldResult> strides(rank, oneAttr);
Location loc = getLoc();
- SmallVector<Value> tiledOperands(outputs.size());
- for (auto en : llvm::enumerate(outputs)) {
+ SmallVector<Value> tiledOperands(getOutputs().size());
+ for (auto en : llvm::enumerate(getOutputs())) {
tiledOperands[en.index()] =
getSlice(builder, getLoc(), en.value(), offsets, sizes, strides);
assert(tiledOperands[en.index()] && "failed to get slice of operand");
@@ -489,7 +490,7 @@
LogicalResult SortOp::generateScalarImplementation(OpBuilder &b, Location loc,
ValueRange ivs) {
- auto sortDim = dimension();
+ auto sortDim = getDimension();
SmallVector<Value> indices, sortBlkArgs;
indices.append(ivs.begin(), ivs.end());
// Bubble sort innermost loop.
@@ -518,7 +519,7 @@
}
});
- auto &srcBlock = region().front();
+ auto &srcBlock = getRegion().front();
Region &region = scfFor.getRegion();
BlockAndValueMapping bvm;
{
@@ -769,9 +770,10 @@
return success();
}
-SmallVector<Operation *> FftOp::getTiledImplementation(
- OpBuilder &builder, ValueRange outputs, ArrayRef<OpFoldResult> offsets,
- ArrayRef<OpFoldResult> sizes, bool /*tileDestOperands*/) {
+SmallVector<Operation *>
+FftOp::getTiledImplementation(OpBuilder &builder,
+ ArrayRef<OpFoldResult> offsets,
+ ArrayRef<OpFoldResult> sizes) {
int64_t rank = getOperandRank();
SmallVector<OpFoldResult> strides(rank, builder.getI64IntegerAttr(1));
Location loc = getLoc();
@@ -781,7 +783,7 @@
tiledOperands[2] = getImagCoeff();
SmallVector<Type, 4> resultTypes;
- for (auto out : outputs) {
+ for (auto out : getOutputs()) {
tiledOperands.push_back(
getSlice(builder, getLoc(), out, offsets, sizes, strides));
if (hasTensorSemantics()) {
@@ -834,7 +836,7 @@
}
SmallVector<int64_t> expectedAccumulatorShape;
for (int i = 0; i < inputType.getRank(); i++) {
- if (i != dimension())
+ if (i != getDimension())
expectedAccumulatorShape.push_back(inputShapes[i]);
}
if (llvm::any_of(llvm::zip(expectedAccumulatorShape, accumulatorShape),
@@ -881,7 +883,7 @@
SmallVector<StringRef> ScanOp::getLoopIteratorTypes() {
SmallVector<StringRef> iteratorTypes(getOperandRank(),
getParallelIteratorTypeName());
- iteratorTypes[dimension()] = getReductionIteratorTypeName();
+ iteratorTypes[getDimension()] = getReductionIteratorTypeName();
return iteratorTypes;
}
@@ -900,10 +902,10 @@
indices.append(ivs.begin(), ivs.end());
Value zero = b.create<arith::ConstantIndexOp>(loc, 0);
Value one = b.create<arith::ConstantIndexOp>(loc, 1);
- auto scanDim = dimension();
+ auto scanDim = getDimension();
auto cond = b.create<arith::CmpIOp>(loc, arith::CmpIPredicate::eq,
indices[scanDim], zero);
- bool isInclusive = inclusive();
+ bool isInclusive = getInclusive();
SmallVector<Value> accIndices;
for (int i = 0; i < indices.size(); i++) {
if (i != scanDim)
@@ -937,7 +939,7 @@
scanBlkArgs.push_back(i0);
});
- auto &srcBlock = region().front();
+ auto &srcBlock = getRegion().front();
Region &region = scfIf.getElseRegion();
BlockAndValueMapping bvm;
{
@@ -961,10 +963,10 @@
return success();
}
-SmallVector<Operation *> ScanOp::getTiledImplementation(
- OpBuilder &builder, ValueRange outputs, ArrayRef<OpFoldResult> offsets,
- ArrayRef<OpFoldResult> sizes, bool /*tileDestOperands*/) {
- assert(outputs.size() == this->outputs().size());
+SmallVector<Operation *>
+ScanOp::getTiledImplementation(OpBuilder &builder,
+ ArrayRef<OpFoldResult> offsets,
+ ArrayRef<OpFoldResult> sizes) {
int64_t rank = getOperandRank();
assert(offsets.size() == static_cast<size_t>(rank) &&
sizes.size() == static_cast<size_t>(rank));
@@ -975,7 +977,7 @@
tiledOperands.emplace_back(
getSlice(builder, getLoc(), input(), offsets, sizes, strides));
tiledOperands.emplace_back(
- getSlice(builder, getLoc(), outputs[0], offsets, sizes, strides));
+ getSlice(builder, getLoc(), getOutputs()[0], offsets, sizes, strides));
if (rank > 1) {
SmallVector<OpFoldResult> accumOffsets, accumSizes;
if (failed(getResultTilePosition(builder, 1, offsets, sizes, accumOffsets,
@@ -983,10 +985,11 @@
return {};
}
SmallVector<OpFoldResult> accumStrides(rank - 1, oneAttr);
- tiledOperands.emplace_back(getSlice(
- builder, getLoc(), outputs[1], accumOffsets, accumSizes, accumStrides));
+ tiledOperands.emplace_back(getSlice(builder, getLoc(), getOutputs()[1],
+ accumOffsets, accumSizes,
+ accumStrides));
} else {
- tiledOperands.emplace_back(outputs[1]);
+ tiledOperands.emplace_back(getOutputs()[1]);
}
SmallVector<Type, 4> resultTypes;
@@ -1013,7 +1016,7 @@
int64_t rank = getOperandRank();
if (rank > 1) {
for (auto i : llvm::seq<int64_t>(0, rank)) {
- if (i == dimension())
+ if (i == getDimension())
continue;
resultOffsets.push_back(offsets[i]);
resultSizes.push_back(sizes[i]);
@@ -1122,9 +1125,10 @@
return success();
}
-SmallVector<Operation *> ReverseOp::getTiledImplementation(
- OpBuilder &builder, ValueRange outputs, ArrayRef<OpFoldResult> offsets,
- ArrayRef<OpFoldResult> sizes, bool /*tileDestOperands*/) {
+SmallVector<Operation *>
+ReverseOp::getTiledImplementation(OpBuilder &builder,
+ ArrayRef<OpFoldResult> offsets,
+ ArrayRef<OpFoldResult> sizes) {
int64_t rank = getOperandRank();
SmallVector<OpFoldResult> strides(rank, builder.getI64IntegerAttr(1));
Location loc = getLoc();
@@ -1190,7 +1194,7 @@
if (getNumOutputs() != 2) {
return op->emitOpError("expected two output operands");
}
- if (dimension() >= getInputRank()) {
+ if (getDimension() >= getInputRank()) {
return op->emitOpError("dimension exceeds rank");
}
// Ensure input/output element types match
@@ -1239,7 +1243,7 @@
return op->emitOpError("output indices/values shape must match");
}
// Input shape must match the output shape except for the dimension()
- uint64_t dim = dimension();
+ uint64_t dim = getDimension();
if (llvm::any_of(llvm::enumerate(llvm::zip(inputValuesType.getShape(),
outputValuesType.getShape())),
[dim](auto e) {
@@ -1253,7 +1257,7 @@
return op->emitOpError("incompatible input/output shapes");
}
// Check region compatibility
- Block &block = region().front();
+ Block &block = getRegion().front();
if (block.getNumArguments() != 2) {
return op->emitOpError("region block should have 2 arguments");
}
@@ -1287,13 +1291,13 @@
SmallVector<StringRef> TopkOp::getLoopIteratorTypes() {
SmallVector<StringRef> iteratorTypes(getInputRank(),
getParallelIteratorTypeName());
- iteratorTypes[dimension()] = getReductionIteratorTypeName();
+ iteratorTypes[getDimension()] = getReductionIteratorTypeName();
return iteratorTypes;
}
LogicalResult TopkOp::generateScalarImplementation(OpBuilder &b, Location loc,
ValueRange ivs) {
- uint64_t kDim = dimension();
+ uint64_t kDim = getDimension();
Value zero = b.create<arith::ConstantIndexOp>(loc, 0);
Value one = b.create<arith::ConstantIndexOp>(loc, 1);
Value initialValue = b.create<memref::LoadOp>(loc, values(), ivs);
@@ -1310,7 +1314,7 @@
}
// Compute K (ub) from the selected dim of the output
- Value ub = b.create<memref::DimOp>(loc, outputValues(), dimension());
+ Value ub = b.create<memref::DimOp>(loc, outputValues(), getDimension());
// Inner K loop functions:
// Load current K value and index
@@ -1335,7 +1339,7 @@
auto loopCarryValues = scfFor.getRegionIterArgs();
// Retrieve region as black box comparision function f(x,y). Plug into op.
- auto &srcBlock = region().front();
+ auto &srcBlock = getRegion().front();
BlockAndValueMapping bvmF; // f(x,y)
BlockAndValueMapping bvmR; // f(y,x)
{
@@ -1387,10 +1391,10 @@
return success();
}
-SmallVector<Operation *> TopkOp::getTiledImplementation(
- OpBuilder &builder, ValueRange outputs, ArrayRef<OpFoldResult> offsets,
- ArrayRef<OpFoldResult> sizes, bool /*tileDestOperands*/) {
- assert(outputs.size() == this->outputs().size());
+SmallVector<Operation *>
+TopkOp::getTiledImplementation(OpBuilder &builder,
+ ArrayRef<OpFoldResult> offsets,
+ ArrayRef<OpFoldResult> sizes) {
int64_t rank = getInputRank();
assert(offsets.size() == static_cast<size_t>(rank) &&
sizes.size() == static_cast<size_t>(rank));
@@ -1413,13 +1417,13 @@
// Replace the tile size for the K dimension to use the output size instead of
// the input size.
- Value kSize = getDimValue(builder, getLoc(), outputValues(), dimension());
- outputSizes[dimension()] = getAsOpFoldResult(kSize);
+ Value kSize = getDimValue(builder, getLoc(), outputValues(), getDimension());
+ outputSizes[getDimension()] = getAsOpFoldResult(kSize);
tiledOperands.emplace_back(
- getSlice(builder, loc, outputs[0], offsets, outputSizes, strides));
+ getSlice(builder, loc, getOutputs()[0], offsets, outputSizes, strides));
tiledOperands.emplace_back(
- getSlice(builder, loc, outputs[1], offsets, outputSizes, strides));
+ getSlice(builder, loc, getOutputs()[1], offsets, outputSizes, strides));
SmallVector<Type, 2> resultTypes;
if (hasTensorSemantics()) {
resultTypes.push_back(tiledOperands[tiledOperands.size() - 2].getType());
@@ -1437,8 +1441,9 @@
SmallVector<OpFoldResult> &resultSizes) {
resultOffsets.assign(offsets.begin(), offsets.end());
resultSizes.assign(sizes.begin(), sizes.end());
- Value kSize = getDimValue(builder, getLoc(), outputValues(), dimension());
- resultSizes[dimension()] = getAsOpFoldResult(kSize);
+ Value kSize = getDimValue(
+ builder, getLoc(), getOutputOperand(resultNumber)->get(), getDimension());
+ resultSizes[getDimension()] = getAsOpFoldResult(kSize);
return success();
}
diff --git a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Passes/SplitReduction.cpp b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Passes/SplitReduction.cpp
index 37b2bb7..50d24ad 100644
--- a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Passes/SplitReduction.cpp
+++ b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Passes/SplitReduction.cpp
@@ -186,8 +186,8 @@
parallelTopkResultTypes,
/*ins=*/parallelTopkIns,
/*outs=*/parallelTopkOuts, kDimParallel);
- rewriter.cloneRegionBefore(topkOp.region(), parallelTopkOp.region(),
- parallelTopkOp.region().end());
+ rewriter.cloneRegionBefore(topkOp.getRegion(), parallelTopkOp.getRegion(),
+ parallelTopkOp.getRegion().end());
return parallelTopkOp;
}
@@ -262,9 +262,9 @@
loc,
/*resultTypes=*/topkOp->getResultTypes(),
/*ins=*/ValueRange{valuesCollapsed, indicesCollapsed},
- /*outs=*/topkOp.outputs(), kDimOrig);
- rewriter.cloneRegionBefore(topkOp.region(), reductionTopkOp.region(),
- reductionTopkOp.region().end());
+ /*outs=*/topkOp.getOutputs(), kDimOrig);
+ rewriter.cloneRegionBefore(topkOp.getRegion(), reductionTopkOp.getRegion(),
+ reductionTopkOp.getRegion().end());
return reductionTopkOp;
}
@@ -310,7 +310,7 @@
}
Location loc = topkOp.getLoc();
// Original reduction dimension used for the final combined reduction
- int64_t kDimOrig = topkOp.dimension();
+ int64_t kDimOrig = topkOp.getDimension();
// For parallel topk: the dimension that we compute parallel reductions
int64_t splitDimParallel = kDimOrig;
// For parallel topk: the dimension that we reduce
diff --git a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgTransform/IR/LinalgTransformOps.cpp b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgTransform/IR/LinalgTransformOps.cpp
index 6c30998..95e83c8 100644
--- a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgTransform/IR/LinalgTransformOps.cpp
+++ b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgTransform/IR/LinalgTransformOps.cpp
@@ -77,7 +77,7 @@
if (index)
regions.emplace_back(getResults());
else
- regions.emplace_back(&body());
+ regions.emplace_back(&getBody());
}
#define GET_OP_CLASSES
diff --git a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgTransform/IR/ScopedTransform.cpp b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgTransform/IR/ScopedTransform.cpp
index b3bfeab..e40b8d5 100644
--- a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgTransform/IR/ScopedTransform.cpp
+++ b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgTransform/IR/ScopedTransform.cpp
@@ -22,7 +22,7 @@
auto scope = rewriter.create<linalg::transform::ScopeOp>(
op->getLoc(), op->getResultTypes(), op->getOperands());
- Region &body = scope.body();
+ Region &body = scope.getBody();
rewriter.setInsertionPointToStart(&body.emplaceBlock());
BlockAndValueMapping bv;
SmallVector<Location> locs(op->getOperandTypes().size(), op->getLoc());
@@ -72,9 +72,10 @@
linalg::transform::unwrapScope(linalg::transform::ScopeOp scope) {
ScopeInliner interface(scope->getContext());
SmallVector<Operation *> ops;
- scope.body().walk([&](Operation *op) { ops.push_back(op); });
- if (failed(inlineRegion(interface, &scope.body(), scope, scope.getOperands(),
- scope.getResults(), /*inlineLoc=*/{},
+ scope.getBody().walk([&](Operation *op) { ops.push_back(op); });
+ if (failed(inlineRegion(interface, &scope.getBody(), scope,
+ scope.getOperands(), scope.getResults(),
+ /*inlineLoc=*/{},
/*shouldCloneInlinedRegion=*/false)))
return failure();
Rewriter(scope->getContext()).eraseOp(scope);
diff --git a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgTransform/Passes/ExpertExpansion.cpp b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgTransform/Passes/ExpertExpansion.cpp
index 9cfff21..72a3350 100644
--- a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgTransform/Passes/ExpertExpansion.cpp
+++ b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgTransform/Passes/ExpertExpansion.cpp
@@ -38,7 +38,7 @@
SimplePatternRewriter rewriter(expertOp);
if (failed(applicator.matchAndRewrite(expertOp, rewriter))) {
LLVM_DEBUG(DBGS() << "failed to rewrite strategy \""
- << expertOp.expertName() << "\"\n");
+ << expertOp.getExpertName() << "\"\n");
}
});
}
diff --git a/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/IR/LinalgExtInterfaces.td b/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/IR/LinalgExtInterfaces.td
index cc54399..8f85ad6 100644
--- a/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/IR/LinalgExtInterfaces.td
+++ b/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/IR/LinalgExtInterfaces.td
@@ -22,7 +22,7 @@
Return the input shape operands.
}],
/*retTy=*/"ValueRange",
- /*methodName=*/"inputs",
+ /*methodName=*/"getInputs",
/*args=*/(ins)
>,
// These special methods rely on `inputs` and `outputs` being defined by
@@ -36,7 +36,7 @@
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return $_op.inputs().size();
+ return $_op.getInputs().size();
}]
>,
// `outputs` must be defined by each op that wants to implement the
@@ -46,7 +46,7 @@
Return the output shape operands.
}],
/*retTy=*/"ValueRange",
- /*methodName=*/"outputs",
+ /*methodName=*/"getOutputs",
/*args=*/(ins)
>,
InterfaceMethod<
@@ -58,7 +58,7 @@
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return $_op.outputs().size();
+ return $_op.getOutputs().size();
}]
>,
InterfaceMethod<
diff --git a/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/IR/LinalgExtOps.td b/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/IR/LinalgExtOps.td
index 63b53d3..21dfdfe 100644
--- a/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/IR/LinalgExtOps.td
+++ b/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/IR/LinalgExtOps.td
@@ -36,7 +36,7 @@
let hasCustomAssemblyFormat = 1;
code extraLinalgExtOpClassDeclaration = [{
SmallVector<Value> getDestinationOperands(OpBuilder &b) {
- SmallVector<Value> dest(outputs().begin(), outputs().end());
+ SmallVector<Value> dest(getOutputs().begin(), getOutputs().end());
return dest;
}
@@ -180,7 +180,7 @@
}];
let extraClassDeclaration = extraLinalgExtOpClassDeclaration # [{
Value operand(int index) {
- return outputs()[index];
+ return getOutputs()[index];
}
ShapedType getOperandType(int index) {
return operand(index).getType().cast<ShapedType>();
@@ -226,9 +226,9 @@
(`:` type($results)^)?
}];
let extraClassDeclaration = extraLinalgExtOpClassDeclaration # [{
- Value getStage() { return inputs()[0]; }
- Value getReal() { return outputs()[0]; }
- Value getImag() { return outputs()[1]; }
+ Value getStage() { return getInputs()[0]; }
+ Value getReal() { return getOutputs()[0]; }
+ Value getImag() { return getOutputs()[1]; }
bool hasCoeff() { return getNumInputs() > 1; }
void generateScalarImplWithoutCoeffBuf(
OpBuilder & b, Location loc, ArrayRef<Value> operands, Value wholeSize);
@@ -236,11 +236,11 @@
ArrayRef<Value> operands);
Value getRealCoeff() {
if (!hasCoeff()) return Value();
- return inputs()[1];
+ return getInputs()[1];
}
Value getImagCoeff() {
if (!hasCoeff()) return Value();
- return inputs()[2];
+ return getInputs()[2];
}
ShapedType getOperandType() {
return getReal().getType().cast<ShapedType>();
@@ -356,7 +356,7 @@
}
SmallVector<int64_t> dims() {
SmallVector<int64_t> ret;
- for (const APInt& elem : dimensions()) {
+ for (const APInt& elem : getDimensions()) {
ret.push_back(elem.getLimitedValue());
}
return ret;
@@ -434,7 +434,7 @@
}
}];
}
-
+
//===----------------------------------------------------------------------===//
// Pure ops
//===----------------------------------------------------------------------===//
diff --git a/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgTransform/ScopedTransform.h b/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgTransform/ScopedTransform.h
index 8bb66b3..a0594fe 100644
--- a/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgTransform/ScopedTransform.h
+++ b/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgTransform/ScopedTransform.h
@@ -18,7 +18,7 @@
template <typename TransformT>
auto scoped(Operation *target, TransformT &&transform) {
auto scope = wrapInScope(target);
- Operation &op = *scope.body().front().begin();
+ Operation &op = *scope.getBody().front().begin();
auto result = transform(scope, &op);
if (failed(unwrapScope(scope)) || failed(result))
return decltype(result)(failure());
diff --git a/llvm-external-projects/iree-dialects/lib/Dialect/Input/InputOps.cpp b/llvm-external-projects/iree-dialects/lib/Dialect/Input/InputOps.cpp
index 227c6b0..7558ee5 100644
--- a/llvm-external-projects/iree-dialects/lib/Dialect/Input/InputOps.cpp
+++ b/llvm-external-projects/iree-dialects/lib/Dialect/Input/InputOps.cpp
@@ -138,21 +138,21 @@
LogicalResult
GlobalLoadOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
auto globalOp =
- symbolTable.lookupNearestSymbolFrom<GlobalOp>(*this, globalAttr());
+ symbolTable.lookupNearestSymbolFrom<GlobalOp>(*this, getGlobalAttr());
if (!globalOp) {
- return emitOpError() << "undefined global: " << global();
+ return emitOpError() << "undefined global: " << getGlobal();
}
auto loadType = getResult().getType();
- if (!isGlobalTypeCompatible(globalOp.type(), loadType)) {
- return emitOpError() << "global type mismatch; global " << global()
- << " is " << globalOp.type() << " but load is "
+ if (!isGlobalTypeCompatible(globalOp.getType(), loadType)) {
+ return emitOpError() << "global type mismatch; global " << getGlobal()
+ << " is " << globalOp.getType() << " but load is "
<< loadType;
}
return success();
}
LogicalResult GlobalLoadIndirectOp::verify() {
- auto globalType = global().getType().cast<PtrType>().getTargetType();
+ auto globalType = getGlobal().getType().cast<PtrType>().getTargetType();
auto loadType = getResult().getType();
if (!isGlobalTypeCompatible(globalType, loadType)) {
return emitOpError() << "global type mismatch; global pointer is "
@@ -164,22 +164,22 @@
LogicalResult
GlobalStoreOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
auto globalOp =
- symbolTable.lookupNearestSymbolFrom<GlobalOp>(*this, globalAttr());
+ symbolTable.lookupNearestSymbolFrom<GlobalOp>(*this, getGlobalAttr());
if (!globalOp) {
- return emitOpError() << "undefined global: " << global();
+ return emitOpError() << "undefined global: " << getGlobal();
}
- auto storeType = value().getType();
- if (!isGlobalTypeCompatible(globalOp.type(), storeType)) {
- return emitOpError() << "global type mismatch; global " << global()
- << " is " << globalOp.type() << " but store is "
+ auto storeType = getValue().getType();
+ if (!isGlobalTypeCompatible(globalOp.getType(), storeType)) {
+ return emitOpError() << "global type mismatch; global " << getGlobal()
+ << " is " << globalOp.getType() << " but store is "
<< storeType;
}
return success();
}
LogicalResult GlobalStoreIndirectOp::verify() {
- auto globalType = global().getType().cast<PtrType>().getTargetType();
- auto storeType = value().getType();
+ auto globalType = getGlobal().getType().cast<PtrType>().getTargetType();
+ auto storeType = getValue().getType();
if (!isGlobalTypeCompatible(globalType, storeType)) {
return emitOpError() << "global type mismatch; global pointer is "
<< globalType << " but store is " << storeType;
diff --git a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtInterfaces.cpp b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtInterfaces.cpp
index 899c4fc..0478492 100644
--- a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtInterfaces.cpp
+++ b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtInterfaces.cpp
@@ -30,12 +30,12 @@
"expected inputs and outputs to be RankedTensorType or scalar");
}
- if (op->getNumResults() != linalgExtOp.outputs().size()) {
+ if (op->getNumResults() != linalgExtOp.getNumOutputs()) {
return linalgExtOp.emitOpError(
"expected number of outputs to be same as the number of results");
}
for (auto en : llvm::enumerate(op->getResultTypes())) {
- Type outputType = linalgExtOp.outputs()[en.index()].getType();
+ Type outputType = linalgExtOp.getOutputs()[en.index()].getType();
if (en.value() != outputType) {
return linalgExtOp.emitOpError("expected type of `outs` operand #")
<< en.index() << " " << outputType
@@ -68,7 +68,7 @@
LogicalResult LinalgExtOp::reifyResultShapes(
OpBuilder &b, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
Operation *op = getOperation();
- for (auto output : outputs()) {
+ for (auto output : getOutputs()) {
SmallVector<Value> dims;
Type outputType = output.getType();
if (auto rankedTensorType = outputType.dyn_cast<RankedTensorType>()) {
diff --git a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtOps.cpp b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtOps.cpp
index b87e5d1..3b1ca13 100644
--- a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtOps.cpp
+++ b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtOps.cpp
@@ -112,10 +112,10 @@
//===----------------------------------------------------------------------===//
LogicalResult ScatterOp::verify() {
Operation *op = getOperation();
- if (inputs().size() != 2) {
+ if (getInputs().size() != 2) {
return op->emitOpError("expected two input operands");
}
- if (outputs().size() != 1) {
+ if (getOutputs().size() != 1) {
return op->emitOpError("expected one output operand");
}
auto checkDimensionsMatch = [&](ShapedType t1, ShapedType t2, unsigned dim) {
@@ -188,7 +188,7 @@
}
}
- Region &region = this->region();
+ Region &region = this->getRegion();
Block *body = &region.front();
if (body->getNumArguments() != 2) {
return op->emitOpError("expected region to have two arguments");
@@ -228,7 +228,7 @@
SmallVector<StringRef> ScatterOp::getLoopIteratorTypes() {
SmallVector<StringRef> iteratorTypes(getUpdateType().getRank(),
getParallelIteratorTypeName());
- if (!unique_indices()) {
+ if (!getUniqueIndices()) {
iteratorTypes[0] = getReductionIteratorTypeName();
}
return iteratorTypes;
@@ -355,7 +355,7 @@
Value init = b.create<memref::LoadOp>(loc, original(), starts);
BlockAndValueMapping bvm;
- Block &block = region().front();
+ Block &block = getRegion().front();
bvm.map(block.getArgument(0), update);
bvm.map(block.getArgument(1), init);
for (auto &blockOp : block.without_terminator()) {
@@ -382,7 +382,7 @@
return op->emitOpError("expected at least one `outs` operand");
}
- Block &block = region().front();
+ Block &block = getRegion().front();
size_t numOutputs = getNumOutputs();
if (block.getNumArguments() != 2 * numOutputs) {
return op->emitOpError("region block should have ")
@@ -390,13 +390,13 @@
}
int64_t rank = getOperandRank();
- int sortDim = dimension();
+ int sortDim = getDimension();
if (sortDim < 0 || sortDim >= rank) {
return op->emitOpError("dimension must be within (0, ") << rank << "]";
}
ArrayRef<int64_t> shape = getOperandShape();
- for (auto indexedOperand : llvm::enumerate(outputs())) {
+ for (auto indexedOperand : llvm::enumerate(getOutputs())) {
int index = indexedOperand.index();
auto operandType = getOperandType(index);
if (operandType.getRank() != rank) {
@@ -434,7 +434,7 @@
// All loops except the dimension to sort along are parallel.
SmallVector<StringRef> iteratorTypes(getOperandRank(),
getParallelIteratorTypeName());
- iteratorTypes[dimension()] = getReductionIteratorTypeName();
+ iteratorTypes[getDimension()] = getReductionIteratorTypeName();
return iteratorTypes;
}
@@ -463,8 +463,8 @@
auto oneAttr = builder.getI64IntegerAttr(1);
SmallVector<OpFoldResult> strides(rank, oneAttr);
Location loc = getLoc();
- SmallVector<Value> tiledOperands(outputs().size());
- for (auto en : llvm::enumerate(outputs())) {
+ SmallVector<Value> tiledOperands(getOutputs().size());
+ for (auto en : llvm::enumerate(getOutputs())) {
tiledOperands[en.index()] =
getSlice(builder, getLoc(), en.value(), offsets, sizes, strides);
assert(tiledOperands[en.index()] && "failed to get slice of operand");
@@ -490,7 +490,7 @@
LogicalResult SortOp::generateScalarImplementation(OpBuilder &b, Location loc,
ValueRange ivs) {
- auto sortDim = dimension();
+ auto sortDim = getDimension();
SmallVector<Value> indices, sortBlkArgs;
indices.append(ivs.begin(), ivs.end());
// Bubble sort innermost loop.
@@ -519,7 +519,7 @@
}
});
- auto &srcBlock = region().front();
+ auto &srcBlock = getRegion().front();
Region &region = scfFor.getRegion();
BlockAndValueMapping bvm;
{
@@ -783,7 +783,7 @@
tiledOperands[2] = getImagCoeff();
SmallVector<Type, 4> resultTypes;
- for (auto out : outputs()) {
+ for (auto out : getOutputs()) {
tiledOperands.push_back(
getSlice(builder, getLoc(), out, offsets, sizes, strides));
if (hasTensorSemantics()) {
@@ -836,7 +836,7 @@
}
SmallVector<int64_t> expectedAccumulatorShape;
for (int i = 0; i < inputType.getRank(); i++) {
- if (i != dimension())
+ if (i != getDimension())
expectedAccumulatorShape.push_back(inputShapes[i]);
}
if (llvm::any_of(llvm::zip(expectedAccumulatorShape, accumulatorShape),
@@ -883,7 +883,7 @@
SmallVector<StringRef> ScanOp::getLoopIteratorTypes() {
SmallVector<StringRef> iteratorTypes(getOperandRank(),
getParallelIteratorTypeName());
- iteratorTypes[dimension()] = getReductionIteratorTypeName();
+ iteratorTypes[getDimension()] = getReductionIteratorTypeName();
return iteratorTypes;
}
@@ -902,10 +902,10 @@
indices.append(ivs.begin(), ivs.end());
Value zero = b.create<arith::ConstantIndexOp>(loc, 0);
Value one = b.create<arith::ConstantIndexOp>(loc, 1);
- auto scanDim = dimension();
+ auto scanDim = getDimension();
auto cond = b.create<arith::CmpIOp>(loc, arith::CmpIPredicate::eq,
indices[scanDim], zero);
- bool isInclusive = inclusive();
+ bool isInclusive = getInclusive();
SmallVector<Value> accIndices;
for (int i = 0; i < indices.size(); i++) {
if (i != scanDim)
@@ -939,7 +939,7 @@
scanBlkArgs.push_back(i0);
});
- auto &srcBlock = region().front();
+ auto &srcBlock = getRegion().front();
Region &region = scfIf.getElseRegion();
BlockAndValueMapping bvm;
{
@@ -977,7 +977,7 @@
tiledOperands.emplace_back(
getSlice(builder, getLoc(), input(), offsets, sizes, strides));
tiledOperands.emplace_back(
- getSlice(builder, getLoc(), outputs()[0], offsets, sizes, strides));
+ getSlice(builder, getLoc(), getOutputs()[0], offsets, sizes, strides));
if (rank > 1) {
SmallVector<OpFoldResult> accumOffsets, accumSizes;
if (failed(getResultTilePosition(builder, 1, offsets, sizes, accumOffsets,
@@ -985,11 +985,11 @@
return {};
}
SmallVector<OpFoldResult> accumStrides(rank - 1, oneAttr);
- tiledOperands.emplace_back(getSlice(builder, getLoc(), outputs()[1],
+ tiledOperands.emplace_back(getSlice(builder, getLoc(), getOutputs()[1],
accumOffsets, accumSizes,
accumStrides));
} else {
- tiledOperands.emplace_back(outputs()[1]);
+ tiledOperands.emplace_back(getOutputs()[1]);
}
SmallVector<Type, 4> resultTypes;
@@ -1016,7 +1016,7 @@
int64_t rank = getOperandRank();
if (rank > 1) {
for (auto i : llvm::seq<int64_t>(0, rank)) {
- if (i == dimension())
+ if (i == getDimension())
continue;
resultOffsets.push_back(offsets[i]);
resultSizes.push_back(sizes[i]);
@@ -1194,7 +1194,7 @@
if (getNumOutputs() != 2) {
return op->emitOpError("expected two output operands");
}
- if (dimension() >= getInputRank()) {
+ if (getDimension() >= getInputRank()) {
return op->emitOpError("dimension exceeds rank");
}
// Ensure input/output element types match
@@ -1243,7 +1243,7 @@
return op->emitOpError("output indices/values shape must match");
}
// Input shape must match the output shape except for the dimension()
- uint64_t dim = dimension();
+ uint64_t dim = getDimension();
if (llvm::any_of(llvm::enumerate(llvm::zip(inputValuesType.getShape(),
outputValuesType.getShape())),
[dim](auto e) {
@@ -1257,7 +1257,7 @@
return op->emitOpError("incompatible input/output shapes");
}
// Check region compatibility
- Block &block = region().front();
+ Block &block = getRegion().front();
if (block.getNumArguments() != 2) {
return op->emitOpError("region block should have 2 arguments");
}
@@ -1291,13 +1291,13 @@
SmallVector<StringRef> TopkOp::getLoopIteratorTypes() {
SmallVector<StringRef> iteratorTypes(getInputRank(),
getParallelIteratorTypeName());
- iteratorTypes[dimension()] = getReductionIteratorTypeName();
+ iteratorTypes[getDimension()] = getReductionIteratorTypeName();
return iteratorTypes;
}
LogicalResult TopkOp::generateScalarImplementation(OpBuilder &b, Location loc,
ValueRange ivs) {
- uint64_t kDim = dimension();
+ uint64_t kDim = getDimension();
Value zero = b.create<arith::ConstantIndexOp>(loc, 0);
Value one = b.create<arith::ConstantIndexOp>(loc, 1);
Value initialValue = b.create<memref::LoadOp>(loc, values(), ivs);
@@ -1314,7 +1314,7 @@
}
// Compute K (ub) from the selected dim of the output
- Value ub = b.create<memref::DimOp>(loc, outputValues(), dimension());
+ Value ub = b.create<memref::DimOp>(loc, outputValues(), getDimension());
// Inner K loop functions:
// Load current K value and index
@@ -1339,7 +1339,7 @@
auto loopCarryValues = scfFor.getRegionIterArgs();
// Retrieve region as black box comparision function f(x,y). Plug into op.
- auto &srcBlock = region().front();
+ auto &srcBlock = getRegion().front();
BlockAndValueMapping bvmF; // f(x,y)
BlockAndValueMapping bvmR; // f(y,x)
{
@@ -1417,13 +1417,13 @@
// Replace the tile size for the K dimension to use the output size instead of
// the input size.
- Value kSize = getDimValue(builder, getLoc(), outputValues(), dimension());
- outputSizes[dimension()] = getAsOpFoldResult(kSize);
+ Value kSize = getDimValue(builder, getLoc(), outputValues(), getDimension());
+ outputSizes[getDimension()] = getAsOpFoldResult(kSize);
tiledOperands.emplace_back(
- getSlice(builder, loc, outputs()[0], offsets, outputSizes, strides));
+ getSlice(builder, loc, getOutputs()[0], offsets, outputSizes, strides));
tiledOperands.emplace_back(
- getSlice(builder, loc, outputs()[1], offsets, outputSizes, strides));
+ getSlice(builder, loc, getOutputs()[1], offsets, outputSizes, strides));
SmallVector<Type, 2> resultTypes;
if (hasTensorSemantics()) {
resultTypes.push_back(tiledOperands[tiledOperands.size() - 2].getType());
@@ -1441,9 +1441,9 @@
SmallVector<OpFoldResult> &resultSizes) {
resultOffsets.assign(offsets.begin(), offsets.end());
resultSizes.assign(sizes.begin(), sizes.end());
- Value kSize = getDimValue(builder, getLoc(),
- getOutputOperand(resultNumber)->get(), dimension());
- resultSizes[dimension()] = getAsOpFoldResult(kSize);
+ Value kSize = getDimValue(
+ builder, getLoc(), getOutputOperand(resultNumber)->get(), getDimension());
+ resultSizes[getDimension()] = getAsOpFoldResult(kSize);
return success();
}
diff --git a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Passes/SplitReduction.cpp b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Passes/SplitReduction.cpp
index 37b2bb7..50d24ad 100644
--- a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Passes/SplitReduction.cpp
+++ b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Passes/SplitReduction.cpp
@@ -186,8 +186,8 @@
parallelTopkResultTypes,
/*ins=*/parallelTopkIns,
/*outs=*/parallelTopkOuts, kDimParallel);
- rewriter.cloneRegionBefore(topkOp.region(), parallelTopkOp.region(),
- parallelTopkOp.region().end());
+ rewriter.cloneRegionBefore(topkOp.getRegion(), parallelTopkOp.getRegion(),
+ parallelTopkOp.getRegion().end());
return parallelTopkOp;
}
@@ -262,9 +262,9 @@
loc,
/*resultTypes=*/topkOp->getResultTypes(),
/*ins=*/ValueRange{valuesCollapsed, indicesCollapsed},
- /*outs=*/topkOp.outputs(), kDimOrig);
- rewriter.cloneRegionBefore(topkOp.region(), reductionTopkOp.region(),
- reductionTopkOp.region().end());
+ /*outs=*/topkOp.getOutputs(), kDimOrig);
+ rewriter.cloneRegionBefore(topkOp.getRegion(), reductionTopkOp.getRegion(),
+ reductionTopkOp.getRegion().end());
return reductionTopkOp;
}
@@ -310,7 +310,7 @@
}
Location loc = topkOp.getLoc();
// Original reduction dimension used for the final combined reduction
- int64_t kDimOrig = topkOp.dimension();
+ int64_t kDimOrig = topkOp.getDimension();
// For parallel topk: the dimension that we compute parallel reductions
int64_t splitDimParallel = kDimOrig;
// For parallel topk: the dimension that we reduce
diff --git a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgTransform/IR/LinalgTransformOps.cpp b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgTransform/IR/LinalgTransformOps.cpp
index 6c30998..95e83c8 100644
--- a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgTransform/IR/LinalgTransformOps.cpp
+++ b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgTransform/IR/LinalgTransformOps.cpp
@@ -77,7 +77,7 @@
if (index)
regions.emplace_back(getResults());
else
- regions.emplace_back(&body());
+ regions.emplace_back(&getBody());
}
#define GET_OP_CLASSES
diff --git a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgTransform/IR/ScopedTransform.cpp b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgTransform/IR/ScopedTransform.cpp
index b3bfeab..e40b8d5 100644
--- a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgTransform/IR/ScopedTransform.cpp
+++ b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgTransform/IR/ScopedTransform.cpp
@@ -22,7 +22,7 @@
auto scope = rewriter.create<linalg::transform::ScopeOp>(
op->getLoc(), op->getResultTypes(), op->getOperands());
- Region &body = scope.body();
+ Region &body = scope.getBody();
rewriter.setInsertionPointToStart(&body.emplaceBlock());
BlockAndValueMapping bv;
SmallVector<Location> locs(op->getOperandTypes().size(), op->getLoc());
@@ -72,9 +72,10 @@
linalg::transform::unwrapScope(linalg::transform::ScopeOp scope) {
ScopeInliner interface(scope->getContext());
SmallVector<Operation *> ops;
- scope.body().walk([&](Operation *op) { ops.push_back(op); });
- if (failed(inlineRegion(interface, &scope.body(), scope, scope.getOperands(),
- scope.getResults(), /*inlineLoc=*/{},
+ scope.getBody().walk([&](Operation *op) { ops.push_back(op); });
+ if (failed(inlineRegion(interface, &scope.getBody(), scope,
+ scope.getOperands(), scope.getResults(),
+ /*inlineLoc=*/{},
/*shouldCloneInlinedRegion=*/false)))
return failure();
Rewriter(scope->getContext()).eraseOp(scope);
diff --git a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgTransform/Passes/ExpertExpansion.cpp b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgTransform/Passes/ExpertExpansion.cpp
index 9cfff21..72a3350 100644
--- a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgTransform/Passes/ExpertExpansion.cpp
+++ b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgTransform/Passes/ExpertExpansion.cpp
@@ -38,7 +38,7 @@
SimplePatternRewriter rewriter(expertOp);
if (failed(applicator.matchAndRewrite(expertOp, rewriter))) {
LLVM_DEBUG(DBGS() << "failed to rewrite strategy \""
- << expertOp.expertName() << "\"\n");
+ << expertOp.getExpertName() << "\"\n");
}
});
}
diff --git a/third_party/llvm-project b/third_party/llvm-project
index 2d52c6b..72142fb 160000
--- a/third_party/llvm-project
+++ b/third_party/llvm-project
@@ -1 +1 @@
-Subproject commit 2d52c6bfae801b016dd3627b8c0e7c4a99405549
+Subproject commit 72142fbac496a66769e16c80b076216d7c449ab2
diff --git a/third_party/mlir-hlo b/third_party/mlir-hlo
index 440a6d3..3fe2805 160000
--- a/third_party/mlir-hlo
+++ b/third_party/mlir-hlo
@@ -1 +1 @@
-Subproject commit 440a6d36a76592979b0597568b868eb9a07cf842
+Subproject commit 3fe280545e6c24020fd2786a85896a48d0df9d97
diff --git a/third_party/torch-mlir-dialects/include/torch-mlir-dialects/Dialect/TMTensor/IR/TMTensorInterfaces.td b/third_party/torch-mlir-dialects/include/torch-mlir-dialects/Dialect/TMTensor/IR/TMTensorInterfaces.td
index 94ce68b..2b412a5 100644
--- a/third_party/torch-mlir-dialects/include/torch-mlir-dialects/Dialect/TMTensor/IR/TMTensorInterfaces.td
+++ b/third_party/torch-mlir-dialects/include/torch-mlir-dialects/Dialect/TMTensor/IR/TMTensorInterfaces.td
@@ -25,7 +25,7 @@
Return the input shape operands.
}],
/*retTy=*/"ValueRange",
- /*methodName=*/"inputs",
+ /*methodName=*/"getInputs",
/*args=*/(ins)
>,
// These special methods rely on `inputs` and `outputs` being defined by
@@ -39,7 +39,7 @@
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return $_op.inputs().size();
+ return $_op.getInputs().size();
}]
>,
// `outputs` must be defined by each op that wants to implement the
@@ -49,7 +49,7 @@
Return the output shape operands.
}],
/*retTy=*/"ValueRange",
- /*methodName=*/"outputs",
+ /*methodName=*/"getOutputs",
/*args=*/(ins)
>,
InterfaceMethod<
@@ -61,7 +61,7 @@
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return $_op.outputs().size();
+ return $_op.getOutputs().size();
}]
>,
InterfaceMethod<
diff --git a/third_party/torch-mlir-dialects/include/torch-mlir-dialects/Dialect/TMTensor/IR/TMTensorOps.td b/third_party/torch-mlir-dialects/include/torch-mlir-dialects/Dialect/TMTensor/IR/TMTensorOps.td
index 151dee4..8c5e94f 100644
--- a/third_party/torch-mlir-dialects/include/torch-mlir-dialects/Dialect/TMTensor/IR/TMTensorOps.td
+++ b/third_party/torch-mlir-dialects/include/torch-mlir-dialects/Dialect/TMTensor/IR/TMTensorOps.td
@@ -35,7 +35,7 @@
let hasCustomAssemblyFormat = 1;
code extraTMTensorOpClassDeclaration = [{
SmallVector<Value> getDestinationOperands(OpBuilder &b) {
- SmallVector<Value> dest(outputs().begin(), outputs().end());
+ SmallVector<Value> dest(getOutputs().begin(), getOutputs().end());
return dest;
}
}];
diff --git a/third_party/torch-mlir-dialects/lib/Dialect/TMTensor/IR/TMTensorInterfaces.cpp b/third_party/torch-mlir-dialects/lib/Dialect/TMTensor/IR/TMTensorInterfaces.cpp
index 3612ec1..b140b79 100644
--- a/third_party/torch-mlir-dialects/lib/Dialect/TMTensor/IR/TMTensorInterfaces.cpp
+++ b/third_party/torch-mlir-dialects/lib/Dialect/TMTensor/IR/TMTensorInterfaces.cpp
@@ -22,12 +22,12 @@
"expected inputs and outputs to be RankedTensorType or scalar");
}
- if (op->getNumResults() != mtTensorOp.outputs().size()) {
+ if (op->getNumResults() != mtTensorOp.getOutputs().size()) {
return mtTensorOp.emitOpError(
"expected number of outputs to be same as the number of results");
}
for (auto en : llvm::enumerate(op->getResultTypes())) {
- Type outputType = mtTensorOp.outputs()[en.index()].getType();
+ Type outputType = mtTensorOp.getOutputs()[en.index()].getType();
if (en.value() != outputType) {
return mtTensorOp.emitOpError("expected type of `outs` operand #")
<< en.index() << " " << outputType
diff --git a/third_party/torch-mlir-dialects/lib/Dialect/TMTensor/IR/TMTensorOps.cpp b/third_party/torch-mlir-dialects/lib/Dialect/TMTensor/IR/TMTensorOps.cpp
index b224491..070c998 100644
--- a/third_party/torch-mlir-dialects/lib/Dialect/TMTensor/IR/TMTensorOps.cpp
+++ b/third_party/torch-mlir-dialects/lib/Dialect/TMTensor/IR/TMTensorOps.cpp
@@ -112,7 +112,7 @@
}
SmallVector<int64_t> expectedAccumulatorShape;
for (size_t i = 0; i < (size_t)inputType.getRank(); i++) {
- if (i != dimension())
+ if (i != getDimension())
expectedAccumulatorShape.push_back(inputShapes[i]);
}
if (llvm::any_of(llvm::zip(expectedAccumulatorShape, accumulatorShape),
@@ -158,7 +158,7 @@
SmallVector<StringRef> ScanOp::getLoopIteratorTypes() {
SmallVector<StringRef> iteratorTypes(getOperandRank(),
getParallelIteratorTypeName());
- iteratorTypes[dimension()] = getReductionIteratorTypeName();
+ iteratorTypes[getDimension()] = getReductionIteratorTypeName();
return iteratorTypes;
}
@@ -177,10 +177,10 @@
indices.append(ivs.begin(), ivs.end());
Value zero = b.create<arith::ConstantIndexOp>(loc, 0);
Value one = b.create<arith::ConstantIndexOp>(loc, 1);
- uint64_t scanDim = dimension();
+ uint64_t scanDim = getDimension();
Value cond = b.create<arith::CmpIOp>(loc, arith::CmpIPredicate::eq,
indices[scanDim], zero);
- bool isInclusive = inclusive();
+ bool isInclusive = getInclusive();
SmallVector<Value> accIndices;
for (size_t i = 0; i < indices.size(); i++) {
if (i != scanDim)
@@ -214,7 +214,7 @@
scanBlkArgs.push_back(i0);
});
- auto &srcBlock = region().front();
+ auto &srcBlock = getRegion().front();
Region ®ion = scfIf.getElseRegion();
BlockAndValueMapping bvm;
{
@@ -259,10 +259,10 @@
// ScatterOp
//===----------------------------------------------------------------------===//
LogicalResult ScatterOp::verify() {
- if (inputs().size() != 2) {
+ if (getInputs().size() != 2) {
return emitOpError("expected two input operands");
}
- if (outputs().size() != 1) {
+ if (getOutputs().size() != 1) {
return emitOpError("expected one output operand");
}
auto checkDimensionsMatch = [&](ShapedType t1, ShapedType t2, unsigned dim) {
@@ -334,7 +334,7 @@
}
}
- Region ®ion = this->region();
+ Region ®ion = this->getRegion();
Block *body = ®ion.front();
if (body->getNumArguments() != 2) {
return emitOpError("expected region to have two arguments");
@@ -374,7 +374,7 @@
SmallVector<StringRef> ScatterOp::getLoopIteratorTypes() {
SmallVector<StringRef> iteratorTypes(getUpdateType().getRank(),
getParallelIteratorTypeName());
- if (!unique_indices()) {
+ if (!getUniqueIndices()) {
iteratorTypes[0] = getReductionIteratorTypeName();
}
return iteratorTypes;
@@ -424,7 +424,7 @@
Value init = b.create<memref::LoadOp>(loc, original(), starts);
BlockAndValueMapping bvm;
- Block &block = region().front();
+ Block &block = getRegion().front();
bvm.map(block.getArgument(0), update);
bvm.map(block.getArgument(1), init);
for (auto &blockOp : block.without_terminator()) {