Fix out-of-bounds block argument access in linalg ext ops (#8452)
LinalgExt ops don't always have a payload block argument corresponding to each
output operand, so calling payloadUsesValueFromOperand on a LinalgExt output
operand can index past the end of the payload block's argument list. Restrict
the query in LinalgBufferizePass to ops that implement linalg::LinalgOp (and
conservatively emit the copy otherwise), and drop payloadUsesValueFromOperand
and isInitTensor from the LinalgExt interface together with the FftOp/ReverseOp
overrides.
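
For reference, the interface default being removed (see the
LinalgExtInterfaces.td hunk below) boils down to the following. This is a
simplified sketch, with the op passed explicitly rather than through the
interface's $_op, and it only works when the payload block has one argument
per op operand:

    #include "mlir/IR/Block.h"
    #include "mlir/IR/Operation.h"

    using namespace mlir;

    // Simplified sketch of the removed interface default.
    static bool payloadUsesValueFromOperand(Operation *op,
                                            OpOperand *opOperand) {
      unsigned bbArgNumber = opOperand->getOperandNumber();
      Block &block = op->getRegion(0).front();
      // For iree_linalg_ext.scatter this indexing is out of bounds: the op has
      // three operands (updates, indices, original) but only two payload block
      // arguments, so querying the output operand (operand #2) walks off the
      // end of the argument list.
      return !block.getArgument(bbArgNumber).use_empty();
    }

The new dispatch_scatter test below covers bufferizing a LinalgExt op whose
output operand previously triggered this query.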
diff --git a/iree/compiler/Codegen/Common/LinalgBufferizePass.cpp b/iree/compiler/Codegen/Common/LinalgBufferizePass.cpp
index bcb1b79..6775676 100644
--- a/iree/compiler/Codegen/Common/LinalgBufferizePass.cpp
+++ b/iree/compiler/Codegen/Common/LinalgBufferizePass.cpp
@@ -710,9 +710,11 @@
OpOperand *outOperand = std::get<1>(it);
Value outTensor = outOperand->get();
Value outBuffer = bvm.lookupOrNull(outTensor);
- if (outBuffer && !plan.isEquivalent(outTensor, resultTensor) &&
- op.payloadUsesValueFromOperand(outOperand)) {
- createLinalgCopyOp(b, loc, outBuffer, resultBuffer);
+ if (outBuffer && !plan.isEquivalent(outTensor, resultTensor)) {
+ auto linalgOp = dyn_cast<linalg::LinalgOp>(op.getOperation());
+ if (!linalgOp || linalgOp.payloadUsesValueFromOperand(outOperand)) {
+ createLinalgCopyOp(b, loc, outBuffer, resultBuffer);
+ }
}
newOutputBuffers.push_back(resultBuffer);
}
diff --git a/iree/compiler/Codegen/Common/test/linalg_bufferize.mlir b/iree/compiler/Codegen/Common/test/linalg_bufferize.mlir
index ae53a94..c21310e 100644
--- a/iree/compiler/Codegen/Common/test/linalg_bufferize.mlir
+++ b/iree/compiler/Codegen/Common/test/linalg_bufferize.mlir
@@ -2698,3 +2698,32 @@
// CHECK: linalg.generic
// CHECK-SAME: ins(%[[SUBVIEW]] :
// CHECK-SAME: outs(%[[DEST]] :
+
+// -----
+
+// CHECK-LABEL: func @dispatch_scatter()
+func @dispatch_scatter() {
+ %c1 = arith.constant 1 : index
+ %c0 = arith.constant 0 : index
+ %cst = arith.constant dense<0> : tensor<1x1xi32>
+ %cst_0 = arith.constant dense<0> : tensor<1x2xi32>
+ %0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) offset(%c0) alignment(32) : !flow.dispatch.tensor<readonly:1xi32>
+ %1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) offset(%c0) alignment(32) : !flow.dispatch.tensor<writeonly:1x1xi32>
+ %workgroup_size_x = hal.interface.workgroup.size[0] : index
+ %workgroup_id_x = hal.interface.workgroup.id[0] : index
+ %workgroup_count_x = hal.interface.workgroup.count[0] : index
+ %2 = affine.apply affine_map<()[s0, s1] -> (s1 * s0)>()[%workgroup_size_x, %workgroup_id_x]
+ %3 = affine.apply affine_map<()[s0, s1] -> (s1 * s0)>()[%workgroup_size_x, %workgroup_count_x]
+ scf.for %arg0 = %2 to %c1 step %3 {
+ %4 = affine.min affine_map<(d0)[s0] -> (s0, -d0 + 1)>(%arg0)[%workgroup_size_x]
+ %5 = flow.dispatch.tensor.load %0, offsets = [%arg0], sizes = [%4], strides = [1] : !flow.dispatch.tensor<readonly:1xi32> -> tensor<?xi32>
+ %6 = tensor.extract_slice %cst_0[%arg0, 0] [%4, 2] [1, 1] : tensor<1x2xi32> to tensor<?x2xi32>
+ // CHECK: iree_linalg_ext.scatter unique_indices(true) ins(%{{.*}}, %{{.*}} : memref<?xi32, #{{.*}}>, memref<?x2xi32, #{{.*}}>) outs(%{{.*}} : memref<1x1xi32>)
+ %7 = iree_linalg_ext.scatter unique_indices(true) ins(%5, %6 : tensor<?xi32>, tensor<?x2xi32>) outs(%cst : tensor<1x1xi32>) {
+ ^bb0(%arg1: i32, %arg2: i32):
+ iree_linalg_ext.yield %arg1 : i32
+ } -> tensor<1x1xi32>
+ flow.dispatch.tensor.store %7, %1, offsets = [0, 0], sizes = [1, 1], strides = [1, 1] : tensor<1x1xi32> -> !flow.dispatch.tensor<writeonly:1x1xi32>
+ }
+ return
+}
diff --git a/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/IR/LinalgExtInterfaces.td b/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/IR/LinalgExtInterfaces.td
index 638d4ed..4ae75cc 100644
--- a/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/IR/LinalgExtInterfaces.td
+++ b/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/IR/LinalgExtInterfaces.td
@@ -284,30 +284,6 @@
>,
InterfaceMethod<
/*desc=*/[{
- Return true if the payload uses the value loaded from `opOperand`. This
- is useful to avoid loading from "write-only" memory that may be
- uninitialized, as well as properly cloning "read-write" operands.
- }],
- /*retTy=*/"bool",
- /*methodName=*/"payloadUsesValueFromOperand",
- /*args=*/(ins "OpOperand *":$opOperand),
- /*methodBody=*/"",
- /*defaultImplementation=*/[{
- unsigned bbArgNumber = opOperand->getOperandNumber();
- // Safeguard against the named linalg ops that are manually defined and
- // that only support buffer semantics: we should not be there.
- // Such ops have an empty regionBuilder and are not constructed with a
- // region for now. In the future they are slated to disappear.
- assert(this->getOperation()->getNumRegions() == 1 && "unexpected "
- "missing region (calling `payloadUsesValueFromOperand` on "
- "manually defined named Linalg op?)");
- Block &block = this->getOperation()->getRegion(0).front();
- // Init tensors have uses.
- return !block.getArgument(bbArgNumber).use_empty();
- }]
- >,
- InterfaceMethod<
- /*desc=*/[{
Return true if `opOperand` is an input tensor.
}],
/*retTy=*/"bool",
@@ -340,21 +316,6 @@
>,
InterfaceMethod<
/*desc=*/[{
- Return true if `opOperand` is an init tensor. This is true when it is
- an output tensor operand whose value is used in the payload region.
- }],
- /*retTy=*/"bool",
- /*methodName=*/"isInitTensor",
- /*args=*/(ins "OpOperand *":$opOperand),
- /*methodBody=*/"",
- /*defaultImplementation=*/[{
- if (!$_op.isOutputTensor(opOperand))
- return false;
- return payloadUsesValueFromOperand(opOperand);
- }]
- >,
- InterfaceMethod<
- /*desc=*/[{
Return the `opOperand` rank or zero for scalars.
}],
/*retTy=*/"int64_t",
diff --git a/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/IR/LinalgExtOps.td b/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/IR/LinalgExtOps.td
index 228c357..b182052 100644
--- a/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/IR/LinalgExtOps.td
+++ b/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/IR/LinalgExtOps.td
@@ -184,10 +184,7 @@
"getPartitionableLoops", "getTiledImplementation",
"generateScalarImplementation"
]>,
- DeclareOpInterfaceMethods<LinalgExtInterface,
- // FftOp does not have a region, so we have to
- // overwrite the method.
- ["payloadUsesValueFromOperand"]>
+ DeclareOpInterfaceMethods<LinalgExtInterface>
]> {
let summary = "Fft operator";
let description = [{
@@ -300,10 +297,7 @@
DeclareOpInterfaceMethods<
TiledOpInterface,
["generateScalarImplementation", "getTiledImplementation"]>,
- DeclareOpInterfaceMethods<LinalgExtInterface,
- // ReverseOp does not have a region, so we have to
- // overwrite the method.
- ["payloadUsesValueFromOperand"]>]> {
+ DeclareOpInterfaceMethods<LinalgExtInterface>]> {
let summary = "Reverse operator";
let description = [{
A temporary solution for lowering reverse ops into IREE, allowing IREE to
diff --git a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtOps.cpp b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtOps.cpp
index 4fb8a6b..a895a0f 100644
--- a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtOps.cpp
+++ b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtOps.cpp
@@ -758,8 +758,6 @@
return success();
}

-bool FftOp::payloadUsesValueFromOperand(OpOperand *) { return false; }
-
SmallVector<unsigned> FftOp::getPartitionableLoops(
unsigned maxNumParallelDims) {
auto range = llvm::seq<unsigned>(0, getOperandRank());
@@ -1086,8 +1084,6 @@
return success();
}

-bool ReverseOp::payloadUsesValueFromOperand(OpOperand *) { return false; }
-
SmallVector<StringRef> ReverseOp::getLoopIteratorTypes() {
SmallVector<StringRef> iteratorTypes(getOperandRank(),
getParallelIteratorTypeName());