[NFC] Switch to ShapedType::isDynamic instead of comparing against kDynamic (#15840)
One of MLIR's goals is to make ShapedType::kDynamic private and force all
users to go through safe APIs. Some code already does this; this revision
switches the remaining uses to ShapedType::isDynamic for consistency.
diff --git a/compiler/plugins/input/StableHLO/stablehlo-iree/Conversion/LegalizeToLinalgUtils.cpp b/compiler/plugins/input/StableHLO/stablehlo-iree/Conversion/LegalizeToLinalgUtils.cpp
index a19d702..5ace28d 100644
--- a/compiler/plugins/input/StableHLO/stablehlo-iree/Conversion/LegalizeToLinalgUtils.cpp
+++ b/compiler/plugins/input/StableHLO/stablehlo-iree/Conversion/LegalizeToLinalgUtils.cpp
@@ -71,7 +71,7 @@
assert(reifiedShapes.size() == 1 && "Expected one reified result");
// Construct sizes for the required dimensions.
for (const auto &en : llvm::enumerate(resultType.getShape())) {
- if (en.value() != ShapedType::kDynamic)
+ if (!ShapedType::isDynamic(en.value()))
continue;
sizes.push_back(b.create<tensor::ExtractOp>(
loc, reifiedShapes[0],
diff --git a/compiler/plugins/input/StableHLO/stablehlo-iree/Conversion/Preprocessing/StableHLOToStableHLO.cpp b/compiler/plugins/input/StableHLO/stablehlo-iree/Conversion/Preprocessing/StableHLOToStableHLO.cpp
index 601ee88..53321b6 100644
--- a/compiler/plugins/input/StableHLO/stablehlo-iree/Conversion/Preprocessing/StableHLOToStableHLO.cpp
+++ b/compiler/plugins/input/StableHLO/stablehlo-iree/Conversion/Preprocessing/StableHLOToStableHLO.cpp
@@ -633,7 +633,7 @@
for (int i = 0, s = batchCount; i < s; i++) {
reassociationMap.front().push_back(rewriter.getAffineDimExpr(i));
bool isDynamic =
- valueTy.isDynamicDim(i) || batchSize == ShapedType::kDynamic;
+ valueTy.isDynamicDim(i) || ShapedType::isDynamic(batchSize);
batchSize =
isDynamic ? ShapedType::kDynamic : valueTy.getDimSize(i) * batchSize;
}
@@ -1589,7 +1589,7 @@
}
int64_t k = topVTy.getDimSize(1);
- if (k == ShapedType::kDynamic) {
+ if (ShapedType::isDynamic(k)) {
return rewriter.notifyMatchFailure(op, "dynamic top-k k value");
}
diff --git a/compiler/plugins/input/StableHLO/stablehlo-iree/Conversion/StableHLOToLinalg.cpp b/compiler/plugins/input/StableHLO/stablehlo-iree/Conversion/StableHLOToLinalg.cpp
index a6b128b..d26afec 100644
--- a/compiler/plugins/input/StableHLO/stablehlo-iree/Conversion/StableHLOToLinalg.cpp
+++ b/compiler/plugins/input/StableHLO/stablehlo-iree/Conversion/StableHLOToLinalg.cpp
@@ -109,7 +109,7 @@
auto dimIndPos = dimIndIt - lhsLoopVec.begin();
auto lhsShape =
llvm::dyn_cast<RankedTensorType>(lhs.getType()).getShape();
- if (lhsShape[dimIndPos] != ShapedType::kDynamic)
+ if (!ShapedType::isDynamic(lhsShape[dimIndPos]))
continue;
dimSize = b.create<tensor::DimOp>(loc, lhs, dimIndPos);
} else {
@@ -118,7 +118,7 @@
auto dimIndPos = dimIndIt - rhsLoopVec.begin();
auto rhsShape =
llvm::dyn_cast<RankedTensorType>(rhs.getType()).getShape();
- if (rhsShape[dimIndPos] != ShapedType::kDynamic)
+ if (!ShapedType::isDynamic(rhsShape[dimIndPos]))
continue;
dimSize = b.create<tensor::DimOp>(loc, rhs, dimIndPos);
}
@@ -1154,7 +1154,7 @@
if (resultType.isDynamicDim(idx))
continue;
for (auto targetDim : dims) {
- if (shape[targetDim] == ShapedType::kDynamic)
+ if (ShapedType::isDynamic(shape[targetDim]))
shape[targetDim] = 1;
}
}
diff --git a/compiler/plugins/input/StableHLO/stablehlo-iree/Conversion/StableHLOToLinalgDotProd.cpp b/compiler/plugins/input/StableHLO/stablehlo-iree/Conversion/StableHLOToLinalgDotProd.cpp
index 9d22b7b..a2bca09 100644
--- a/compiler/plugins/input/StableHLO/stablehlo-iree/Conversion/StableHLOToLinalgDotProd.cpp
+++ b/compiler/plugins/input/StableHLO/stablehlo-iree/Conversion/StableHLOToLinalgDotProd.cpp
@@ -32,7 +32,7 @@
ArrayRef<int64_t> rhsShape =
cast<ShapedType>(dotOp.getRhs().getType()).getShape();
auto shapeMatches = [](int64_t a, int64_t b) {
- return a == ShapedType::kDynamic || b == ShapedType::kDynamic || a == b;
+ return ShapedType::isDynamic(a) || ShapedType::isDynamic(b) || a == b;
};
if (lhsShape.size() == 1 && rhsShape.size() == 1 &&
shapeMatches(lhsShape[0], rhsShape[0])) {
diff --git a/compiler/plugins/input/StableHLO/stablehlo-iree/Conversion/StableHLOToLinalgExt.cpp b/compiler/plugins/input/StableHLO/stablehlo-iree/Conversion/StableHLOToLinalgExt.cpp
index 2ab7ce5..e924809 100644
--- a/compiler/plugins/input/StableHLO/stablehlo-iree/Conversion/StableHLOToLinalgExt.cpp
+++ b/compiler/plugins/input/StableHLO/stablehlo-iree/Conversion/StableHLOToLinalgExt.cpp
@@ -554,7 +554,7 @@
if (i == reduceAxis)
continue;
initDims.push_back(input0Ty.getDimSize(i));
- if (initDims.back() == ShapedType::kDynamic) {
+ if (ShapedType::isDynamic(initDims.back())) {
initDynDims.push_back(
rewriter.createOrFold<tensor::DimOp>(op.getLoc(), input0, i));
}
diff --git a/compiler/plugins/input/StableHLO/stablehlo-iree/Conversion/StableHLOToLinalgReduce.cpp b/compiler/plugins/input/StableHLO/stablehlo-iree/Conversion/StableHLOToLinalgReduce.cpp
index c06cb46..a579fe7 100644
--- a/compiler/plugins/input/StableHLO/stablehlo-iree/Conversion/StableHLOToLinalgReduce.cpp
+++ b/compiler/plugins/input/StableHLO/stablehlo-iree/Conversion/StableHLOToLinalgReduce.cpp
@@ -629,7 +629,7 @@
SmallVector<Value> resultDynamicDims;
for (const auto &en : llvm::enumerate(resultType.getShape())) {
- if (en.value() != ShapedType::kDynamic)
+ if (!ShapedType::isDynamic(en.value()))
continue;
Value dimSize = rewriter.create<tensor::DimOp>(loc, input, en.index());
if (en.index() == 0 || static_cast<int64_t>(en.index()) == rank - 1) {
diff --git a/compiler/plugins/input/TOSA/tosa-iree/InputConversion/TosaToLinalgExt.cpp b/compiler/plugins/input/TOSA/tosa-iree/InputConversion/TosaToLinalgExt.cpp
index affa17e..01a2536 100644
--- a/compiler/plugins/input/TOSA/tosa-iree/InputConversion/TosaToLinalgExt.cpp
+++ b/compiler/plugins/input/TOSA/tosa-iree/InputConversion/TosaToLinalgExt.cpp
@@ -122,8 +122,8 @@
int64_t batch = valueTy.getShape().front();
int64_t rows = collapseShape.front();
- bool batchDyn = batch == ShapedType::kDynamic;
- bool rowsDyn = rows == ShapedType::kDynamic;
+ bool batchDyn = ShapedType::isDynamic(batch);
+ bool rowsDyn = ShapedType::isDynamic(rows);
collapseShape[0] =
(batchDyn || rowsDyn) ? ShapedType::kDynamic : batch * rows;
diff --git a/compiler/src/iree/compiler/Codegen/Common/CPU/CPUMaterializeEncodingPass.cpp b/compiler/src/iree/compiler/Codegen/Common/CPU/CPUMaterializeEncodingPass.cpp
index 92dbb2f..7f96a75 100644
--- a/compiler/src/iree/compiler/Codegen/Common/CPU/CPUMaterializeEncodingPass.cpp
+++ b/compiler/src/iree/compiler/Codegen/Common/CPU/CPUMaterializeEncodingPass.cpp
@@ -292,11 +292,11 @@
// how to incorporate the handling of kDynamic in the cost-model evaluation
// below to decide when to prefer a dynamic vs a static tile shape.
for (auto tile : enumeratedTiles) {
- if (tile.M == ShapedType::kDynamic || tile.N == ShapedType::kDynamic ||
- tile.K == ShapedType::kDynamic) {
+ if (ShapedType::isDynamic(tile.M) || ShapedType::isDynamic(tile.N) ||
+ ShapedType::isDynamic(tile.K)) {
assert(enumeratedTiles.size() == 1);
- assert(tile.M == ShapedType::kDynamic && tile.N == ShapedType::kDynamic &&
- tile.K == ShapedType::kDynamic);
+ assert(ShapedType::isDynamic(tile.M) && ShapedType::isDynamic(tile.N) &&
+ ShapedType::isDynamic(tile.K));
return tile;
}
}
@@ -493,7 +493,7 @@
return failure();
}
for (unsigned i = 0; i < info->innerTileSizes.size(); ++i) {
- if (info->innerTileSizes[i] == ShapedType::kDynamic) {
+ if (ShapedType::isDynamic(info->innerTileSizes[i])) {
result->innerTileSizes[i] = ShapedType::kDynamic;
} else {
result->innerTileSizes[i] =
diff --git a/compiler/src/iree/compiler/Codegen/Common/MaterializeEncodingIntoPackUnPack.cpp b/compiler/src/iree/compiler/Codegen/Common/MaterializeEncodingIntoPackUnPack.cpp
index 15ac207..d460feb 100644
--- a/compiler/src/iree/compiler/Codegen/Common/MaterializeEncodingIntoPackUnPack.cpp
+++ b/compiler/src/iree/compiler/Codegen/Common/MaterializeEncodingIntoPackUnPack.cpp
@@ -70,7 +70,7 @@
SmallVector<OpFoldResult> result(staticTileSizes.size());
for (size_t i = 0; i < result.size(); ++i) {
- if (staticTileSizes[i] == ShapedType::kDynamic) {
+ if (ShapedType::isDynamic(staticTileSizes[i])) {
result[i] = innerTileSizeValues[i];
} else if (tensorType.isDynamicDim(i)) {
result[i] =
diff --git a/compiler/src/iree/compiler/Codegen/Common/PadDynamicAlloc.cpp b/compiler/src/iree/compiler/Codegen/Common/PadDynamicAlloc.cpp
index 5b45441..31bb2f3 100644
--- a/compiler/src/iree/compiler/Codegen/Common/PadDynamicAlloc.cpp
+++ b/compiler/src/iree/compiler/Codegen/Common/PadDynamicAlloc.cpp
@@ -42,7 +42,7 @@
SmallVector<OpFoldResult> sizes;
size_t dynamicDimIdx = 0;
for (int64_t &dimSize : shape) {
- if (dimSize != ShapedType::kDynamic) {
+ if (!ShapedType::isDynamic(dimSize)) {
sizes.push_back(rewriter.getIndexAttr(dimSize));
continue;
}
diff --git a/compiler/src/iree/compiler/Codegen/Common/TileAndDistributeToWorkgroupsPass.cpp b/compiler/src/iree/compiler/Codegen/Common/TileAndDistributeToWorkgroupsPass.cpp
index 380e207..f8d2486 100644
--- a/compiler/src/iree/compiler/Codegen/Common/TileAndDistributeToWorkgroupsPass.cpp
+++ b/compiler/src/iree/compiler/Codegen/Common/TileAndDistributeToWorkgroupsPass.cpp
@@ -157,7 +157,7 @@
int64_t staticLoopRange = std::get<1>(p);
OpFoldResult workload =
- (staticLoopRange == ShapedType::kDynamic
+ (ShapedType::isDynamic(staticLoopRange)
? OpFoldResult(std::get<0>(p))
: OpFoldResult(rewriter.getIndexAttr(staticLoopRange)));
AffineExpr s0, s1;
diff --git a/compiler/src/iree/compiler/Codegen/Interfaces/BufferizationInterfaces.cpp b/compiler/src/iree/compiler/Codegen/Interfaces/BufferizationInterfaces.cpp
index 077b369..6612229 100644
--- a/compiler/src/iree/compiler/Codegen/Interfaces/BufferizationInterfaces.cpp
+++ b/compiler/src/iree/compiler/Codegen/Interfaces/BufferizationInterfaces.cpp
@@ -56,7 +56,7 @@
SmallVector<int64_t> strides(shape.size(), ShapedType::kDynamic);
strides.back() = 1;
for (int i = strides.size() - 1; i > 0; --i) {
- if (shape[i] == ShapedType::kDynamic) {
+ if (ShapedType::isDynamic(shape[i])) {
break;
}
strides[i - 1] = strides[i] * shape[i];
diff --git a/compiler/src/iree/compiler/Codegen/LLVMCPU/DispatchABI.cpp b/compiler/src/iree/compiler/Codegen/LLVMCPU/DispatchABI.cpp
index 2f17657..79d8507 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMCPU/DispatchABI.cpp
+++ b/compiler/src/iree/compiler/Codegen/LLVMCPU/DispatchABI.cpp
@@ -815,7 +815,7 @@
desc.setConstantStride(builder, loc, rank - 1, 1);
OpFoldResult currentStride = builder.getIndexAttr(1);
for (int i = rank - 1; i > 0; --i) {
- if (strides[i - 1] == ShapedType::kDynamic) {
+ if (ShapedType::isDynamic(strides[i - 1])) {
auto dim = desc.size(builder, loc, i);
Value currentStrideVal;
if (std::optional<int64_t> currentStrideInt =
diff --git a/compiler/src/iree/compiler/Codegen/LLVMCPU/KernelDispatch.cpp b/compiler/src/iree/compiler/Codegen/LLVMCPU/KernelDispatch.cpp
index 253f89d..70bc1fb 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMCPU/KernelDispatch.cpp
+++ b/compiler/src/iree/compiler/Codegen/LLVMCPU/KernelDispatch.cpp
@@ -387,7 +387,7 @@
while (numWorkgroups > numWorkgroupsLimit && currDim > 0) {
unsigned index = currDim - 1;
int64_t currSize = distributedTileSizes[index];
- if (workload[index] == ShapedType::kDynamic ||
+ if (ShapedType::isDynamic(workload[index]) ||
(maxTileSizes && currSize >= maxTileSizes.value()[index]) ||
currSize >= workload[index]) {
currDim--;
@@ -515,7 +515,7 @@
static int64_t getMaxDistributionTileSize(int64_t lb, int64_t ub,
int64_t maxSize, int64_t vectorSize,
bool allowIncompleteTile = false) {
- if (ub == ShapedType::kDynamic || lb == ShapedType::kDynamic) {
+ if (ShapedType::isDynamic(ub) || ShapedType::isDynamic(lb)) {
return maxSize;
}
int64_t numIters = ub - lb;
@@ -561,7 +561,7 @@
int64_t vectorSize,
bool allowIncompleteTile = false,
bool enforcePowerOfTwo = false) {
- if (ub == ShapedType::kDynamic || lb == ShapedType::kDynamic) {
+ if (ShapedType::isDynamic(ub) || ShapedType::isDynamic(lb)) {
return roundUpToPow2(maxSize, enforcePowerOfTwo);
}
int64_t numIters = ub - lb;
@@ -1640,7 +1640,7 @@
auto shape = genericOp.getStaticLoopRanges();
int64_t numWorkload = 1;
for (const auto &[index, size] : llvm::enumerate(shape)) {
- if (size == ShapedType::kDynamic) {
+ if (ShapedType::isDynamic(size)) {
numWorkload = ShapedType::kDynamic;
break;
}
@@ -1650,8 +1650,8 @@
numWorkload < kMinimumWorkload && currDim < numLoops;) {
int64_t currSize = distTileSizes[currDim];
if (currSize == shape[currDim] || currSize == 0 ||
- shape[currDim] == ShapedType::kDynamic ||
- numWorkload == ShapedType::kDynamic) {
+ ShapedType::isDynamic(shape[currDim]) ||
+ ShapedType::isDynamic(numWorkload)) {
currDim++;
continue;
}
@@ -1911,7 +1911,7 @@
unsigned typeWidthInBytes = IREE::Util::getRoundedElementByteWidth(
padOp.getResultType().getElementType());
int64_t typeVectorSize = getVectorSize(entryPointFn, typeWidthInBytes);
- vectorTileSizes.back() = (ubs.back() == ShapedType::kDynamic
+ vectorTileSizes.back() = (ShapedType::isDynamic(ubs.back())
? 1
: std::min(typeVectorSize, ubs.back()));
minTileSizes.back() = vectorTileSizes.back();
diff --git a/compiler/src/iree/compiler/Codegen/LLVMGPU/ConvertToLLVM.cpp b/compiler/src/iree/compiler/Codegen/LLVMGPU/ConvertToLLVM.cpp
index b347fd9..9ef2084 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMGPU/ConvertToLLVM.cpp
+++ b/compiler/src/iree/compiler/Codegen/LLVMGPU/ConvertToLLVM.cpp
@@ -138,7 +138,7 @@
return failure();
ArrayRef<int64_t> shape = allocOp.getType().getShape();
if (llvm::any_of(shape,
- [](int64_t dim) { return dim == ShapedType::kDynamic; })) {
+ [](int64_t dim) { return ShapedType::isDynamic(dim); })) {
return failure();
}
@@ -428,7 +428,7 @@
desc.setConstantStride(rewriter, loc, rank - 1, 1);
OpFoldResult currentStride = rewriter.getIndexAttr(1);
for (int i = rank - 1; i > 0; --i) {
- if (strides[i - 1] == ShapedType::kDynamic) {
+ if (ShapedType::isDynamic(strides[i - 1])) {
auto dim = desc.size(rewriter, loc, i);
Value currentStrideVal;
if (std::optional<int64_t> currentStrideInt =
diff --git a/compiler/src/iree/compiler/Codegen/LLVMGPU/KernelConfig.cpp b/compiler/src/iree/compiler/Codegen/LLVMGPU/KernelConfig.cpp
index dcb62ec..ef26385 100644
--- a/compiler/src/iree/compiler/Codegen/LLVMGPU/KernelConfig.cpp
+++ b/compiler/src/iree/compiler/Codegen/LLVMGPU/KernelConfig.cpp
@@ -384,9 +384,9 @@
}
}
}
- bool isStaticSize = sizeM != ShapedType::kDynamic &&
- sizeN != ShapedType::kDynamic &&
- sizeK != ShapedType::kDynamic;
+ bool isStaticSize = !ShapedType::isDynamic(sizeM) &&
+ !ShapedType::isDynamic(sizeN) &&
+ !ShapedType::isDynamic(sizeK);
if (isStaticSize) {
/// Try tensorcore config first.
if (supportsTensorCore(entryPoint, op, targetInfo)) {
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/SPIRVVectorizeLoadStore.cpp b/compiler/src/iree/compiler/Codegen/SPIRV/SPIRVVectorizeLoadStore.cpp
index adae172..59cf41d 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/SPIRVVectorizeLoadStore.cpp
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/SPIRVVectorizeLoadStore.cpp
@@ -566,13 +566,13 @@
MemRefLayoutAttrInterface layout = {};
if (auto stridedLayout = dyn_cast<StridedLayoutAttr>(type.getLayout())) {
auto offset = stridedLayout.getOffset();
- if (offset != ShapedType::kDynamic) {
+ if (!ShapedType::isDynamic(offset)) {
offset = offset / ratio;
}
auto strides = llvm::to_vector(stridedLayout.getStrides());
for (auto [index, stride] : llvm::enumerate(llvm::drop_end(strides))) {
- if (index == strides.size() - 1 || stride == ShapedType::kDynamic) {
+ if (index == strides.size() - 1 || ShapedType::isDynamic(stride)) {
continue;
}
strides[index] = stride / ratio;
diff --git a/compiler/src/iree/compiler/Codegen/VMVX/LowerLinalgMicrokernels.cpp b/compiler/src/iree/compiler/Codegen/VMVX/LowerLinalgMicrokernels.cpp
index be67bfb..9e4f332 100644
--- a/compiler/src/iree/compiler/Codegen/VMVX/LowerLinalgMicrokernels.cpp
+++ b/compiler/src/iree/compiler/Codegen/VMVX/LowerLinalgMicrokernels.cpp
@@ -103,11 +103,11 @@
}
int64_t product_of_inner_sizes = 1;
for (int i = rank - 1; i >= 2; --i) {
- if (sizes[i] == ShapedType::kDynamic) {
+ if (ShapedType::isDynamic(sizes[i])) {
// TODO(#11633): Dynamic dimensions are currently assumed to be row-major.
product_of_inner_sizes = ShapedType::kDynamic;
} else {
- if (product_of_inner_sizes != ShapedType::kDynamic) {
+ if (!ShapedType::isDynamic(product_of_inner_sizes)) {
product_of_inner_sizes *= sizes[i];
}
}
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/Patterns.cpp b/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/Patterns.cpp
index 7c9189f..4bdac70 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/Patterns.cpp
+++ b/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/Patterns.cpp
@@ -256,7 +256,7 @@
SmallVector<Value> outputDynamicShapes;
for (auto [resultShape, outputShp] : llvm::zip_equal(
reshapeOp.getResultType().getShape(), outputShape[0])) {
- if (resultShape != ShapedType::kDynamic)
+ if (!ShapedType::isDynamic(resultShape))
continue;
outputDynamicShapes.push_back(getValueOrCreateConstantIndexOp(
rewriter, reshapeOp.getLoc(), outputShp));
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/Utils.cpp b/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/Utils.cpp
index 55926c3..a1567a5 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/Utils.cpp
+++ b/compiler/src/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/Utils.cpp
@@ -72,17 +72,17 @@
// cases, the dynamic offset/size value is obtained by computing from
// another tensor which lives on the device. To avoid host-round tripping
// enforce that offset/size is also static.
- if (staticSize == ShapedType::kDynamic)
+ if (ShapedType::isDynamic(staticSize))
return false;
- if (staticOffset == ShapedType::kDynamic)
+ if (ShapedType::isDynamic(staticOffset))
return false;
if (fullSlices == false) {
if (staticSize != 1)
return false;
} else {
- if (!(staticOffset == 0 && staticSize != ShapedType::kDynamic &&
- baseShape[dim - 1] != ShapedType::kDynamic &&
+ if (!(staticOffset == 0 && !ShapedType::isDynamic(staticSize) &&
+ !ShapedType::isDynamic(baseShape[dim - 1]) &&
staticSize == baseShape[dim - 1])) {
fullSlices = false;
}
diff --git a/compiler/src/iree/compiler/Dialect/Flow/IR/FlowOps.cpp b/compiler/src/iree/compiler/Dialect/Flow/IR/FlowOps.cpp
index 32d51f3..78a242e 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/IR/FlowOps.cpp
+++ b/compiler/src/iree/compiler/Dialect/Flow/IR/FlowOps.cpp
@@ -651,7 +651,7 @@
auto tensorType =
llvm::cast<IREE::Flow::DispatchTensorType>(getResult().getType());
for (int64_t dim : tensorType.getShape()) {
- if (dim == ShapedType::kDynamic) {
+ if (ShapedType::isDynamic(dim)) {
shape.push_back(getDynamicDims()[dynamicIdx++]);
} else {
shape.push_back(b.getIndexAttr(dim));
@@ -801,7 +801,7 @@
// Result size matches the source size (no slicing).
unsigned dynamicIdx = 0;
for (int64_t dim : getType().getShape()) {
- if (dim == ShapedType::kDynamic) {
+ if (ShapedType::isDynamic(dim)) {
shape.push_back(getSourceDims()[dynamicIdx++]);
} else {
shape.push_back(b.getIndexAttr(dim));
@@ -1577,7 +1577,7 @@
unsigned dynamicIdx = 0;
auto tensorType = llvm::cast<RankedTensorType>(getResult().getType());
for (int64_t dim : tensorType.getShape()) {
- if (dim == ShapedType::kDynamic) {
+ if (ShapedType::isDynamic(dim)) {
shape.push_back(getDynamicDims()[dynamicIdx++]);
} else {
shape.push_back(b.getIndexAttr(dim));
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Transforms/AnnotateDispatches.cpp b/compiler/src/iree/compiler/Dialect/Flow/Transforms/AnnotateDispatches.cpp
index 301f899..983d4a9 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Transforms/AnnotateDispatches.cpp
+++ b/compiler/src/iree/compiler/Dialect/Flow/Transforms/AnnotateDispatches.cpp
@@ -33,7 +33,7 @@
static int64_t costOfDomain(ArrayRef<int64_t> domain) {
int64_t product = 1;
for (int64_t size : domain) {
- if (size == mlir::ShapedType::kDynamic)
+ if (ShapedType::isDynamic(size))
return INT64_MAX;
product *= size;
}
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Transforms/CollapseDimensions.cpp b/compiler/src/iree/compiler/Dialect/Flow/Transforms/CollapseDimensions.cpp
index c7687a8..da952e6 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Transforms/CollapseDimensions.cpp
+++ b/compiler/src/iree/compiler/Dialect/Flow/Transforms/CollapseDimensions.cpp
@@ -325,7 +325,7 @@
int64_t staticCollapsedShape = 1;
SmallVector<OpFoldResult> dynamicCollapsedDims;
for (auto collapsedDim : reassociation[index]) {
- if (expandedShape[collapsedDim] == ShapedType::kDynamic) {
+ if (ShapedType::isDynamic(expandedShape[collapsedDim])) {
dynamicCollapsedDims.push_back(dynamicDimsList.front());
dynamicDimsList = dynamicDimsList.drop_front();
} else {
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Transforms/ConvertLinalgTensorOps.cpp b/compiler/src/iree/compiler/Dialect/Flow/Transforms/ConvertLinalgTensorOps.cpp
index 7eaef10..943d40a 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Transforms/ConvertLinalgTensorOps.cpp
+++ b/compiler/src/iree/compiler/Dialect/Flow/Transforms/ConvertLinalgTensorOps.cpp
@@ -48,7 +48,7 @@
SmallVector<Value> outputDynamicShapes;
for (auto shape : llvm::zip_equal(reshapeOp.getResultType().getShape(),
outputShape[0])) {
- if (std::get<0>(shape) != ShapedType::kDynamic)
+    if (!ShapedType::isDynamic(std::get<0>(shape)))
continue;
outputDynamicShapes.push_back(std::get<1>(shape));
}
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Transforms/ConvertRegionToWorkgroups.cpp b/compiler/src/iree/compiler/Dialect/Flow/Transforms/ConvertRegionToWorkgroups.cpp
index 398b08a..0518f4c 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Transforms/ConvertRegionToWorkgroups.cpp
+++ b/compiler/src/iree/compiler/Dialect/Flow/Transforms/ConvertRegionToWorkgroups.cpp
@@ -37,7 +37,7 @@
}
for (auto dim : llvm::enumerate(tensorType.getShape())) {
- if (dim.value() != ShapedType::kDynamic)
+ if (!ShapedType::isDynamic(dim.value()))
continue;
argumentDims.push_back(
b.createOrFold<tensor::DimOp>(loc, tensor, dim.index()));
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Transforms/RaiseSpecialOps.cpp b/compiler/src/iree/compiler/Dialect/Flow/Transforms/RaiseSpecialOps.cpp
index bbcb586..7b3df6c 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Transforms/RaiseSpecialOps.cpp
+++ b/compiler/src/iree/compiler/Dialect/Flow/Transforms/RaiseSpecialOps.cpp
@@ -186,8 +186,8 @@
// on the `source` tensor. This is not same as raising the whole
// operation to an extract_slice, as there can be permutations and
// projections involved.
- if (sourceShape[idx] == ShapedType::kDynamic ||
- resultShape[indexOp.getDim()] == ShapedType::kDynamic ||
+ if (ShapedType::isDynamic(sourceShape[idx]) ||
+ ShapedType::isDynamic(resultShape[indexOp.getDim()]) ||
sourceShape[idx] != resultShape[indexOp.getDim()]) {
return failure();
}
@@ -286,7 +286,7 @@
expr == outputIndexingMap.getResult(currOutDim)) {
offsets.push_back(zero);
// Get the dim size from the output tensor.
- if (outShape[currOutDim] == ShapedType::kDynamic) {
+ if (ShapedType::isDynamic(outShape[currOutDim])) {
auto dim = rewriter.create<tensor::DimOp>(linalgOp.getLoc(), output,
currOutDim);
sizes.push_back(dim.getResult());
@@ -538,8 +538,8 @@
/// divisible by 2.
int64_t sliceSize = targetShape.back();
targetShape[targetShape.size() - 1] = 2;
- targetShape.push_back(sliceSize == ShapedType::kDynamic ? sliceSize
- : sliceSize / 2);
+ targetShape.push_back(ShapedType::isDynamic(sliceSize) ? sliceSize
+ : sliceSize / 2);
Type expandedType =
RankedTensorType::get(targetShape, sourceType.getElementType());
Value expanded = rewriter.create<tensor::ExpandShapeOp>(loc, expandedType,
diff --git a/compiler/src/iree/compiler/Dialect/Util/Analysis/Constant/ConstExpr.cpp b/compiler/src/iree/compiler/Dialect/Util/Analysis/Constant/ConstExpr.cpp
index 012ad58..44bfe71 100644
--- a/compiler/src/iree/compiler/Dialect/Util/Analysis/Constant/ConstExpr.cpp
+++ b/compiler/src/iree/compiler/Dialect/Util/Analysis/Constant/ConstExpr.cpp
@@ -330,7 +330,7 @@
for (int64_t dim : type.getShape()) {
// Conservatively treat dynamic values as 1, to find a lower bound on
// input size.
- if (dim != ShapedType::kDynamic) {
+ if (!ShapedType::isDynamic(dim)) {
elementCount *= dim;
}
}
@@ -343,7 +343,7 @@
if (auto type = dyn_cast<ShapedType>(info->constValue.getType())) {
int64_t elementCount = 1;
for (int64_t dim : type.getShape()) {
- if (dim == ShapedType::kDynamic) {
+ if (ShapedType::isDynamic(dim)) {
// Dynamic values can lead to an unbounded increase in size, treat this
// as a significant increase.
return true;
diff --git a/compiler/src/iree/compiler/Dialect/Util/IR/UtilDialect.cpp b/compiler/src/iree/compiler/Dialect/Util/IR/UtilDialect.cpp
index 4911dee..e7f9be2 100644
--- a/compiler/src/iree/compiler/Dialect/Util/IR/UtilDialect.cpp
+++ b/compiler/src/iree/compiler/Dialect/Util/IR/UtilDialect.cpp
@@ -121,7 +121,7 @@
// If it's a static dim then just fold to that.
auto type = llvm::cast<ShapedType>(op.getSource().getType());
int64_t staticDim = type.getDimSize(index.getZExtValue());
- if (staticDim != ShapedType::kDynamic) {
+ if (!ShapedType::isDynamic(staticDim)) {
rewriter.replaceOpWithNewOp<arith::ConstantIndexOp>(op, staticDim);
return success();
}
diff --git a/compiler/src/iree/compiler/Dialect/Util/IR/UtilTypes.cpp b/compiler/src/iree/compiler/Dialect/Util/IR/UtilTypes.cpp
index f40ae38..4d320c2 100644
--- a/compiler/src/iree/compiler/Dialect/Util/IR/UtilTypes.cpp
+++ b/compiler/src/iree/compiler/Dialect/Util/IR/UtilTypes.cpp
@@ -741,7 +741,7 @@
unsigned dynamicIdx = 0;
for (unsigned i = 0; i < type.getRank(); ++i) {
int64_t dim = type.getDimSize(i);
- if (dim == ShapedType::kDynamic) {
+ if (ShapedType::isDynamic(dim)) {
dims.push_back(dynamicDims[dynamicIdx++]);
} else {
dims.push_back(builder.create<arith::ConstantIndexOp>(loc, dim));
diff --git a/compiler/src/iree/compiler/GlobalOptimization/Convert1X1FilterConv2DToMatmul.cpp b/compiler/src/iree/compiler/GlobalOptimization/Convert1X1FilterConv2DToMatmul.cpp
index 8a4a09c..1d7b3a0 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/Convert1X1FilterConv2DToMatmul.cpp
+++ b/compiler/src/iree/compiler/GlobalOptimization/Convert1X1FilterConv2DToMatmul.cpp
@@ -53,8 +53,8 @@
const int owIndex = isNHWC ? 2 : 3;
const int ocIndex = isNHWC ? 3 : 1;
- bool isInputHWDynamic = inputShape[ohIndex] == ShapedType::kDynamic &&
- inputShape[owIndex] == ShapedType::kDynamic;
+ bool isInputHWDynamic = ShapedType::isDynamic(inputShape[ohIndex]) &&
+ ShapedType::isDynamic(inputShape[owIndex]);
// We cannot merge the width and height if they are both dynamic as we
// cannot expand them back to their dynamic values.
@@ -78,7 +78,7 @@
return failure();
auto combineDims = [](int64_t a, int64_t b) {
- if (a == ShapedType::kDynamic || b == ShapedType::kDynamic)
+ if (ShapedType::isDynamic(a) || ShapedType::isDynamic(b))
return ShapedType::kDynamic;
return a * b;
};
diff --git a/compiler/src/iree/compiler/GlobalOptimization/ExpandTensorShapes.cpp b/compiler/src/iree/compiler/GlobalOptimization/ExpandTensorShapes.cpp
index d236082..ebdc7b2 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/ExpandTensorShapes.cpp
+++ b/compiler/src/iree/compiler/GlobalOptimization/ExpandTensorShapes.cpp
@@ -81,7 +81,7 @@
auto tensorType = llvm::cast<RankedTensorType>(global.tensorOp.getType());
for (auto it : llvm::enumerate(tensorType.getShape())) {
- if (it.value() == ShapedType::kDynamic) {
+ if (ShapedType::isDynamic(it.value())) {
auto dimName =
(global.tensorOp.getName() + "__d" + std::to_string(it.index()))
.str();
diff --git a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtOps.cpp b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtOps.cpp
index 02bda23..f731cc7 100644
--- a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtOps.cpp
+++ b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/IR/LinalgExtOps.cpp
@@ -114,8 +114,8 @@
llvm::zip(sourceShape, limitShape), [](std::tuple<int64_t, int64_t> it) {
int64_t sourceExtent = std::get<0>(it);
int64_t limit = std::get<1>(it);
- return sourceExtent == ShapedType::kDynamic ||
- limit == ShapedType::kDynamic || sourceExtent <= limit;
+ return ShapedType::isDynamic(sourceExtent) ||
+ ShapedType::isDynamic(limit) || sourceExtent <= limit;
});
}
@@ -142,7 +142,7 @@
"expected indices to be of rank 2 of i32 element type");
}
auto indexDepth = getIndexDepth();
- if (indexDepth == ShapedType::kDynamic) {
+ if (ShapedType::isDynamic(indexDepth)) {
return op->emitOpError("expected index depth is static");
}
@@ -612,7 +612,7 @@
// After tiling, it could be dynamic shape. (Because
// subview/subtensor does not inference the type correctly
// on (1 << x)) cases).
- if (length == ShapedType::kDynamic)
+ if (ShapedType::isDynamic(length))
return success();
if (length & (length - 1)) {
return op->emitOpError("only powers of 2 are handled currently");
@@ -649,7 +649,7 @@
Value one = builder.create<arith::ConstantIndexOp>(loc, 1);
for (auto en : llvm::enumerate(getOperandShape().drop_back())) {
Value size;
- if (en.value() == ShapedType::kDynamic) {
+ if (ShapedType::isDynamic(en.value())) {
size = getDimValue(builder, loc, getReal(), en.index());
} else {
size = builder.create<arith::ConstantIndexOp>(loc, en.value());
@@ -886,8 +886,8 @@
}
if (llvm::any_of(llvm::zip(expectedAccumulatorShape, accumulatorShape),
[](std::tuple<int64_t, int64_t> s) {
- return std::get<0>(s) != ShapedType::kDynamic &&
- std::get<1>(s) != ShapedType::kDynamic &&
+ return !ShapedType::isDynamic(std::get<0>(s)) &&
+ !ShapedType::isDynamic(std::get<1>(s)) &&
std::get<0>(s) != std::get<1>(s);
})) {
return op->emitOpError("incompatible input/accumulator shapes");
@@ -901,8 +901,8 @@
}
if (llvm::any_of(llvm::zip(inputShapes, outputShapes),
[](std::tuple<int64_t, int64_t> s) {
- return std::get<0>(s) != ShapedType::kDynamic &&
- std::get<1>(s) != ShapedType::kDynamic &&
+ return !ShapedType::isDynamic(std::get<0>(s)) &&
+ !ShapedType::isDynamic(std::get<1>(s)) &&
std::get<0>(s) != std::get<1>(s);
})) {
return op->emitOpError("incompatible input/output shapes");
@@ -1108,8 +1108,8 @@
}
if (llvm::any_of(llvm::zip(inputShapes, outputShapes),
[](std::tuple<int64_t, int64_t> s) {
- return std::get<0>(s) != ShapedType::kDynamic &&
- std::get<1>(s) != ShapedType::kDynamic &&
+ return !ShapedType::isDynamic(std::get<0>(s)) &&
+ !ShapedType::isDynamic(std::get<1>(s)) &&
std::get<0>(s) != std::get<1>(s);
})) {
return op->emitOpError("incompatible input/output shapes");
@@ -1509,7 +1509,7 @@
DenseMap<int64_t, OpFoldResult> const &dimAndTileMapping) {
int64_t rank = inputShape.size();
for (int64_t dim = 0; dim < rank; dim++) {
- if (inputShape[dim] == ShapedType::kDynamic)
+ if (ShapedType::isDynamic(inputShape[dim]))
continue;
auto it = dimAndTileMapping.find(dim);
if (it != dimAndTileMapping.end()) {
@@ -1656,9 +1656,9 @@
if (!constTileSize) {
// If specified tile size is dynamic, output shape should
// be dynamic too.
- return shape == ShapedType::kDynamic;
+ return ShapedType::isDynamic(shape);
} else {
- if (shape == ShapedType::kDynamic) {
+ if (ShapedType::isDynamic(shape)) {
// For the shape being dynamic when tile size is
// specified, return true. In canonical form a constant
// tile size should lead to constant shape of the tiled