Avoid needless `isa` checks. NFC. (#21885)
Do not check a type with `isa` only to repeat the same lookup in `cast`; prefer `dyn_cast`, and drop asserts that merely duplicate the check `cast` already performs in debug builds. See
https://llvm.org/docs/ProgrammersManual.html#the-isa-cast-and-dyn-cast-templates.
Also fix nits in surrounding code.
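
A minimal before/after sketch of the pattern, modeled on the SPIRVVectorizeLoadStore hunk below (the getBitWidthBefore/getBitWidthAfter helpers are hypothetical, for illustration only):

    #include "mlir/IR/BuiltinTypes.h"

    // Before: isa<> followed by cast<> performs the same type check twice.
    static unsigned getBitWidthBefore(mlir::Type type) {
      if (type.isIntOrFloat())
        return type.getIntOrFloatBitWidth();
      if (llvm::isa<mlir::VectorType>(type)) {
        // cast<> repeats the lookup isa<> just did (and asserts in debug builds).
        auto vecType = llvm::cast<mlir::VectorType>(type);
        return vecType.getElementType().getIntOrFloatBitWidth() *
               vecType.getNumElements();
      }
      return 0;
    }

    // After: a single dyn_cast<> both checks the type and yields the typed value.
    static unsigned getBitWidthAfter(mlir::Type type) {
      if (type.isIntOrFloat())
        return type.getIntOrFloatBitWidth();
      if (auto vecType = llvm::dyn_cast<mlir::VectorType>(type)) {
        return vecType.getElementType().getIntOrFloatBitWidth() *
               vecType.getNumElements();
      }
      return 0;
    }
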
diff --git a/compiler/src/iree/compiler/API/Internal/IREEGPUDialectCAPI.cpp b/compiler/src/iree/compiler/API/Internal/IREEGPUDialectCAPI.cpp
index 63c392e..9ad8de8 100644
--- a/compiler/src/iree/compiler/API/Internal/IREEGPUDialectCAPI.cpp
+++ b/compiler/src/iree/compiler/API/Internal/IREEGPUDialectCAPI.cpp
@@ -176,8 +176,6 @@
}
mma_intrinsic_enum_t ireeGPUMMAIntrinsicAttrGetValue(MlirAttribute attr) {
- assert(ireeAttributeIsAGPUMMAIntrinsicAttr(attr) &&
- "attr is not a GPUMMAIntrinsicAttr");
return static_cast<mma_intrinsic_enum_t>(
llvm::cast<mlir::iree_compiler::IREE::GPU::MMAIntrinsicAttr>(unwrap(attr))
.getValue());
@@ -185,8 +183,6 @@
mma_intrinsic_enum_t
ireeGPUVirtualMMAIntrinsicAttrGetValue(MlirAttribute attr) {
- assert(ireeAttributeIsAGPUVirtualMMAIntrinsicAttr(attr) &&
- "attr is not a GPUVirtualMMAIntrinsicAttr");
return static_cast<mma_intrinsic_enum_t>(
llvm::cast<mlir::iree_compiler::IREE::GPU::VirtualMMAIntrinsicAttr>(
unwrap(attr))
@@ -252,7 +248,6 @@
}
MlirAttribute ireeGPUMMAAttrGetVirtualMMAIntrinsic(MlirAttribute attr) {
- assert(ireeAttributeIsAGPUMMAAttr(attr) && "attr is not a MMAAttr");
auto mma = llvm::cast<mlir::iree_compiler::IREE::GPU::MMAAttr>(unwrap(attr));
llvm::SmallVector<mlir::iree_compiler::IREE::GPU::VirtualMMAIntrinsic>
virtualIntrinsics = mma.getVirtualIntrinsics();
@@ -278,7 +273,6 @@
MlirAttribute ireeGPULoweringConfigAttrGet(MlirContext mlirCtx,
MlirAttribute attributesDictionary) {
- assert(mlirAttributeIsADictionary(attributesDictionary));
auto attributes =
llvm::cast<mlir::DictionaryAttr>(unwrap(attributesDictionary));
mlir::MLIRContext *ctx = unwrap(mlirCtx);
@@ -320,7 +314,6 @@
ireeGPUSubgroupCountInfo
ireeGPULoweringConfigAttrGetSubgroupCount(MlirAttribute attr) {
- assert(ireeAttributeIsAGPULoweringConfigAttr(attr));
auto loweringConfigAttr =
llvm::cast<mlir::iree_compiler::IREE::GPU::LoweringConfigAttr>(
unwrap(attr));
@@ -346,7 +339,6 @@
}
MlirAttribute ireeGPULoweringConfigAttrGetMmaKind(MlirAttribute attr) {
- assert(ireeAttributeIsAGPULoweringConfigAttr(attr));
auto loweringConfigAttr =
llvm::cast<mlir::iree_compiler::IREE::GPU::LoweringConfigAttr>(
unwrap(attr));
@@ -396,7 +388,6 @@
ireeGPUTargetInfo
ireeHALExecutableTargetAttrGetGPUTargetInfo(MlirAttribute attr) {
- assert(!mlirAttributeIsNull(attr) && "attr cannot be null");
auto executableTargetAttr =
llvm::cast<mlir::iree_compiler::IREE::HAL::ExecutableTargetAttr>(
unwrap(attr));
diff --git a/compiler/src/iree/compiler/Codegen/Common/CPU/CPULowerToUKernels.cpp b/compiler/src/iree/compiler/Codegen/Common/CPU/CPULowerToUKernels.cpp
index f8a96e7..1e01c42 100644
--- a/compiler/src/iree/compiler/Codegen/Common/CPU/CPULowerToUKernels.cpp
+++ b/compiler/src/iree/compiler/Codegen/Common/CPU/CPULowerToUKernels.cpp
@@ -50,9 +50,10 @@
return std::nullopt;
}
Value castIn = castOp->getOperand(0);
- if (isa<BlockArgument>(castIn) &&
- cast<BlockArgument>(castIn).getArgNumber() != 0) {
- return std::nullopt;
+ if (auto blockArg = dyn_cast<BlockArgument>(castIn)) {
+ if (blockArg.getArgNumber() != 0) {
+ return std::nullopt;
+ }
}
return castOp;
}
diff --git a/compiler/src/iree/compiler/Codegen/Common/DecomposeConvolutionToLowerDimOps.cpp b/compiler/src/iree/compiler/Codegen/Common/DecomposeConvolutionToLowerDimOps.cpp
index 3211d86..f7dcced 100644
--- a/compiler/src/iree/compiler/Codegen/Common/DecomposeConvolutionToLowerDimOps.cpp
+++ b/compiler/src/iree/compiler/Codegen/Common/DecomposeConvolutionToLowerDimOps.cpp
@@ -60,9 +60,8 @@
// ATM only 2D depthwise HWC convs are supported.
// TODO: Add support for other convs
linalg::DepthwiseConv2DNhwcHwcOp convOp;
- for (auto op : computeOps) {
- if (isa<linalg::DepthwiseConv2DNhwcHwcOp>(op)) {
- convOp = cast<linalg::DepthwiseConv2DNhwcHwcOp>(op);
+ for (Operation *op : computeOps) {
+ if ((convOp = dyn_cast<linalg::DepthwiseConv2DNhwcHwcOp>(op))) {
break;
}
}
diff --git a/compiler/src/iree/compiler/Codegen/Common/VectorLayoutAnalysis.cpp b/compiler/src/iree/compiler/Codegen/Common/VectorLayoutAnalysis.cpp
index d051933..0e2e74b 100644
--- a/compiler/src/iree/compiler/Codegen/Common/VectorLayoutAnalysis.cpp
+++ b/compiler/src/iree/compiler/Codegen/Common/VectorLayoutAnalysis.cpp
@@ -35,8 +35,7 @@
explicit DistributionLayout(Value val) : AnalysisState(val) {}
TypedValue<VectorType> getValue() const {
- auto anchor = getAnchor();
- assert(isa<Value>(anchor) && "expected anchor to be a value");
+ LatticeAnchor anchor = getAnchor();
Value val = cast<Value>(anchor);
assert(isa<VectorType>(val.getType()) &&
"expected value to be of vector type");
@@ -239,7 +238,7 @@
// Resolve conflict by create an operation that takes the input the conflicted
// value and returns the resolved value.
Value input = opOperand.get();
- // Create a resolution operation. This conflict should be handeled later by
+ // Create a resolution operation. This conflict should be handled later by
// someone else, not this analysis.
Operation *resolveOp =
builder.create<IREE::VectorExt::ToLayoutOp>(input.getLoc(), input, rhs);
@@ -336,7 +335,7 @@
solver->enqueue({solver->getProgramPointAfter(definingOp), enforcement});
} else {
// TODO: This is not always correct. Ideally, we should enqueue all
- // predecessors of these block arguements.
+ // predecessors of these block arguments.
solver->enqueue(
{solver->getProgramPointAfter(value.getParentBlock()->getParentOp()),
enforcement});
@@ -390,10 +389,10 @@
return initializedLayouts[0];
}
-/// Hueristic to use to choose the best layout when enforcing the same layout
-/// to all operands. Current hueristic is to simply choose the first operand
+/// Heuristic to use to choose the best layout when enforcing the same layout
+/// to all operands. Current heuristic is to simply choose the first operand
/// which has a layout.
-/// TODO: Use a better hueristic.
+/// TODO: Use a better heuristic.
static DistributionLayout *
enforceSameLayoutHueristic(ArrayRef<DistributionLayout *> operands) {
DistributionLayout *chosenOperandLayout = nullptr;
@@ -756,7 +755,7 @@
// Ensure that there are no broadcasted unit dims as we do not know how to
// handle them as of now.
assert(broadcast.computeBroadcastedUnitDims().empty() &&
- "Streching in broadcasting not implemented yet.");
+ "Stretching in broadcasting not implemented yet.");
// The starting k dimensions of the result are the ones that need to be
// projected out.
diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/SPIRVVectorizeLoadStore.cpp b/compiler/src/iree/compiler/Codegen/SPIRV/SPIRVVectorizeLoadStore.cpp
index 71fba06..83d2e70 100644
--- a/compiler/src/iree/compiler/Codegen/SPIRV/SPIRVVectorizeLoadStore.cpp
+++ b/compiler/src/iree/compiler/Codegen/SPIRV/SPIRVVectorizeLoadStore.cpp
@@ -92,8 +92,7 @@
if (type.isIntOrFloat()) {
return type.getIntOrFloatBitWidth();
}
- if (llvm::isa<VectorType>(type)) {
- auto vecType = cast<VectorType>(type);
+ if (auto vecType = dyn_cast<VectorType>(type)) {
auto elementType = vecType.getElementType();
return elementType.getIntOrFloatBitWidth() * vecType.getNumElements();
}
diff --git a/compiler/src/iree/compiler/Dialect/HAL/Transforms/MaterializeInterfaces.cpp b/compiler/src/iree/compiler/Dialect/HAL/Transforms/MaterializeInterfaces.cpp
index 35b0a6f..180e25e 100644
--- a/compiler/src/iree/compiler/Dialect/HAL/Transforms/MaterializeInterfaces.cpp
+++ b/compiler/src/iree/compiler/Dialect/HAL/Transforms/MaterializeInterfaces.cpp
@@ -400,9 +400,9 @@
// Check if workgroup size is set externally.
ArrayAttr workgroupSize;
for (auto attr : exportOp->getAttrs()) {
- if (isa<IREE::Codegen::ExportConfigAttr>(attr.getValue())) {
- workgroupSize = cast<IREE::Codegen::ExportConfigAttr>(attr.getValue())
- .getWorkgroupSizeIndexArray();
+ if (auto exportConfig =
+ dyn_cast<IREE::Codegen::ExportConfigAttr>(attr.getValue())) {
+ workgroupSize = exportConfig.getWorkgroupSizeIndexArray();
if (workgroupSize.size() < 3) {
SmallVector<Attribute> workgroupSizeVals =
llvm::to_vector(workgroupSize);
@@ -433,7 +433,7 @@
auto newRefAttr =
makeExportSymbolRefAttr(targetExecutableOp, variantOp, newExportOp);
exportExpansions[oldRefAttr].push_back(
- std::make_pair(newRefAttr, variantOp.getTargetAttr()));
+ {newRefAttr, variantOp.getTargetAttr()});
// Clone the workgroup count calculation function.
if (!exportOp.getWorkgroupCount().empty()) {
diff --git a/compiler/src/iree/compiler/Dialect/LinalgExt/IR/TilingInterfaceImpl.cpp b/compiler/src/iree/compiler/Dialect/LinalgExt/IR/TilingInterfaceImpl.cpp
index ab6c45b..b65a914 100644
--- a/compiler/src/iree/compiler/Dialect/LinalgExt/IR/TilingInterfaceImpl.cpp
+++ b/compiler/src/iree/compiler/Dialect/LinalgExt/IR/TilingInterfaceImpl.cpp
@@ -2185,16 +2185,15 @@
OpFoldResult zero = b.getIndexAttr(0);
OpFoldResult one = b.getIndexAttr(1);
- for (auto dim : llvm::seq<int64_t>(0, domainRank)) {
- loopBounds[dim].offset = zero;
- loopBounds[dim].stride = one;
+ for (Range &bound : loopBounds) {
+ bound.offset = zero;
+ bound.stride = one;
}
SmallVector<bool> dimsFound(domainRank, false);
auto fillSizes = [&](Value val, AffineMap indexingMap) {
for (auto [idx, dimExpr] : llvm::enumerate(indexingMap.getResults())) {
- assert(isa<AffineDimExpr>(dimExpr));
- AffineDimExpr dim = cast<AffineDimExpr>(dimExpr);
+ auto dim = cast<AffineDimExpr>(dimExpr);
int64_t pos = dim.getPosition();
if (dimsFound[pos]) {
continue;
diff --git a/compiler/src/iree/compiler/Dialect/VM/Conversion/VMToEmitC/ConvertVMToEmitC.cpp b/compiler/src/iree/compiler/Dialect/VM/Conversion/VMToEmitC/ConvertVMToEmitC.cpp
index 6c81b3a..755a94e 100644
--- a/compiler/src/iree/compiler/Dialect/VM/Conversion/VMToEmitC/ConvertVMToEmitC.cpp
+++ b/compiler/src/iree/compiler/Dialect/VM/Conversion/VMToEmitC/ConvertVMToEmitC.cpp
@@ -207,8 +207,8 @@
ArrayAttr::get(
ctx, {emitc::OpaqueAttr::get(ctx, ptr->second.first)}))
.getResult(0);
- } else if (llvm::isa<IREE::VM::RefType>(elementType)) {
- Type objType = llvm::cast<IREE::VM::RefType>(elementType).getObjectType();
+ } else if (auto elemRefType = dyn_cast<IREE::VM::RefType>(elementType)) {
+ Type objType = elemRefType.getObjectType();
Type typeRefType = emitc::OpaqueType::get(ctx, "iree_vm_ref_type_t");
Type typeRefArrayType = emitc::PointerType::get(typeRefType);
@@ -2743,7 +2743,6 @@
importOp ? importOp.getFunctionType().getNumInputs() : operands.size();
for (int i = 0; i < numInputs; i++) {
if (importOp && importOp.isFuncArgumentVariadic(i)) {
- assert(isa<IREE::VM::CallVariadicOp>(op));
auto variadicCallOp = cast<IREE::VM::CallVariadicOp>(op);
APInt segment = *(variadicCallOp.getSegmentSizes().begin() + i);
int64_t size = segment.getSExtValue();
diff --git a/compiler/src/iree/compiler/GlobalOptimization/Utils.cpp b/compiler/src/iree/compiler/GlobalOptimization/Utils.cpp
index ec841fc..8d562de 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/Utils.cpp
+++ b/compiler/src/iree/compiler/GlobalOptimization/Utils.cpp
@@ -47,9 +47,10 @@
return std::nullopt;
}
Value castIn = castOp->getOperand(0);
- if (isa<BlockArgument>(castIn) &&
- cast<BlockArgument>(castIn).getArgNumber() != 0) {
- return std::nullopt;
+ if (auto blockArg = dyn_cast<BlockArgument>(castIn)) {
+ if (blockArg.getArgNumber() != 0) {
+ return std::nullopt;
+ }
}
if (!isI1Src(castOp) && isExtending(castOp)) {
result = castOp;