Upgrade GlobalOpt, InputConversion, ExternalInterfaces to free create functions. NFC. (#21878)

The builder create methods are deprecated
(https://mlir.llvm.org/deprecation/); see
https://discourse.llvm.org/t/psa-opty-create-now-with-100-more-tab-complete/87339
for the announcement.

The main benefit of the free create functions is better tab completion
with LSPs/IDEs.

I'm splitting the upgrade into chunks, going by project directory.
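
The rewrite is purely mechanical; a minimal sketch of the pattern, using
one call site from this diff:

  // Before: deprecated OpBuilder method.
  Value empty = builder.create<tensor::EmptyOp>(loc, mixedSizes, elemType);

  // After: static create function on the op class; the builder becomes
  // the first argument and the remaining arguments are unchanged.
  Value empty = tensor::EmptyOp::create(builder, loc, mixedSizes, elemType);

The same transformation applies to rewriter-based and nested-builder call
sites, e.g. rewriter.create<OpTy>(loc, ...) becomes
OpTy::create(rewriter, loc, ...).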
diff --git a/compiler/src/iree/compiler/ExternalInterfaces/EncodingExternalModels.cpp b/compiler/src/iree/compiler/ExternalInterfaces/EncodingExternalModels.cpp
index 6f50bd9..820aff0 100644
--- a/compiler/src/iree/compiler/ExternalInterfaces/EncodingExternalModels.cpp
+++ b/compiler/src/iree/compiler/ExternalInterfaces/EncodingExternalModels.cpp
@@ -92,13 +92,13 @@
           RankedTensorType operandEncodingType =
               collapseOp.getSrcType().cloneWithEncoding(
                   operandEncodings.front());
-          Value newEncodingOp = builder.create<IREE::Encoding::SetEncodingOp>(
-              loc, operandEncodingType, collapseOp.getSrc());
+          Value newEncodingOp = IREE::Encoding::SetEncodingOp::create(
+              builder, loc, operandEncodingType, collapseOp.getSrc());
           auto resultEncodingType =
               dyn_cast<RankedTensorType>(opResult.getType())
                   .cloneWithEncoding(resultEncodings.front());
-          Value newCollapseOp = builder.create<tensor::CollapseShapeOp>(
-              loc, resultEncodingType, newEncodingOp,
+          Value newCollapseOp = tensor::CollapseShapeOp::create(
+              builder, loc, resultEncodingType, newEncodingOp,
               collapseOp.getReassociationIndices());
           IREE::Encoding::PropagationResult result;
           result.replacements = {newCollapseOp};
@@ -228,9 +228,8 @@
                 auto resType = RankedTensorType::get(
                     operandType.getShape(), operandType.getElementType(),
                     encoding);
-                Value encodedInput =
-                    rewriter.create<IREE::Encoding::SetEncodingOp>(
-                        loc, resType, operand->get());
+                Value encodedInput = IREE::Encoding::SetEncodingOp::create(
+                    rewriter, loc, resType, operand->get());
                 result.generatedEncodingOps.push_back(
                     encodedInput.getDefiningOp());
                 encodedOperands.push_back(encodedInput);
@@ -253,8 +252,8 @@
 
                 // Create encoded generic op.
                 rewriter.setInsertionPointAfter(emptyOp);
-                Value encodedInit = rewriter.create<tensor::EmptyOp>(
-                    loc, emptyOp.getType().getShape(),
+                Value encodedInit = tensor::EmptyOp::create(
+                    rewriter, loc, emptyOp.getType().getShape(),
                     resultEncodingType.getElementType(),
                     emptyOp.getDynamicSizes(), encoding);
                 resultEncodingTypes.push_back(resultEncodingType);
@@ -271,10 +270,9 @@
                 auto resultType =
                     cast<RankedTensorType>(genericResult.getType())
                         .dropEncoding();
-                auto newUnsetEncoding =
-                    rewriter.create<IREE::Encoding::UnsetEncodingOp>(
-                        encodingOp.getLoc(), resultType, genericResult,
-                        encodingOp.getResultDims());
+                auto newUnsetEncoding = IREE::Encoding::UnsetEncodingOp::create(
+                    rewriter, encodingOp.getLoc(), resultType, genericResult,
+                    encodingOp.getResultDims());
                 result.replacements.push_back(newUnsetEncoding.getResult());
                 result.generatedEncodingOps.push_back(newUnsetEncoding);
               }
diff --git a/compiler/src/iree/compiler/GlobalOptimization/ConvertStridedContractionToContraction.cpp b/compiler/src/iree/compiler/GlobalOptimization/ConvertStridedContractionToContraction.cpp
index ca5d54b..13d3257 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/ConvertStridedContractionToContraction.cpp
+++ b/compiler/src/iree/compiler/GlobalOptimization/ConvertStridedContractionToContraction.cpp
@@ -117,8 +117,8 @@
       }
       vSizes.push_back(rewriter.createOrFold<tensor::DimOp>(loc, input, i));
     }
-    Value extractedSlice = rewriter.create<tensor::ExtractSliceOp>(
-        loc, sliceTy, input, vOffset, vSizes, vStride);
+    Value extractedSlice = tensor::ExtractSliceOp::create(
+        rewriter, loc, sliceTy, input, vOffset, vSizes, vStride);
     rewriter.startOpModification(op);
     op.setIndexingMapsAttr(rewriter.getAffineMapArrayAttr(mapRange));
     op.setOperand(0, extractedSlice);
diff --git a/compiler/src/iree/compiler/GlobalOptimization/DecomposeConcat.cpp b/compiler/src/iree/compiler/GlobalOptimization/DecomposeConcat.cpp
index 50129e9..3ba3678 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/DecomposeConcat.cpp
+++ b/compiler/src/iree/compiler/GlobalOptimization/DecomposeConcat.cpp
@@ -28,7 +28,7 @@
   applyPermutationToVector(mixedSizes, perm);
   Type elemType = cast<RankedTensorType>(source.getType()).getElementType();
   Value empty =
-      builder.create<tensor::EmptyOp>(source.getLoc(), mixedSizes, elemType)
+      tensor::EmptyOp::create(builder, source.getLoc(), mixedSizes, elemType)
           .getResult();
   return builder
       .create<linalg::TransposeOp>(source.getLoc(), source, empty, perm)
@@ -75,9 +75,9 @@
     SmallVector<int64_t> newShape = applyPermutation(concatShape, permutation);
     auto newConcatType = RankedTensorType::get(
         newShape, concatOp.getResultType().getElementType());
-    Value newConcat = rewriter.create<tensor::ConcatOp>(
-        concatOp.getLoc(), newConcatType, /*dim=*/outerMostNonUnitDim,
-        transposedInputs);
+    Value newConcat =
+        tensor::ConcatOp::create(rewriter, concatOp.getLoc(), newConcatType,
+                                 /*dim=*/outerMostNonUnitDim, transposedInputs);
     auto invPerm = invertPermutationVector(permutation);
     Value transposedConcat = createTranspose(rewriter, newConcat, invPerm);
     rewriter.replaceOp(concatOp, transposedConcat);
diff --git a/compiler/src/iree/compiler/GlobalOptimization/DemoteContractionInputsToBF16.cpp b/compiler/src/iree/compiler/GlobalOptimization/DemoteContractionInputsToBF16.cpp
index df83d07..6e6e5d9 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/DemoteContractionInputsToBF16.cpp
+++ b/compiler/src/iree/compiler/GlobalOptimization/DemoteContractionInputsToBF16.cpp
@@ -65,17 +65,17 @@
             inputType.getRank(), utils::IteratorType::parallel);
         SmallVector<OpFoldResult> mixedSizes =
             tensor::getMixedSizes(rewriter, loc, input);
-        Value empty = rewriter.create<tensor::EmptyOp>(loc, mixedSizes,
-                                                       rewriter.getBF16Type());
+        Value empty = tensor::EmptyOp::create(rewriter, loc, mixedSizes,
+                                              rewriter.getBF16Type());
         demotedInputs.push_back(
             rewriter
                 .create<linalg::GenericOp>(
                     loc, TypeRange{demotedInputType}, ValueRange{input},
                     ValueRange{empty}, maps, iteratorTypes,
                     [&](OpBuilder &b, Location loc, ValueRange args) {
-                      Value result = b.create<arith::TruncFOp>(
-                          loc, rewriter.getBF16Type(), args[0]);
-                      b.create<linalg::YieldOp>(loc, result);
+                      Value result = arith::TruncFOp::create(
+                          b, loc, rewriter.getBF16Type(), args[0]);
+                      linalg::YieldOp::create(b, loc, result);
                     })
                 ->getResults()[0]);
       }
diff --git a/compiler/src/iree/compiler/GlobalOptimization/DetachElementwiseFromNamedOps.cpp b/compiler/src/iree/compiler/GlobalOptimization/DetachElementwiseFromNamedOps.cpp
index 19e689c..3ae5177 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/DetachElementwiseFromNamedOps.cpp
+++ b/compiler/src/iree/compiler/GlobalOptimization/DetachElementwiseFromNamedOps.cpp
@@ -85,11 +85,11 @@
     SmallVector<OpFoldResult> mixedSizes =
         tensor::getMixedSizes(rewriter, loc, outputOperand);
     auto initOp =
-        rewriter.create<tensor::EmptyOp>(loc, mixedSizes, elementType);
-    Value zero = rewriter.create<arith::ConstantOp>(
-        loc, rewriter.getZeroAttr(elementType));
-    Value fill =
-        rewriter.create<linalg::FillOp>(loc, zero, initOp.getResult()).result();
+        tensor::EmptyOp::create(rewriter, loc, mixedSizes, elementType);
+    Value zero = arith::ConstantOp::create(rewriter, loc,
+                                           rewriter.getZeroAttr(elementType));
+    Value fill = linalg::FillOp::create(rewriter, loc, zero, initOp.getResult())
+                     .result();
 
     // Update the contraction op to use the new zero tensor as output operand.
     rewriter.modifyOpInPlace(linalgOp,
@@ -97,17 +97,17 @@
 
     // Create a generic op to add back the original output tensor operand.
     rewriter.setInsertionPointAfter(linalgOp);
-    auto genericOp = rewriter.create<linalg::GenericOp>(
-        loc, outputType, ValueRange{linalgOp->getResult(0), outputOperand},
-        fill, maps, iterators,
-        [&](OpBuilder &b, Location nestedLoc, ValueRange args) {
+    auto genericOp = linalg::GenericOp::create(
+        rewriter, loc, outputType,
+        ValueRange{linalgOp->getResult(0), outputOperand}, fill, maps,
+        iterators, [&](OpBuilder &b, Location nestedLoc, ValueRange args) {
           Value result;
           if (llvm::isa<FloatType>(elementType)) {
-            result = b.create<arith::AddFOp>(nestedLoc, args[0], args[1]);
+            result = arith::AddFOp::create(b, nestedLoc, args[0], args[1]);
           } else {
-            result = b.create<arith::AddIOp>(nestedLoc, args[0], args[1]);
+            result = arith::AddIOp::create(b, nestedLoc, args[0], args[1]);
           }
-          b.create<linalg::YieldOp>(nestedLoc, result);
+          linalg::YieldOp::create(b, nestedLoc, result);
         });
     linalgOp->getResult(0).replaceAllUsesExcept(genericOp->getResult(0),
                                                 genericOp);
@@ -153,8 +153,8 @@
 
       Location loc = constOp.getLoc();
       Type elementType = resultType.getElementType();
-      Value emptyTensorOp = rewriter.create<tensor::EmptyOp>(
-          loc, resultType.getShape(), elementType);
+      Value emptyTensorOp = tensor::EmptyOp::create(
+          rewriter, loc, resultType.getShape(), elementType);
       TypedAttr constValue;
       if (llvm::isa<IntegerType>(elementType)) {
         constValue = rewriter.getIntegerAttr(
@@ -164,7 +164,7 @@
             elementType, attr.template getSplatValue<APFloat>());
       }
       Value scalarConstantOp =
-          rewriter.create<arith::ConstantOp>(loc, elementType, constValue);
+          arith::ConstantOp::create(rewriter, loc, elementType, constValue);
 
       Value fillOp = rewriter
                          .create<linalg::FillOp>(
diff --git a/compiler/src/iree/compiler/GlobalOptimization/ExpandTensorShapes.cpp b/compiler/src/iree/compiler/GlobalOptimization/ExpandTensorShapes.cpp
index c9974d1..b4832f7 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/ExpandTensorShapes.cpp
+++ b/compiler/src/iree/compiler/GlobalOptimization/ExpandTensorShapes.cpp
@@ -87,8 +87,8 @@
         auto dimName =
             (global.tensorOp.getName() + "__d" + std::to_string(it.index()))
                 .str();
-        auto dimOp = builder.create<IREE::Util::GlobalOp>(
-            global.tensorOp.getLoc(), dimName,
+        auto dimOp = IREE::Util::GlobalOp::create(
+            builder, global.tensorOp.getLoc(), dimName,
             /*isMutable=*/true, indexType);
         dimOp.setVisibility(global.tensorOp.getVisibility());
         symbolTable.insert(dimOp);
@@ -234,9 +234,9 @@
     // Insert shape ties that we've sunk from callers.
     auto builder = OpBuilder::atBlockBegin(&block);
     for (auto &expansion : llvm::reverse(expansions)) {
-      auto tieShapeOp = builder.create<IREE::Flow::TensorTieShapeOp>(
-          region.getLoc(), expansion.tensor.getType(), expansion.tensor,
-          expansion.dynamicDims);
+      auto tieShapeOp = IREE::Flow::TensorTieShapeOp::create(
+          builder, region.getLoc(), expansion.tensor.getType(),
+          expansion.tensor, expansion.dynamicDims);
       expansion.tensor.replaceAllUsesExcept(tieShapeOp.getResult(), tieShapeOp);
     }
   }
@@ -283,9 +283,9 @@
         newOp->getResults().slice(newIdx, tensorType.getNumDynamicDims());
     newIdx += expandedValue.dynamicDims.size();
     tensorDimMap[expandedValue.tensor] = expandedValue;
-    auto tieShapeOp = builder.create<IREE::Flow::TensorTieShapeOp>(
-        op->getLoc(), expandedValue.tensor.getType(), expandedValue.tensor,
-        expandedValue.dynamicDims);
+    auto tieShapeOp = IREE::Flow::TensorTieShapeOp::create(
+        builder, op->getLoc(), expandedValue.tensor.getType(),
+        expandedValue.tensor, expandedValue.dynamicDims);
     oldResult.replaceAllUsesExcept(tieShapeOp.getResult(), tieShapeOp);
   }
 }
@@ -315,9 +315,9 @@
         dimOp.createLoadOp(op.getLoc(), builder).getLoadedGlobalValue());
   }
   tensorDimMap[op.getLoadedGlobalValue()] = expandedValue;
-  auto tieShapeOp = builder.create<IREE::Flow::TensorTieShapeOp>(
-      op.getLoc(), expandedValue.tensor.getType(), expandedValue.tensor,
-      expandedValue.dynamicDims);
+  auto tieShapeOp = IREE::Flow::TensorTieShapeOp::create(
+      builder, op.getLoc(), expandedValue.tensor.getType(),
+      expandedValue.tensor, expandedValue.dynamicDims);
   op.getLoadedGlobalValue().replaceAllUsesExcept(tieShapeOp.getResult(),
                                                  tieShapeOp);
 }
@@ -436,7 +436,7 @@
   OpBuilder builder(op);
   auto operands = expandOperands(op.getLoc(), op.getOperands(), tensorDimMap,
                                  indexSet, builder);
-  builder.create<IREE::Util::ReturnOp>(op.getLoc(), operands);
+  IREE::Util::ReturnOp::create(builder, op.getLoc(), operands);
   op.erase();
 }
 
@@ -456,7 +456,7 @@
   OpBuilder builder(op);
   auto operands = expandOperands(op.getLoc(), op.getDestOperands(),
                                  tensorDimMap, indexSet, builder);
-  builder.create<mlir::cf::BranchOp>(op.getLoc(), op.getDest(), operands);
+  mlir::cf::BranchOp::create(builder, op.getLoc(), op.getDest(), operands);
   op.erase();
 }
 
@@ -465,8 +465,8 @@
   if (!usesDynamicTensors(op))
     return;
   OpBuilder builder(op);
-  builder.create<mlir::cf::CondBranchOp>(
-      op.getLoc(), op.getCondition(), op.getTrueDest(),
+  mlir::cf::CondBranchOp::create(
+      builder, op.getLoc(), op.getCondition(), op.getTrueDest(),
       expandOperands(op.getLoc(), op.getTrueDestOperands(), tensorDimMap,
                      indexSet, builder),
       op.getFalseDest(),
@@ -496,8 +496,9 @@
   auto falseValue = consumeExpandedValue(op.getLoc(), op.getFalseValue(),
                                          tensorDimMap, indexSet, builder);
 
-  auto selectOp = builder.create<mlir::arith::SelectOp>(
-      op.getLoc(), op.getCondition(), op.getTrueValue(), op.getFalseValue());
+  auto selectOp =
+      mlir::arith::SelectOp::create(builder, op.getLoc(), op.getCondition(),
+                                    op.getTrueValue(), op.getFalseValue());
 
   SmallVector<Value> selectedDims;
   for (auto [trueDynamicDims, falseDynamicDims] :
@@ -508,9 +509,9 @@
                                      trueDynamicDims, falseDynamicDims)
             .getResult());
   }
-  auto tieShapeOp = builder.create<IREE::Flow::TensorTieShapeOp>(
-      selectOp.getLoc(), selectOp.getResult().getType(), selectOp.getResult(),
-      selectedDims);
+  auto tieShapeOp = IREE::Flow::TensorTieShapeOp::create(
+      builder, selectOp.getLoc(), selectOp.getResult().getType(),
+      selectOp.getResult(), selectedDims);
 
   op.getResult().replaceAllUsesExcept(tieShapeOp.getResult(), tieShapeOp);
   op.erase();
@@ -524,9 +525,9 @@
                                  indexSet, builder);
   auto resultTypes = expandTypes(op.getResultTypes());
 
-  auto newOp = builder.create<scf::WhileOp>(op.getLoc(), resultTypes, operands,
-                                            /*beforeBody*/ nullptr,
-                                            /*afterBody*/ nullptr);
+  auto newOp = scf::WhileOp::create(builder, op.getLoc(), resultTypes, operands,
+                                    /*beforeBody*/ nullptr,
+                                    /*afterBody*/ nullptr);
 
   newOp.getBefore().takeBody(op.getBefore());
   newOp.getAfter().takeBody(op.getAfter());
@@ -545,8 +546,8 @@
   OpBuilder builder(op);
   auto resultTypes = expandTypes(op.getResultTypes());
 
-  auto newOp = builder.create<scf::IfOp>(
-      op.getLoc(), resultTypes, op.getOperand(), op.elseBlock() != nullptr);
+  auto newOp = scf::IfOp::create(builder, op.getLoc(), resultTypes,
+                                 op.getOperand(), op.elseBlock() != nullptr);
 
   newOp.getBodyRegion().takeBody(op.getBodyRegion());
   expandRegion(newOp.getBodyRegion(), symbolTable, globalMap, indexSet,
@@ -566,7 +567,7 @@
   OpBuilder builder(op);
   auto operands = expandOperands(op.getLoc(), op.getOperands(), tensorDimMap,
                                  indexSet, builder);
-  builder.create<mlir::scf::YieldOp>(op.getLoc(), operands);
+  mlir::scf::YieldOp::create(builder, op.getLoc(), operands);
   op.erase();
 }
 
@@ -575,8 +576,8 @@
   OpBuilder builder(op);
   auto operands = expandOperands(op.getLoc(), op.getArgs(), tensorDimMap,
                                  indexSet, builder);
-  builder.create<mlir::scf::ConditionOp>(op.getLoc(), op.getCondition(),
-                                         operands);
+  mlir::scf::ConditionOp::create(builder, op.getLoc(), op.getCondition(),
+                                 operands);
   op.erase();
 }
 
diff --git a/compiler/src/iree/compiler/GlobalOptimization/FuseDequantizationMatmul.cpp b/compiler/src/iree/compiler/GlobalOptimization/FuseDequantizationMatmul.cpp
index 6769349..f1c24ae 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/FuseDequantizationMatmul.cpp
+++ b/compiler/src/iree/compiler/GlobalOptimization/FuseDequantizationMatmul.cpp
@@ -384,15 +384,15 @@
 Value QuantizedMatmulRewriter::getGroupReductionInit(Value input) {
   RankedTensorType inputType = llvm::cast<RankedTensorType>(input.getType());
   assert(isa<FloatType>(inputType.getElementType()) && "expected float type");
-  Value zero = rewriter.create<arith::ConstantOp>(
-      loc, rewriter.getFloatAttr(inputType.getElementType(), 0.0));
+  Value zero = arith::ConstantOp::create(
+      rewriter, loc, rewriter.getFloatAttr(inputType.getElementType(), 0.0));
   SmallVector<int64_t> inputShape(inputType.getShape());
   SmallVector<int64_t> outputShape(llvm::drop_end(inputShape));
   RankedTensorType outputType =
       RankedTensorType::get(outputShape, inputType.getElementType());
-  Value emptyOut = rewriter.create<tensor::EmptyOp>(
-      loc, outputType.getShape(), outputType.getElementType());
-  return rewriter.create<linalg::FillOp>(loc, zero, emptyOut).result();
+  Value emptyOut = tensor::EmptyOp::create(rewriter, loc, outputType.getShape(),
+                                           outputType.getElementType());
+  return linalg::FillOp::create(rewriter, loc, zero, emptyOut).result();
 }
 
 // Creates a generic that computes the absolute max along the group
@@ -405,12 +405,12 @@
   auto iterators = mapsAndIterators.second;
   Value input = inputOperand->get();
   Value output = getGroupReductionInit(input);
-  auto groupMaxOp = rewriter.create<linalg::GenericOp>(
-      loc, output.getType(), input, output, maps, iterators,
+  auto groupMaxOp = linalg::GenericOp::create(
+      rewriter, loc, output.getType(), input, output, maps, iterators,
       [&](OpBuilder &b, Location nestedLoc, ValueRange args) {
-        Value abs = b.create<math::AbsFOp>(nestedLoc, args[0]);
-        Value max = b.create<arith::MaximumFOp>(nestedLoc, abs, args[1]);
-        b.create<linalg::YieldOp>(nestedLoc, max);
+        Value abs = math::AbsFOp::create(b, nestedLoc, args[0]);
+        Value max = arith::MaximumFOp::create(b, nestedLoc, abs, args[1]);
+        linalg::YieldOp::create(b, nestedLoc, max);
       });
   LLVM_DEBUG(DBGS() << "groupMaxOp:   " << groupMaxOp << "\n");
   return groupMaxOp.getResult(0);
@@ -422,19 +422,20 @@
   auto groupMaxType = llvm::cast<RankedTensorType>(groupMax.getType());
   assert(isa<FloatType>(groupMaxType.getElementType()) &&
          "expected float type");
-  Value cst = rewriter.create<arith::ConstantOp>(
-      loc, rewriter.getFloatAttr(groupMaxType.getElementType(),
-                                 (1 << (quantizedBitWidth - 1)) - 1));
-  Value output = rewriter.create<tensor::EmptyOp>(
-      loc, groupMaxType.getShape(), groupMaxType.getElementType());
+  Value cst = arith::ConstantOp::create(
+      rewriter, loc,
+      rewriter.getFloatAttr(groupMaxType.getElementType(),
+                            (1 << (quantizedBitWidth - 1)) - 1));
+  Value output = tensor::EmptyOp::create(rewriter, loc, groupMaxType.getShape(),
+                                         groupMaxType.getElementType());
   SmallVector<AffineMap> maps(
       2, rewriter.getMultiDimIdentityMap(groupMaxType.getShape().size()));
-  auto scalesOp = rewriter.create<linalg::GenericOp>(
-      loc, output.getType(), groupMax, output, maps,
+  auto scalesOp = linalg::GenericOp::create(
+      rewriter, loc, output.getType(), groupMax, output, maps,
       getParallelAndReductionIterators(groupMaxType.getRank(), 0),
       [&](OpBuilder &b, Location nestedLoc, ValueRange args) {
-        Value scale = b.create<arith::DivFOp>(nestedLoc, args[0], cst);
-        b.create<linalg::YieldOp>(nestedLoc, scale);
+        Value scale = arith::DivFOp::create(b, nestedLoc, args[0], cst);
+        linalg::YieldOp::create(b, nestedLoc, scale);
       });
   LLVM_DEBUG(DBGS() << "scalesOp:   " << scalesOp << "\n");
   return scalesOp.getResult(0);
@@ -450,11 +451,11 @@
   auto iterators = mapsAndIterators.second;
   Value input = inputOperand->get();
   Value output = getGroupReductionInit(input);
-  auto groupSumsOp = rewriter.create<linalg::GenericOp>(
-      loc, output.getType(), input, output, maps, iterators,
+  auto groupSumsOp = linalg::GenericOp::create(
+      rewriter, loc, output.getType(), input, output, maps, iterators,
       [&](OpBuilder &b, Location nestedLoc, ValueRange args) {
-        Value sum = b.create<arith::AddFOp>(nestedLoc, args[0], args[1]);
-        b.create<linalg::YieldOp>(nestedLoc, sum);
+        Value sum = arith::AddFOp::create(b, nestedLoc, args[0], args[1]);
+        linalg::YieldOp::create(b, nestedLoc, sum);
       });
   LLVM_DEBUG(DBGS() << "groupSumsOp:   " << groupSumsOp << "\n");
   return groupSumsOp.getResult(0);
@@ -476,22 +477,23 @@
   Value scales = generateScalesGeneric(groupMax);
   Value groupSums = generateGroupSumsGeneric();
 
-  Value output = rewriter.create<tensor::EmptyOp>(loc, unquantizedShape,
-                                                  quantizedElementType);
+  Value output = tensor::EmptyOp::create(rewriter, loc, unquantizedShape,
+                                         quantizedElementType);
   AffineMap inputMap = rewriter.getMultiDimIdentityMap(unquantizedShape.size());
   AffineMap scalesMap = rewriter.getMultiDimIdentityMap(unquantizedShape.size())
                             .getMajorSubMap(unquantizedShape.size() - 1);
   AffineMap outputMap =
       rewriter.getMultiDimIdentityMap(unquantizedShape.size());
   SmallVector<AffineMap> maps{inputMap, scalesMap, outputMap};
-  auto quantizeOp = rewriter.create<linalg::GenericOp>(
-      loc, output.getType(), ValueRange{unquantizedInput, scales}, output, maps,
+  auto quantizeOp = linalg::GenericOp::create(
+      rewriter, loc, output.getType(), ValueRange{unquantizedInput, scales},
+      output, maps,
       getParallelAndReductionIterators(unquantizedShape.size(), 0),
       [&](OpBuilder &b, Location nestedLoc, ValueRange args) {
-        Value scaled = b.create<arith::DivFOp>(nestedLoc, args[0], args[1]);
+        Value scaled = arith::DivFOp::create(b, nestedLoc, args[0], args[1]);
         Value quant =
-            b.create<arith::FPToSIOp>(nestedLoc, quantizedElementType, scaled);
-        b.create<linalg::YieldOp>(nestedLoc, quant);
+            arith::FPToSIOp::create(b, nestedLoc, quantizedElementType, scaled);
+        linalg::YieldOp::create(b, nestedLoc, quant);
       });
   LLVM_DEBUG(DBGS() << "quantizeOp:   " << quantizeOp << "\n");
   Value newQuantizedInput = quantizeOp.getResult(0);
@@ -535,31 +537,31 @@
           .getShape());
   outputShape.push_back(
       newQuantizedInputShape[newQuantizedInputShape.size() - 2]);
-  Value zero = rewriter.create<arith::ConstantOp>(
-      loc, rewriter.getIntegerAttr(accType, 0.0));
-  Value emptyOut = rewriter.create<tensor::EmptyOp>(loc, outputShape, accType);
-  Value output = rewriter.create<linalg::FillOp>(loc, zero, emptyOut).result();
-  auto integerMatmulOp = rewriter.create<linalg::GenericOp>(
-      loc, output.getType(), ValueRange{newQuantizedInput, quantizedInput},
-      output, maps, iterators,
+  Value zero = arith::ConstantOp::create(rewriter, loc,
+                                         rewriter.getIntegerAttr(accType, 0.0));
+  Value emptyOut = tensor::EmptyOp::create(rewriter, loc, outputShape, accType);
+  Value output = linalg::FillOp::create(rewriter, loc, zero, emptyOut).result();
+  auto integerMatmulOp = linalg::GenericOp::create(
+      rewriter, loc, output.getType(),
+      ValueRange{newQuantizedInput, quantizedInput}, output, maps, iterators,
       [&](OpBuilder &b, Location nestedLoc, ValueRange args) {
         Value mul;
         if (quantType == mulType) {
-          Value ext1 = b.create<arith::ExtUIOp>(nestedLoc, mulType, args[1]);
-          mul = b.create<arith::MulIOp>(nestedLoc, args[0], ext1);
+          Value ext1 = arith::ExtUIOp::create(b, nestedLoc, mulType, args[1]);
+          mul = arith::MulIOp::create(b, nestedLoc, args[0], ext1);
         } else {
-          Value ext0 = b.create<arith::ExtSIOp>(nestedLoc, mulType, args[0]);
-          Value ext1 = b.create<arith::ExtUIOp>(nestedLoc, mulType, args[1]);
-          mul = b.create<arith::MulIOp>(nestedLoc, ext0, ext1);
+          Value ext0 = arith::ExtSIOp::create(b, nestedLoc, mulType, args[0]);
+          Value ext1 = arith::ExtUIOp::create(b, nestedLoc, mulType, args[1]);
+          mul = arith::MulIOp::create(b, nestedLoc, ext0, ext1);
         }
         Value sum;
         if (mulType == accType) {
-          sum = b.create<arith::AddIOp>(nestedLoc, mul, args[2]);
+          sum = arith::AddIOp::create(b, nestedLoc, mul, args[2]);
         } else {
-          Value extMul = b.create<arith::ExtSIOp>(nestedLoc, accType, mul);
-          sum = b.create<arith::AddIOp>(nestedLoc, extMul, args[2]);
+          Value extMul = arith::ExtSIOp::create(b, nestedLoc, accType, mul);
+          sum = arith::AddIOp::create(b, nestedLoc, extMul, args[2]);
         }
-        b.create<linalg::YieldOp>(nestedLoc, sum);
+        linalg::YieldOp::create(b, nestedLoc, sum);
       });
   LLVM_DEBUG(DBGS() << "integerMatmulOp:   " << integerMatmulOp << "\n");
   return integerMatmulOp;
@@ -626,20 +628,20 @@
 
   Type floatType = getElementTypeOrSelf(scales);
   Value output = matmulOutputOperand->get();
-  auto reassociatedDequantizationOp = rewriter.create<linalg::GenericOp>(
-      loc, output.getType(),
+  auto reassociatedDequantizationOp = linalg::GenericOp::create(
+      rewriter, loc, output.getType(),
       ValueRange{quantizedIntegerMatmul, newScales, groupSums, scales, zps},
       output, maps, iterators,
       [&](OpBuilder &b, Location loc, ValueRange args) {
         Value dq;
-        dq = b.create<arith::SIToFPOp>(loc, floatType, args[0]);
-        Value scaledRes0 = b.create<arith::MulFOp>(loc, dq, args[1]);
-        Value scaledRes1 = b.create<arith::MulFOp>(loc, scaledRes0, args[3]);
-        Value scaledZp0 = b.create<arith::MulFOp>(loc, args[4], args[3]);
-        Value scaledZp1 = b.create<arith::MulFOp>(loc, scaledZp0, args[2]);
-        Value groupRes = b.create<arith::SubFOp>(loc, scaledRes1, scaledZp1);
-        Value sum = b.create<arith::AddFOp>(loc, groupRes, args[5]);
-        b.create<linalg::YieldOp>(loc, sum);
+        dq = arith::SIToFPOp::create(b, loc, floatType, args[0]);
+        Value scaledRes0 = arith::MulFOp::create(b, loc, dq, args[1]);
+        Value scaledRes1 = arith::MulFOp::create(b, loc, scaledRes0, args[3]);
+        Value scaledZp0 = arith::MulFOp::create(b, loc, args[4], args[3]);
+        Value scaledZp1 = arith::MulFOp::create(b, loc, scaledZp0, args[2]);
+        Value groupRes = arith::SubFOp::create(b, loc, scaledRes1, scaledZp1);
+        Value sum = arith::AddFOp::create(b, loc, groupRes, args[5]);
+        linalg::YieldOp::create(b, loc, sum);
       });
   LLVM_DEBUG(DBGS() << "reassociatedDequantizationOp:   "
                     << reassociatedDequantizationOp << "\n");
diff --git a/compiler/src/iree/compiler/GlobalOptimization/InferNumericNarrowing.cpp b/compiler/src/iree/compiler/GlobalOptimization/InferNumericNarrowing.cpp
index 9eef51a..5cdcc17 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/InferNumericNarrowing.cpp
+++ b/compiler/src/iree/compiler/GlobalOptimization/InferNumericNarrowing.cpp
@@ -123,8 +123,8 @@
     if (type.getWidth() != 0) {
       range = std::make_pair(minValue, maxValue);
     }
-    auto annotationOp = builder.create<IREE::Util::NumericOptionalNarrowOp>(
-        probePoint.getLoc(), probePoint, type, range);
+    auto annotationOp = IREE::Util::NumericOptionalNarrowOp::create(
+        builder, probePoint.getLoc(), probePoint, type, range);
     probePoint.replaceAllUsesExcept(annotationOp, annotationOp);
   }
 };
diff --git a/compiler/src/iree/compiler/GlobalOptimization/Interfaces/HoistableTypeInterface.cpp b/compiler/src/iree/compiler/GlobalOptimization/Interfaces/HoistableTypeInterface.cpp
index 83ba850..14acb1f 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/Interfaces/HoistableTypeInterface.cpp
+++ b/compiler/src/iree/compiler/GlobalOptimization/Interfaces/HoistableTypeInterface.cpp
@@ -24,8 +24,8 @@
     return global;
   }
   // No dynamic dims because we are always bitcasting constants.
-  return b.create<IREE::TensorExt::BitCastOp>(loc, targetType, global,
-                                              ValueRange(), ValueRange());
+  return IREE::TensorExt::BitCastOp::create(b, loc, targetType, global,
+                                            ValueRange(), ValueRange());
 }
 
 struct HoistableTensorTypeInterface
@@ -103,7 +103,7 @@
         !isa<IndexType>(init.getType())) {
       return init;
     }
-    return builder.create<arith::IndexCastOp>(loc, storageType, init);
+    return arith::IndexCastOp::create(builder, loc, storageType, init);
   }
   static Value decodeStorageType(OpBuilder &builder, Location loc,
                                  Type originalType, Value loadedGlobal) {
@@ -112,7 +112,7 @@
         !isa<IntegerType>(loadedGlobal.getType())) {
       return loadedGlobal;
     }
-    return builder.create<arith::IndexCastOp>(loc, originalType, loadedGlobal);
+    return arith::IndexCastOp::create(builder, loc, originalType, loadedGlobal);
   }
 };
 
diff --git a/compiler/src/iree/compiler/GlobalOptimization/OptimizeNumerics.cpp b/compiler/src/iree/compiler/GlobalOptimization/OptimizeNumerics.cpp
index fec5542..89273fc 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/OptimizeNumerics.cpp
+++ b/compiler/src/iree/compiler/GlobalOptimization/OptimizeNumerics.cpp
@@ -50,16 +50,16 @@
   if (llvm::isa<FloatType>(origElementType) &&
       llvm::isa<IntegerType>(toElementType)) {
     if (isSigned) {
-      return builder.create<arith::FPToSIOp>(loc, toType, origValue);
+      return arith::FPToSIOp::create(builder, loc, toType, origValue);
     } else {
-      return builder.create<arith::FPToUIOp>(loc, toType, origValue);
+      return arith::FPToUIOp::create(builder, loc, toType, origValue);
     }
   } else if (llvm::isa<IntegerType>(origElementType) &&
              llvm::isa<FloatType>(toElementType)) {
     if (isSigned) {
-      return builder.create<arith::SIToFPOp>(loc, toType, origValue);
+      return arith::SIToFPOp::create(builder, loc, toType, origValue);
     } else {
-      return builder.create<arith::UIToFPOp>(loc, toType, origValue);
+      return arith::UIToFPOp::create(builder, loc, toType, origValue);
     }
   } else {
     // If we need int<->int and float<->float, implement those cases. Since
@@ -153,7 +153,7 @@
                 fillInit)
             .getCasted();
     Value fillResult =
-        rewriter.create<linalg::FillOp>(loc, fillInput, fillInit).result();
+        linalg::FillOp::create(rewriter, loc, fillInput, fillInit).result();
     rewriter.replaceOp(castOp, fillResult);
     return success();
   }
@@ -242,8 +242,8 @@
     Value newAccum =
         castNumeric(accumParams->producer, accumLowPType, isSigned, rewriter);
 
-    auto newMatmulOp = rewriter.create<linalg::MatmulOp>(
-        loc, ValueRange{newLhs, newRhs}, ValueRange{newAccum});
+    auto newMatmulOp = linalg::MatmulOp::create(
+        rewriter, loc, ValueRange{newLhs, newRhs}, ValueRange{newAccum});
     if (!isSigned) {
       newMatmulOp.setCast(linalg::TypeFn::cast_unsigned);
     }
diff --git a/compiler/src/iree/compiler/GlobalOptimization/PropagateLinalgTranspose.cpp b/compiler/src/iree/compiler/GlobalOptimization/PropagateLinalgTranspose.cpp
index fa3343c..b7745b9 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/PropagateLinalgTranspose.cpp
+++ b/compiler/src/iree/compiler/GlobalOptimization/PropagateLinalgTranspose.cpp
@@ -59,7 +59,7 @@
   applyPermutationToVector(mixedSizes, perm);
   Type elemType = cast<RankedTensorType>(source.getType()).getElementType();
   Value empty =
-      builder.create<tensor::EmptyOp>(source.getLoc(), mixedSizes, elemType)
+      tensor::EmptyOp::create(builder, source.getLoc(), mixedSizes, elemType)
           .getResult();
   return empty;
 }
@@ -72,8 +72,8 @@
     Type elementType = empty.getType().getElementType();
     SmallVector<OpFoldResult> mixedSizes = empty.getMixedSizes();
     applyPermutationToVector(mixedSizes, perm);
-    return builder.create<tensor::EmptyOp>(empty.getLoc(), mixedSizes,
-                                           elementType);
+    return tensor::EmptyOp::create(builder, empty.getLoc(), mixedSizes,
+                                   elementType);
   }
   Value empty = createTransposeInit(builder, source, perm);
   return builder
@@ -253,9 +253,9 @@
     newIndexingMaps[genericOp.getNumDpsInputs() + resultIndex] = transposedMap;
 
     // 3. Create the new generic with the same iteration order.
-    auto newGenericOp = rewriter.create<linalg::GenericOp>(
-        genericOp.getLoc(), resultTypes, genericOp.getDpsInputs(), newInit,
-        newIndexingMaps, genericOp.getIteratorTypesArray(),
+    auto newGenericOp = linalg::GenericOp::create(
+        rewriter, genericOp.getLoc(), resultTypes, genericOp.getDpsInputs(),
+        newInit, newIndexingMaps, genericOp.getIteratorTypesArray(),
         /*bodyBuild=*/nullptr, linalg::getPrunedAttributeList(genericOp));
     rewriter.cloneRegionBefore(genericOp.getRegion(), newGenericOp.getRegion(),
                                newGenericOp.getRegion().begin());
@@ -355,9 +355,9 @@
 
     Value newTranspose =
         createTranspose(rewriter, collapseOp.getSrc(), newPerm);
-    Value newReshape = rewriter.create<tensor::CollapseShapeOp>(
-        collapseOp.getLoc(), transposeOp.getResultTypes()[0], newTranspose,
-        newReassociations);
+    Value newReshape = tensor::CollapseShapeOp::create(
+        rewriter, collapseOp.getLoc(), transposeOp.getResultTypes()[0],
+        newTranspose, newReassociations);
     rewriter.replaceOp(transposeOp, newReshape);
     return success();
   }
@@ -488,9 +488,9 @@
 
     RankedTensorType sliceType = getPermutedTensorType(
         cast<RankedTensorType>(extractOp.getType()), rankReducedInvPerm);
-    Value slice = rewriter.create<tensor::ExtractSliceOp>(
-        extractOp.getLoc(), sliceType, transposeOp.getInput(), offsets, sizes,
-        strides);
+    Value slice = tensor::ExtractSliceOp::create(
+        rewriter, extractOp.getLoc(), sliceType, transposeOp.getInput(),
+        offsets, sizes, strides);
     // Transpose back to the original slice.
     if (!isIdentityPermutation(rankReducedPerm)) {
       slice = createTranspose(rewriter, slice, rankReducedPerm);
@@ -555,8 +555,8 @@
 
     RankedTensorType expandedType = getPermutedTensorType(
         cast<RankedTensorType>(expandOp.getType()), newInvPerm);
-    Value transposedReshape = rewriter.create<tensor::ExpandShapeOp>(
-        expandOp.getLoc(), expandedType, transposeOp.getInput(),
+    Value transposedReshape = tensor::ExpandShapeOp::create(
+        rewriter, expandOp.getLoc(), expandedType, transposeOp.getInput(),
         newReassociations);
     Value originalReshape =
         createTranspose(rewriter, transposedReshape, newPerm);
diff --git a/compiler/src/iree/compiler/GlobalOptimization/QuantizedConvToConv.cpp b/compiler/src/iree/compiler/GlobalOptimization/QuantizedConvToConv.cpp
index b2c8d06..6a2995b 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/QuantizedConvToConv.cpp
+++ b/compiler/src/iree/compiler/GlobalOptimization/QuantizedConvToConv.cpp
@@ -33,18 +33,18 @@
   Type eTy = getElementTypeOrSelf(value.getType());
   SmallVector<OpFoldResult> mixedSizes =
       tensor::getMixedSizes(rewriter, rewriter.getLoc(), value);
-  return rewriter.create<tensor::EmptyOp>(mixedSizes, eTy);
+  return tensor::EmptyOp::create(rewriter, mixedSizes, eTy);
 }
 
 // Creates an zero initialized tensor of given shape and type.
 Value emptyZero(ImplicitLocOpBuilder &builder, RankedTensorType ty,
                 llvm::SmallVector<Value> dyn) {
   Value empty =
-      builder.create<tensor::EmptyOp>(ty.getShape(), ty.getElementType(), dyn);
+      tensor::EmptyOp::create(builder, ty.getShape(), ty.getElementType(), dyn);
 
   TypedAttr attr = builder.getZeroAttr(ty.getElementType());
-  Value cnst = builder.create<arith::ConstantOp>(attr);
-  return builder.create<linalg::FillOp>(ValueRange{cnst}, ValueRange{empty})
+  Value cnst = arith::ConstantOp::create(builder, attr);
+  return linalg::FillOp::create(builder, ValueRange{cnst}, ValueRange{empty})
       .result();
 }
 
@@ -74,9 +74,9 @@
           init.getType(), ValueRange{conv, sum}, ValueRange{init}, affineMaps,
           iterators,
           [=](OpBuilder &b, Location loc, ValueRange args) {
-            Value mul = b.create<arith::MulIOp>(loc, args[1], zp);
-            Value sum = b.create<arith::SubIOp>(loc, args[0], mul);
-            b.create<linalg::YieldOp>(loc, sum);
+            Value mul = arith::MulIOp::create(b, loc, args[1], zp);
+            Value sum = arith::SubIOp::create(b, loc, args[0], mul);
+            linalg::YieldOp::create(b, loc, sum);
           })
       .getResult(0);
 }
@@ -93,8 +93,8 @@
           init.getType(), ValueRange{value}, ValueRange{init},
           ArrayRef<AffineMap>{map, map}, iterators,
           [=](OpBuilder &b, Location loc, ValueRange args) {
-            Value add = b.create<arith::AddIOp>(loc, args[0], scalar);
-            b.create<linalg::YieldOp>(loc, add);
+            Value add = arith::AddIOp::create(b, loc, args[0], scalar);
+            linalg::YieldOp::create(b, loc, add);
           })
       .getResult(0);
 }
@@ -106,16 +106,16 @@
   ShapedType ty = llvm::cast<ShapedType>(value.getType());
   dims.push_back(ty.getDimSize(dim));
   if (ty && ty.isDynamicDim(dim))
-    dynDims.push_back(builder.create<tensor::DimOp>(value, dim));
+    dynDims.push_back(tensor::DimOp::create(builder, value, dim));
 }
 
 Value multiplyDims(ImplicitLocOpBuilder &builder, Value value,
                    llvm::ArrayRef<int64_t> dims) {
-  Value count = builder.create<tensor::DimOp>(value, dims.front());
+  Value count = tensor::DimOp::create(builder, value, dims.front());
 
   for (auto d : dims.drop_front()) {
-    Value dim = builder.create<tensor::DimOp>(value, d);
-    count = builder.create<arith::MulIOp>(count, dim);
+    Value dim = tensor::DimOp::create(builder, value, d);
+    count = arith::MulIOp::create(builder, count, dim);
   }
 
   return count;
@@ -189,8 +189,8 @@
           RankedTensorType::get({inputTy.getDimSize(0), inputTy.getDimSize(1),
                                  inputTy.getDimSize(2), 1},
                                 accETy);
-      inputSum = builder.create<tensor::ExpandShapeOp>(expandTy, inputSum,
-                                                       reassociationMap);
+      inputSum = tensor::ExpandShapeOp::create(builder, expandTy, inputSum,
+                                               reassociationMap);
 
       llvm::SmallVector<int64_t> poolDims;
       llvm::SmallVector<Value> poolDynDims;
@@ -208,7 +208,7 @@
       llvm::SmallVector<Value> kDyn;
       GetDynamicDym(builder, kDims, kDyn, filter, 0);
       GetDynamicDym(builder, kDims, kDyn, filter, 1);
-      Value poolInit = builder.create<tensor::EmptyOp>(kDims, accETy, kDyn);
+      Value poolInit = tensor::EmptyOp::create(builder, kDims, accETy, kDyn);
 
       inputSum = builder
                      .create<linalg::PoolingNhwcSumOp>(
@@ -219,8 +219,8 @@
       // Collapse the length-1 ending dimension away.
       auto collapseTy =
           RankedTensorType::get(poolTy.getShape().drop_back(), accETy);
-      inputSum = builder.create<tensor::CollapseShapeOp>(collapseTy, inputSum,
-                                                         reassociationMap);
+      inputSum = tensor::CollapseShapeOp::create(builder, collapseTy, inputSum,
+                                                 reassociationMap);
 
       // Apply the zero-point update based on the input sum.
       newConv = applyZeroPoint(builder, newConv, inputSum, fZp, {0, 1, 2});
@@ -229,9 +229,9 @@
     // Apply the final update that occurs when there are multiple zero-points.
     if (!iZpIsZero && !fZpIsZero) {
       Value count = multiplyDims(builder, filter, {0, 1, 2});
-      Value cast = builder.create<arith::IndexCastOp>(accETy, count);
-      Value ifZp = builder.create<arith::MulIOp>(iZp, fZp);
-      Value zpUpdate = builder.create<arith::MulIOp>(ifZp, cast);
+      Value cast = arith::IndexCastOp::create(builder, accETy, count);
+      Value ifZp = arith::MulIOp::create(builder, iZp, fZp);
+      Value zpUpdate = arith::MulIOp::create(builder, ifZp, cast);
 
       newConv = addScalar(builder, newConv, zpUpdate);
     }
@@ -308,7 +308,7 @@
       llvm::SmallVector<Value> kDyn;
       GetDynamicDym(builder, kDims, kDyn, filter, 0);
       GetDynamicDym(builder, kDims, kDyn, filter, 1);
-      Value poolInit = builder.create<tensor::EmptyOp>(kDims, accETy, kDyn);
+      Value poolInit = tensor::EmptyOp::create(builder, kDims, accETy, kDyn);
 
       Value inputSum =
           builder
@@ -324,10 +324,10 @@
     // Apply the final update that occurs when there are multiple zero-points.
     if (!iZpIsZero && !fZpIsZero) {
       Value count = multiplyDims(builder, filter, {0, 1});
-      Value cast = builder.create<arith::IndexCastOp>(accETy, count);
+      Value cast = arith::IndexCastOp::create(builder, accETy, count);
 
-      Value ifZp = builder.create<arith::MulIOp>(iZp, fZp);
-      Value zpUpdate = builder.create<arith::MulIOp>(ifZp, cast);
+      Value ifZp = arith::MulIOp::create(builder, iZp, fZp);
+      Value zpUpdate = arith::MulIOp::create(builder, ifZp, cast);
 
       newConv = addScalar(builder, newConv, zpUpdate);
     }
diff --git a/compiler/src/iree/compiler/GlobalOptimization/QuantizedMatmulToMatmul.cpp b/compiler/src/iree/compiler/GlobalOptimization/QuantizedMatmulToMatmul.cpp
index 16754dc..34c3bc9 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/QuantizedMatmulToMatmul.cpp
+++ b/compiler/src/iree/compiler/GlobalOptimization/QuantizedMatmulToMatmul.cpp
@@ -78,8 +78,9 @@
     }
     // Create the result. No need to zero-fill it as we will overwrite it.
     ShapedType accType = llvm::cast<ShapedType>(acc.getType());
-    Value initResult = builder.create<tensor::EmptyOp>(
-        tensor::getMixedSizes(builder, loc, acc), accType.getElementType());
+    Value initResult = tensor::EmptyOp::create(
+        builder, tensor::getMixedSizes(builder, loc, acc),
+        accType.getElementType());
     // Create the indexing maps for the generic.
     MLIRContext *context = rewriter.getContext();
     AffineExpr b, m, n;
@@ -124,12 +125,13 @@
       indexOfLhsZpInput = addInput(lhsZp, mapToNone);
     }
     if (!lhsZpIsConstantZero && !rhsZpIsConstantZero) {
-      Value lhsZpTimesRhsZp = builder.create<arith::MulIOp>(lhsZp, rhsZp);
+      Value lhsZpTimesRhsZp = arith::MulIOp::create(builder, lhsZp, rhsZp);
 
-      Value kSize = rewriter.create<arith::IndexCastOp>(
-          loc, accElTy, builder.create<tensor::DimOp>(lhs, batch ? 2 : 1));
+      Value kSize = arith::IndexCastOp::create(
+          rewriter, loc, accElTy,
+          tensor::DimOp::create(builder, lhs, batch ? 2 : 1));
       Value lhsZpTimesRhsZpTimesKSize =
-          builder.create<arith::MulIOp>(lhsZpTimesRhsZp, kSize);
+          arith::MulIOp::create(builder, lhsZpTimesRhsZp, kSize);
       indexOfLhsZpTimesRhsZpTimesKSizeInput =
           addInput(lhsZpTimesRhsZpTimesKSize, mapToNone);
     }
@@ -153,22 +155,22 @@
           // times the sums along rows of lhs.
           if (!rhsZpIsConstantZero) {
             Value lhsSumsElTimesRhsZp =
-                b.create<arith::MulIOp>(loc, lhsSumsEl, rhsZp);
-            result = b.create<arith::SubIOp>(loc, result, lhsSumsElTimesRhsZp);
+                arith::MulIOp::create(b, loc, lhsSumsEl, rhsZp);
+            result = arith::SubIOp::create(b, loc, result, lhsSumsElTimesRhsZp);
           }
           // If the lhs zero-point is not a constant zero, we need to add it
           // times the sums along columns of rhs.
           if (!lhsZpIsConstantZero) {
             Value rhsSumsElTimesLhsZp =
-                b.create<arith::MulIOp>(loc, rhsSumsEl, lhsZp);
-            result = b.create<arith::SubIOp>(loc, result, rhsSumsElTimesLhsZp);
+                arith::MulIOp::create(b, loc, rhsSumsEl, lhsZp);
+            result = arith::SubIOp::create(b, loc, result, rhsSumsElTimesLhsZp);
           }
           // Add the final correction term, if neither zero-point is cst zero.
           if (!lhsZpIsConstantZero && !rhsZpIsConstantZero) {
-            result =
-                b.create<arith::AddIOp>(loc, result, lhsZpTimesRhsZpTimesKSize);
+            result = arith::AddIOp::create(b, loc, result,
+                                           lhsZpTimesRhsZpTimesKSize);
           }
-          b.create<linalg::YieldOp>(loc, result);
+          linalg::YieldOp::create(b, loc, result);
         });
 
     return success();
diff --git a/compiler/src/iree/compiler/GlobalOptimization/RaiseSpecialOps.cpp b/compiler/src/iree/compiler/GlobalOptimization/RaiseSpecialOps.cpp
index a17e9e6..6d3f284 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/RaiseSpecialOps.cpp
+++ b/compiler/src/iree/compiler/GlobalOptimization/RaiseSpecialOps.cpp
@@ -141,8 +141,8 @@
     }
   };
 
-  linalg::GenericOp newLinalgOp = rewriter.create<linalg::GenericOp>(
-      linalgOp.getLoc(), linalgOp.getResultTypes(), newInputs,
+  linalg::GenericOp newLinalgOp = linalg::GenericOp::create(
+      rewriter, linalgOp.getLoc(), linalgOp.getResultTypes(), newInputs,
       linalgOp.getOutputs(),
       ArrayAttr::get(linalgOp->getContext(), newIndexingMaps),
       linalgOp.getIteratorTypesAttr(), linalgOp.getDocAttr(),
@@ -198,8 +198,8 @@
       offsets.push_back(zero);
       // Get the dim size from the output tensor.
       if (ShapedType::isDynamic(outShape[currOutDim])) {
-        auto dim = rewriter.create<tensor::DimOp>(linalgOp.getLoc(), output,
-                                                  currOutDim);
+        auto dim = tensor::DimOp::create(rewriter, linalgOp.getLoc(), output,
+                                         currOutDim);
         sizes.push_back(dim.getResult());
       } else {
         sizes.push_back(rewriter.getI64IntegerAttr(outShape[currOutDim]));
@@ -220,8 +220,8 @@
   // will always be 1.
   SmallVector<OpFoldResult> strides(inputIndexingMap.getNumResults(), one);
 
-  return rewriter.create<tensor::ExtractSliceOp>(
-      linalgOp.getLoc(), outType, input, offsets, sizes, strides);
+  return tensor::ExtractSliceOp::create(rewriter, linalgOp.getLoc(), outType,
+                                        input, offsets, sizes, strides);
 }
 
 /// Matches a linalg.generic operation with a single input and init output
@@ -630,8 +630,8 @@
           {rewriter.getIndexAttr(dim), size, offset}));
     }
 
-    Value paddingValue = rewriter.create<arith::ConstantOp>(
-        constantDest.getLoc(), denseAttr.getElementType(),
+    Value paddingValue = arith::ConstantOp::create(
+        rewriter, constantDest.getLoc(), denseAttr.getElementType(),
         denseAttr.getSplatValue<TypedAttr>());
     rewriter.replaceOpWithNewOp<tensor::PadOp>(sliceOp, sliceOp.getResultType(),
                                                sliceOp.getSource(), lowPadding,
@@ -891,11 +891,11 @@
                                                          : sliceSize / 2);
   Type expandedType =
       RankedTensorType::get(targetShape, sourceType.getElementType());
-  Value expanded = rewriter.create<tensor::ExpandShapeOp>(loc, expandedType,
-                                                          source, reassoc);
+  Value expanded = tensor::ExpandShapeOp::create(rewriter, loc, expandedType,
+                                                 source, reassoc);
 
-  Value expandedOutTensor = rewriter.create<tensor::ExpandShapeOp>(
-      loc, expandedType, outTensor, reassoc);
+  Value expandedOutTensor = tensor::ExpandShapeOp::create(
+      rewriter, loc, expandedType, outTensor, reassoc);
 
   SmallVector<AffineMap> indexingMaps = {
       rewriter.getMultiDimIdentityMap(targetShape.size())};
@@ -905,29 +905,29 @@
   auto bodyBuilder = [&](OpBuilder &b, Location loc, ValueRange args) {
     SmallVector<Value> extractionIndices;
     for (size_t i = 0, e = targetShape.size(); i < e; ++i) {
-      extractionIndices.push_back(b.create<linalg::IndexOp>(loc, i));
+      extractionIndices.push_back(linalg::IndexOp::create(b, loc, i));
     }
 
     Value c1 =
-        rewriter.create<arith::ConstantOp>(loc, rewriter.getIndexAttr(1));
+        arith::ConstantOp::create(rewriter, loc, rewriter.getIndexAttr(1));
 
     // Take the reverse of the second to last iterator. Because we statically
     // guaranteed it to be 2 it just becomes `1 - iters[-2]`.
-    Value reverseSplitIdx = rewriter.create<arith::SubIOp>(
-        loc, c1, extractionIndices[targetShape.size() - 2]);
+    Value reverseSplitIdx = arith::SubIOp::create(
+        rewriter, loc, c1, extractionIndices[targetShape.size() - 2]);
     extractionIndices[targetShape.size() - 2] = reverseSplitIdx;
 
     // Extract the value from input tensor and negate the top half of the result
     // slice (lower half of the input slice).
     Value inputVal =
-        b.create<tensor::ExtractOp>(loc, expanded, extractionIndices);
-    Value maybeNegate = b.create<arith::NegFOp>(loc, inputVal);
+        tensor::ExtractOp::create(b, loc, expanded, extractionIndices);
+    Value maybeNegate = arith::NegFOp::create(b, loc, inputVal);
 
-    Value isEqual = b.create<arith::CmpIOp>(loc, arith::CmpIPredicate::eq,
-                                            reverseSplitIdx, c1);
+    Value isEqual = arith::CmpIOp::create(b, loc, arith::CmpIPredicate::eq,
+                                          reverseSplitIdx, c1);
     Value select =
-        rewriter.create<arith::SelectOp>(loc, isEqual, maybeNegate, inputVal);
-    b.create<linalg::YieldOp>(loc, select);
+        arith::SelectOp::create(rewriter, loc, isEqual, maybeNegate, inputVal);
+    linalg::YieldOp::create(b, loc, select);
   };
 
   Value result =
@@ -937,8 +937,8 @@
                                      indexingMaps, iteratorTypes, bodyBuilder)
           .getResult(0);
 
-  return rewriter.create<tensor::CollapseShapeOp>(loc, outTensor.getType(),
-                                                  result, reassoc);
+  return tensor::CollapseShapeOp::create(rewriter, loc, outTensor.getType(),
+                                         result, reassoc);
 }
 
 static Value rewriteCatNegateAndSlice(RewriterBase &rewriter,
@@ -954,9 +954,9 @@
                                       tensor::ConcatOp concatOp, Value source) {
   rewriter.setInsertionPoint(concatOp);
   Type elemType = cast<RankedTensorType>(source.getType()).getElementType();
-  Value outTensor = rewriter.create<tensor::EmptyOp>(
-      source.getLoc(), tensor::getMixedSizes(rewriter, source.getLoc(), source),
-      elemType);
+  Value outTensor = tensor::EmptyOp::create(
+      rewriter, source.getLoc(),
+      tensor::getMixedSizes(rewriter, source.getLoc(), source), elemType);
   return createCatNegateAndSlice(rewriter, outTensor, source);
 }
 
diff --git a/compiler/src/iree/compiler/GlobalOptimization/RemoveZeroExtentTensors.cpp b/compiler/src/iree/compiler/GlobalOptimization/RemoveZeroExtentTensors.cpp
index c4b93a5..01155c0 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/RemoveZeroExtentTensors.cpp
+++ b/compiler/src/iree/compiler/GlobalOptimization/RemoveZeroExtentTensors.cpp
@@ -51,8 +51,8 @@
       Operation *owner = operand.getOwner();
       int operandNum = operand.getOperandNumber();
       auto shape = tensor::getMixedSizes(rewriter, loc, operand.get());
-      auto emptyTensorOp = rewriter.create<tensor::EmptyOp>(
-          loc, shape, operandType->getElementType());
+      auto emptyTensorOp = tensor::EmptyOp::create(
+          rewriter, loc, shape, operandType->getElementType());
       rewriter.modifyOpInPlace(
           owner, [&]() { owner->setOperand(operandNum, emptyTensorOp); });
       didUpdate = true;
diff --git a/compiler/src/iree/compiler/GlobalOptimization/Utils.cpp b/compiler/src/iree/compiler/GlobalOptimization/Utils.cpp
index 8d562de..8c5edac 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/Utils.cpp
+++ b/compiler/src/iree/compiler/GlobalOptimization/Utils.cpp
@@ -87,11 +87,10 @@
   auto castedType = inputType.clone(elementType);
   SmallVector<OpFoldResult> inputMixedSizes =
       tensor::getMixedSizes(builder, loc, input);
-  Value init =
-      encoding
-          ? builder.create<tensor::EmptyOp>(loc, inputMixedSizes, elementType,
-                                            *encoding)
-          : builder.create<tensor::EmptyOp>(loc, inputMixedSizes, elementType);
+  Value init = encoding ? tensor::EmptyOp::create(builder, loc, inputMixedSizes,
+                                                  elementType, *encoding)
+                        : tensor::EmptyOp::create(builder, loc, inputMixedSizes,
+                                                  elementType);
   return builder
       .create<linalg::GenericOp>(
           loc, castedType, input, init, maps, iteratorTypes,
@@ -100,7 +99,7 @@
                 b.create(nestedLoc, castOp->getName().getIdentifier(), args[0],
                          elementType)
                     ->getResult(0);
-            b.create<linalg::YieldOp>(nestedLoc, castRes);
+            linalg::YieldOp::create(b, nestedLoc, castRes);
           },
           attrs)
       .getResult(0);
@@ -119,16 +118,16 @@
 
     staticSizes.push_back(ty.getDimSize(i));
     if (ty.isDynamicDim(i)) {
-      dynSizes.push_back(rewriter.create<tensor::DimOp>(val, i));
+      dynSizes.push_back(tensor::DimOp::create(rewriter, val, i));
     }
   }
 
   // Create a zero-filled accumulator.
   Value initAcc =
-      rewriter.create<tensor::EmptyOp>(staticSizes, accETy, dynSizes);
-  Value zeroInt = rewriter.create<arith::ConstantIntOp>(accETy, 0).getResult();
+      tensor::EmptyOp::create(rewriter, staticSizes, accETy, dynSizes);
+  Value zeroInt = arith::ConstantIntOp::create(rewriter, accETy, 0).getResult();
   Value zeroAcc =
-      rewriter.create<linalg::FillOp>(zeroInt, initAcc).getResult(0);
+      linalg::FillOp::create(rewriter, zeroInt, initAcc).getResult(0);
 
   SmallVector<AffineExpr> filterExprs(ty.getRank());
   SmallVector<AffineExpr> outputExprs;
@@ -161,9 +160,9 @@
           zeroAcc.getType(), ValueRange{val}, ValueRange{zeroAcc}, affineMaps,
           iterators,
           [=](OpBuilder &b, Location loc, ValueRange args) {
-            Value ext = b.create<arith::ExtSIOp>(loc, accETy, args[0]);
-            Value sum = b.create<arith::AddIOp>(loc, ext, args[1]);
-            b.create<linalg::YieldOp>(loc, sum);
+            Value ext = arith::ExtSIOp::create(b, loc, accETy, args[0]);
+            Value sum = arith::AddIOp::create(b, loc, ext, args[1]);
+            linalg::YieldOp::create(b, loc, sum);
           })
       .getResult(0);
 }
diff --git a/compiler/src/iree/compiler/InputConversion/Common/ConvertPrimitiveType.cpp b/compiler/src/iree/compiler/InputConversion/Common/ConvertPrimitiveType.cpp
index fd15bba..eb22472 100644
--- a/compiler/src/iree/compiler/InputConversion/Common/ConvertPrimitiveType.cpp
+++ b/compiler/src/iree/compiler/InputConversion/Common/ConvertPrimitiveType.cpp
@@ -46,11 +46,11 @@
     return nullptr;
 
   if (inputETy.getIntOrFloatBitWidth() > eTy.getIntOrFloatBitWidth()) {
-    return builder.create<arith::TruncFOp>(loc, type, inputs[0]);
+    return arith::TruncFOp::create(builder, loc, type, inputs[0]);
   }
 
   if (inputETy.getIntOrFloatBitWidth() < eTy.getIntOrFloatBitWidth()) {
-    return builder.create<arith::ExtFOp>(loc, type, inputs[0]);
+    return arith::ExtFOp::create(builder, loc, type, inputs[0]);
   }
 
   return nullptr;
@@ -68,15 +68,15 @@
   int64_t outBitwidth = eTy.getIntOrFloatBitWidth();
 
   if (inBitwidth > outBitwidth) {
-    return builder.create<arith::TruncIOp>(loc, type, inputs[0]);
+    return arith::TruncIOp::create(builder, loc, type, inputs[0]);
   }
 
   if (inBitwidth < outBitwidth && isUnsigned) {
-    return builder.create<arith::ExtUIOp>(loc, type, inputs[0]);
+    return arith::ExtUIOp::create(builder, loc, type, inputs[0]);
   }
 
   if (inBitwidth < outBitwidth && !isUnsigned) {
-    return builder.create<arith::ExtSIOp>(loc, type, inputs[0]);
+    return arith::ExtSIOp::create(builder, loc, type, inputs[0]);
   }
 
   return nullptr;
diff --git a/compiler/src/iree/compiler/InputConversion/Common/ImportMLProgram.cpp b/compiler/src/iree/compiler/InputConversion/Common/ImportMLProgram.cpp
index b7350dc..0f324fd 100644
--- a/compiler/src/iree/compiler/InputConversion/Common/ImportMLProgram.cpp
+++ b/compiler/src/iree/compiler/InputConversion/Common/ImportMLProgram.cpp
@@ -175,11 +175,11 @@
       FunctionType funcType =
           rewriter.getFunctionType(/*input=*/TypeRange{}, /*outputs=*/newType);
       ImplicitLocOpBuilder b(globalOp.getLoc(), rewriter);
-      auto funcOp = b.create<IREE::Util::FuncOp>(getterName, funcType);
+      auto funcOp = IREE::Util::FuncOp::create(b, getterName, funcType);
       funcOp.setPublic();
       b.setInsertionPointToStart(funcOp.addEntryBlock());
       auto val = globalOp.createLoadOp(globalOp.getLoc(), b);
-      b.create<IREE::Util::ReturnOp>(val.getLoadedGlobalValue());
+      IREE::Util::ReturnOp::create(b, val.getLoadedGlobalValue());
     }
 
     if (!setterName.empty() && isMutable) {
@@ -187,11 +187,11 @@
       FunctionType funcType =
           rewriter.getFunctionType(/*input=*/newType, /*outputs=*/TypeRange{});
       ImplicitLocOpBuilder b(globalOp.getLoc(), rewriter);
-      auto funcOp = b.create<IREE::Util::FuncOp>(setterName, funcType);
+      auto funcOp = IREE::Util::FuncOp::create(b, setterName, funcType);
       funcOp.setPublic();
       b.setInsertionPointToStart(funcOp.addEntryBlock());
       globalOp.createStoreOp(globalOp.getLoc(), funcOp.getArgument(0), b);
-      b.create<IREE::Util::ReturnOp>();
+      IREE::Util::ReturnOp::create(b);
     }
 
     return success();
@@ -215,19 +215,18 @@
           IREE::Util::VariantType::get(context))},
       /*outputs=*/{});
   auto funcOp =
-      b.create<IREE::Util::FuncOp>("ireeMlProgramGlobalsInit", funcType);
+      IREE::Util::FuncOp::create(b, "ireeMlProgramGlobalsInit", funcType);
   funcOp.setPublic();
   b.setInsertionPointToStart(funcOp.addEntryBlock());
 
   for (auto it : llvm::enumerate(externGlobals)) {
-    auto val = b.create<IREE::Util::ListGetOp>(
-        it.value().newType, funcOp.getArgument(0),
-        b.create<arith::ConstantIndexOp>(it.index()));
-    b.create<IREE::Util::GlobalStoreOp>(val, it.value().name);
+    auto val = IREE::Util::ListGetOp::create(
+        b, it.value().newType, funcOp.getArgument(0),
+        arith::ConstantIndexOp::create(b, it.index()));
+    IREE::Util::GlobalStoreOp::create(b, val, it.value().name);
   }
 
-  b.create<IREE::Util::ReturnOp>();
-
+  IREE::Util::ReturnOp::create(b);
   return success();
 }