Upgrade Preprocessing and Modules to free create functions. NFC. (#21877)

The builder create methods are deprecated:
https://mlir.llvm.org/deprecation/. See
https://discourse.llvm.org/t/psa-opty-create-now-with-100-more-tab-complete/87339.

The main benefit of free functions is better tab completion in
LSP-based IDEs and editors.

I'm splitting the upgrade into chunks, going by project directory.
diff --git a/compiler/src/iree/compiler/Modules/Check/Conversion/ConversionPatterns.cpp b/compiler/src/iree/compiler/Modules/Check/Conversion/ConversionPatterns.cpp
index d9db0b3..ae76182 100644
--- a/compiler/src/iree/compiler/Modules/Check/Conversion/ConversionPatterns.cpp
+++ b/compiler/src/iree/compiler/Modules/Check/Conversion/ConversionPatterns.cpp
@@ -26,21 +26,21 @@
   LogicalResult
   matchAndRewrite(T op, typename T::Adaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    auto hasImport = rewriter.create<IREE::VM::ImportResolvedOp>(
-        op.getLoc(), rewriter.getI32Type(), this->importOp.getName());
+    auto hasImport = IREE::VM::ImportResolvedOp::create(
+        rewriter, op.getLoc(), rewriter.getI32Type(), this->importOp.getName());
     auto *followingBlock = rewriter.splitBlock(rewriter.getInsertionBlock(),
                                                rewriter.getInsertionPoint());
     auto *callBlock = rewriter.createBlock(followingBlock);
     rewriter.setInsertionPointAfter(hasImport);
-    rewriter.create<IREE::VM::CondBranchOp>(op.getLoc(), hasImport, callBlock,
-                                            followingBlock);
+    IREE::VM::CondBranchOp::create(rewriter, op.getLoc(), hasImport, callBlock,
+                                   followingBlock);
     rewriter.setInsertionPointToStart(callBlock);
     auto results = rewriteToCall(op, adaptor, this->importOp,
                                  *this->getTypeConverter(), rewriter);
     if (!results.has_value())
       return failure();
     rewriter.replaceOp(op, results.value());
-    rewriter.create<IREE::VM::BranchOp>(op.getLoc(), followingBlock);
+    IREE::VM::BranchOp::create(rewriter, op.getLoc(), followingBlock);
     return success();
   }
 };
diff --git a/compiler/src/iree/compiler/Modules/Check/IR/CheckOps.cpp b/compiler/src/iree/compiler/Modules/Check/IR/CheckOps.cpp
index 72bbbf7..9ee1193 100644
--- a/compiler/src/iree/compiler/Modules/Check/IR/CheckOps.cpp
+++ b/compiler/src/iree/compiler/Modules/Check/IR/CheckOps.cpp
@@ -19,7 +19,7 @@
   using OpRewritePattern::OpRewritePattern;
   LogicalResult matchAndRewrite(ExpectEqConstOp op,
                                 PatternRewriter &rewriter) const override {
-    auto rhs = rewriter.create<arith::ConstantOp>(op.getLoc(), op.getValue());
+    auto rhs = arith::ConstantOp::create(rewriter, op.getLoc(), op.getValue());
     rewriter.replaceOpWithNewOp<ExpectEqOp>(op, op.getDevice(), op.getLhs(),
                                             rhs);
     return success();
@@ -32,7 +32,7 @@
   using OpRewritePattern::OpRewritePattern;
   LogicalResult matchAndRewrite(ExpectAlmostEqConstOp op,
                                 PatternRewriter &rewriter) const override {
-    auto rhs = rewriter.create<arith::ConstantOp>(op.getLoc(), op.getValue());
+    auto rhs = arith::ConstantOp::create(rewriter, op.getLoc(), op.getValue());
     rewriter.replaceOpWithNewOp<ExpectAlmostEqOp>(
         op, op.getDevice(), op.getLhs(), rhs, op.getAtolAttr(),
         op.getRtolAttr());
diff --git a/compiler/src/iree/compiler/Modules/HAL/Inline/Conversion/HALToHALInline/Patterns.cpp b/compiler/src/iree/compiler/Modules/HAL/Inline/Conversion/HALToHALInline/Patterns.cpp
index 8f6e8ae..eaddf5a 100644
--- a/compiler/src/iree/compiler/Modules/HAL/Inline/Conversion/HALToHALInline/Patterns.cpp
+++ b/compiler/src/iree/compiler/Modules/HAL/Inline/Conversion/HALToHALInline/Patterns.cpp
@@ -110,8 +110,8 @@
     Value storageBuffer =
         rewriter.createOrFold<IREE::HAL::Inline::BufferStorageOp>(
             op.getLoc(), adaptor.getSourceBuffer());
-    Value storageSize = rewriter.create<IREE::HAL::Inline::BufferLengthOp>(
-        op.getLoc(), adaptor.getSourceBuffer());
+    Value storageSize = IREE::HAL::Inline::BufferLengthOp::create(
+        rewriter, op.getLoc(), adaptor.getSourceBuffer());
     auto loadType = getTypeConverter()->convertType(op.getResult().getType());
     auto elementSize =
         rewriter.createOrFold<IREE::Util::SizeOfOp>(op.getLoc(), loadType);
@@ -131,8 +131,8 @@
     Value storageBuffer =
         rewriter.createOrFold<IREE::HAL::Inline::BufferStorageOp>(
             op.getLoc(), adaptor.getTargetBuffer());
-    Value storageSize = rewriter.create<IREE::HAL::Inline::BufferLengthOp>(
-        op.getLoc(), adaptor.getTargetBuffer());
+    Value storageSize = IREE::HAL::Inline::BufferLengthOp::create(
+        rewriter, op.getLoc(), adaptor.getTargetBuffer());
     auto elementSize = rewriter.createOrFold<IREE::Util::SizeOfOp>(
         op.getLoc(), adaptor.getValue().getType());
     rewriter.replaceOpWithNewOp<IREE::Util::BufferStoreOp>(
diff --git a/compiler/src/iree/compiler/Modules/HAL/Inline/Conversion/StreamToHALInline/Patterns.cpp b/compiler/src/iree/compiler/Modules/HAL/Inline/Conversion/StreamToHALInline/Patterns.cpp
index 9406ece..079072d 100644
--- a/compiler/src/iree/compiler/Modules/HAL/Inline/Conversion/StreamToHALInline/Patterns.cpp
+++ b/compiler/src/iree/compiler/Modules/HAL/Inline/Conversion/StreamToHALInline/Patterns.cpp
@@ -66,11 +66,11 @@
 
     // For now we don't have this information and assume something conservative.
     Value minAlignment =
-        rewriter.create<arith::ConstantIndexOp>(allocOp.getLoc(), 64);
+        arith::ConstantIndexOp::create(rewriter, allocOp.getLoc(), 64);
 
-    auto allocateOp = rewriter.create<IREE::HAL::Inline::BufferAllocateOp>(
-        allocOp.getLoc(), deviceBufferType, hostBufferType, minAlignment,
-        adaptor.getStorageSize());
+    auto allocateOp = IREE::HAL::Inline::BufferAllocateOp::create(
+        rewriter, allocOp.getLoc(), deviceBufferType, hostBufferType,
+        minAlignment, adaptor.getStorageSize());
     rewriter.replaceOp(allocOp, allocateOp.getResult());
 
     return success();
@@ -88,13 +88,13 @@
 
     // For now we don't have this information and assume something conservative.
     Value minAlignment =
-        rewriter.create<arith::ConstantIndexOp>(allocaOp.getLoc(), 64);
-    auto allocateOp = rewriter.create<IREE::HAL::Inline::BufferAllocateOp>(
-        allocaOp.getLoc(), deviceBufferType, hostBufferType, minAlignment,
-        adaptor.getStorageSize());
+        arith::ConstantIndexOp::create(rewriter, allocaOp.getLoc(), 64);
+    auto allocateOp = IREE::HAL::Inline::BufferAllocateOp::create(
+        rewriter, allocaOp.getLoc(), deviceBufferType, hostBufferType,
+        minAlignment, adaptor.getStorageSize());
 
     auto resolvedTimepoint =
-        rewriter.create<arith::ConstantIntOp>(allocaOp.getLoc(), 0, 64)
+        arith::ConstantIntOp::create(rewriter, allocaOp.getLoc(), 0, 64)
             .getResult();
 
     rewriter.replaceOp(allocaOp, {allocateOp.getResult(), resolvedTimepoint});
@@ -111,7 +111,7 @@
                   ConversionPatternRewriter &rewriter) const override {
     // TODO(benvanik): discard op?
     auto resolvedTimepoint =
-        rewriter.create<arith::ConstantIntOp>(deallocaOp.getLoc(), 0, 64)
+        arith::ConstantIntOp::create(rewriter, deallocaOp.getLoc(), 0, 64)
             .getResult();
     rewriter.replaceOp(deallocaOp, {resolvedTimepoint});
     return success();
@@ -177,12 +177,12 @@
   LogicalResult
   matchAndRewrite(IREE::Stream::ResourceTryMapOp tryMapOp, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    Value subspan = rewriter.create<IREE::Util::BufferSubspanOp>(
-        tryMapOp.getLoc(), adaptor.getSource(),
+    Value subspan = IREE::Util::BufferSubspanOp::create(
+        rewriter, tryMapOp.getLoc(), adaptor.getSource(),
         getResourceSize(tryMapOp.getLoc(), adaptor.getSource(), rewriter),
         adaptor.getSourceOffset(), adaptor.getResultSize());
     Value didMap =
-        rewriter.create<arith::ConstantIntOp>(tryMapOp.getLoc(), 1, 1);
+        arith::ConstantIntOp::create(rewriter, tryMapOp.getLoc(), 1, 1);
     rewriter.replaceOp(tryMapOp, {didMap, subspan});
     return success();
   }
@@ -267,17 +267,17 @@
   LogicalResult
   matchAndRewrite(IREE::Stream::FileReadOp readOp, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    Value sourceSize = rewriter.create<IREE::Util::BufferSizeOp>(
-        readOp.getLoc(), adaptor.getSource());
-    rewriter.create<IREE::Util::BufferCopyOp>(
-        readOp.getLoc(), adaptor.getSource(), sourceSize,
+    Value sourceSize = IREE::Util::BufferSizeOp::create(
+        rewriter, readOp.getLoc(), adaptor.getSource());
+    IREE::Util::BufferCopyOp::create(
+        rewriter, readOp.getLoc(), adaptor.getSource(), sourceSize,
         rewriter.createOrFold<arith::IndexCastOp>(readOp.getLoc(),
                                                   rewriter.getIndexType(),
                                                   adaptor.getSourceOffset()),
         adaptor.getTarget(), adaptor.getTargetSize(), adaptor.getTargetOffset(),
         adaptor.getLength());
     auto resolvedTimepoint =
-        rewriter.create<arith::ConstantIntOp>(readOp.getLoc(), 0, 64)
+        arith::ConstantIntOp::create(rewriter, readOp.getLoc(), 0, 64)
             .getResult();
     rewriter.replaceOp(readOp, resolvedTimepoint);
     return success();
@@ -290,17 +290,18 @@
   LogicalResult
   matchAndRewrite(IREE::Stream::FileWriteOp writeOp, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    Value targetSize = rewriter.create<IREE::Util::BufferSizeOp>(
-        writeOp.getLoc(), adaptor.getTarget());
-    rewriter.create<IREE::Util::BufferCopyOp>(
-        writeOp.getLoc(), adaptor.getSource(), adaptor.getSourceSize(),
-        adaptor.getSourceOffset(), adaptor.getTarget(), targetSize,
+    Value targetSize = IREE::Util::BufferSizeOp::create(
+        rewriter, writeOp.getLoc(), adaptor.getTarget());
+    IREE::Util::BufferCopyOp::create(
+        rewriter, writeOp.getLoc(), adaptor.getSource(),
+        adaptor.getSourceSize(), adaptor.getSourceOffset(), adaptor.getTarget(),
+        targetSize,
         rewriter.createOrFold<arith::IndexCastOp>(writeOp.getLoc(),
                                                   rewriter.getIndexType(),
                                                   adaptor.getTargetOffset()),
         adaptor.getLength());
     auto resolvedTimepoint =
-        rewriter.create<arith::ConstantIntOp>(writeOp.getLoc(), 0, 64)
+        arith::ConstantIntOp::create(rewriter, writeOp.getLoc(), 0, 64)
             .getResult();
     rewriter.replaceOp(writeOp, resolvedTimepoint);
     return success();
@@ -376,10 +377,10 @@
 
     // NOTE: we should have verified supported encodings/types at entry into the
     // HAL pipeline.
-    auto encodingType = rewriter.create<IREE::HAL::EncodingTypeOp>(
-        loc, tensorType.getEncoding());
-    auto elementType = rewriter.create<IREE::HAL::ElementTypeOp>(
-        loc, tensorType.getElementType());
+    auto encodingType = IREE::HAL::EncodingTypeOp::create(
+        rewriter, loc, tensorType.getEncoding());
+    auto elementType = IREE::HAL::ElementTypeOp::create(
+        rewriter, loc, tensorType.getElementType());
 
     // Flatten static + dynamic shape dimensions.
     SmallVector<Value> dims;
@@ -388,14 +389,14 @@
       if (tensorType.isDynamicDim(idx)) {
         dims.push_back(dynamicDims[dynamicIdx++]);
       } else {
-        dims.push_back(rewriter.create<arith::ConstantIndexOp>(
-            loc, tensorType.getDimSize(idx)));
+        dims.push_back(arith::ConstantIndexOp::create(
+            rewriter, loc, tensorType.getDimSize(idx)));
       }
     }
 
     rewriter.replaceOpWithNewOp<IREE::HAL::Inline::BufferViewCreateOp>(
         exportOp, adaptor.getSource(),
-        rewriter.create<arith::ConstantIndexOp>(loc, 0),
+        arith::ConstantIndexOp::create(rewriter, loc, 0),
         adaptor.getSourceSize(), elementType, encodingType, dims);
     return success();
   }
@@ -409,22 +410,23 @@
                   ConversionPatternRewriter &rewriter) const override {
     auto bufferType = rewriter.getType<IREE::HAL::BufferType>();
     auto bufferViewType = rewriter.getType<IREE::HAL::BufferViewType>();
-    auto zero = rewriter.create<arith::ConstantIndexOp>(traceOp.getLoc(), 0);
+    auto zero = arith::ConstantIndexOp::create(rewriter, traceOp.getLoc(), 0);
     auto resourceEncodingDims = adaptor.getResourceEncodingDims();
     SmallVector<Value> bufferViews;
     for (auto [resource, resourceSize, resourceEncoding] : llvm::zip_equal(
              adaptor.getResources(), adaptor.getResourceSizes(),
              adaptor.getResourceEncodings().getAsRange<TypeAttr>())) {
-      Value resourceBuffer = rewriter.create<IREE::HAL::Inline::BufferWrapOp>(
-          traceOp.getLoc(), bufferType, resource,
+      Value resourceBuffer = IREE::HAL::Inline::BufferWrapOp::create(
+          rewriter, traceOp.getLoc(), bufferType, resource,
           /*offset=*/
           zero,
           /*length=*/resourceSize);
       int64_t dynamicDimCount =
           cast<ShapedType>(resourceEncoding.getValue()).getNumDynamicDims();
-      bufferViews.push_back(rewriter.create<IREE::Stream::TensorExportOp>(
-          traceOp.getLoc(), bufferViewType, resourceBuffer, resourceEncoding,
-          resourceEncodingDims.take_front(dynamicDimCount), resourceSize,
+      bufferViews.push_back(IREE::Stream::TensorExportOp::create(
+          rewriter, traceOp.getLoc(), bufferViewType, resourceBuffer,
+          resourceEncoding, resourceEncodingDims.take_front(dynamicDimCount),
+          resourceSize,
           /*affinity=*/IREE::Stream::AffinityAttr{}));
       resourceEncodingDims = resourceEncodingDims.drop_front(dynamicDimCount);
     }
@@ -620,7 +622,7 @@
                                adaptor.getResourceOperands());
     // Immediately resolve the timepoint.
     auto resolvedTimepoint =
-        rewriter.create<arith::ConstantIntOp>(executeOp.getLoc(), 0, 64)
+        arith::ConstantIntOp::create(rewriter, executeOp.getLoc(), 0, 64)
             .getResult();
     rewriter.replaceOp(executeOp, resolvedTimepoint);
     return success();
@@ -740,8 +742,8 @@
                   ConversionPatternRewriter &rewriter) const override {
     rewriter.replaceOp(barrierOp, {
                                       adaptor.getResource(),
-                                      rewriter.create<arith::ConstantIntOp>(
-                                          barrierOp.getLoc(), 0, 64),
+                                      arith::ConstantIntOp::create(
+                                          rewriter, barrierOp.getLoc(), 0, 64),
                                   });
     return success();
   }
diff --git a/compiler/src/iree/compiler/Modules/HAL/Inline/Transforms/InlineExecutables.cpp b/compiler/src/iree/compiler/Modules/HAL/Inline/Transforms/InlineExecutables.cpp
index a4f60ef..437a1c4 100644
--- a/compiler/src/iree/compiler/Modules/HAL/Inline/Transforms/InlineExecutables.cpp
+++ b/compiler/src/iree/compiler/Modules/HAL/Inline/Transforms/InlineExecutables.cpp
@@ -235,8 +235,8 @@
       newArgTypes.push_back(indexType);
       auto oldArg = entryBlock->getArgument(argOffset++);
       auto newArg = entryBlock->addArgument(indexType, oldArg.getLoc());
-      oldArg.replaceAllUsesWith(entryBuilder.create<arith::IndexCastOp>(
-          oldArg.getLoc(), i32Type, newArg));
+      oldArg.replaceAllUsesWith(arith::IndexCastOp::create(
+          entryBuilder, oldArg.getLoc(), i32Type, newArg));
     }
 
     // Erase the original args.
@@ -355,13 +355,13 @@
     for (unsigned i = 0; i < workloadArgCount; ++i) {
       workload.push_back(dispatchFuncOp.getArgument(argOffset++));
     }
-    Value device = builder.create<IREE::Util::NullOp>(
-        loc, builder.getType<IREE::HAL::DeviceType>());
+    Value device = IREE::Util::NullOp::create(
+        builder, loc, builder.getType<IREE::HAL::DeviceType>());
     std::array<Value, 3> workgroupCount =
         exportOp.calculateWorkgroupCount(loc, device, workload, builder);
 
     // For now we don't handle local memory.
-    Value localMemory = builder.create<IREE::Util::NullOp>(loc, bufferType);
+    Value localMemory = IREE::Util::NullOp::create(builder, loc, bufferType);
     workgroupArgs.push_back(localMemory);
 
     // Pass all constants through.
@@ -378,9 +378,10 @@
       auto bindingLength = dispatchFuncOp.getArgument(argOffset + bindingCount +
                                                       bindingCount + i);
       Value bufferSize =
-          builder.create<IREE::Util::BufferSizeOp>(loc, bindingBuffer);
-      Value bindingView = builder.create<IREE::Util::BufferSubspanOp>(
-          loc, bindingBuffer, bufferSize, bindingOffset, bindingLength);
+          IREE::Util::BufferSizeOp::create(builder, loc, bindingBuffer);
+      Value bindingView = IREE::Util::BufferSubspanOp::create(
+          builder, loc, bindingBuffer, bufferSize, bindingOffset,
+          bindingLength);
       workgroupArgs.push_back(bindingView);
     }
 
@@ -390,33 +391,34 @@
     llvm::append_range(workgroupArgs, workgroupCount); // workgroup_count_xyz
 
     // Z -> Y -> Z loop nest.
-    builder.create<scf::ForOp>(
-        loc, indexSet.get(0), workgroupCount[2], indexSet.get(1), ValueRange{},
+    scf::ForOp::create(
+        builder, loc, indexSet.get(0), workgroupCount[2], indexSet.get(1),
+        ValueRange{},
         [&](OpBuilder &forZBuilder, Location loc, Value iz, ValueRange iters) {
           workgroupArgs[workgroupXYZOffset + 2] = iz;
-          forZBuilder.create<scf::ForOp>(
-              loc, indexSet.get(0), workgroupCount[1], indexSet.get(1),
-              ValueRange{},
+          scf::ForOp::create(
+              forZBuilder, loc, indexSet.get(0), workgroupCount[1],
+              indexSet.get(1), ValueRange{},
               [&](OpBuilder &forYBuilder, Location loc, Value iy,
                   ValueRange iters) {
                 workgroupArgs[workgroupXYZOffset + 1] = iy;
-                forYBuilder.create<scf::ForOp>(
-                    loc, indexSet.get(0), workgroupCount[0], indexSet.get(1),
-                    ValueRange{},
+                scf::ForOp::create(
+                    forYBuilder, loc, indexSet.get(0), workgroupCount[0],
+                    indexSet.get(1), ValueRange{},
                     [&](OpBuilder &forXBuilder, Location loc, Value ix,
                         ValueRange iters) {
                       workgroupArgs[workgroupXYZOffset + 0] = ix;
-                      forXBuilder.create<func::CallOp>(
-                          loc, bodyFuncOp.getNameAttr(),
+                      func::CallOp::create(
+                          forXBuilder, loc, bodyFuncOp.getNameAttr(),
                           bodyFuncOp.getResultTypes(), workgroupArgs);
-                      forXBuilder.create<scf::YieldOp>(loc);
+                      scf::YieldOp::create(forXBuilder, loc);
                     });
-                forYBuilder.create<scf::YieldOp>(loc);
+                scf::YieldOp::create(forYBuilder, loc);
               });
-          forZBuilder.create<scf::YieldOp>(loc);
+          scf::YieldOp::create(forZBuilder, loc);
         });
 
-    builder.create<IREE::Util::ReturnOp>(loc);
+    IREE::Util::ReturnOp::create(builder, loc);
   }
 };
 
diff --git a/compiler/src/iree/compiler/Modules/HAL/Loader/Conversion/HALLoaderToVM/Patterns.cpp b/compiler/src/iree/compiler/Modules/HAL/Loader/Conversion/HALLoaderToVM/Patterns.cpp
index 687830d..31f4008 100644
--- a/compiler/src/iree/compiler/Modules/HAL/Loader/Conversion/HALLoaderToVM/Patterns.cpp
+++ b/compiler/src/iree/compiler/Modules/HAL/Loader/Conversion/HALLoaderToVM/Patterns.cpp
@@ -44,8 +44,8 @@
   matchAndRewrite(IREE::HAL::Loader::ExecutableLoadOp loadOp, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     // Get format string as a rodata blob.
-    auto executableFormatStr = rewriter.create<IREE::VM::RodataInlineOp>(
-        loadOp.getLoc(), loadOp.getFormatAttr());
+    auto executableFormatStr = IREE::VM::RodataInlineOp::create(
+        rewriter, loadOp.getLoc(), loadOp.getFormatAttr());
 
     // Pack constants, if any.
     auto constantBuffer = createPackedConstantBuffer(
diff --git a/compiler/src/iree/compiler/Modules/HAL/Loader/Conversion/StreamToHALLoader/Patterns.cpp b/compiler/src/iree/compiler/Modules/HAL/Loader/Conversion/StreamToHALLoader/Patterns.cpp
index dec7815..f62a54b 100644
--- a/compiler/src/iree/compiler/Modules/HAL/Loader/Conversion/StreamToHALLoader/Patterns.cpp
+++ b/compiler/src/iree/compiler/Modules/HAL/Loader/Conversion/StreamToHALLoader/Patterns.cpp
@@ -71,8 +71,8 @@
     }
 
     // Lookup executable reference.
-    auto lookupOp = rewriter.create<IREE::HAL::Loader::ExecutableLookupOp>(
-        loc, rewriter.getType<IREE::HAL::ExecutableType>(),
+    auto lookupOp = IREE::HAL::Loader::ExecutableLookupOp::create(
+        rewriter, loc, rewriter.getType<IREE::HAL::ExecutableType>(),
         executableOp.getName());
 
     // TODO(benvanik): use scf.index_switch as with the full HAL.
@@ -128,15 +128,14 @@
         SymbolRefAttr::get(builder.getContext(), executableOp.getName(),
                            {SymbolRefAttr::get(exportOp->getParentOp()),
                             SymbolRefAttr::get(exportOp)});
-    Value ordinal =
-        builder.create<IREE::HAL::Loader::ExecutableExportOrdinalOp>(
-            loc, builder.getIndexType(), entryPointAttr);
+    Value ordinal = IREE::HAL::Loader::ExecutableExportOrdinalOp::create(
+        builder, loc, builder.getIndexType(), entryPointAttr);
 
     // Dispatch with a target-specific workgroup count.
     auto workgroupCount = exportOp.calculateWorkgroupCount(
         loc, /*device=*/nullptr, adaptor.getWorkload(), builder);
-    builder.create<IREE::HAL::Loader::ExecutableDispatchOp>(
-        loc, executable, ordinal, workgroupCount[0], workgroupCount[1],
+    IREE::HAL::Loader::ExecutableDispatchOp::create(
+        builder, loc, executable, ordinal, workgroupCount[0], workgroupCount[1],
         workgroupCount[2], pushConstants, bindingBuffers, bindingOffsets,
         bindingLengths);
   }
diff --git a/compiler/src/iree/compiler/Modules/HAL/Loader/Transforms/MaterializeExecutables.cpp b/compiler/src/iree/compiler/Modules/HAL/Loader/Transforms/MaterializeExecutables.cpp
index 1003fcf..0975996 100644
--- a/compiler/src/iree/compiler/Modules/HAL/Loader/Transforms/MaterializeExecutables.cpp
+++ b/compiler/src/iree/compiler/Modules/HAL/Loader/Transforms/MaterializeExecutables.cpp
@@ -34,12 +34,13 @@
   // This matches the executable name and is used to directly access the
   // executable reference during dispatches.
   auto executableType = moduleBuilder.getType<IREE::HAL::ExecutableType>();
-  auto globalOp = moduleBuilder.create<IREE::Util::GlobalOp>(
-      loc, executableOp.getName(), /*isMutable=*/false, executableType);
+  auto globalOp =
+      IREE::Util::GlobalOp::create(moduleBuilder, loc, executableOp.getName(),
+                                   /*isMutable=*/false, executableType);
   globalOp.setPrivate();
 
   // Create initializer that selects the right binary and loads it.
-  auto initializerOp = moduleBuilder.create<IREE::Util::InitializerOp>(loc);
+  auto initializerOp = IREE::Util::InitializerOp::create(moduleBuilder, loc);
   auto entryBuilder = OpBuilder::atBlockBegin(initializerOp.addEntryBlock());
 
   // Reserve one block per attempt to load a binary.
@@ -56,13 +57,14 @@
   auto *failBlock = initializerOp.addBlock();
   {
     auto failBuilder = OpBuilder::atBlockBegin(failBlock);
-    Value status = failBuilder.create<arith::ConstantIntOp>(
-        loc, static_cast<int>(IREE::Util::StatusCode::Unavailable), 32);
-    failBuilder.create<IREE::Util::StatusCheckOkOp>(
-        loc, status,
+    Value status = arith::ConstantIntOp::create(
+        failBuilder, loc, static_cast<int>(IREE::Util::StatusCode::Unavailable),
+        32);
+    IREE::Util::StatusCheckOkOp::create(
+        failBuilder, loc, status,
         "none of the executable binaries in the module are supported by the "
         "runtime");
-    failBuilder.create<IREE::Util::ReturnOp>(loc);
+    IREE::Util::ReturnOp::create(failBuilder, loc);
   }
 
   // Exit block takes the loaded executable and stores it.
@@ -71,14 +73,14 @@
     auto exitBuilder = OpBuilder::atBlockBegin(exitBlock);
     auto executableArg = exitBlock->addArgument(executableType, loc);
     globalOp.createStoreOp(loc, executableArg, exitBuilder);
-    exitBuilder.create<IREE::Util::ReturnOp>(loc);
+    IREE::Util::ReturnOp::create(exitBuilder, loc);
   }
 
   // Start with the first try.
   if (!queryBlocks.empty()) {
-    entryBuilder.create<cf::BranchOp>(loc, queryBlocks[0]);
+    cf::BranchOp::create(entryBuilder, loc, queryBlocks[0]);
   } else {
-    entryBuilder.create<cf::BranchOp>(loc, failBlock);
+    cf::BranchOp::create(entryBuilder, loc, failBlock);
   }
 
   // Build the full chain of try ops. An scf.switch would be nice...
@@ -102,27 +104,27 @@
     // it is. Otherwise we go to the next query block or fail if at the end.
     auto queryBuilder = OpBuilder::atBlockBegin(queryBlocks[i]);
     auto *nextBlock = i + 1 < binaryOps.size() ? queryBlocks[i + 1] : failBlock;
-    Value isSupported =
-        queryBuilder.create<IREE::HAL::Loader::ExecutableQuerySupportOp>(
-            binaryLoc, queryBuilder.getI1Type(), binaryOp.getFormatAttr());
-    queryBuilder.create<cf::CondBranchOp>(binaryLoc, isSupported, loadBlocks[i],
-                                          ValueRange{}, nextBlock,
-                                          ValueRange{});
+    Value isSupported = IREE::HAL::Loader::ExecutableQuerySupportOp::create(
+        queryBuilder, binaryLoc, queryBuilder.getI1Type(),
+        binaryOp.getFormatAttr());
+    cf::CondBranchOp::create(queryBuilder, binaryLoc, isSupported,
+                             loadBlocks[i], ValueRange{}, nextBlock,
+                             ValueRange{});
 
     // Load the executable. This may still fail but it'll propagate the error
     // up to the user with the full status message instead of continuing
     // execution.
     auto loadBuilder = OpBuilder::atBlockBegin(loadBlocks[i]);
     auto alignmentAttr = loadBuilder.getIndexAttr(64);
-    Value binaryData = loadBuilder.create<IREE::Util::BufferConstantOp>(
-        binaryLoc, binaryOp.getNameAttr(), binaryOp.getData(), alignmentAttr,
-        binaryOp.getMimeTypeAttr());
+    Value binaryData = IREE::Util::BufferConstantOp::create(
+        loadBuilder, binaryLoc, binaryOp.getNameAttr(), binaryOp.getData(),
+        alignmentAttr, binaryOp.getMimeTypeAttr());
     SmallVector<Value> constants; // TBD
-    Value executable = loadBuilder.create<IREE::HAL::Loader::ExecutableLoadOp>(
-        binaryLoc, executableType, binaryOp.getFormatAttr(), binaryData,
-        constants);
-    loadBuilder.create<cf::BranchOp>(binaryLoc, exitBlock,
-                                     ValueRange{executable});
+    Value executable = IREE::HAL::Loader::ExecutableLoadOp::create(
+        loadBuilder, binaryLoc, executableType, binaryOp.getFormatAttr(),
+        binaryData, constants);
+    cf::BranchOp::create(loadBuilder, binaryLoc, exitBlock,
+                         ValueRange{executable});
   }
 
   // Stash for faster lookup when replacing using.
diff --git a/compiler/src/iree/compiler/Modules/IO/Parameters/Conversion/ParamsToVM/Patterns.cpp b/compiler/src/iree/compiler/Modules/IO/Parameters/Conversion/ParamsToVM/Patterns.cpp
index 7910685..f8380cb 100644
--- a/compiler/src/iree/compiler/Modules/IO/Parameters/Conversion/ParamsToVM/Patterns.cpp
+++ b/compiler/src/iree/compiler/Modules/IO/Parameters/Conversion/ParamsToVM/Patterns.cpp
@@ -27,16 +27,17 @@
 static Value getStringRodata(Location loc, StringAttr attr,
                              OpBuilder &builder) {
   if (!attr) {
-    return builder.create<IREE::VM::ConstRefZeroOp>(
-        loc, IREE::VM::RefType::get(builder.getType<IREE::VM::BufferType>()));
+    return IREE::VM::ConstRefZeroOp::create(
+        builder, loc,
+        IREE::VM::RefType::get(builder.getType<IREE::VM::BufferType>()));
   }
-  return builder.create<IREE::VM::RodataInlineOp>(loc, attr);
+  return IREE::VM::RodataInlineOp::create(builder, loc, attr);
 }
 
 static std::pair<Value, Value> buildKeyTable(Location loc, ArrayAttr keysAttr,
                                              OpBuilder &builder) {
-  auto tableOp = builder.create<IREE::VM::RodataTableInlineOp>(
-      loc, builder.getIntegerType(32), keysAttr);
+  auto tableOp = IREE::VM::RodataTableInlineOp::create(
+      builder, loc, builder.getIntegerType(32), keysAttr);
   return {tableOp.getTableResult(), tableOp.getDataResult()};
 }
 
@@ -64,8 +65,9 @@
     recordValue(bufferOffset);
     recordValue(bufferLength);
   }
-  Value rodataBuffer = builder.create<IREE::VM::RodataInlineOp>(
-      loc, IREE::VM::RefType::get(builder.getType<IREE::VM::BufferType>()),
+  Value rodataBuffer = IREE::VM::RodataInlineOp::create(
+      builder, loc,
+      IREE::VM::RefType::get(builder.getType<IREE::VM::BufferType>()),
       builder.getI64VectorAttr(values));
   if (dynamicUpdates.empty()) {
     // Fast-path for all constant data.
@@ -73,18 +75,19 @@
   }
 
   // Clone the rodata so we can mutate it.
-  Value rodataSize = builder.create<IREE::VM::BufferLengthOp>(
-      loc, builder.getI64Type(), rodataBuffer);
-  Value clonedBuffer = builder.create<IREE::VM::BufferCloneOp>(
-      loc, IREE::VM::RefType::get(builder.getType<IREE::VM::BufferType>()),
-      rodataBuffer, builder.create<IREE::VM::ConstI32ZeroOp>(loc), rodataSize,
-      builder.create<IREE::VM::ConstI32Op>(loc, sizeof(uint32_t)));
+  Value rodataSize = IREE::VM::BufferLengthOp::create(
+      builder, loc, builder.getI64Type(), rodataBuffer);
+  Value clonedBuffer = IREE::VM::BufferCloneOp::create(
+      builder, loc,
+      IREE::VM::RefType::get(builder.getType<IREE::VM::BufferType>()),
+      rodataBuffer, IREE::VM::ConstI32ZeroOp::create(builder, loc), rodataSize,
+      IREE::VM::ConstI32Op::create(builder, loc, sizeof(uint32_t)));
 
   // Perform all updates.
   for (auto [index, value] : dynamicUpdates) {
-    builder.create<IREE::VM::BufferStoreI64Op>(
-        loc, clonedBuffer, builder.create<IREE::VM::ConstI64Op>(loc, index),
-        value);
+    IREE::VM::BufferStoreI64Op::create(
+        builder, loc, clonedBuffer,
+        IREE::VM::ConstI64Op::create(builder, loc, index), value);
   }
 
   return clonedBuffer;
@@ -105,15 +108,15 @@
         buildKeyTable(loadOp.getLoc(), adaptor.getSourceKeysAttr(), rewriter);
     SmallVector<Value> targetOffsets(
         adaptor.getSourceOffsets().size(),
-        rewriter.create<IREE::VM::ConstI64Op>(loadOp.getLoc(), 0));
+        IREE::VM::ConstI64Op::create(rewriter, loadOp.getLoc(), 0));
     auto spans =
         buildIndirectSpans(loadOp.getLoc(), adaptor.getSourceOffsets(),
                            targetOffsets, adaptor.getLengths(), rewriter);
     auto bufferType =
         IREE::VM::RefType::get(rewriter.getType<IREE::HAL::BufferType>());
     auto listType = IREE::VM::RefType::get(IREE::VM::ListType::get(bufferType));
-    auto callOp = rewriter.create<IREE::VM::CallOp>(
-        loadOp.getLoc(), importOp.getSymNameAttr(),
+    auto callOp = IREE::VM::CallOp::create(
+        rewriter, loadOp.getLoc(), importOp.getSymNameAttr(),
         TypeRange{
             listType,
         },
@@ -137,9 +140,9 @@
     SmallVector<Value> buffers;
     buffers.reserve(targetOffsets.size());
     for (size_t i = 0; i < targetOffsets.size(); ++i) {
-      buffers.push_back(rewriter.create<IREE::VM::ListGetRefOp>(
-          loadOp.getLoc(), bufferType, callOp.getResult(0),
-          rewriter.create<IREE::VM::ConstI32Op>(loadOp.getLoc(), (int32_t)i)));
+      buffers.push_back(IREE::VM::ListGetRefOp::create(
+          rewriter, loadOp.getLoc(), bufferType, callOp.getResult(0),
+          IREE::VM::ConstI32Op::create(rewriter, loadOp.getLoc(), (int32_t)i)));
     }
     rewriter.replaceOp(loadOp, buffers);
     return success();
diff --git a/compiler/src/iree/compiler/Modules/IO/Parameters/Conversion/StreamToParams/Patterns.cpp b/compiler/src/iree/compiler/Modules/IO/Parameters/Conversion/StreamToParams/Patterns.cpp
index 7dd0642..6e2ee00 100644
--- a/compiler/src/iree/compiler/Modules/IO/Parameters/Conversion/StreamToParams/Patterns.cpp
+++ b/compiler/src/iree/compiler/Modules/IO/Parameters/Conversion/StreamToParams/Patterns.cpp
@@ -34,11 +34,10 @@
     auto resourceType =
         cast<IREE::Stream::ResourceType>(loadOp.getResults().front().getType());
 
-    auto resolveOp =
-        rewriter.create<IREE::HAL::AllocatorResolveMemoryPropertiesOp>(
-            loc, rewriter.getI32Type(), rewriter.getI32Type(),
-            IREE::Stream::AffinityAttr::lookupOrDefault(loadOp),
-            static_cast<IREE::HAL::Lifetime>(resourceType.getLifetime()));
+    auto resolveOp = IREE::HAL::AllocatorResolveMemoryPropertiesOp::create(
+        rewriter, loc, rewriter.getI32Type(), rewriter.getI32Type(),
+        IREE::Stream::AffinityAttr::lookupOrDefault(loadOp),
+        static_cast<IREE::HAL::Lifetime>(resourceType.getLifetime()));
 
     auto [device, queueAffinity] =
         lookupDeviceAndQueueAffinityFor(loadOp, resolveOp.getMemoryTypes(),
@@ -53,9 +52,9 @@
     // Queue operation, which acts like an allocation.
     SmallVector<Type> newResultTypes(loadOp.getResults().size(),
                                      rewriter.getType<IREE::HAL::BufferType>());
-    auto newOp = rewriter.create<IREE::IO::Parameters::LoadOp>(
-        loc, newResultTypes, device, queueAffinity, waitFence, signalFence,
-        adaptor.getSourceScopeAttr(), adaptor.getSourceKeysAttr(),
+    auto newOp = IREE::IO::Parameters::LoadOp::create(
+        rewriter, loc, newResultTypes, device, queueAffinity, waitFence,
+        signalFence, adaptor.getSourceScopeAttr(), adaptor.getSourceKeysAttr(),
         adaptor.getSourceOffsets(), resolveOp.getMemoryTypes(),
         resolveOp.getBufferUsage(), adaptor.getResultSizes());
 
@@ -84,8 +83,8 @@
         loc, device, readOp.getResultTimepoint(), rewriter);
 
     // Queue operation (a read is just a gather with a single span).
-    rewriter.create<IREE::IO::Parameters::GatherOp>(
-        loc, device, queueAffinity, waitFence, signalFence,
+    IREE::IO::Parameters::GatherOp::create(
+        rewriter, loc, device, queueAffinity, waitFence, signalFence,
         adaptor.getSourceScopeAttr(),
         rewriter.getArrayAttr(adaptor.getSourceKeyAttr()),
         ValueRange{adaptor.getSourceOffset()}, adaptor.getTarget(),
@@ -114,9 +113,9 @@
         loc, device, writeOp.getResultTimepoint(), rewriter);
 
     // Queue operation (a write is just a scatter with a single span).
-    rewriter.create<IREE::IO::Parameters::ScatterOp>(
-        loc, device, queueAffinity, waitFence, signalFence, adaptor.getSource(),
-        ValueRange{adaptor.getSourceOffset()},
+    IREE::IO::Parameters::ScatterOp::create(
+        rewriter, loc, device, queueAffinity, waitFence, signalFence,
+        adaptor.getSource(), ValueRange{adaptor.getSourceOffset()},
         ValueRange{adaptor.getSourceLength()}, adaptor.getTargetScopeAttr(),
         rewriter.getArrayAttr(adaptor.getTargetKeyAttr()),
         ValueRange{adaptor.getTargetOffset()});
@@ -143,8 +142,8 @@
         loc, device, gatherOp.getResultTimepoint(), rewriter);
 
     // Queue operation.
-    rewriter.create<IREE::IO::Parameters::GatherOp>(
-        loc, device, queueAffinity, waitFence, signalFence,
+    IREE::IO::Parameters::GatherOp::create(
+        rewriter, loc, device, queueAffinity, waitFence, signalFence,
         adaptor.getSourceScopeAttr(), adaptor.getSourceKeysAttr(),
         adaptor.getSourceOffsets(), adaptor.getTarget(),
         adaptor.getTargetOffsets(), adaptor.getTargetLengths());
@@ -171,11 +170,11 @@
         loc, device, scatterOp.getResultTimepoint(), rewriter);
 
     // Queue operation.
-    rewriter.create<IREE::IO::Parameters::ScatterOp>(
-        loc, device, queueAffinity, waitFence, signalFence, adaptor.getSource(),
-        adaptor.getSourceOffsets(), adaptor.getSourceLengths(),
-        adaptor.getTargetScopeAttr(), adaptor.getTargetKeysAttr(),
-        adaptor.getTargetOffsets());
+    IREE::IO::Parameters::ScatterOp::create(
+        rewriter, loc, device, queueAffinity, waitFence, signalFence,
+        adaptor.getSource(), adaptor.getSourceOffsets(),
+        adaptor.getSourceLengths(), adaptor.getTargetScopeAttr(),
+        adaptor.getTargetKeysAttr(), adaptor.getTargetOffsets());
 
     rewriter.replaceOp(scatterOp, {signalFence});
     return success();
diff --git a/compiler/src/iree/compiler/Preprocessing/Common/ApplyPDLPatterns.cpp b/compiler/src/iree/compiler/Preprocessing/Common/ApplyPDLPatterns.cpp
index 2c45038..627e1a1 100644
--- a/compiler/src/iree/compiler/Preprocessing/Common/ApplyPDLPatterns.cpp
+++ b/compiler/src/iree/compiler/Preprocessing/Common/ApplyPDLPatterns.cpp
@@ -109,10 +109,10 @@
                              RankedTensorType tensorType, Value value,
                              Value bindingOffset, ValueRange dynamicDims) {
   auto memrefType = getMemRefTypeFor(rewriter.getContext(), tensorType);
-  Value memrefVal = rewriter.create<IREE::Stream::BindingSubspanOp>(
-      loc, memrefType, value, bindingOffset, dynamicDims);
+  Value memrefVal = IREE::Stream::BindingSubspanOp::create(
+      rewriter, loc, memrefType, value, bindingOffset, dynamicDims);
   auto extractMetadataOp =
-      rewriter.create<IREE::Codegen::ExtractStridedMetadataOp>(loc, memrefVal);
+      IREE::Codegen::ExtractStridedMetadataOp::create(rewriter, loc, memrefVal);
   return std::make_pair<Value, Value>(extractMetadataOp.getResult(0),
                                       extractMetadataOp.getResult(1));
 }
@@ -165,7 +165,7 @@
   auto entryPointFnType = FunctionType::get(context, entryPointInputTypes,
                                             /*results=*/TypeRange{});
   auto entryPointFn =
-      rewriter.create<func::FuncOp>(loc, entryPointFnName, entryPointFnType);
+      func::FuncOp::create(rewriter, loc, entryPointFnName, entryPointFnType);
   Region &body = entryPointFn.getBody();
   SmallVector<Location> locs(entryPointInputTypes.size(), loc);
   rewriter.createBlock(&body, body.begin(), entryPointInputTypes, locs);
@@ -175,7 +175,7 @@
   auto scalarArgs = entryPointArgs.slice(numTensorOperands, numScalarOperands);
   auto dynamicDimArgs = entryPointArgs.take_back(totalNumDynamicDims);
   SmallVector<Value> callOperands;
-  Value zero = rewriter.create<arith::ConstantIndexOp>(loc, 0);
+  Value zero = arith::ConstantIndexOp::create(rewriter, loc, 0);
 
   // Method to marshal tensor types into call operands.
   auto marshalTensorTypes = [&](RankedTensorType tensorType) {
@@ -204,8 +204,8 @@
   });
   llvm::for_each(otherOperandTypes, marshalInputTypes);
 
-  rewriter.create<func::CallOp>(loc, externalFn, callOperands);
-  rewriter.create<func::ReturnOp>(loc, /*operands=*/ValueRange{});
+  func::CallOp::create(rewriter, loc, externalFn, callOperands);
+  func::ReturnOp::create(rewriter, loc, /*operands=*/ValueRange{});
   return entryPointFn;
 }
 
@@ -263,13 +263,13 @@
       externalFnName.str(), inputTypes, resultTypes, otherOperandTypes);
   std::string executableOpName = uniqueExternalFnName + "_executable";
   auto executableOp =
-      rewriter.create<IREE::Stream::ExecutableOp>(loc, executableOpName);
+      IREE::Stream::ExecutableOp::create(rewriter, loc, executableOpName);
   executableOp.setPrivate();
   Block &executableOpBody = executableOp.getBlock();
   rewriter.setInsertionPointToStart(&executableOpBody);
 
   // Create the dispatch inner module.
-  auto innerModule = rewriter.create<ModuleOp>(loc);
+  auto innerModule = ModuleOp::create(rewriter, loc);
   Block *moduleBody = innerModule.getBody();
   rewriter.setInsertionPointToStart(moduleBody);
 
@@ -278,7 +278,7 @@
   FunctionType externalFnCallType = getExternalFunctionCallType(
       context, loc, inputTypes, resultTypes, otherOperandTypes);
   func::FuncOp externalFnCall =
-      rewriter.create<func::FuncOp>(loc, externalFnName, externalFnCallType);
+      func::FuncOp::create(rewriter, loc, externalFnName, externalFnCallType);
   externalFnCall.setPrivate();
   externalFnCall->setAttr("llvm.bareptr", rewriter.getBoolArrayAttr(true));
 
@@ -290,8 +290,9 @@
 
   // Create the export operation.
   rewriter.setInsertionPoint(innerModule);
-  auto exportOp = rewriter.create<IREE::Stream::ExecutableExportOp>(
-      loc, entryPointName, FlatSymbolRefAttr::get(context, entryPointName));
+  auto exportOp = IREE::Stream::ExecutableExportOp::create(
+      rewriter, loc, entryPointName,
+      FlatSymbolRefAttr::get(context, entryPointName));
 
   // Create the body of the export operation.
   // TODO(MaheshRavishankar): This represents the number of workgroups to use.
@@ -301,8 +302,8 @@
   Block *exportOpBody =
       rewriter.createBlock(&exportOpRegion, exportOpRegion.begin());
   rewriter.setInsertionPointToStart(exportOpBody);
-  Value one = rewriter.create<arith::ConstantIndexOp>(loc, 1);
-  rewriter.create<IREE::Stream::ReturnOp>(loc, ValueRange{one, one, one});
+  Value one = arith::ConstantIndexOp::create(rewriter, loc, 1);
+  IREE::Stream::ReturnOp::create(rewriter, loc, ValueRange{one, one, one});
   return SymbolRefAttr::get(rewriter.getStringAttr(executableOpName),
                             SymbolRefAttr::get(entryFn));
 }
@@ -326,7 +327,7 @@
       if (ShapedType::isStatic(shape))
         continue;
 
-      Value dim = rewriter.create<tensor::DimOp>(loc, operand, index);
+      Value dim = tensor::DimOp::create(rewriter, loc, operand, index);
       operandDynamicDims.push_back(dim);
     }
   }
@@ -336,8 +337,8 @@
   operandsVec.append(resultDynamicDims.begin(), resultDynamicDims.end());
 
   // Insert the `flow.dispatch`.
-  auto dispatchOp = rewriter.create<IREE::Flow::DispatchOp>(
-      loc, exportOp,
+  auto dispatchOp = IREE::Flow::DispatchOp::create(
+      rewriter, loc, exportOp,
       /*workload=*/ValueRange{}, resultTypes, resultDynamicDims, operandsVec,
       operandDynamicDims, /*tiedOperands=*/nullptr);
   return dispatchOp;
diff --git a/compiler/src/iree/compiler/Preprocessing/Common/ConvertConv2DToImg2Col.cpp b/compiler/src/iree/compiler/Preprocessing/Common/ConvertConv2DToImg2Col.cpp
index 8f7fd83..19cfc6a 100644
--- a/compiler/src/iree/compiler/Preprocessing/Common/ConvertConv2DToImg2Col.cpp
+++ b/compiler/src/iree/compiler/Preprocessing/Common/ConvertConv2DToImg2Col.cpp
@@ -31,15 +31,15 @@
 static Value createAdd(Location loc, Value x, Value y, bool isInt,
                        OpBuilder &builder) {
   if (isInt)
-    return builder.create<arith::AddIOp>(loc, x, y);
-  return builder.create<arith::AddFOp>(loc, x, y);
+    return arith::AddIOp::create(builder, loc, x, y);
+  return arith::AddFOp::create(builder, loc, x, y);
 }
 
 static Value createMul(Location loc, Value x, Value y, bool isInt,
                        OpBuilder &builder) {
   if (isInt)
-    return builder.create<arith::MulIOp>(loc, x, y);
-  return builder.create<arith::MulFOp>(loc, x, y);
+    return arith::MulIOp::create(builder, loc, x, y);
+  return arith::MulFOp::create(builder, loc, x, y);
 }
 
 namespace {
@@ -122,8 +122,8 @@
 
     SmallVector<int64_t> colTensorShape = {n, oh, ow, fh, fw, ic};
 
-    Value colTensor = rewriter.create<tensor::EmptyOp>(
-        loc, colTensorShape, inputType.getElementType());
+    Value colTensor = tensor::EmptyOp::create(rewriter, loc, colTensorShape,
+                                              inputType.getElementType());
 
     AffineExpr nDim, ohDim, owDim, khDim, kwDim, icDim;
     bindDims(getContext(), nDim, ohDim, owDim, khDim, kwDim, icDim);
@@ -146,12 +146,12 @@
         AffineMap::get(nloops, 0, inputExprs, rewriter.getContext()),
         AffineMap::getMultiDimIdentityMap(nloops, rewriter.getContext())};
 
-    auto img2ColTensor = rewriter.create<linalg::GenericOp>(
-        loc, colTensor.getType(),
+    auto img2ColTensor = linalg::GenericOp::create(
+        rewriter, loc, colTensor.getType(),
         /*inputs=*/input, /*outputs=*/colTensor, img2colIndexingMaps,
         img2colIterators,
         [&](OpBuilder &nestedBuilder, Location nestedLoc, ValueRange args) {
-          nestedBuilder.create<linalg::YieldOp>(nestedLoc, args[0]);
+          linalg::YieldOp::create(nestedBuilder, nestedLoc, args[0]);
         });
 
     SmallVector<ReassociationIndices> img2ColTensorReassocIndices;
@@ -179,20 +179,20 @@
     auto reshapedFilterType =
         RankedTensorType::get({fh * fw * ic, oc}, inputType.getElementType());
 
-    Value reshapedImg2ColTensor = rewriter.create<tensor::CollapseShapeOp>(
-        loc, reshapedImg2ColTensorType, img2ColTensor.getResult(0),
+    Value reshapedImg2ColTensor = tensor::CollapseShapeOp::create(
+        rewriter, loc, reshapedImg2ColTensorType, img2ColTensor.getResult(0),
         img2ColTensorReassocIndices);
 
-    Value reshapedFilter = rewriter.create<tensor::CollapseShapeOp>(
-        loc, reshapedFilterType, filter, filterReassocIndices);
+    Value reshapedFilter = tensor::CollapseShapeOp::create(
+        rewriter, loc, reshapedFilterType, filter, filterReassocIndices);
 
-    Value reshapedOutput = rewriter.create<tensor::CollapseShapeOp>(
-        loc, reshapedOutputType, output, outputReassocIndices);
+    Value reshapedOutput = tensor::CollapseShapeOp::create(
+        rewriter, loc, reshapedOutputType, output, outputReassocIndices);
 
     Value result;
     if (n == 1) {
-      auto matmulOp = rewriter.create<linalg::MatmulOp>(
-          loc, reshapedOutputType,
+      auto matmulOp = linalg::MatmulOp::create(
+          rewriter, loc, reshapedOutputType,
           ArrayRef<Value>{reshapedImg2ColTensor, reshapedFilter},
           ArrayRef<Value>{reshapedOutput});
       result = matmulOp.getResults().front();
@@ -210,21 +210,21 @@
       SmallVector<utils::IteratorType> genericIterators = {parallel, parallel,
                                                            parallel, reduction};
       bool isInt = llvm::isa<IntegerType>(outputType.getElementType());
-      auto genericOp = rewriter.create<linalg::GenericOp>(
-          loc, reshapedOutputType,
+      auto genericOp = linalg::GenericOp::create(
+          rewriter, loc, reshapedOutputType,
           /*inputs=*/ValueRange{reshapedImg2ColTensor, reshapedFilter},
           /*outputs=*/ValueRange{reshapedOutput},
           ArrayRef<AffineMap>{lhsMap, rhsMap, resultMap}, genericIterators,
           [&](OpBuilder &nestedBuilder, Location nestedLoc, ValueRange args) {
             Value mul = createMul(loc, args[0], args[1], isInt, nestedBuilder);
             Value add = createAdd(loc, mul, args[2], isInt, nestedBuilder);
-            nestedBuilder.create<linalg::YieldOp>(nestedLoc, add);
+            linalg::YieldOp::create(nestedBuilder, nestedLoc, add);
           });
       result = genericOp.getResults().front();
     }
 
-    auto reshapedResult = rewriter.create<tensor::ExpandShapeOp>(
-        loc, outputType, result, outputReassocIndices);
+    auto reshapedResult = tensor::ExpandShapeOp::create(
+        rewriter, loc, outputType, result, outputReassocIndices);
 
     rewriter.replaceOp(convOp, ArrayRef<Value>{reshapedResult});
 
@@ -279,8 +279,8 @@
       SmallVector<int64_t> targetShape = llvm::map_to_vector(
           indices, [&](int64_t index) -> int64_t { return inputShape[index]; });
 
-      Value outputTensor = rewriter.create<tensor::EmptyOp>(
-          loc, targetShape, operandTensorType.getElementType());
+      Value outputTensor = tensor::EmptyOp::create(
+          rewriter, loc, targetShape, operandTensorType.getElementType());
 
       SmallVector<utils::IteratorType> loopAttributeTypes(
           nloops, utils::IteratorType::parallel);
@@ -290,12 +290,12 @@
               AffineMap::get(nloops, 0, exprs, rewriter.getContext())),
           AffineMap::getMultiDimIdentityMap(nloops, rewriter.getContext())};
 
-      auto transposedOp = rewriter.create<linalg::GenericOp>(
-          loc, outputTensor.getType(),
+      auto transposedOp = linalg::GenericOp::create(
+          rewriter, loc, outputTensor.getType(),
           /*inputs=*/operand, /*outputs=*/outputTensor, indexingMaps,
           loopAttributeTypes,
           [&](OpBuilder &nestedBuilder, Location nestedLoc, ValueRange args) {
-            nestedBuilder.create<linalg::YieldOp>(nestedLoc, args[0]);
+            linalg::YieldOp::create(nestedBuilder, nestedLoc, args[0]);
           });
 
       return transposedOp.getResult(0);
@@ -342,15 +342,15 @@
         AffineMap::get(nloops, 0, inputExprs, rewriter.getContext()),
         AffineMap::getMultiDimIdentityMap(nloops, rewriter.getContext())};
 
-    Value colTensor = rewriter.create<tensor::EmptyOp>(
-        loc, colTensorShape, inputType.getElementType());
+    Value colTensor = tensor::EmptyOp::create(rewriter, loc, colTensorShape,
+                                              inputType.getElementType());
 
-    auto img2ColTensor = rewriter.create<linalg::GenericOp>(
-        loc, colTensor.getType(),
+    auto img2ColTensor = linalg::GenericOp::create(
+        rewriter, loc, colTensor.getType(),
         /*inputs=*/inputT, /*outputs=*/colTensor, indexingMaps,
         loopAttributeTypes,
         [&](OpBuilder &nestedBuilder, Location nestedLoc, ValueRange args) {
-          nestedBuilder.create<linalg::YieldOp>(nestedLoc, args[0]);
+          linalg::YieldOp::create(nestedBuilder, nestedLoc, args[0]);
         });
 
     SmallVector<ReassociationIndices> img2ColTensorReassocIndices = {
@@ -366,26 +366,27 @@
     auto reshapedOutputTensorType =
         RankedTensorType::get({n * c, oh * ow}, outputType.getElementType());
 
-    Value reshapedImg2ColTensor = rewriter.create<tensor::CollapseShapeOp>(
-        loc, reshapedImg2ColTensorType, img2ColTensor.getResult(0),
+    Value reshapedImg2ColTensor = tensor::CollapseShapeOp::create(
+        rewriter, loc, reshapedImg2ColTensorType, img2ColTensor.getResult(0),
         img2ColTensorReassocIndices);
-    Value reshapedFilterTensor = rewriter.create<tensor::CollapseShapeOp>(
-        loc, reshapedFilterTensorType, filterT, filterReassociationIndice);
-    Value reshapedoutputTensor = rewriter.create<tensor::CollapseShapeOp>(
-        loc, reshapedOutputTensorType, transposedOutputTensor,
+    Value reshapedFilterTensor =
+        tensor::CollapseShapeOp::create(rewriter, loc, reshapedFilterTensorType,
+                                        filterT, filterReassociationIndice);
+    Value reshapedoutputTensor = tensor::CollapseShapeOp::create(
+        rewriter, loc, reshapedOutputTensorType, transposedOutputTensor,
         outputReassociationIndice);
 
-    auto batchMatVecResult = rewriter.create<linalg::BatchMatvecOp>(
-        loc, TypeRange{reshapedoutputTensor.getType()},
+    auto batchMatVecResult = linalg::BatchMatvecOp::create(
+        rewriter, loc, TypeRange{reshapedoutputTensor.getType()},
         ValueRange{reshapedImg2ColTensor, reshapedFilterTensor},
         ValueRange{reshapedoutputTensor});
 
     SmallVector<ReassociationIndices> batchMatVecReassociationIndice = {{0, 1},
                                                                         {2, 3}};
 
-    Value batchMatVecResultReshaped = rewriter.create<tensor::ExpandShapeOp>(
-        loc, transposedOutputTensor.getType(), batchMatVecResult.getResult(0),
-        batchMatVecReassociationIndice);
+    Value batchMatVecResultReshaped = tensor::ExpandShapeOp::create(
+        rewriter, loc, transposedOutputTensor.getType(),
+        batchMatVecResult.getResult(0), batchMatVecReassociationIndice);
 
     auto transposedResult =
         transposeOperand(batchMatVecResultReshaped, {0, 2, 3, 1});
@@ -443,8 +444,8 @@
 
     SmallVector<int64_t> colTensorShape = {n, ic, fh, fw, oh, ow};
 
-    Value colTensor = rewriter.create<tensor::EmptyOp>(
-        loc, colTensorShape, inputType.getElementType());
+    Value colTensor = tensor::EmptyOp::create(rewriter, loc, colTensorShape,
+                                              inputType.getElementType());
 
     AffineExpr nDim, icDim, khDim, kwDim, ohDim, owDim;
     bindDims(getContext(), nDim, icDim, khDim, kwDim, ohDim, owDim);
@@ -467,19 +468,19 @@
         AffineMap::get(nloops, 0, inputExprs, rewriter.getContext()),
         AffineMap::getMultiDimIdentityMap(nloops, rewriter.getContext())};
 
-    auto img2ColTensor = rewriter.create<linalg::GenericOp>(
-        loc, colTensor.getType(),
+    auto img2ColTensor = linalg::GenericOp::create(
+        rewriter, loc, colTensor.getType(),
         /*inputs=*/input, /*outputs=*/colTensor, img2colIndexingMaps,
         img2colIterators,
         [&](OpBuilder &nestedBuilder, Location nestedLoc, ValueRange args) {
-          nestedBuilder.create<linalg::YieldOp>(nestedLoc, args[0]);
+          linalg::YieldOp::create(nestedBuilder, nestedLoc, args[0]);
         });
 
     SmallVector<ReassociationIndices> filterReassocIndices = {{0}, {1, 2, 3}};
     auto reshapedFilterType =
         RankedTensorType::get({oc, fh * fw * ic}, inputType.getElementType());
-    Value reshapedFilter = rewriter.create<tensor::CollapseShapeOp>(
-        loc, reshapedFilterType, filter, filterReassocIndices);
+    Value reshapedFilter = tensor::CollapseShapeOp::create(
+        rewriter, loc, reshapedFilterType, filter, filterReassocIndices);
 
     SmallVector<ReassociationIndices> img2ColTensorReassocIndices;
     SmallVector<ReassociationIndices> outputReassocIndices;
@@ -502,17 +503,17 @@
           RankedTensorType::get({n, oc, oh * ow}, outputType.getElementType());
     }
 
-    Value reshapedImg2ColTensor = rewriter.create<tensor::CollapseShapeOp>(
-        loc, reshapedImg2ColTensorType, img2ColTensor.getResult(0),
+    Value reshapedImg2ColTensor = tensor::CollapseShapeOp::create(
+        rewriter, loc, reshapedImg2ColTensorType, img2ColTensor.getResult(0),
         img2ColTensorReassocIndices);
 
-    Value reshapedOutput = rewriter.create<tensor::CollapseShapeOp>(
-        loc, reshapedOutputType, output, outputReassocIndices);
+    Value reshapedOutput = tensor::CollapseShapeOp::create(
+        rewriter, loc, reshapedOutputType, output, outputReassocIndices);
 
     Value result;
     if (n == 1) {
-      auto matmulOp = rewriter.create<linalg::MatmulOp>(
-          loc, reshapedOutputType,
+      auto matmulOp = linalg::MatmulOp::create(
+          rewriter, loc, reshapedOutputType,
           ArrayRef<Value>{reshapedFilter, reshapedImg2ColTensor},
           ArrayRef<Value>{reshapedOutput});
       result = matmulOp.getResults().front();
@@ -530,21 +531,21 @@
       SmallVector<utils::IteratorType> genericIterators = {parallel, parallel,
                                                            parallel, reduction};
       bool isInt = llvm::isa<IntegerType>(outputType.getElementType());
-      auto genericOp = rewriter.create<linalg::GenericOp>(
-          loc, reshapedOutputType,
+      auto genericOp = linalg::GenericOp::create(
+          rewriter, loc, reshapedOutputType,
           /*inputs=*/ValueRange{reshapedFilter, reshapedImg2ColTensor},
           /*outputs=*/ValueRange{reshapedOutput},
           ArrayRef<AffineMap>{lhsMap, rhsMap, resultMap}, genericIterators,
           [&](OpBuilder &nestedBuilder, Location nestedLoc, ValueRange args) {
             Value mul = createMul(loc, args[0], args[1], isInt, nestedBuilder);
             Value add = createAdd(loc, mul, args[2], isInt, nestedBuilder);
-            nestedBuilder.create<linalg::YieldOp>(nestedLoc, add);
+            linalg::YieldOp::create(nestedBuilder, nestedLoc, add);
           });
       result = genericOp.getResults().front();
     }
 
-    auto reshapedResult = rewriter.create<tensor::ExpandShapeOp>(
-        loc, outputType, result, outputReassocIndices);
+    auto reshapedResult = tensor::ExpandShapeOp::create(
+        rewriter, loc, outputType, result, outputReassocIndices);
 
     rewriter.replaceOp(convOp, ArrayRef<Value>{reshapedResult});
 
diff --git a/compiler/src/iree/compiler/Preprocessing/Common/ConvertConvFilterToChannelsLast.cpp b/compiler/src/iree/compiler/Preprocessing/Common/ConvertConvFilterToChannelsLast.cpp
index 30f0f2a..cabe4a9 100644
--- a/compiler/src/iree/compiler/Preprocessing/Common/ConvertConvFilterToChannelsLast.cpp
+++ b/compiler/src/iree/compiler/Preprocessing/Common/ConvertConvFilterToChannelsLast.cpp
@@ -45,9 +45,9 @@
   applyPermutationToVector(dimSizes, perm);
 
   auto tensorType = cast<RankedTensorType>(tensor.getType());
-  auto emptyTensor = rewriter.create<tensor::EmptyOp>(
-      loc, dimSizes, tensorType.getElementType());
-  return rewriter.create<linalg::TransposeOp>(loc, tensor, emptyTensor, perm)
+  auto emptyTensor = tensor::EmptyOp::create(rewriter, loc, dimSizes,
+                                             tensorType.getElementType());
+  return linalg::TransposeOp::create(rewriter, loc, tensor, emptyTensor, perm)
       .getResult()[0];
 }
 
@@ -70,9 +70,10 @@
 
   SmallVector<utils::IteratorType> iterators = convOp.getIteratorTypesArray();
 
-  auto genericOp = rewriter.create<linalg::GenericOp>(
-      loc, output.getType(), ValueRange{input, transposedFilter}, output,
-      ArrayRef<AffineMap>{inputMap, transposedFilterMap, outputMap}, iterators);
+  auto genericOp = linalg::GenericOp::create(
+      rewriter, loc, output.getType(), ValueRange{input, transposedFilter},
+      output, ArrayRef<AffineMap>{inputMap, transposedFilterMap, outputMap},
+      iterators);
 
   // Reuse the same payload as the original convolution op.
   rewriter.inlineRegionBefore(convOp->getRegion(0), genericOp.getRegion(),
diff --git a/compiler/src/iree/compiler/Preprocessing/Common/ConvertConvToChannelsLast.cpp b/compiler/src/iree/compiler/Preprocessing/Common/ConvertConvToChannelsLast.cpp
index 125da40..2cbc995 100644
--- a/compiler/src/iree/compiler/Preprocessing/Common/ConvertConvToChannelsLast.cpp
+++ b/compiler/src/iree/compiler/Preprocessing/Common/ConvertConvToChannelsLast.cpp
@@ -66,8 +66,8 @@
   }
   SmallVector<utils::IteratorType> iterators = srcConv.getIteratorTypesArray();
   iterators.append(newIteratorTypes);
-  auto genericConv = b.create<linalg::GenericOp>(
-      loc, output.getType(), ValueRange{input, filter}, output,
+  auto genericConv = linalg::GenericOp::create(
+      b, loc, output.getType(), ValueRange{input, filter}, output,
       ArrayRef<AffineMap>{newInputMap, newFilterMap, newOutputMap}, iterators);
   IRMapping mapper;
   srcConv->getRegion(0).cloneInto(&genericConv.getRegion(), mapper);
@@ -200,7 +200,7 @@
     for (auto [index, i] : llvm::enumerate(targetIndices)) {
       if (ShapedType::isDynamic(inputShape[i])) {
         transposedTileSizes[index] =
-            rewriter.create<tensor::DimOp>(loc, input, i).getResult();
+            tensor::DimOp::create(rewriter, loc, input, i).getResult();
       } else {
         transposedTileSizes[index] = rewriter.getIndexAttr(inputShape[i]);
       }
@@ -211,8 +211,8 @@
   auto empty = linalg::PackOp::createDestinationTensor(
       rewriter, loc, input, transposedTileSizes, targetIndices,
       SmallVector<int64_t>{});
-  auto packedInput = rewriter.create<linalg::PackOp>(
-      loc, input, empty, targetIndices, transposedTileSizes,
+  auto packedInput = linalg::PackOp::create(
+      rewriter, loc, input, empty, targetIndices, transposedTileSizes,
       /*padding=*/std::nullopt, SmallVector<int64_t>{});
 
   SmallVector<AffineExpr> mapResults(inputMap.getResults());
@@ -292,8 +292,8 @@
       rewriter, loc, packedOutput, packOp.getMixedTiles(),
       packOp.getInnerDimsPos(), packOp.getOuterDimsPerm());
 
-  auto unpackedOutput = rewriter.create<linalg::UnPackOp>(
-      loc, packedOutput, empty, packOp.getInnerDimsPos(),
+  auto unpackedOutput = linalg::UnPackOp::create(
+      rewriter, loc, packedOutput, empty, packOp.getInnerDimsPos(),
       packOp.getMixedTiles(), packOp.getOuterDimsPerm());
   return unpackedOutput.getResult();
 }
@@ -564,8 +564,8 @@
     SmallVector<OpFoldResult> mixedSizes =
         tensor::getMixedSizes(rewriter, loc, packOp.getSource());
     applyPermutationToVector(mixedSizes, perm);
-    Value empty = rewriter.create<tensor::EmptyOp>(loc, mixedSizes,
-                                                   destType.getElementType());
+    Value empty = tensor::EmptyOp::create(rewriter, loc, mixedSizes,
+                                          destType.getElementType());
     Value transposed =
         rewriter
             .create<linalg::TransposeOp>(loc, packOp.getSource(), empty, perm)
@@ -643,8 +643,8 @@
     auto collapsedType = RankedTensorType::get(
         applyPermutation(destShape, perm), destType.getElementType());
 
-    auto collapse = rewriter.create<tensor::CollapseShapeOp>(
-        loc, collapsedType, unpackOp.getSource(),
+    auto collapse = tensor::CollapseShapeOp::create(
+        rewriter, loc, collapsedType, unpackOp.getSource(),
         getTilingReassociationMap(destType.getRank(), innerDims));
     rewriter.replaceOpWithNewOp<linalg::TransposeOp>(
         unpackOp, collapse, unpackOp.getDest(), invertPermutationVector(perm));
diff --git a/compiler/src/iree/compiler/Preprocessing/Common/FoldAttentionWithTranspose.cpp b/compiler/src/iree/compiler/Preprocessing/Common/FoldAttentionWithTranspose.cpp
index 9ee889c..f085742 100644
--- a/compiler/src/iree/compiler/Preprocessing/Common/FoldAttentionWithTranspose.cpp
+++ b/compiler/src/iree/compiler/Preprocessing/Common/FoldAttentionWithTranspose.cpp
@@ -172,8 +172,8 @@
         getIndexingMap(6, {d0, d2, d1, d5})};
     ArrayAttr newIndexingMapsAttr =
         rewriter.getAffineMapArrayAttr(newIndexingMaps);
-    auto newAttentionOp = rewriter.create<IREE::LinalgExt::AttentionOp>(
-        attentionOp.getLoc(), expandedInit.getType(), expandedQuery,
+    auto newAttentionOp = IREE::LinalgExt::AttentionOp::create(
+        rewriter, attentionOp.getLoc(), expandedInit.getType(), expandedQuery,
         expandedKey, expandedValue, attentionOp.getScale(), expandedInit,
         newIndexingMapsAttr);
     rewriter.replaceOp(transposeLikeOp, newAttentionOp);
diff --git a/compiler/src/iree/compiler/Preprocessing/Common/MakeSingleDispatchForFunction.cpp b/compiler/src/iree/compiler/Preprocessing/Common/MakeSingleDispatchForFunction.cpp
index 792253c..02644c1 100644
--- a/compiler/src/iree/compiler/Preprocessing/Common/MakeSingleDispatchForFunction.cpp
+++ b/compiler/src/iree/compiler/Preprocessing/Common/MakeSingleDispatchForFunction.cpp
@@ -138,8 +138,8 @@
 
   auto resultTypes =
       llvm::map_to_vector(results, [](Value v) -> Type { return v.getType(); });
-  auto dispatchRegionOp = rewriter.create<IREE::Flow::DispatchRegionOp>(
-      funcOp.getLoc(), resultTypes,
+  auto dispatchRegionOp = IREE::Flow::DispatchRegionOp::create(
+      rewriter, funcOp.getLoc(), resultTypes,
       /*result_dims=*/ValueRange{}, /*workload=*/ValueRange{});
   Region &regionOpBody = dispatchRegionOp.getBody();
   Block *newBlock = rewriter.createBlock(&regionOpBody, regionOpBody.begin());
@@ -147,8 +147,8 @@
     rewriter.moveOpBefore(op, newBlock, newBlock->end());
   }
   rewriter.setInsertionPointToEnd(newBlock);
-  rewriter.create<IREE::Flow::ReturnOp>(dispatchRegionOp.getLoc(),
-                                        results.getArrayRef());
+  IREE::Flow::ReturnOp::create(rewriter, dispatchRegionOp.getLoc(),
+                               results.getArrayRef());
   rewriter.replaceUsesWithIf(
       results.getArrayRef(), dispatchRegionOp->getResults(),
       [&](OpOperand &use) {
diff --git a/compiler/src/iree/compiler/Preprocessing/Common/PadLinalgOps.cpp b/compiler/src/iree/compiler/Preprocessing/Common/PadLinalgOps.cpp
index 38f2a81..959958f 100644
--- a/compiler/src/iree/compiler/Preprocessing/Common/PadLinalgOps.cpp
+++ b/compiler/src/iree/compiler/Preprocessing/Common/PadLinalgOps.cpp
@@ -80,11 +80,11 @@
     auto rhsPaddedType = RankedTensorType::get(
         getFullShape({newKSize, newNSize}), rhsType.getElementType());
 
-    Value lhsPaddingValue = rewriter.create<arith::ConstantOp>(
-        loc, rewriter.getZeroAttr(lhsType.getElementType()));
+    Value lhsPaddingValue = arith::ConstantOp::create(
+        rewriter, loc, rewriter.getZeroAttr(lhsType.getElementType()));
 
-    Value rhsPaddingValue = rewriter.create<arith::ConstantOp>(
-        loc, rewriter.getZeroAttr(rhsType.getElementType()));
+    Value rhsPaddingValue = arith::ConstantOp::create(
+        rewriter, loc, rewriter.getZeroAttr(rhsType.getElementType()));
 
     auto createPadding = [&](ArrayRef<int64_t> padding) {
       SmallVector<OpFoldResult> result;
@@ -99,15 +99,15 @@
 
     Value paddedLhs = lhs;
     if (paddingForM > 0 || paddingForK > 0) {
-      paddedLhs = rewriter.create<tensor::PadOp>(
-          loc, lhsPaddedType, lhs, createPadding({0, 0}),
+      paddedLhs = tensor::PadOp::create(
+          rewriter, loc, lhsPaddedType, lhs, createPadding({0, 0}),
           createPadding({paddingForM, paddingForK}), lhsPaddingValue);
     }
 
     Value paddedRhs = rhs;
     if (paddingForK > 0 || paddingForN > 0) {
-      paddedRhs = rewriter.create<tensor::PadOp>(
-          loc, rhsPaddedType, rhs, createPadding({0, 0}),
+      paddedRhs = tensor::PadOp::create(
+          rewriter, loc, rhsPaddedType, rhs, createPadding({0, 0}),
           createPadding({paddingForK, paddingForN}), rhsPaddingValue);
     }
 
@@ -120,10 +120,10 @@
     } else {
       auto newResultType = RankedTensorType::get(
           getFullShape({newMSize, newNSize}), resultType.getElementType());
-      Value resultPaddingValue = rewriter.create<arith::ConstantOp>(
-          loc, rewriter.getZeroAttr(resultType.getElementType()));
-      Value paddedResult = rewriter.create<tensor::PadOp>(
-          loc, newResultType, result, createPadding({0, 0}),
+      Value resultPaddingValue = arith::ConstantOp::create(
+          rewriter, loc, rewriter.getZeroAttr(resultType.getElementType()));
+      Value paddedResult = tensor::PadOp::create(
+          rewriter, loc, newResultType, result, createPadding({0, 0}),
           createPadding({paddingForM, paddingForN}), resultPaddingValue);
       auto paddedMatmulOp =
           mlir::clone(rewriter, linalgOp, {newResultType},
diff --git a/compiler/src/iree/compiler/Preprocessing/Common/PadToIntrinsics.cpp b/compiler/src/iree/compiler/Preprocessing/Common/PadToIntrinsics.cpp
index 66aa77e..59cda1c 100644
--- a/compiler/src/iree/compiler/Preprocessing/Common/PadToIntrinsics.cpp
+++ b/compiler/src/iree/compiler/Preprocessing/Common/PadToIntrinsics.cpp
@@ -51,11 +51,11 @@
       });
   auto paddedResultType =
       RankedTensorType::get(paddedShape, sourceType.getElementType());
-  Value paddingValue = rewriter.create<arith::ConstantOp>(
-      loc, rewriter.getZeroAttr(sourceType.getElementType()));
+  Value paddingValue = arith::ConstantOp::create(
+      rewriter, loc, rewriter.getZeroAttr(sourceType.getElementType()));
   SmallVector<OpFoldResult> low(padding.size(), rewriter.getIndexAttr(0));
-  Value paddedResult = rewriter.create<tensor::PadOp>(
-      loc, paddedResultType, padSource, low, padding, paddingValue);
+  Value paddedResult = tensor::PadOp::create(
+      rewriter, loc, paddedResultType, padSource, low, padding, paddingValue);
   return paddedResult;
 }
 
@@ -97,8 +97,8 @@
     }
   }
 
-  return rewriter.create<tensor::ExpandShapeOp>(
-      loc, RankedTensorType::Builder(srcType).setShape(expandedShape),
+  return tensor::ExpandShapeOp::create(
+      rewriter, loc, RankedTensorType::Builder(srcType).setShape(expandedShape),
       expandSource, reassoc);
 }
 
@@ -408,7 +408,7 @@
         if (!mOperandDimPair)
           return;
         auto [mOperand, mOperandDim] = mOperandDimPair.value();
-        mSizeExpr = rewriter.create<tensor::DimOp>(loc, mOperand, mOperandDim)
+        mSizeExpr = tensor::DimOp::create(rewriter, loc, mOperand, mOperandDim)
                         .getResult();
         dimsToExpandCandidate.emplace_back(mDim, intrinsic.mSizes[0]);
       }
@@ -422,7 +422,7 @@
         if (!nOperandDimPair)
           return;
         auto [nOperand, nOperandDim] = nOperandDimPair.value();
-        nSizeExpr = rewriter.create<tensor::DimOp>(loc, nOperand, nOperandDim)
+        nSizeExpr = tensor::DimOp::create(rewriter, loc, nOperand, nOperandDim)
                         .getResult();
         dimsToExpandCandidate.emplace_back(nDim, intrinsic.nSizes[0]);
       }
@@ -436,7 +436,7 @@
         if (!kOperandDimPair)
           return;
         auto [kOperand, kOperandDim] = kOperandDimPair.value();
-        kSizeExpr = rewriter.create<tensor::DimOp>(loc, kOperand, kOperandDim)
+        kSizeExpr = tensor::DimOp::create(rewriter, loc, kOperand, kOperandDim)
                         .getResult();
         dimsToExpandCandidate.emplace_back(kDim, intrinsic.kSizes[0]);
       }
@@ -518,17 +518,17 @@
     newOuts = getExpandedValue(rewriter, loc, newOuts, outsMap, dimsToExpand);
 
     // Create expanded contractionOp.
-    auto expandedMatmulOp = rewriter.create<linalg::GenericOp>(
-        loc, newOuts.getType(), ValueRange{newLhs, newRhs}, ValueRange{newOuts},
-        expandedMaps, expandedIterators);
+    auto expandedMatmulOp = linalg::GenericOp::create(
+        rewriter, loc, newOuts.getType(), ValueRange{newLhs, newRhs},
+        ValueRange{newOuts}, expandedMaps, expandedIterators);
     expandedMatmulOp.getRegion().takeBody(linalgOp->getRegion(0));
     paddedCompute = expandedMatmulOp.getResults()[0];
 
     // Collapse back to non expanded shape if required.
     if (auto expandOutsOp =
             dyn_cast<tensor::ExpandShapeOp>(newOuts.getDefiningOp())) {
-      paddedCompute = rewriter.create<tensor::CollapseShapeOp>(
-          loc, expandOutsOp.getSrcType(), paddedCompute,
+      paddedCompute = tensor::CollapseShapeOp::create(
+          rewriter, loc, expandOutsOp.getSrcType(), paddedCompute,
           expandOutsOp.getReassociationIndices());
     }
   }