NFC: retire HALInterfaceToMemrefPass and related utilities

This commit deletes the HALInterfaceToMemrefPass along with the
utility functions that are no longer needed. It removes
CodegenUtils.h/.cpp and moves the helpers still in use
(dropTrailingOnes and updateWorkGroupSize) to their use site in
LinalgTileAndFusePass.cpp, in preparation for better directory
organization.
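
For reference, a minimal sketch (not part of this change) of how a
caller can recover the workgroup size now that getWorkGroupSize() is
deleted, mirroring the removed implementation and assuming
mlir/Dialect/SPIRV/TargetAndABI.h is included:

    // Read the static workgroup size straight from the
    // spv.entry_point_abi attribute, as the deleted helper did.
    if (auto entryPointAttr = spirv::lookupEntryPointABI(funcOp)) {
      SmallVector<int32_t, 3> workGroupSize;
      for (const APInt &value :
           entryPointAttr.local_size().getValues<APInt>())
        workGroupSize.push_back(value.getSExtValue());
      // workGroupSize now holds the {x, y, z} local size.
    }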

PiperOrigin-RevId: 309513864
diff --git a/iree/compiler/Dialect/HAL/Target/LLVM/BUILD b/iree/compiler/Dialect/HAL/Target/LLVM/BUILD
index 4dbb3f8..6715b26 100644
--- a/iree/compiler/Dialect/HAL/Target/LLVM/BUILD
+++ b/iree/compiler/Dialect/HAL/Target/LLVM/BUILD
@@ -33,7 +33,6 @@
         "//iree/compiler/Dialect/HAL/Target:LegacyUtil",
         "//iree/compiler/Dialect/IREE/IR",
         "//iree/compiler/Translation/CodegenPasses",
-        "//iree/compiler/Translation/CodegenUtils",
         "//iree/schemas:llvmir_executable_def_cc_fbs",
         "@llvm-project//llvm:core",
         "@llvm-project//llvm:support",
diff --git a/iree/compiler/Dialect/HAL/Target/LLVM/CMakeLists.txt b/iree/compiler/Dialect/HAL/Target/LLVM/CMakeLists.txt
index d460149..f2c1dfa 100644
--- a/iree/compiler/Dialect/HAL/Target/LLVM/CMakeLists.txt
+++ b/iree/compiler/Dialect/HAL/Target/LLVM/CMakeLists.txt
@@ -42,7 +42,6 @@
     iree::compiler::Dialect::HAL::Target::LegacyUtil
     iree::compiler::Dialect::IREE::IR
     iree::compiler::Translation::CodegenPasses
-    iree::compiler::Translation::CodegenUtils
     iree::schemas::llvmir_executable_def_cc_fbs
   PUBLIC
 )
diff --git a/iree/compiler/Dialect/HAL/Target/LLVM/LLVMTarget.cpp b/iree/compiler/Dialect/HAL/Target/LLVM/LLVMTarget.cpp
index c8dddbf..354b490 100644
--- a/iree/compiler/Dialect/HAL/Target/LLVM/LLVMTarget.cpp
+++ b/iree/compiler/Dialect/HAL/Target/LLVM/LLVMTarget.cpp
@@ -21,7 +21,6 @@
 #include "iree/compiler/Dialect/HAL/Target/TargetRegistry.h"
 #include "iree/compiler/Dialect/IREE/IR/IREEOps.h"
 #include "iree/compiler/Translation/CodegenPasses/Passes.h"
-#include "iree/compiler/Translation/CodegenUtils/CodegenUtils.h"
 #include "iree/schemas/llvmir_executable_def_generated.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/ScopeExit.h"
diff --git a/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/BUILD b/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/BUILD
index 1a04108..a5dbaaa 100644
--- a/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/BUILD
+++ b/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/BUILD
@@ -33,7 +33,6 @@
         "//iree/compiler/Dialect/Vulkan/IR",
         "//iree/compiler/Dialect/Vulkan/Utils",
         "//iree/compiler/Translation/CodegenPasses",
-        "//iree/compiler/Translation/CodegenUtils",
         "//iree/compiler/Translation/SPIRV/LinalgToSPIRV",
         "//iree/schemas:spirv_executable_def_cc_fbs",
         "@com_github_google_flatbuffers//:flatbuffers",
diff --git a/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/CMakeLists.txt b/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/CMakeLists.txt
index c14806b..07a26d6 100644
--- a/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/CMakeLists.txt
+++ b/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/CMakeLists.txt
@@ -39,7 +39,6 @@
     iree::compiler::Dialect::Vulkan::IR
     iree::compiler::Dialect::Vulkan::Utils
     iree::compiler::Translation::CodegenPasses
-    iree::compiler::Translation::CodegenUtils
     iree::compiler::Translation::SPIRV::LinalgToSPIRV
     iree::schemas::spirv_executable_def_cc_fbs
     tensorflow::mlir_xla
diff --git a/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/VulkanSPIRVTarget.cpp b/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/VulkanSPIRVTarget.cpp
index bf200f3..ea1e40c 100644
--- a/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/VulkanSPIRVTarget.cpp
+++ b/iree/compiler/Dialect/HAL/Target/VulkanSPIRV/VulkanSPIRVTarget.cpp
@@ -23,7 +23,6 @@
 #include "iree/compiler/Dialect/Vulkan/IR/VulkanAttributes.h"
 #include "iree/compiler/Dialect/Vulkan/Utils/TargetEnvUtils.h"
 #include "iree/compiler/Translation/CodegenPasses/Passes.h"
-#include "iree/compiler/Translation/CodegenUtils/CodegenUtils.h"
 #include "iree/compiler/Translation/SPIRV/LinalgToSPIRV/LowerToSPIRV.h"
 #include "iree/schemas/spirv_executable_def_generated.h"
 #include "llvm/ADT/STLExtras.h"
diff --git a/iree/compiler/Translation/CodegenPasses/BUILD b/iree/compiler/Translation/CodegenPasses/BUILD
index c81b7dd..65c854e 100644
--- a/iree/compiler/Translation/CodegenPasses/BUILD
+++ b/iree/compiler/Translation/CodegenPasses/BUILD
@@ -38,7 +38,6 @@
 cc_library(
     name = "CodegenPasses",
     srcs = [
-        "HALInterfaceToMemrefPass.cpp",
         "HLOToLinalgOnBuffers.cpp",
         "HLOToLinalgOnTensors.cpp",
         "LinalgFusion.cpp",
diff --git a/iree/compiler/Translation/CodegenPasses/CMakeLists.txt b/iree/compiler/Translation/CodegenPasses/CMakeLists.txt
index 17737fa..dcca954 100644
--- a/iree/compiler/Translation/CodegenPasses/CMakeLists.txt
+++ b/iree/compiler/Translation/CodegenPasses/CMakeLists.txt
@@ -38,7 +38,6 @@
     "Passes.h"
     "LinalgVectorTransformPatterns.h.inc"
   SRCS
-    "HALInterfaceToMemrefPass.cpp"
     "HLOToLinalgOnBuffers.cpp"
     "HLOToLinalgOnTensors.cpp"
     "LinalgFusion.cpp"
@@ -59,7 +58,6 @@
     iree::compiler::Dialect::HAL::IR::HALDialect
     iree::compiler::Dialect::IREE::IR
     iree::compiler::Dialect::Shape::IR
-    iree::compiler::Translation::CodegenUtils
     tensorflow::mlir_xla
   PUBLIC
 )
diff --git a/iree/compiler/Translation/CodegenPasses/HALInterfaceToMemrefPass.cpp b/iree/compiler/Translation/CodegenPasses/HALInterfaceToMemrefPass.cpp
deleted file mode 100644
index 8507108..0000000
--- a/iree/compiler/Translation/CodegenPasses/HALInterfaceToMemrefPass.cpp
+++ /dev/null
@@ -1,411 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "iree/compiler/Dialect/HAL/IR/HALOps.h"
-#include "iree/compiler/Dialect/IREE/IR/IREEOps.h"
-#include "iree/compiler/Translation/CodegenPasses/Passes.h"
-#include "iree/compiler/Translation/CodegenUtils/CodegenUtils.h"
-#include "llvm/ADT/SetVector.h"
-#include "mlir/Dialect/SPIRV/SPIRVTypes.h"
-#include "mlir/Dialect/SPIRV/TargetAndABI.h"
-#include "mlir/Dialect/StandardOps/IR/Ops.h"
-#include "mlir/Pass/Pass.h"
-#include "mlir/Transforms/DialectConversion.h"
-
-namespace mlir {
-namespace iree_compiler {
-
-namespace {
-
-/// Converts the function call operation that takes and returns tensor
-/// arguments, into one that takes memref arguments.  The number of arguments of
-/// the converted op is equal to the sum of the number of arguments and number
-/// of results of the original operation.
-/// - Arguments that are tensor type are converted to memref type of same shape
-///   and element type.
-/// - Results that are tensor type are converted to memref type of same shape
-///   and element type.
-/// - Results that are not tensor type are converted to memref type of rank-0,
-///   with the type as the element type.
-/// Inserts the `iree.load_input` and `iree.store_output` instructions to allow
-/// the body of the function to contain tensors operations.
-struct FuncOpConversion : OpConversionPattern<FuncOp> {
-  using OpConversionPattern<FuncOp>::OpConversionPattern;
-  LogicalResult matchAndRewrite(
-      FuncOp funcOp, ArrayRef<Value> operands,
-      ConversionPatternRewriter &rewriter) const override;
-};
-
-/// Converts a return statement to iree.store_output statements and empty
-/// return.
-struct ReturnOpConversion : OpConversionPattern<ReturnOp> {
-  using OpConversionPattern<ReturnOp>::OpConversionPattern;
-  LogicalResult matchAndRewrite(
-      ReturnOp returnOp, ArrayRef<Value> operands,
-      ConversionPatternRewriter &rewriter) const override;
-};
-
-/// Converts the call operation to a function that is converted using the FuncOp
-/// conversion pattern implemented above. Will insert hal.interface.get_memref
-/// ops for the memrefs that are to be used as result buffers.
-/// The called function (which is the dispatch function implementation) will be
-/// annotated with
-/// - spv.entry_point_abi attribute to use during SPIR-V lowering.
-/// - spv.interface_var_abi attribute on function arguments to use during SPIR-V
-/// lowering.
-/// - iree.dispatch_fn_name attribute which contains the name of the entry point
-///   function (which is not the implementation function). The generated SPIR-V
-///   binary/LLVM module needs to have this function for the runtime to execute
-///   the kernel.
-// TODO(ravishankarm): The LLVM side doesnt need the spv.* attributes. Maybe
-// make that optional.
-struct CallOpConversion : OpConversionPattern<CallOp> {
-  using OpConversionPattern<CallOp>::OpConversionPattern;
-  LogicalResult matchAndRewrite(
-      CallOp op, ArrayRef<Value> operands,
-      ConversionPatternRewriter &rewriter) const override;
-};
-
-/// Pass to convert from HAL tensor interface to HAL memref interface.
-struct HALInterfaceToMemrefPass
-    : public PassWrapper<HALInterfaceToMemrefPass,
-                         OperationPass<mlir::ModuleOp>> {
-  void runOnOperation() override;
-};
-}  // namespace
-
-/// Convert a ranked tensor type to equivalent memref type.
-static MemRefType convertTensorTypeOrNull(Type t) {
-  auto tensorType = t.dyn_cast<RankedTensorType>();
-  if (tensorType)
-    return MemRefType::get(tensorType.getShape(), tensorType.getElementType());
-  return nullptr;
-}
-
-LogicalResult FuncOpConversion::matchAndRewrite(
-    FuncOp funcOp, ArrayRef<Value> operands,
-    ConversionPatternRewriter &rewriter) const {
-  if (funcOp.empty()) return failure();
-
-  FunctionType fnType = funcOp.getType();
-  Location loc = funcOp.getLoc();
-
-  // Convert all tensor type input arguments to corresponding memref type.
-  TypeConverter::SignatureConversion signatureConverter(fnType.getNumInputs());
-  for (auto argType : enumerate(fnType.getInputs())) {
-    MemRefType convertedType = convertTensorTypeOrNull(argType.value());
-    if (!convertedType) {
-      return funcOp.emitError(
-          "expected dispatch function to have all tensor operands");
-    }
-    signatureConverter.addInputs(argType.index(), convertedType);
-  }
-
-  // Convert all tensor type output to corresponding memref type and append as
-  // arguments to the new function. For non-tensor types, append a memref type
-  // with the same element type and {} shape as argument to the new function.
-  for (auto resultType : fnType.getResults()) {
-    MemRefType convertedType = convertTensorTypeOrNull(resultType);
-    if (!convertedType)
-      return funcOp.emitError(
-          "expected dispatch function to have all tensor return values");
-    signatureConverter.addInputs(convertedType);
-  }
-  auto newFuncOp = rewriter.create<FuncOp>(
-      loc, funcOp.getName(),
-      rewriter.getFunctionType(signatureConverter.getConvertedTypes(),
-                               llvm::None),
-      /*attrs=*/ArrayRef<NamedAttribute>());
-  rewriter.inlineRegionBefore(funcOp.getBody(), newFuncOp.getBody(),
-                              newFuncOp.end());
-  rewriter.applySignatureConversion(&newFuncOp.getBody(), signatureConverter);
-
-  // For all inputs, get the tensor value back by inserting iree.load_input.
-  OpBuilder::InsertionGuard insertionGuard(rewriter);
-  rewriter.setInsertionPointToStart(&newFuncOp.getBody().front());
-  for (auto inputType : enumerate(fnType.getInputs())) {
-    Value loadInputVal = rewriter.create<IREE::LoadInputOp>(
-        loc, inputType.value(), newFuncOp.getArgument(inputType.index()));
-    rewriter.replaceUsesOfBlockArgument(
-        newFuncOp.getArgument(inputType.index()), loadInputVal);
-  }
-
-  rewriter.eraseOp(funcOp);
-  return success();
-}
-
-LogicalResult ReturnOpConversion::matchAndRewrite(
-    ReturnOp returnOp, ArrayRef<Value> operands,
-    ConversionPatternRewriter &rewriter) const {
-  auto funcOp = returnOp.getParentOfType<FuncOp>();
-  if (funcOp.getNumResults()) return failure();
-  auto numArguments = funcOp.getNumArguments();
-  auto numReturnVals = operands.size();
-  auto loc = returnOp.getLoc();
-  for (auto resultNum : llvm::seq<unsigned>(0, returnOp.getNumOperands())) {
-    Value dst = funcOp.getArgument(numArguments - numReturnVals + resultNum);
-    rewriter.create<IREE::StoreOutputOp>(loc, operands[resultNum], dst);
-  }
-  rewriter.replaceOpWithNewOp<ReturnOp>(returnOp);
-  return success();
-}
-
-/// Map HAL descriptor type to SPIR-V storage class.
-static Optional<spirv::StorageClass> getSPIRVStorageClass(
-    IREE::HAL::DescriptorType descriptor) {
-  switch (descriptor) {
-    case IREE::HAL::DescriptorType::StorageBuffer:
-      return spirv::StorageClass::StorageBuffer;
-    case IREE::HAL::DescriptorType::UniformBuffer:
-      return spirv::StorageClass::Uniform;
-    default:
-      return {};
-  }
-}
-
-/// Build the spirv::InterfaceVarABIAttr for the binding associated with a
-/// IREE::HAL::InterfaceLoadTensorOp/IREE::HAL::InterfaceStoreTensorOp.
-template <typename InterfaceOp>
-static spirv::InterfaceVarABIAttr getSPIRVInterfaceVarABIAttr(
-    InterfaceOp op, MLIRContext *context) {
-  SymbolRefAttr interface = op.binding();
-  IREE::HAL::InterfaceBindingOp binding = op.queryBindingOp();
-  if (!binding) {
-    op.emitError("unable to resolve binding symbol");
-    return nullptr;
-  }
-  Optional<spirv::StorageClass> storageClass =
-      getSPIRVStorageClass(binding.type());
-  if (!storageClass) {
-    op.emitError("unable to resolve descriptor type");
-    return nullptr;
-  }
-  // TODO(ravishankarm, antiagainst): Setting the storage class for non-scalar
-  // types using spv.interface_var_abi attr is currently an error. This needs to
-  // be addressed for IREE's use case.
-  if (storageClass.getValue() != spirv::StorageClass::StorageBuffer) {
-    op.emitError("unable to handle descriptor type that is not StorageBuffer");
-    return nullptr;
-  }
-  spirv::InterfaceVarABIAttr spvAttr = spirv::getInterfaceVarABIAttr(
-      binding.set().getZExtValue(), binding.binding().getZExtValue(), {},
-      context);
-  if (!spvAttr) {
-    op.emitError("unable to build spv.interface_var_abi attr");
-    return nullptr;
-  }
-  return spvAttr;
-}
-
-LogicalResult CallOpConversion::matchAndRewrite(
-    CallOp op, ArrayRef<Value> operands,
-    ConversionPatternRewriter &rewriter) const {
-  auto callee = dyn_cast_or_null<FuncOp>(
-      SymbolTable::lookupNearestSymbolFrom(op, op.callee()));
-  Location loc = op.getLoc();
-  if (!callee)
-    return op.emitError("unable to resolve function ") << op.callee();
-
-  // Build the interface variable ABI attributes for the arguments.
-  SmallVector<Value, 4> newOperands;
-  SmallVector<spirv::InterfaceVarABIAttr, 4> argAttrs;
-  newOperands.reserve(operands.size() + op.getNumResults());
-  argAttrs.reserve(operands.size() + op.getNumResults());
-  MLIRContext *context = rewriter.getContext();
-  for (auto operand : operands) {
-    // TODO(ravishankarm): In general we might need to traverse a DAG of
-    // operations to get to the defining op. Unsure how that will work out, but
-    // for now just assume that operand is going to be defined by a
-    // hal.interface.load_tensor
-    auto definingOp = dyn_cast_or_null<IREE::HAL::InterfaceLoadTensorOp>(
-        operand.getDefiningOp());
-    if (!definingOp)
-      return op.emitError(
-          "unable to find hal.interface. op while building "
-          "spv.interface_var_abi attr");
-    spirv::InterfaceVarABIAttr abiAttr =
-        getSPIRVInterfaceVarABIAttr(definingOp, context);
-    if (!abiAttr) return failure();
-    argAttrs.push_back(abiAttr);
-    MemRefType convertedType = convertTensorTypeOrNull(operand.getType());
-    if (!convertedType)
-      return op.emitError(
-          "unable to convert type of operand to call operation");
-    // TODO(ravishankarm): Convert this to use dynamically sizes allocation.
-    if (!convertedType.hasStaticShape())
-      return op.emitError("unable to handle dynamically sized tensors");
-
-    // TODO(ravishankarm): We can create an alloc here cause finally only need
-    // the binary that is created from this module. The alloc has no effect in
-    // the end as of now. But that might change, so modify approach to account
-    // for that.
-    auto alloc = rewriter.create<AllocOp>(loc, convertedType);
-    // Store the tensor value into the alloc.
-    rewriter.create<TensorStoreOp>(loc, operand, alloc);
-    newOperands.push_back(alloc);
-  }
-
-  // Build the interface varialbe ABI attributes for the result.
-  SmallVector<Value, 1> resultBuffers;
-  resultBuffers.reserve(op.getNumResults());
-  for (Value result : op.getResults()) {
-    if (!result.hasOneUse())
-      return op.emitError(
-          "unhandled multiple uses of result of dispatch impl function");
-    // TODO(ravishankarm): In general we might need to traverse a DAG of
-    // operations to get to the hal.interface.store_tensor op. Unsure how that
-    // will work out, but for now just assume that the use is a
-    // hal.interface.store_tensor op.
-    auto storeTensorOp = dyn_cast<IREE::HAL::InterfaceStoreTensorOp>(
-        result.use_begin()->getOwner());
-    if (!storeTensorOp)
-      return op.emitError(
-          "expected result of dispatch impl function call to be used in a "
-          "hal.interface.store_tensor op");
-    spirv::InterfaceVarABIAttr abiAttr =
-        getSPIRVInterfaceVarABIAttr(storeTensorOp, context);
-    if (!abiAttr) return failure();
-    argAttrs.push_back(abiAttr);
-    MemRefType convertedType = convertTensorTypeOrNull(result.getType());
-    if (!convertedType)
-      return op.emitError("unable to convert type of result of call operation");
-
-    // TODO(ravishankarm): Convert this to use dynamically sizes allocation.
-    if (!convertedType.hasStaticShape())
-      return op.emitError("unable to handle dynamically sized tensors");
-
-    // TODO(ravishankarm): We can create an alloc here cause finally only need
-    // the binary that is created from this module. The alloc has no effect in
-    // the end as of now. But that might change, so modify approach to account
-    // for that.
-    auto alloc = rewriter.create<AllocOp>(loc, convertedType);
-    newOperands.push_back(alloc);
-    resultBuffers.push_back(alloc);
-  }
-
-  rewriter.create<CallOp>(loc, op.callee(), ArrayRef<Type>(), newOperands);
-
-  // Load the tensor values from the result buffers.
-  SmallVector<Value, 1> resultValues;
-  resultValues.reserve(resultBuffers.size());
-  for (auto buffer : resultBuffers)
-    resultValues.push_back(rewriter.create<TensorLoadOp>(loc, buffer));
-  rewriter.replaceOp(op, resultValues);
-
-  // Set the entry point attribute for the callee and the interface variable ABI
-  // attr for the callee arguments.
-  SmallVector<int32_t, 3> localSize = {1, 1, 1};
-  callee.setAttr(spirv::getEntryPointABIAttrName(),
-                 spirv::getEntryPointABIAttr(localSize, context));
-  for (auto argAttr : enumerate(argAttrs))
-    callee.setArgAttr(argAttr.index(), spirv::getInterfaceVarABIAttrName(),
-                      argAttr.value());
-  // Set the name of the entry point function to use.
-  callee.setAttr(
-      getDispatchFuncAttrName(),
-      rewriter.getStringAttr(op.getParentOfType<FuncOp>().getName()));
-  return success();
-}
-
-static void populateImplFunctionConversionPatterns(
-    MLIRContext *context, OwningRewritePatternList &patterns) {
-  patterns.insert<FuncOpConversion, ReturnOpConversion>(context);
-}
-
-static void populateHALInterfaceToMemrefPatterns(
-    MLIRContext *context, OwningRewritePatternList &patterns) {
-  patterns.insert<CallOpConversion>(context);
-}
-
-void HALInterfaceToMemrefPass::runOnOperation() {
-  MLIRContext *context = &getContext();
-
-  // Collect the dispatch functions within the flow.executable op and the
-  // implementation function.
-  mlir::ModuleOp moduleOp = getOperation();
-  SmallVector<Operation *, 1> dispatchFns;
-  llvm::SetVector<Operation *> implFns;
-  SymbolTable symbolTable(moduleOp);
-  for (auto funcOp : moduleOp.getOps<mlir::FuncOp>()) {
-    if (SymbolTable::getSymbolVisibility(funcOp) ==
-        SymbolTable::Visibility::Public) {
-      dispatchFns.push_back(funcOp);
-      auto walkResult =
-          funcOp.walk([&implFns, &symbolTable](CallOp callOp) -> WalkResult {
-            auto implFn = symbolTable.lookup<FuncOp>(callOp.callee());
-            if (!implFn) {
-              callOp.emitError("unable to find definition of function ")
-                  << callOp.callee();
-              return WalkResult::interrupt();
-            }
-            implFns.insert(implFn);
-            return WalkResult::advance();
-          });
-      if (walkResult.wasInterrupted()) {
-        return signalPassFailure();
-      }
-    }
-  }
-
-  if (dispatchFns.empty() || implFns.empty()) return;
-
-  // First convert all the functions that are invoked with the dispatch region
-  // to operate on tensors.
-  OwningRewritePatternList patterns;
-  populateImplFunctionConversionPatterns(context, patterns);
-  ConversionTarget implFnConversion(*context);
-  implFnConversion.markUnknownOpDynamicallyLegal(
-      [](Operation *op) -> bool { return true; });
-  implFnConversion.addDynamicallyLegalOp<FuncOp>([](FuncOp funcOp) -> bool {
-    auto fnType = funcOp.getType();
-    return llvm::all_of(
-               fnType.getInputs(),
-               [](Type t) -> bool { return !t.isa<RankedTensorType>(); }) &&
-           fnType.getNumResults() == 0;
-  });
-  implFnConversion.addDynamicallyLegalOp<ReturnOp>(
-      [](ReturnOp returnOp) -> bool { return returnOp.getNumOperands() == 0; });
-  populateHALInterfaceToMemrefPatterns(context, patterns);
-  if (failed(applyFullConversion(implFns.getArrayRef(), implFnConversion,
-                                 patterns, nullptr)))
-    return signalPassFailure();
-
-  // Convert the dispatch functions.
-  patterns.clear();
-  populateHALInterfaceToMemrefPatterns(context, patterns);
-  ConversionTarget dispatchFnConversion(*context);
-  dispatchFnConversion.markUnknownOpDynamicallyLegal(
-      [](Operation *op) -> bool { return true; });
-  dispatchFnConversion.addDynamicallyLegalOp<CallOp>([](CallOp op) -> bool {
-    return llvm::all_of(
-               op.getOperandTypes(),
-               [](Type t) -> bool { return !t.isa<RankedTensorType>(); }) &&
-           op.getNumResults() == 0;
-  });
-  if (failed(applyFullConversion(dispatchFns, dispatchFnConversion, patterns,
-                                 nullptr)))
-    return signalPassFailure();
-}
-
-std::unique_ptr<OperationPass<mlir::ModuleOp>>
-createHALInterfaceToMemrefPass() {
-  return std::make_unique<HALInterfaceToMemrefPass>();
-}
-
-static PassRegistration<HALInterfaceToMemrefPass> pass(
-    "iree-convert-hal-interface-to-memref",
-    "Converts the HAL interface to use memrefs (instead of tensors)");
-
-}  // namespace iree_compiler
-}  // namespace mlir
diff --git a/iree/compiler/Translation/CodegenPasses/HLOToLinalgOnBuffers.cpp b/iree/compiler/Translation/CodegenPasses/HLOToLinalgOnBuffers.cpp
index 493878a..ee80d6b 100644
--- a/iree/compiler/Translation/CodegenPasses/HLOToLinalgOnBuffers.cpp
+++ b/iree/compiler/Translation/CodegenPasses/HLOToLinalgOnBuffers.cpp
@@ -26,7 +26,6 @@
 #include "iree/compiler/Dialect/IREE/IR/IREEOps.h"
 #include "iree/compiler/Dialect/Shape/IR/ShapeOps.h"
 #include "iree/compiler/Translation/CodegenPasses/Passes.h"
-#include "iree/compiler/Translation/CodegenUtils/CodegenUtils.h"
 #include "iree/compiler/Translation/CodegenUtils/MarkerUtils.h"
 #include "llvm/ADT/APInt.h"
 #include "llvm/ADT/SetVector.h"
diff --git a/iree/compiler/Translation/CodegenPasses/HLOToLinalgOnTensors.cpp b/iree/compiler/Translation/CodegenPasses/HLOToLinalgOnTensors.cpp
index 5dac1a7..a910719 100644
--- a/iree/compiler/Translation/CodegenPasses/HLOToLinalgOnTensors.cpp
+++ b/iree/compiler/Translation/CodegenPasses/HLOToLinalgOnTensors.cpp
@@ -22,7 +22,6 @@
 #include <memory>
 
 #include "iree/compiler/Translation/CodegenPasses/Passes.h"
-#include "iree/compiler/Translation/CodegenUtils/CodegenUtils.h"
 #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
 #include "mlir/Dialect/StandardOps/IR/Ops.h"
 #include "mlir/IR/Attributes.h"
diff --git a/iree/compiler/Translation/CodegenPasses/Passes.h b/iree/compiler/Translation/CodegenPasses/Passes.h
index f09a36a..25a779b 100644
--- a/iree/compiler/Translation/CodegenPasses/Passes.h
+++ b/iree/compiler/Translation/CodegenPasses/Passes.h
@@ -35,10 +35,6 @@
 /// IREE::HAL::ExecutableTargetOp.
 void addHLOToLinalgOnBuffersPasses(OpPassManager &pm);
 
-/// Creates a pass to convert HAL interface on tensors to HAL interface on
-/// memrefs.
-std::unique_ptr<OperationPass<mlir::ModuleOp>> createHALInterfaceToMemrefPass();
-
 /// Creates XLA-HLO to Linalg on buffers transformation pass.
 std::unique_ptr<OperationPass<FuncOp>> createHLOToLinalgOnBuffersPass();
 
@@ -74,7 +70,6 @@
 
 /// Register all Codegen passes
 inline void registerCodegenPasses() {
-  createHALInterfaceToMemrefPass();
   createHLOToLinalgOnBuffersPass();
   createHLOToLinalgOnTensorsPass();
   createLinalgOnTensorsFusionPass();
diff --git a/iree/compiler/Translation/CodegenPasses/test/memref_abi.mlir b/iree/compiler/Translation/CodegenPasses/test/memref_abi.mlir
deleted file mode 100644
index 2d5d926..0000000
--- a/iree/compiler/Translation/CodegenPasses/test/memref_abi.mlir
+++ /dev/null
@@ -1,53 +0,0 @@
-// RUN: iree-opt -pass-pipeline='hal.executable(hal.executable.target(module(iree-convert-hal-interface-to-memref)))' %s
-
-hal.executable @pw_add_ex_dispatch_0 {
-  hal.interface @legacy_io {
-    hal.interface.binding @arg0, set=0, binding=0, type="StorageBuffer", access="Read"
-    hal.interface.binding @arg1, set=0, binding=1, type="StorageBuffer", access="Read"
-    hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer", access="Write|Discard"
-  }
-  hal.executable.entry_point @pw_add_ex_dispatch_0 attributes {interface = @legacy_io, ordinal = 0 : i32, signature = (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>}
-  hal.executable.target "vulkan-spirv" {
-    module {
-      // CHECK-LABEL: func @pw_add_ex_dispatch_0(
-      func @pw_add_ex_dispatch_0() {
-        %c0_i32 = constant 0 : index
-        // CHECK: %[[TENSOR0:.+]] = hal.interface.load.tensor
-        // CHECK: %[[MEMREF0:.+]] = alloc : memref<4x8xi32>
-        // CHECK: tensor_store %[[TENSOR0]], %[[MEMREF0]]
-        // CHECK: %[[TENSOR1:.+]] = hal.interface.load.tensor
-        // CHECK: %[[MEMREF1:.+]] = alloc : memref<4x8xi32>
-        // CHECK: tensor_store %[[TENSOR1]], %[[MEMREF1]]
-        // CHECK: %[[MEMREF2:.+]] = alloc : memref<4x8xi32>
-        // CHECK: call @pw_add_ex_dispatch_0_impl(%[[MEMREF0]], %[[MEMREF1]], %[[MEMREF2]])
-        // CHECK: %[[RESULT:.+]] = tensor_load %[[MEMREF2]]
-        // CHECK: hal.interface.store.tensor %[[RESULT]]
-        %0 = hal.interface.load.tensor @legacy_io::@arg0, offset = %c0_i32 : tensor<4x8xi32>
-        %1 = hal.interface.load.tensor @legacy_io::@arg1, offset = %c0_i32 : tensor<4x8xi32>
-        %2 = call @pw_add_ex_dispatch_0_impl(%0, %1) : (tensor<4x8xi32>, tensor<4x8xi32>) -> tensor<4x8xi32>
-        hal.interface.store.tensor %2, @legacy_io::@ret0, offset = %c0_i32 : tensor<4x8xi32>
-        return
-      }
-      //  CHECK-DAG: func @pw_add_ex_dispatch_0_impl(
-      // CHECK-SAME: %[[ARG0:[a-zA-Z0-9$._-]+]]: memref<4x8xi32>
-      // CHECK-SAME: %[[ARG1:[a-zA-Z0-9$._-]+]]: memref<4x8xi32>
-      // CHECK-SAME: %[[ARG2:[a-zA-Z0-9$._-]+]]: memref<4x8xi32>)
-      func @pw_add_ex_dispatch_0_impl(%arg0: tensor<4x8xi32>, %arg1: tensor<4x8xi32>) -> tensor<4x8xi32> attributes {sym_visibility = "private"} {
-        //  CHECK-DAG: %[[T0:.+]] = iree.load_tensor(%[[ARG0]]
-        //  CHECK-DAG: %[[T1:.+]] = iree.load_tensor(%[[ARG1]]
-        //      CHECK: %[[T2:.+]] = xla_hlo.add %[[T0]], %[[T1]]
-        //      CHECK: iree.store_output(
-        // CHECK-SAME: %[[T2]]
-        // CHECK-SAME: %[[ARG2]]
-        //      CHECK: return
-        %0 = xla_hlo.add %arg0, %arg1 : tensor<4x8xi32>
-        return %0 : tensor<4x8xi32>
-      }
-      hal.interface @legacy_io attributes {sym_visibility = "private"} {
-        hal.interface.binding @arg0, set=0, binding=0, type="StorageBuffer", access="Read"
-        hal.interface.binding @arg1, set=0, binding=1, type="StorageBuffer", access="Read"
-        hal.interface.binding @ret0, set=0, binding=2, type="StorageBuffer", access="Write|Discard"
-      }
-    }
-  }
-}
diff --git a/iree/compiler/Translation/CodegenUtils/BUILD b/iree/compiler/Translation/CodegenUtils/BUILD
index cc29727..f90041d 100644
--- a/iree/compiler/Translation/CodegenUtils/BUILD
+++ b/iree/compiler/Translation/CodegenUtils/BUILD
@@ -22,21 +22,15 @@
 cc_library(
     name = "CodegenUtils",
     srcs = [
-        "CodegenUtils.cpp",
         "MarkerUtils.cpp",
     ],
     hdrs = [
-        "CodegenUtils.h",
         "MarkerUtils.h",
     ],
     deps = [
-        "//iree/compiler/Dialect/HAL/IR",
-        "//iree/compiler/Dialect/IREE/IR",
         "@llvm-project//llvm:support",
         "@llvm-project//mlir:IR",
         "@llvm-project//mlir:LinalgTransforms",
-        "@llvm-project//mlir:SPIRVDialect",
-        "@llvm-project//mlir:StandardOps",
         "@llvm-project//mlir:Support",
     ],
 )
diff --git a/iree/compiler/Translation/CodegenUtils/CMakeLists.txt b/iree/compiler/Translation/CodegenUtils/CMakeLists.txt
index dfdfec0..4003fe2 100644
--- a/iree/compiler/Translation/CodegenUtils/CMakeLists.txt
+++ b/iree/compiler/Translation/CodegenUtils/CMakeLists.txt
@@ -18,19 +18,13 @@
   NAME
     CodegenUtils
   HDRS
-    "CodegenUtils.h"
     "MarkerUtils.h"
   SRCS
-    "CodegenUtils.cpp"
     "MarkerUtils.cpp"
   DEPS
     LLVMSupport
     MLIRIR
     MLIRLinalgTransforms
-    MLIRSPIRV
-    MLIRStandardOps
     MLIRSupport
-    iree::compiler::Dialect::HAL::IR
-    iree::compiler::Dialect::IREE::IR
   PUBLIC
 )
diff --git a/iree/compiler/Translation/CodegenUtils/CodegenUtils.cpp b/iree/compiler/Translation/CodegenUtils/CodegenUtils.cpp
deleted file mode 100644
index f61e3ab..0000000
--- a/iree/compiler/Translation/CodegenUtils/CodegenUtils.cpp
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "iree/compiler/Translation/CodegenUtils/CodegenUtils.h"
-
-#include "iree/compiler/Dialect/HAL/IR/HALOps.h"
-#include "iree/compiler/Dialect/IREE/IR/IREEOps.h"
-#include "mlir/Dialect/SPIRV/TargetAndABI.h"
-#include "mlir/Dialect/StandardOps/IR/Ops.h"
-#include "mlir/IR/StandardTypes.h"
-
-namespace mlir {
-namespace iree_compiler {
-
-ArrayRef<int64_t> dropTrailingOnes(ArrayRef<int64_t> vector) {
-  if (vector.empty()) return vector;
-  auto numTrailingOnes = 0;
-  for (unsigned i = vector.size() - 1; i > 0; --i) {
-    if (vector[i] != 1) {
-      break;
-    }
-    numTrailingOnes++;
-  }
-  return vector.drop_back(numTrailingOnes);
-}
-
-StringRef getDispatchFuncAttrName() { return "iree.dispatch_fn_name"; }
-
-Optional<StringRef> getDispatchFuncName(Operation *op) {
-  FuncOp funcOp =
-      (isa<FuncOp>(op) ? cast<FuncOp>(op) : op->getParentOfType<FuncOp>());
-  if (!funcOp) return {};
-  auto dispatchFnAttr =
-      funcOp.getAttrOfType<StringAttr>(getDispatchFuncAttrName());
-  if (!dispatchFnAttr) return {};
-  return dispatchFnAttr.getValue();
-}
-
-bool isDispatchFuncImpl(FuncOp funcOp) {
-  return !!(funcOp.getAttrOfType<StringAttr>(getDispatchFuncAttrName()));
-}
-
-/// Helper function to check shapes are equal. Only care that the number of
-/// elements be equal.
-static bool areShapesEqual(ArrayRef<int64_t> lhs, ArrayRef<int64_t> rhs) {
-  auto reduceFn = [](ArrayRef<int64_t> vector) -> int64_t {
-    int64_t init = 1;
-    for (auto val : vector) init *= val;
-    return init;
-  };
-  return reduceFn(lhs) == reduceFn(rhs);
-}
-
-/// Get the shape to use for a type. For now this is returning shapes as static
-/// value.
-// TODO(ravishankarm) : Modify this to return the Values to use for the extent
-// to handle dynamic shapes.
-static LogicalResult getExtentFromStoreOpSrc(IREE::StoreOutputOp storeOp,
-                                             SmallVectorImpl<int64_t> &extent) {
-  Value srcVal = storeOp.src();
-  if (srcVal.getType().isSignlessIntOrFloat()) {
-    extent.clear();
-    extent.push_back(1);
-    return success();
-  }
-  if (auto shapedType = srcVal.getType().dyn_cast<ShapedType>()) {
-    if (shapedType.hasStaticShape()) {
-      extent.assign(shapedType.getShape().rbegin(),
-                    shapedType.getShape().rend());
-      if (extent.empty()) {
-        extent.clear();
-        extent.push_back(1);
-      }
-      return success();
-    }
-  }
-  return storeOp.emitError(
-      "unable to extract domain size from store operation");
-}
-
-// TODO(ravishankarm) : Modify this to return the Values to support dynamic
-// shapes.
-LogicalResult getLaunchSize(FuncOp funcOp,
-                            SmallVectorImpl<int64_t> &launchSize) {
-  auto &body = funcOp.getBody();
-  if (!llvm::hasSingleElement(body)) {
-    return funcOp.emitError(
-        "unhandled multiple blocks within dispatch function");
-  }
-  auto storeOps = body.front().getOps<IREE::StoreOutputOp>();
-  if (storeOps.empty())
-    return funcOp.emitError(
-        "expected dispatch function to have at least one iree.store_output "
-        "instruction");
-
-  IREE::StoreOutputOp firstStoreOp = *storeOps.begin();
-  if (failed(getExtentFromStoreOpSrc(firstStoreOp, launchSize))) {
-    return firstStoreOp.emitError("unhandled type of the output tensor");
-  }
-  for (auto it = std::next(storeOps.begin()), ie = storeOps.end(); it != ie;
-       ++it) {
-    SmallVector<int64_t, 3> checkShape;
-    IREE::StoreOutputOp storeOp = *it;
-    if (failed(getExtentFromStoreOpSrc(storeOp, checkShape))) {
-      return storeOp.emitError("unhandled type of the output tensor");
-    }
-    if (!areShapesEqual(launchSize, checkShape)) {
-      return storeOp.emitError("mismatch in shapes of the output tensors");
-    }
-  }
-  return success();
-}
-
-template <typename intType>
-LogicalResult getWorkGroupSize(FuncOp funcOp,
-                               SmallVectorImpl<intType> &workGroupSize) {
-  auto entryPointABIAttr = spirv::lookupEntryPointABI(funcOp);
-  if (!entryPointABIAttr) {
-    return funcOp.emitError(
-        "expected operation to be in dispatch function to get workgroup size");
-  }
-  DenseIntElementsAttr workGroupSizeAttr = entryPointABIAttr.local_size();
-  workGroupSize.clear();
-  for (const APInt &value : workGroupSizeAttr.getValues<APInt>()) {
-    workGroupSize.push_back(value.getSExtValue());
-  }
-  return success();
-}
-
-LogicalResult updateWorkGroupSize(Operation *op,
-                                  ArrayRef<int64_t> workGroupSize) {
-  // Need to update both the surrounding FuncOp that has the spv.entry_point_abi
-  // attribute, and the hal.executable.
-  FuncOp funcOp =
-      (isa<FuncOp>(op) ? cast<FuncOp>(op) : op->getParentOfType<FuncOp>());
-  MLIRContext *context = op->getContext();
-  SmallVector<int32_t, 3> workGroupSizeVec(llvm::map_range(
-      workGroupSize,
-      [](int64_t value) { return static_cast<int32_t>(value); }));
-  workGroupSizeVec.resize(3, 1);
-  funcOp.setAttr(spirv::getEntryPointABIAttrName(),
-                 spirv::getEntryPointABIAttr(workGroupSizeVec, context));
-  return success();
-}
-
-template LogicalResult getWorkGroupSize<int32_t>(
-    FuncOp funcOp, SmallVectorImpl<int32_t> &workGroupSize);
-template LogicalResult getWorkGroupSize<int64_t>(
-    FuncOp funcOp, SmallVectorImpl<int64_t> &workGroupSize);
-
-}  // namespace iree_compiler
-}  // namespace mlir
diff --git a/iree/compiler/Translation/CodegenUtils/CodegenUtils.h b/iree/compiler/Translation/CodegenUtils/CodegenUtils.h
deleted file mode 100644
index affa51e..0000000
--- a/iree/compiler/Translation/CodegenUtils/CodegenUtils.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef IREE_COMPILER_TRANSLATION_CODEGENUTILS_CODEGENUTILS_H_
-#define IREE_COMPILER_TRANSLATION_CODEGENUTILS_CODEGENUTILS_H_
-
-#include "mlir/IR/Function.h"
-#include "mlir/Support/LogicalResult.h"
-
-namespace mlir {
-namespace iree_compiler {
-
-/// Drop trailing ones.
-ArrayRef<int64_t> dropTrailingOnes(ArrayRef<int64_t> vector);
-
-/// Get the name of the attr used to propogate the dispatch function name.
-StringRef getDispatchFuncAttrName();
-
-/// Get the name to use for the dispatch function by looking at the attribute on
-/// the surrounding FuncOp.
-Optional<StringRef> getDispatchFuncName(Operation *op);
-
-/// Checks that a given function is a dispatch function implementaiton.
-bool isDispatchFuncImpl(FuncOp funcOp);
-
-/// The launch size is the size of the outputs of the kernel. For now all
-/// outputs have to be the same shape and static shaped.
-// TODO(ravishankarm) : Modify this to return the Values to support dynamic
-// shapes.
-LogicalResult getLaunchSize(FuncOp funcOp,
-                            SmallVectorImpl<int64_t> &launchSize);
-
-/// Gets the workgroup size. Has to be a static constant.
-template <typename intType>
-LogicalResult getWorkGroupSize(FuncOp funcOp,
-                               SmallVectorImpl<intType> &workGroupSize);
-
-/// Updates the workgroup size used for the dispatch region.
-LogicalResult updateWorkGroupSize(Operation *op,
-                                  ArrayRef<int64_t> workGroupSize);
-
-}  // namespace iree_compiler
-}  // namespace mlir
-
-#endif  // IREE_COMPILER_TRANSLATION_CODEGENUTILS_CODEGENUTILS_H_
diff --git a/iree/compiler/Translation/SPIRV/LinalgToSPIRV/ConvertToGPUPass.cpp b/iree/compiler/Translation/SPIRV/LinalgToSPIRV/ConvertToGPUPass.cpp
index 4cb4cb6..98e1cd6 100644
--- a/iree/compiler/Translation/SPIRV/LinalgToSPIRV/ConvertToGPUPass.cpp
+++ b/iree/compiler/Translation/SPIRV/LinalgToSPIRV/ConvertToGPUPass.cpp
@@ -17,7 +17,6 @@
 // Partition computation within dispatch function to workgroups/workitems.
 //
 //===----------------------------------------------------------------------===//
-#include "iree/compiler/Translation/CodegenUtils/CodegenUtils.h"
 #include "iree/compiler/Translation/CodegenUtils/MarkerUtils.h"
 #include "iree/compiler/Translation/SPIRV/LinalgToSPIRV/Passes.h"
 #include "mlir/Conversion/AffineToStandard/AffineToStandard.h"
diff --git a/iree/compiler/Translation/SPIRV/LinalgToSPIRV/LinalgTileAndFusePass.cpp b/iree/compiler/Translation/SPIRV/LinalgToSPIRV/LinalgTileAndFusePass.cpp
index dd4a6ad..36f8651 100644
--- a/iree/compiler/Translation/SPIRV/LinalgToSPIRV/LinalgTileAndFusePass.cpp
+++ b/iree/compiler/Translation/SPIRV/LinalgToSPIRV/LinalgTileAndFusePass.cpp
@@ -17,11 +17,11 @@
 // Implements a pass to tile and fuse linalg operations on buffers.
 //
 //===----------------------------------------------------------------------===//
-#include "iree/compiler/Translation/CodegenUtils/CodegenUtils.h"
 #include "iree/compiler/Translation/CodegenUtils/MarkerUtils.h"
 #include "iree/compiler/Translation/SPIRV/LinalgToSPIRV/Passes.h"
 #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
 #include "mlir/Dialect/Linalg/Transforms/LinalgTransforms.h"
+#include "mlir/Dialect/SPIRV/TargetAndABI.h"
 #include "mlir/IR/Function.h"
 #include "mlir/IR/PatternMatch.h"
 #include "mlir/Pass/Pass.h"
@@ -34,6 +34,35 @@
 
 static constexpr unsigned kMaxWorkgroupRank = 3;
 
+ArrayRef<int64_t> dropTrailingOnes(ArrayRef<int64_t> vector) {
+  if (vector.empty()) return vector;
+  auto numTrailingOnes = 0;
+  for (unsigned i = vector.size() - 1; i > 0; --i) {
+    if (vector[i] != 1) {
+      break;
+    }
+    numTrailingOnes++;
+  }
+  return vector.drop_back(numTrailingOnes);
+}
+
+/// Updates the workgroup size used for the dispatch region.
+LogicalResult updateWorkGroupSize(Operation *op,
+                                  ArrayRef<int64_t> workGroupSize) {
+  // Need to update both the surrounding FuncOp that has the spv.entry_point_abi
+  // attribute, and the hal.executable.
+  FuncOp funcOp =
+      (isa<FuncOp>(op) ? cast<FuncOp>(op) : op->getParentOfType<FuncOp>());
+  MLIRContext *context = op->getContext();
+  SmallVector<int32_t, 3> workGroupSizeVec(llvm::map_range(
+      workGroupSize,
+      [](int64_t value) { return static_cast<int32_t>(value); }));
+  workGroupSizeVec.resize(3, 1);
+  funcOp.setAttr(spirv::getEntryPointABIAttrName(),
+                 spirv::getEntryPointABIAttr(workGroupSizeVec, context));
+  return success();
+}
+
 /// Returns the tile sizes to use by default based on number of dimension of
 /// parallelism.
 static void getDefaultTileSizes(unsigned numDims,
diff --git a/iree/compiler/Translation/SPIRV/LinalgToSPIRV/LowerToSPIRV.cpp b/iree/compiler/Translation/SPIRV/LinalgToSPIRV/LowerToSPIRV.cpp
index 27cf277..b6ed199 100644
--- a/iree/compiler/Translation/SPIRV/LinalgToSPIRV/LowerToSPIRV.cpp
+++ b/iree/compiler/Translation/SPIRV/LinalgToSPIRV/LowerToSPIRV.cpp
@@ -22,7 +22,6 @@
 
 #include "iree/compiler/Dialect/Shape/Transforms/Passes.h"
 #include "iree/compiler/Translation/CodegenPasses/Passes.h"
-#include "iree/compiler/Translation/CodegenUtils/CodegenUtils.h"
 #include "iree/compiler/Translation/SPIRV/LinalgToSPIRV/Passes.h"
 #include "mlir/Conversion/GPUToSPIRV/ConvertGPUToSPIRV.h"
 #include "mlir/Conversion/LoopsToGPU/LoopsToGPUPass.h"