Merge google -> main (#4659)

* 697c30859 Synchronize submodules with LLVM at llvm/llvm-project@b92a39ac1319
* 19c7fb7bb Integrate LLVM at llvm/llvm-project@b92a39ac1319
* fc17abeac Integrate LLVM at llvm/llvm-project@081c1db02dd2
* aec5174d1 Merge pull request #4655 from NatashaKnk:main-to-google
* ee9a9cb6b Integrate LLVM at llvm/llvm-project@e29552c5aff6
diff --git a/SUBMODULE_VERSIONS.txt b/SUBMODULE_VERSIONS.txt
index e51f3d9..e619cd0 100644
--- a/SUBMODULE_VERSIONS.txt
+++ b/SUBMODULE_VERSIONS.txt
@@ -4,16 +4,16 @@
 4fb0ff7069bd88ee85902f4d0bb62794e5f6d021 third_party/flatcc
 b1fbd33c06cdb0024c67733c6fdec2009d17b384 third_party/googletest
 88b845dee001723c4a0db1fe5477de735b6d3bb0 third_party/liburing
-5e89086c53b991b0c72bd009e7fe852789967868 third_party/llvm-bazel
-c85b6bf33c473633c9873b600f8a31fa55464e1e third_party/llvm-project
+77871f43e449ad492bf8b94dee453670ac15e158 third_party/llvm-bazel
+b92a39ac1319c796777bca19a3af2856acbc69c1 third_party/llvm-project
 4e501d8c6e2d834999301a2492adefe5ddbdc0cb third_party/mlir-emitc
-30ce82790d4ffef48f70e99a6f96f13ddbe857d8 third_party/mlir-hlo
+2b72ddc6b2b4d670bcd1ffa3f4652468b419f986 third_party/mlir-hlo
 2b2bd45bbf9be04fd22ece5cc1f54679202e9257 third_party/pffft
 d8c7ee00a687ac369e62e2032514a93a9b413502 third_party/pybind11
 2887692065c38ef6617f423feafc6b69dd0a0681 third_party/ruy
 685f86471e9d26b3eb7676695a2e2cefb4551ae9 third_party/spirv_cross
 f8bf11a0253a32375c32cad92c841237b96696c0 third_party/spirv_headers
-9fd861b9a1d777daef138a2e06d7f50b7ea63744 third_party/tensorflow
+16613a70ef36b103e7c1ffa903d541814b62c109 third_party/tensorflow
 9c3dac3ed2bd647b8d63f197fed058fee97a7e1e third_party/tracy
 9bd3f561bcee3f01d22912de10bb07ce4e23d378 third_party/vulkan_headers
 3528e2aed3e8808f33e1e7d63eeb1560456a605a third_party/vulkan_memory_allocator
diff --git a/docs/design_docs/codegen_passes.md b/docs/design_docs/codegen_passes.md
index 4a1335c..b36db1c 100644
--- a/docs/design_docs/codegen_passes.md
+++ b/docs/design_docs/codegen_passes.md
@@ -415,11 +415,12 @@
 while the former are to be executed collectively by workitems within a
 workgroup, the latter have to be executed by all workitems across workgroups.
 One way to distinguish these two operations is to use the marker mechanism in
-Linalg ([LinalgMarker][LinalgTilingPatterns]). This is a `StrAttr` whose value
-can be used to encode the scope of the operation. For example, in Snippet 7
-above, the tiled `linalg.matmul` operation has a marker `workgroup` to indicate
-that this operation needs to be executed by a workgroup in a collective manner.
-At this time, the code-generation pipeline uses only the `workgroup` marker.
+Linalg ([LinalgTransformationFilter][LinalgTilingPatterns]). This is a `StrAttr`
+whose value can be used to encode the scope of the operation. For example, in
+Snippet 7 above, the tiled `linalg.matmul` operation has a marker `workgroup` to
+indicate that this operation needs to be executed by a workgroup in a collective
+manner. At this time, the code-generation pipeline uses only the `workgroup`
+marker.
 
 __Roadmap Note__ : Markers are meant to be short-lived, ideally set and consumed
 within the same pass. In the current pipeline the lifetime spans passes to allow
diff --git a/experimental/ModelBuilder/test/BenchMatMulVectorGPU.cpp b/experimental/ModelBuilder/test/BenchMatMulVectorGPU.cpp
index 0ddcfbe..02c5102 100644
--- a/experimental/ModelBuilder/test/BenchMatMulVectorGPU.cpp
+++ b/experimental/ModelBuilder/test/BenchMatMulVectorGPU.cpp
@@ -17,7 +17,7 @@
 #include "experimental/ModelBuilder/ModelRunner.h"
 #include "experimental/ModelBuilder/VulkanWrapperPass.h"
 #include "iree/compiler/Conversion/CodegenUtils/ForOpCanonicalization.h"
-#include "iree/compiler/Conversion/CodegenUtils/MatmulCodegenStrategy.h"
+#include "iree/compiler/Conversion/CodegenUtils/TransformUtils.h"
 #include "iree/compiler/Conversion/LinalgToSPIRV/MemorySpace.h"
 #include "iree/compiler/Conversion/LinalgToSPIRV/Passes.h"
 #include "iree/compiler/Conversion/LinalgToSPIRV/Utils.h"
@@ -30,6 +30,7 @@
 #include "mlir/Dialect/GPU/Passes.h"
 #include "mlir/Dialect/Linalg/EDSC/Intrinsics.h"
 #include "mlir/Dialect/Linalg/Passes.h"
+#include "mlir/Dialect/Linalg/Transforms/CodegenStrategy.h"
 #include "mlir/Dialect/Linalg/Transforms/Transforms.h"
 #include "mlir/Dialect/SPIRV/IR/SPIRVOps.h"
 #include "mlir/Dialect/SPIRV/IR/TargetAndABI.h"
@@ -250,8 +251,8 @@
   return a == b;
 }
 
-static MatmulCodegenStrategy createPowerVRStrategy(int tileM, int tileN,
-                                                   int tileK, int warpSize) {
+static linalg::CodegenStrategy createPowerVRStrategy(int tileM, int tileN,
+                                                     int tileK, int warpSize) {
   const std::array<int64_t, 3> nativeSize = {1, 1, 1};
   linalg::LinalgLoopDistributionOptions WIDistribute;
   linalg::LinalgLoopDistributionOptions WGDistribute;
@@ -274,7 +275,7 @@
                    b.create<ConstantIndexOp>(loc, 1)};
     return procInfo;
   };
-  MatmulCodegenStrategy strategy;
+  linalg::CodegenStrategy strategy;
   SmallVector<int64_t, 2> promotionList;
   // promote matrix B
   promotionList.push_back(1);
@@ -301,13 +302,16 @@
           .setLoopType(linalg::LinalgTilingLoopType::ParallelLoops)
           .setTileSizes({1, tileN, tileK})
           .setDistributionOptions(WIDistribute));
-  strategy.vectorize<linalg::MatmulOp>().unrollVector<vector::ContractionOp>(
-      nativeSize);
+  strategy.vectorize<linalg::MatmulOp>()
+      // TODO: Upstream to core.
+      // .unrollVector<vector::ContractionOp>(nativeSize)
+      ;
+  (void)nativeSize;
   return strategy;
 }
 
-static MatmulCodegenStrategy createMaliStrategy(int tileM, int tileN, int tileK,
-                                                int warpSize) {
+static linalg::CodegenStrategy createMaliStrategy(int tileM, int tileN,
+                                                  int tileK, int warpSize) {
   const std::array<int64_t, 3> nativeSize = {1, 4, 1};
   linalg::LinalgLoopDistributionOptions WIDistribute;
   linalg::LinalgLoopDistributionOptions WGDistribute;
@@ -330,7 +334,7 @@
                    b.create<ConstantIndexOp>(loc, 1)};
     return procInfo;
   };
-  MatmulCodegenStrategy strategy;
+  linalg::CodegenStrategy strategy;
   strategy
       .tile<linalg::MatmulOp>(
           linalg::LinalgTilingOptions()
@@ -344,13 +348,16 @@
           .setTileSizes({tileM, tileN / warpSize, tileK})
           .setDistributionOptions(WIDistribute));
   strategy.vectorize<linalg::MatmulOp>()
-      .unrollVector<vector::TransferReadOp>({1, 4})
-      .unrollVector<vector::ContractionOp>(nativeSize);
+      // TODO: Upstream to core.
+      // .unrollVector<vector::TransferReadOp>({1, 4})
+      // .unrollVector<vector::ContractionOp>(nativeSize)
+      ;
+  (void)nativeSize;
   return strategy;
 }
 
-static MatmulCodegenStrategy createTuringStrategy(int tileM, int tileN,
-                                                  int tileK) {
+static linalg::CodegenStrategy createTuringStrategy(int tileM, int tileN,
+                                                    int tileK) {
   std::array<int64_t, 3> nativeSize;
   if (matType == "i8xi8xi32")
     nativeSize = {16, 16, 32};
@@ -372,7 +379,7 @@
       linalg::DistributionMethod::CyclicNumProcsEqNumIters};
   SGDistribute.procInfo = getSubgroupIds;
 
-  MatmulCodegenStrategy strategy;
+  linalg::CodegenStrategy strategy;
   strategy
       .tile<linalg::MatmulOp>(
           linalg::LinalgTilingOptions()
@@ -398,8 +405,11 @@
                     {tileM / numSubgroupY, tileN / numSubgroupX, tileK})
                 .setDistributionOptions(SGDistribute));
   }
-  strategy.vectorize<linalg::MatmulOp>().unrollVector<vector::ContractionOp>(
-      nativeSize);
+  strategy.vectorize<linalg::MatmulOp>()
+      // TODO: Upstream to core.
+      // .unrollVector<vector::ContractionOp>(nativeSize)
+      ;
+  (void)nativeSize;
   return strategy;
 }
 
@@ -449,7 +459,7 @@
                      ModelRunner::Target::GPUTarget);
   CompilationOptions options;
   options.loweringPasses = [&](mlir::PassManager &pm) {
-    MatmulCodegenStrategy strategy;
+    linalg::CodegenStrategy strategy;
 
     if (target == "powerVR") {
       strategy = createPowerVRStrategy(tileM, tileN, tileK, warpSize);
diff --git a/experimental/ModelBuilder/test/TestVectorToGPU.cpp b/experimental/ModelBuilder/test/TestVectorToGPU.cpp
index 9848826..bbc0b63 100644
--- a/experimental/ModelBuilder/test/TestVectorToGPU.cpp
+++ b/experimental/ModelBuilder/test/TestVectorToGPU.cpp
@@ -19,33 +19,34 @@
 
 // clang-format on
 #include <string>
-#include "iree/compiler/Conversion/CodegenUtils/MatmulCodegenStrategy.h"
-#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
-#include "mlir/Dialect/Vector/VectorOps.h"
-#include "mlir/ExecutionEngine/CRunnerUtils.h"
-#include "mlir/ExecutionEngine/RunnerUtils.h"
+
 #include "experimental/ModelBuilder/ModelBuilder.h"
 #include "experimental/ModelBuilder/ModelRunner.h"
 #include "experimental/ModelBuilder/VulkanWrapperPass.h"
+#include "iree/compiler/Conversion/CodegenUtils/TransformUtils.h"
+#include "iree/compiler/Conversion/LinalgToSPIRV/Passes.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/InitLLVM.h"
+#include "mlir/Conversion/GPUToVulkan/ConvertGPUToVulkanPass.h"
+#include "mlir/Conversion/LinalgToLLVM/LinalgToLLVM.h"
 #include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
+#include "mlir/Conversion/StandardToSPIRV/StandardToSPIRVPass.h"
+#include "mlir/Dialect/GPU/Passes.h"
+#include "mlir/Dialect/Linalg/EDSC/Intrinsics.h"
+#include "mlir/Dialect/Linalg/Passes.h"
+#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
+#include "mlir/Dialect/SPIRV/IR/SPIRVOps.h"
 #include "mlir/Dialect/SPIRV/IR/TargetAndABI.h"
+#include "mlir/Dialect/SPIRV/Transforms/Passes.h"
+#include "mlir/Dialect/Vector/VectorOps.h"
+#include "mlir/ExecutionEngine/CRunnerUtils.h"
+#include "mlir/ExecutionEngine/RunnerUtils.h"
 #include "mlir/IR/Builders.h"
 #include "mlir/IR/MLIRContext.h"
 #include "mlir/IR/OperationSupport.h"
 #include "mlir/Parser.h"
-#include "mlir/Dialect/Linalg/EDSC/Intrinsics.h"
-#include "mlir/Pass/PassManager.h"
-#include "iree/compiler/Conversion/LinalgToSPIRV/Passes.h"
-#include "mlir/Conversion/GPUToVulkan/ConvertGPUToVulkanPass.h"
-#include "mlir/Conversion/LinalgToLLVM/LinalgToLLVM.h"
-#include "mlir/Conversion/StandardToSPIRV/StandardToSPIRVPass.h"
-#include "mlir/Dialect/GPU/Passes.h"
-#include "mlir/Dialect/Linalg/Passes.h"
-#include "mlir/Dialect/SPIRV/Transforms/Passes.h"
-#include "mlir/Dialect/SPIRV/IR/SPIRVOps.h"
 #include "mlir/Pass/Pass.h"
+#include "mlir/Pass/PassManager.h"
 #include "mlir/Transforms/Passes.h"
 
 using namespace mlir;                    // NOLINT
diff --git a/iree/compiler/Conversion/CodegenUtils/BUILD b/iree/compiler/Conversion/CodegenUtils/BUILD
index 4b58a54..ea3c83c 100644
--- a/iree/compiler/Conversion/CodegenUtils/BUILD
+++ b/iree/compiler/Conversion/CodegenUtils/BUILD
@@ -27,14 +27,14 @@
         "FunctionUtils.cpp",
         "GetNumWorkgroups.cpp",
         "MarkerUtils.cpp",
-        "MatmulCodegenStrategy.cpp",
+        "TransformUtils.cpp",
     ],
     hdrs = [
         "ForOpCanonicalization.h",
         "FunctionUtils.h",
         "GetNumWorkgroups.h",
         "MarkerUtils.h",
-        "MatmulCodegenStrategy.h",
+        "TransformUtils.h",
     ],
     deps = [
         "//iree/compiler/Dialect/HAL/IR",
diff --git a/iree/compiler/Conversion/CodegenUtils/CMakeLists.txt b/iree/compiler/Conversion/CodegenUtils/CMakeLists.txt
index bf8ddd7..a4589fc 100644
--- a/iree/compiler/Conversion/CodegenUtils/CMakeLists.txt
+++ b/iree/compiler/Conversion/CodegenUtils/CMakeLists.txt
@@ -22,13 +22,13 @@
     "FunctionUtils.h"
     "GetNumWorkgroups.h"
     "MarkerUtils.h"
-    "MatmulCodegenStrategy.h"
+    "TransformUtils.h"
   SRCS
     "ForOpCanonicalization.cpp"
     "FunctionUtils.cpp"
     "GetNumWorkgroups.cpp"
     "MarkerUtils.cpp"
-    "MatmulCodegenStrategy.cpp"
+    "TransformUtils.cpp"
   DEPS
     LLVMSupport
     MLIRAffine
diff --git a/iree/compiler/Conversion/CodegenUtils/MatmulCodegenStrategy.cpp b/iree/compiler/Conversion/CodegenUtils/MatmulCodegenStrategy.cpp
deleted file mode 100644
index b36f4f7..0000000
--- a/iree/compiler/Conversion/CodegenUtils/MatmulCodegenStrategy.cpp
+++ /dev/null
@@ -1,286 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// -----------------------------------------------------------------------------
-// This code will be removed once this gets upstreamed to common mlir.
-// Please try to limit changes in this code only minor changes.
-
-#include "iree/compiler/Conversion/CodegenUtils/MatmulCodegenStrategy.h"
-
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/Debug.h"
-#include "mlir/Analysis/SliceAnalysis.h"
-#include "mlir/Conversion/VectorToSCF/VectorToSCF.h"
-#include "mlir/Dialect/Affine/IR/AffineOps.h"
-#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
-#include "mlir/Dialect/Linalg/Transforms/Hoisting.h"
-#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
-#include "mlir/Dialect/Linalg/Utils/Utils.h"
-#include "mlir/Dialect/SCF/SCF.h"
-#include "mlir/Dialect/SCF/Utils.h"
-#include "mlir/Dialect/StandardOps/IR/Ops.h"
-#include "mlir/Dialect/Vector/EDSC/Intrinsics.h"
-#include "mlir/Dialect/Vector/VectorOps.h"
-#include "mlir/Dialect/Vector/VectorTransforms.h"
-#include "mlir/IR/AffineExpr.h"
-#include "mlir/IR/Attributes.h"
-#include "mlir/IR/BlockAndValueMapping.h"
-#include "mlir/IR/BuiltinTypes.h"
-#include "mlir/IR/Dominance.h"
-#include "mlir/IR/MLIRContext.h"
-#include "mlir/IR/OperationSupport.h"
-#include "mlir/IR/PatternMatch.h"
-#include "mlir/IR/Value.h"
-#include "mlir/IR/Visitors.h"
-#include "mlir/Pass/Pass.h"
-#include "mlir/Pass/PassManager.h"
-#include "mlir/Support/LogicalResult.h"
-#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
-#include "mlir/Transforms/LoopUtils.h"
-#include "mlir/Transforms/Passes.h"
-
-using namespace mlir;          // NOLINT
-using namespace mlir::linalg;  // NOLINT
-
-#define DEBUG_TYPE "matmul-codegen-strategy"
-
-//===----------------------------------------------------------------------===//
-// TODO: Cleanup and upstream these to go into core. Please ignore for now !
-//===----------------------------------------------------------------------===//
-static void hoistRedundantCopies(FuncOp func) {
-  bool changed = true;
-  while (changed) {
-    changed = false;
-    func.walk([&](linalg::FillOp op) {
-      auto loop = op->getParentOfType<scf::ForOp>();
-      if (!loop) return;
-
-      for (auto operand : op.getOperands())
-        if (!loop.isDefinedOutsideOfLoop(operand)) return;
-
-      // Hoist fill before.
-      op.getOperation()->moveBefore(loop);
-      changed = true;
-    });
-
-    func.walk([&](linalg::CopyOp op) {
-      auto loop = op->getParentOfType<scf::ForOp>();
-      if (!loop) return;
-
-      for (auto operand : op.getOperands())
-        if (!loop.isDefinedOutsideOfLoop(operand)) return;
-
-      Value sourceView = op.getInput(0);
-      while (auto subViewOp = sourceView.getDefiningOp<SubViewOp>())
-        sourceView = subViewOp.getViewSource();
-
-      // Source traces back to a block argument.
-      if (sourceView.isa<BlockArgument>()) {
-        op.getOperation()->moveBefore(loop);
-      } else {
-        assert(sourceView.getDefiningOp<ViewOp>() ||
-               sourceView.getDefiningOp<AllocOp>() ||
-               sourceView.getDefiningOp<AllocaOp>());
-        op.getOperation()->moveAfter(loop);
-      }
-      changed = true;
-    });
-  }
-}
-
-/// Substitute scf.for = %lb to %ub step %step by an AffineExpr expressing:
-///   `%lb + %step * new_dim` where
-/// 1. the AffineExpr for %lb is either an AffineConstantExpr or an
-/// AffineDimExpr depending on whether the value is constant or not.
-/// 2. the AffineExpr for %step is either an AffineConstantExpr or an
-/// AffineSymbolExpr depending on whether the value is constant or not.
-///
-static void substitute(scf::ForOp forOp, SmallVectorImpl<AffineExpr> &exprs,
-                       SmallVectorImpl<Value> &dims,
-                       SmallVectorImpl<Value> &symbols) {
-  MLIRContext *ctx = forOp.getContext();
-  auto lbConstant = forOp.lowerBound().getDefiningOp<ConstantIndexOp>();
-  AffineExpr lb = lbConstant ? getAffineConstantExpr(lbConstant.getValue(), ctx)
-                             : getAffineDimExpr(dims.size(), ctx);
-
-  auto stepConstant = forOp.step().getDefiningOp<ConstantIndexOp>();
-  AffineExpr step = stepConstant
-                        ? getAffineConstantExpr(stepConstant.getValue(), ctx)
-                        : getAffineSymbolExpr(symbols.size(), ctx);
-
-  if (!lbConstant) dims.push_back(forOp.lowerBound());
-  if (!stepConstant) symbols.push_back(forOp.step());
-  exprs.push_back(lb + step * getAffineDimExpr(dims.size(), ctx));
-
-  auto ubConstant = forOp.upperBound().getDefiningOp<ConstantIndexOp>();
-  AffineExpr ub = ubConstant ? getAffineConstantExpr(ubConstant.getValue(), ctx)
-                             : getAffineDimExpr(dims.size(), ctx);
-  if (!ubConstant) dims.push_back(forOp.upperBound());
-  exprs.push_back(ub);
-
-  dims.push_back(forOp.getInductionVar());
-}
-
-/// Substitue dimensions coming from forOp or AffineMin. Return false if it has
-/// unknown dimension operands.
-static bool substitute(AffineMinOp minOp, SmallVectorImpl<AffineExpr> &exprs,
-                       SmallVectorImpl<Value> &dims,
-                       SmallVectorImpl<Value> &symbols) {
-  if (minOp.getDimOperands().empty()) return false;
-  for (Value v : minOp.getDimOperands()) {
-    if (auto forOp = scf::getForInductionVarOwner(v)) {
-      substitute(forOp, exprs, dims, symbols);
-      continue;
-    }
-    if (auto parentMinOp = v.getDefiningOp<AffineMinOp>()) {
-      substitute(parentMinOp, exprs, dims, symbols);
-      continue;
-    }
-    // If couldn't substitue the dimension give up and use the original map.
-    return false;
-  }
-  return true;
-}
-
-LogicalResult AffineMinCanonicalizationPattern::matchAndRewrite(
-    AffineMinOp minOp, PatternRewriter &rewriter) const {
-  LLVM_DEBUG(llvm::dbgs() << "\nCanonicalize AffineMin: "
-                          << *minOp.getOperation() << "\n");
-
-  int64_t min = std::numeric_limits<int64_t>::max();
-  for (auto e : minOp.map().getResults())
-    if (auto cstExpr = e.dyn_cast<AffineConstantExpr>())
-      min = std::min(min, cstExpr.getValue());
-  if (min == std::numeric_limits<int64_t>::max()) return failure();
-
-  MLIRContext *ctx = minOp.getContext();
-  AffineMap map;
-  SmallVector<Value, 4> operands;
-  SmallVector<AffineExpr, 4> exprs;
-  SmallVector<Value, 4> dims, symbols;
-  if (substitute(minOp, exprs, dims, symbols)) {
-    operands = dims;
-    operands.append(symbols.begin(), symbols.end());
-
-    map = AffineMap::get(dims.size(), symbols.size(), exprs, ctx);
-    LLVM_DEBUG(llvm::dbgs() << "Substitution map: " << map << "\n");
-  } else {
-    map = minOp.getAffineMap();
-    operands = minOp.getDimOperands();
-    operands.append(minOp.getSymbolOperands().begin(),
-                    minOp.getSymbolOperands().end());
-  }
-  SmallVector<AffineExpr, 4> modExprs;
-  for (unsigned idx = 0, e = map.getNumResults(); idx < e; ++idx)
-    modExprs.push_back(getAffineDimExpr(idx, ctx) % min);
-  map = AffineMap::get(map.getNumResults(), 0, modExprs, ctx).compose(map);
-  canonicalizeMapAndOperands(&map, &operands);
-  map = simplifyAffineMap(map);
-
-  LLVM_DEBUG(llvm::dbgs() << "Post mod: " << map << "\n";
-             llvm::interleaveComma(operands, llvm::dbgs()));
-
-  if (!llvm::all_of(map.getResults(), [](AffineExpr e) {
-        if (auto cst = e.dyn_cast<AffineConstantExpr>())
-          return cst.getValue() == 0;
-        return false;
-      }))
-    return failure();
-
-  rewriter.replaceOpWithNewOp<ConstantIndexOp>(minOp, min);
-  return success();
-}
-//===----------------------------------------------------------------------===//
-// END TODO
-//===----------------------------------------------------------------------===//
-
-void MatmulCodegenStrategy::transform(FuncOp func) const {
-  MLIRContext *context = func.getContext();
-  // Emplace patterns one at a time while also maintaining a simple chained
-  // state transition.
-  unsigned stepCount = 0;
-  SmallVector<FrozenRewritePatternList, 4> stage1Patterns;
-  auto zeroState = Identifier::get(std::to_string(stepCount), context);
-  auto currentState = zeroState;
-  for (auto &t : transformationSequence) {
-    auto nextState = Identifier::get(std::to_string(++stepCount), context);
-    auto marker = (currentState == zeroState)
-                      ? linalg::LinalgMarker({}, nextState)
-                      : linalg::LinalgMarker(currentState, nextState);
-    stage1Patterns.emplace_back(t->buildRewritePatterns(context, marker));
-    currentState = nextState;
-  }
-
-  OwningRewritePatternList stage2Patterns =
-      linalg::getLinalgTilingCanonicalizationPatterns(context);
-  // Add extra patterns to canonicalize AffineMin in combination with scf loops
-  // operations after tiling.
-  stage2Patterns.insert<AffineMinCanonicalizationPattern,
-                        AffineMinSCFCanonicalizationPattern>(context);
-
-  auto stage3Transforms = [](Operation *op) {
-    promoteSingleIterationLoops(cast<FuncOp>(op));
-    return success();
-  };
-  linalg::applyStagedPatterns(func, stage1Patterns, std::move(stage2Patterns),
-                              stage3Transforms);
-
-  auto postStageTransforms = [this](Operation *op) {
-    // Run LICM and hoisting patterns after all the stages as we want to
-    // unrolling before moving transfer ops out of the loop.
-    if (hoistInvariantCode) {
-      PassManager pm(op->getContext());
-      pm.addPass(createLoopInvariantCodeMotionPass());
-      if (failed(pm.run(op->getParentOfType<ModuleOp>())))
-        llvm_unreachable("Unexpected failure in cleanup pass pipeline.");
-      hoistViewAllocOps(cast<FuncOp>(op));
-      hoistRedundantVectorTransfers(cast<FuncOp>(op));
-      hoistRedundantCopies(cast<FuncOp>(op));
-    }
-    OwningRewritePatternList patterns;
-    vector::populateVectorSlicesLoweringPatterns(patterns, op->getContext());
-    applyPatternsAndFoldGreedily(op, std::move(patterns));
-  };
-  postStageTransforms(func);
-  if (lowering != nullptr) lowering(func);
-}
-
-// Parametric lowering of vector contract for CPU target.
-static void cpuLowering(
-    FuncOp func, const vector::VectorTransformsOptions &vectorTransformsOptions,
-    const VectorTransferToSCFOptions &vectorToSCFOptions) {
-  // Programmatic controlled lowering of vector.contract only.
-  MLIRContext *context = func.getContext();
-  OwningRewritePatternList vectorContractLoweringPatterns;
-  vectorContractLoweringPatterns
-      .insert<ContractionOpToOuterProductOpLowering,
-              ContractionOpToMatmulOpLowering, ContractionOpLowering>(
-          vectorTransformsOptions, context);
-
-  applyPatternsAndFoldGreedily(func, std::move(vectorContractLoweringPatterns));
-
-  // Programmatic controlled lowering of vector.transfer only.
-  OwningRewritePatternList vectorToLoopsPatterns;
-  populateVectorToSCFConversionPatterns(vectorToLoopsPatterns, context,
-                                        vectorToSCFOptions);
-  applyPatternsAndFoldGreedily(func, std::move(vectorToLoopsPatterns));
-}
-
-MatmulCodegenStrategy &MatmulCodegenStrategy::setDefaultCPULowering() {
-  auto lowering = [this](FuncOp func) {
-    cpuLowering(func, vectorTransformsOptions, vectorToSCFOptions);
-  };
-  return setLoweringFunction(lowering);
-}
diff --git a/iree/compiler/Conversion/CodegenUtils/MatmulCodegenStrategy.h b/iree/compiler/Conversion/CodegenUtils/MatmulCodegenStrategy.h
deleted file mode 100644
index e93f50b..0000000
--- a/iree/compiler/Conversion/CodegenUtils/MatmulCodegenStrategy.h
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef IREE_COMPILER_CONVERSION_CODEGENUTILS_MATMULCODEGENSTRATEGY_H_
-#define IREE_COMPILER_CONVERSION_CODEGENUTILS_MATMULCODEGENSTRATEGY_H_
-
-#include <functional>
-
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringSwitch.h"
-#include "mlir/Conversion/VectorToSCF/VectorToSCF.h"
-#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
-#include "mlir/Dialect/Vector/VectorOps.h"
-#include "mlir/Dialect/Vector/VectorTransforms.h"
-#include "mlir/IR/BuiltinOps.h"
-#include "mlir/Support/LLVM.h"
-
-namespace mlir {
-
-/// Abstract Transformation class applied in a sequence that also handles state
-/// through markers.
-struct Transformation {
-  virtual ~Transformation() = default;
-  virtual OwningRewritePatternList buildRewritePatterns(
-      MLIRContext *context, linalg::LinalgMarker m) = 0;
-  linalg::LinalgMarker marker;
-};
-
-template <typename VectorOpType>
-struct UnrollVector : public Transformation {
-  explicit UnrollVector(ArrayRef<int64_t> targetShape)
-      : targetShape(targetShape.begin(), targetShape.end()) {}
-
-  OwningRewritePatternList buildRewritePatterns(
-      MLIRContext *ctx, linalg::LinalgMarker m) override {
-    OwningRewritePatternList vectorUnrollPatterns;
-    vectorUnrollPatterns.insert<vector::UnrollVectorPattern>(
-        ctx, vector::UnrollVectorOptions()
-                 .setNativeShape(targetShape)
-                 .setFilterConstraint([](Operation *op) {
-                   return success(isa<VectorOpType>(op));
-                 }));
-    vector::populateVectorToVectorCanonicalizationPatterns(vectorUnrollPatterns,
-                                                           ctx);
-    vector::populateVectorToVectorTransformationPatterns(vectorUnrollPatterns,
-                                                         ctx);
-    return vectorUnrollPatterns;
-  }
-
- private:
-  SmallVector<int64_t, 4> targetShape;
-};
-
-/// Promotion transformation enqueues a particular stage-1 pattern for
-/// `Tile<LinalgOpType>`with the appropriate `options`.
-// TODO: variadic LinalgOpTypes.
-template <typename LinalgOpType>
-struct Tile : public Transformation {
-  explicit Tile(linalg::LinalgTilingOptions options) : options(options) {}
-
-  OwningRewritePatternList buildRewritePatterns(
-      MLIRContext *context, linalg::LinalgMarker m) override {
-    OwningRewritePatternList tilingPatterns;
-    tilingPatterns.insert<linalg::LinalgTilingPattern<LinalgOpType>>(
-        context, options, m);
-    return tilingPatterns;
-  }
-
- private:
-  linalg::LinalgTilingOptions options;
-};
-
-/// Promotion transformation enqueues a particular stage-1 pattern for
-/// `Promote<LinalgOpType>`with the appropriate `options`.
-// TODO: variadic LinalgOpTypes.
-template <typename LinalgOpType>
-struct Promote : public Transformation {
-  explicit Promote(linalg::LinalgPromotionOptions options) : options(options) {}
-
-  OwningRewritePatternList buildRewritePatterns(
-      MLIRContext *context, linalg::LinalgMarker m) override {
-    OwningRewritePatternList promotionPatterns;
-    promotionPatterns.insert<linalg::LinalgPromotionPattern<LinalgOpType>>(
-        context, options, m);
-    return promotionPatterns;
-  }
-
- private:
-  linalg::LinalgPromotionOptions options;
-};
-
-/// Vectorization transformation enqueues a particular stage-1 pattern for
-/// `LinalgVectorizationPattern<LinalgOpType>` as well as copy to vector
-/// transfer rewrite forwarding patterns.
-// TODO: variadic LinalgOpTypes.
-template <typename LinalgOpType>
-struct Vectorize : public Transformation {
-  OwningRewritePatternList buildRewritePatterns(
-      MLIRContext *context, linalg::LinalgMarker m) override {
-    OwningRewritePatternList vectorizationPatterns;
-    // FillOp may interfere with forwarding patterns atm, so we bump up the
-    // priority of LinalgCopyVTRForwardingPattern /
-    // LinalgCopyVTWForwardingPattern.
-    vectorizationPatterns
-        .insert<linalg::LinalgVectorizationPattern<LinalgOpType>>(context, m);
-    vectorizationPatterns.insert<linalg::LinalgCopyVTRForwardingPattern,
-                                 linalg::LinalgCopyVTWForwardingPattern>(
-        context, /*benefit=*/2);
-    return vectorizationPatterns;
-  }
-};
-
-/// Matmul-specific strategy object controls how a linalg.matmul is
-/// progressively lowered.
-/// The strategy uses a 3-level staged patterns strategy which allows ordering
-/// transformations by using the Linalg `applyStagedPatterns` function, where:
-///   1. The first stage consists of the successive `tile`, `promote` and
-///   `vectorize` patterns, applied sequentially.
-///   2. The second stage consists of common local canonicalization patterns
-///   that are applied eagerly after each stage-1 pattern.
-///   3. the third stage consists of more global transformation, also applied
-///   eagerly, after all stage-2 patterns. Such more global transformations
-struct MatmulCodegenStrategy {
-  /// Append a pattern to add a level of tiling for `LinalgOpType` with tiling
-  /// `options`.
-  template <typename LinalgOpType>
-  MatmulCodegenStrategy &tile(linalg::LinalgTilingOptions options) {
-    transformationSequence.emplace_back(new Tile<LinalgOpType>(options));
-    return *this;
-  }
-  /// Conditionally append a pattern to add a level of tiling for `LinalgOpType`
-  /// with tiling `options`.
-  template <typename LinalgOpType>
-  MatmulCodegenStrategy &tileIf(bool b, linalg::LinalgTilingOptions options) {
-    return b ? tile<LinalgOpType>(options) : *this;
-  }
-  /// Append a pattern to add a level of promotion for `LinalgOpType` with
-  /// promotion `options`.
-  template <typename LinalgOpType>
-  MatmulCodegenStrategy &promote(linalg::LinalgPromotionOptions options) {
-    transformationSequence.emplace_back(new Promote<LinalgOpType>(options));
-    return *this;
-  }
-  /// Conditionally append a pattern to add a level of promotion for
-  /// `LinalgOpType` with promotion `options`.
-  template <typename LinalgOpType>
-  MatmulCodegenStrategy &promoteIf(bool b,
-                                   linalg::LinalgPromotionOptions options) {
-    return b ? promote<LinalgOpType>(options) : *this;
-    return *this;
-  }
-  /// Append a pattern to rewrite `LinalgOpType` as a vector operation.
-  template <typename LinalgOpType>
-  MatmulCodegenStrategy &vectorize() {
-    transformationSequence.emplace_back(new Vectorize<LinalgOpType>());
-    return *this;
-  }
-  /// Conditionally append a pattern to rewrite `LinalgOpType` as a vector
-  /// operation.
-  template <typename LinalgOpType>
-  MatmulCodegenStrategy &vectorizeIf(bool b) {
-    return b ? vectorize<LinalgOpType>() : *this;
-    return *this;
-  }
-  /// Configure the post staged-patterns late vector transformations.
-  MatmulCodegenStrategy &setVectorTransformsOptions(
-      vector::VectorTransformsOptions options) {
-    vectorTransformsOptions = options;
-    return *this;
-  }
-  /// Configure the post staged-patterns late vector.transfer to scf conversion.
-  MatmulCodegenStrategy &setVectorTransferToSCFOptions(
-      VectorTransferToSCFOptions options) {
-    vectorToSCFOptions = options;
-    return *this;
-  }
-  /// Configure the post staged-patterns late vector.transfer to scf conversion.
-  MatmulCodegenStrategy &setHoistInvariantCode(bool b) {
-    hoistInvariantCode = b;
-    return *this;
-  }
-
-  /// Apply the transformation patterns in sequence with cleanup transformations
-  /// interleaved.
-  void transform(FuncOp func) const;
-
-  /// Set a function applying the lowering strategy. Different target need to
-  /// use different lowering.
-  MatmulCodegenStrategy &setLoweringFunction(std::function<void(FuncOp)> f) {
-    lowering = f;
-    return *this;
-  }
-
-  /// Append a pattern to unroll a `VectorOpType` to smaller vector operations.
-  template <typename VectorOpType>
-  MatmulCodegenStrategy &unrollVector(ArrayRef<int64_t> targetShape) {
-    transformationSequence.emplace_back(
-        new UnrollVector<VectorOpType>(targetShape));
-    return *this;
-  }
-  /// Conditionally append a pattern to rewrite `LinalgOpType` as a vector
-  /// operation.
-  template <typename VectorOpType>
-  MatmulCodegenStrategy &unrollVectorIf(bool b, ArrayRef<int64_t> targetShape) {
-    return b ? unrollVector<VectorOpType>(targetShape) : *this;
-    return *this;
-  }
-
-  // Enable default lowering strategy for CPU.
-  MatmulCodegenStrategy &setDefaultCPULowering();
-
- private:
-  LogicalResult postPatternTransforms(Operation *func) const;
-
-  std::function<void(FuncOp)> lowering = nullptr;
-  bool hoistInvariantCode = false;
-  vector::VectorTransformsOptions vectorTransformsOptions;
-  VectorTransferToSCFOptions vectorToSCFOptions;
-  SmallVector<std::unique_ptr<Transformation>, 4> transformationSequence;
-};
-
-/// Perform folding of chains of AffineMinOp.
-struct AffineMinCanonicalizationPattern
-    : public mlir::OpRewritePattern<mlir::AffineMinOp> {
-  using OpRewritePattern<mlir::AffineMinOp>::OpRewritePattern;
-
-  mlir::LogicalResult matchAndRewrite(
-      mlir::AffineMinOp minOp, mlir::PatternRewriter &rewriter) const override;
-};
-}  // namespace mlir
-
-#endif  // IREE_COMPILER_CONVERSION_CODEGENUTILS_MATMULCODEGENSTRATEGY_H_
diff --git a/iree/compiler/Conversion/CodegenUtils/TransformUtils.cpp b/iree/compiler/Conversion/CodegenUtils/TransformUtils.cpp
new file mode 100644
index 0000000..6aaca75
--- /dev/null
+++ b/iree/compiler/Conversion/CodegenUtils/TransformUtils.cpp
@@ -0,0 +1,166 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// -----------------------------------------------------------------------------
+// This code will be removed once this gets upstreamed to common mlir.
+// Please limit changes to this code to minor ones only.
+
+#include "iree/compiler/Conversion/CodegenUtils/TransformUtils.h"
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Debug.h"
+#include "mlir/Analysis/SliceAnalysis.h"
+#include "mlir/Conversion/VectorToSCF/VectorToSCF.h"
+#include "mlir/Dialect/Affine/IR/AffineOps.h"
+#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
+#include "mlir/Dialect/Linalg/Transforms/Hoisting.h"
+#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
+#include "mlir/Dialect/Linalg/Utils/Utils.h"
+#include "mlir/Dialect/SCF/SCF.h"
+#include "mlir/Dialect/SCF/Utils.h"
+#include "mlir/Dialect/StandardOps/IR/Ops.h"
+#include "mlir/Dialect/Vector/EDSC/Intrinsics.h"
+#include "mlir/Dialect/Vector/VectorOps.h"
+#include "mlir/Dialect/Vector/VectorTransforms.h"
+#include "mlir/IR/AffineExpr.h"
+#include "mlir/IR/Attributes.h"
+#include "mlir/IR/BlockAndValueMapping.h"
+#include "mlir/IR/BuiltinTypes.h"
+#include "mlir/IR/Dominance.h"
+#include "mlir/IR/MLIRContext.h"
+#include "mlir/IR/OperationSupport.h"
+#include "mlir/IR/PatternMatch.h"
+#include "mlir/IR/Value.h"
+#include "mlir/IR/Visitors.h"
+#include "mlir/Pass/Pass.h"
+#include "mlir/Pass/PassManager.h"
+#include "mlir/Support/LogicalResult.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
+#include "mlir/Transforms/LoopUtils.h"
+#include "mlir/Transforms/Passes.h"
+
+using namespace mlir;          // NOLINT
+using namespace mlir::linalg;  // NOLINT
+
+#define DEBUG_TYPE "linalg-transform-utils"
+
+//===----------------------------------------------------------------------===//
+// TODO: Cleanup and upstream these to go into core. Please ignore for now !
+//===----------------------------------------------------------------------===//
+/// Substitute scf.for = %lb to %ub step %step by an AffineExpr expressing:
+///   `%lb + %step * new_dim` where
+/// 1. the AffineExpr for %lb is either an AffineConstantExpr or an
+/// AffineDimExpr depending on whether the value is constant or not.
+/// 2. the AffineExpr for %step is either an AffineConstantExpr or an
+/// AffineSymbolExpr depending on whether the value is constant or not.
+///
+static void substitute(scf::ForOp forOp, SmallVectorImpl<AffineExpr> &exprs,
+                       SmallVectorImpl<Value> &dims,
+                       SmallVectorImpl<Value> &symbols) {
+  MLIRContext *ctx = forOp.getContext();
+  auto lbConstant = forOp.lowerBound().getDefiningOp<ConstantIndexOp>();
+  AffineExpr lb = lbConstant ? getAffineConstantExpr(lbConstant.getValue(), ctx)
+                             : getAffineDimExpr(dims.size(), ctx);
+
+  auto stepConstant = forOp.step().getDefiningOp<ConstantIndexOp>();
+  AffineExpr step = stepConstant
+                        ? getAffineConstantExpr(stepConstant.getValue(), ctx)
+                        : getAffineSymbolExpr(symbols.size(), ctx);
+
+  if (!lbConstant) dims.push_back(forOp.lowerBound());
+  if (!stepConstant) symbols.push_back(forOp.step());
+  exprs.push_back(lb + step * getAffineDimExpr(dims.size(), ctx));
+
+  auto ubConstant = forOp.upperBound().getDefiningOp<ConstantIndexOp>();
+  AffineExpr ub = ubConstant ? getAffineConstantExpr(ubConstant.getValue(), ctx)
+                             : getAffineDimExpr(dims.size(), ctx);
+  if (!ubConstant) dims.push_back(forOp.upperBound());
+  exprs.push_back(ub);
+
+  dims.push_back(forOp.getInductionVar());
+}
+
+/// Substitute dimensions coming from forOp or AffineMin. Return false if it has
+/// unknown dimension operands.
+static bool substitute(AffineMinOp minOp, SmallVectorImpl<AffineExpr> &exprs,
+                       SmallVectorImpl<Value> &dims,
+                       SmallVectorImpl<Value> &symbols) {
+  if (minOp.getDimOperands().empty()) return false;
+  for (Value v : minOp.getDimOperands()) {
+    if (auto forOp = scf::getForInductionVarOwner(v)) {
+      substitute(forOp, exprs, dims, symbols);
+      continue;
+    }
+    if (auto parentMinOp = v.getDefiningOp<AffineMinOp>()) {
+      substitute(parentMinOp, exprs, dims, symbols);
+      continue;
+    }
+    // If we couldn't substitute the dimension, give up and use the original map.
+    return false;
+  }
+  return true;
+}
+
+LogicalResult AffineMinCanonicalizationPattern::matchAndRewrite(
+    AffineMinOp minOp, PatternRewriter &rewriter) const {
+  LLVM_DEBUG(llvm::dbgs() << "\nCanonicalize AffineMin: "
+                          << *minOp.getOperation() << "\n");
+
+  int64_t min = std::numeric_limits<int64_t>::max();
+  for (auto e : minOp.map().getResults())
+    if (auto cstExpr = e.dyn_cast<AffineConstantExpr>())
+      min = std::min(min, cstExpr.getValue());
+  if (min == std::numeric_limits<int64_t>::max()) return failure();
+
+  MLIRContext *ctx = minOp.getContext();
+  AffineMap map;
+  SmallVector<Value, 4> operands;
+  SmallVector<AffineExpr, 4> exprs;
+  SmallVector<Value, 4> dims, symbols;
+  if (substitute(minOp, exprs, dims, symbols)) {
+    operands = dims;
+    operands.append(symbols.begin(), symbols.end());
+
+    map = AffineMap::get(dims.size(), symbols.size(), exprs, ctx);
+    LLVM_DEBUG(llvm::dbgs() << "Substitution map: " << map << "\n");
+  } else {
+    map = minOp.getAffineMap();
+    operands = minOp.getDimOperands();
+    operands.append(minOp.getSymbolOperands().begin(),
+                    minOp.getSymbolOperands().end());
+  }
+  SmallVector<AffineExpr, 4> modExprs;
+  for (unsigned idx = 0, e = map.getNumResults(); idx < e; ++idx)
+    modExprs.push_back(getAffineDimExpr(idx, ctx) % min);
+  map = AffineMap::get(map.getNumResults(), 0, modExprs, ctx).compose(map);
+  canonicalizeMapAndOperands(&map, &operands);
+  map = simplifyAffineMap(map);
+
+  LLVM_DEBUG(llvm::dbgs() << "Post mod: " << map << "\n";
+             llvm::interleaveComma(operands, llvm::dbgs()));
+
+  if (!llvm::all_of(map.getResults(), [](AffineExpr e) {
+        if (auto cst = e.dyn_cast<AffineConstantExpr>())
+          return cst.getValue() == 0;
+        return false;
+      }))
+    return failure();
+
+  rewriter.replaceOpWithNewOp<ConstantIndexOp>(minOp, min);
+  return success();
+}
+//===----------------------------------------------------------------------===//
+// END TODO
+//===----------------------------------------------------------------------===//
diff --git a/iree/compiler/Conversion/CodegenUtils/TransformUtils.h b/iree/compiler/Conversion/CodegenUtils/TransformUtils.h
new file mode 100644
index 0000000..5648fcb
--- /dev/null
+++ b/iree/compiler/Conversion/CodegenUtils/TransformUtils.h
@@ -0,0 +1,39 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#ifndef IREE_COMPILER_CONVERSION_CODEGENUTILS_TRANSFORMUTILS_H_
+#define IREE_COMPILER_CONVERSION_CODEGENUTILS_TRANSFORMUTILS_H_
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "mlir/Conversion/VectorToSCF/VectorToSCF.h"
+#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
+#include "mlir/Dialect/Vector/VectorOps.h"
+#include "mlir/Dialect/Vector/VectorTransforms.h"
+#include "mlir/IR/BuiltinOps.h"
+#include "mlir/Support/LLVM.h"
+
+namespace mlir {
+
+/// Perform folding of chains of AffineMinOp.
+struct AffineMinCanonicalizationPattern
+    : public mlir::OpRewritePattern<mlir::AffineMinOp> {
+  using OpRewritePattern<mlir::AffineMinOp>::OpRewritePattern;
+
+  mlir::LogicalResult matchAndRewrite(
+      mlir::AffineMinOp minOp, mlir::PatternRewriter &rewriter) const override;
+};
+}  // namespace mlir
+
+#endif  // IREE_COMPILER_CONVERSION_CODEGENUTILS_TRANSFORMUTILS_H_
diff --git a/iree/compiler/Conversion/Common/Transforms.cpp b/iree/compiler/Conversion/Common/Transforms.cpp
index f82b904..d5f3573 100644
--- a/iree/compiler/Conversion/Common/Transforms.cpp
+++ b/iree/compiler/Conversion/Common/Transforms.cpp
@@ -23,11 +23,12 @@
 #include "iree/compiler/Conversion/CodegenUtils/FunctionUtils.h"
 #include "iree/compiler/Conversion/CodegenUtils/GetNumWorkgroups.h"
 #include "iree/compiler/Conversion/CodegenUtils/MarkerUtils.h"
-#include "iree/compiler/Conversion/CodegenUtils/MatmulCodegenStrategy.h"
+#include "iree/compiler/Conversion/CodegenUtils/TransformUtils.h"
 #include "iree/compiler/Conversion/Common/Attributes.h"
 #include "mlir/Dialect/GPU/GPUDialect.h"
 #include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h"
 #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
+#include "mlir/Dialect/Linalg/Transforms/CodegenStrategy.h"
 #include "mlir/Dialect/Linalg/Utils/Utils.h"
 #include "mlir/IR/BuiltinOps.h"
 #include "mlir/IR/PatternMatch.h"
diff --git a/iree/compiler/Conversion/LinalgToLLVM/LinalgTileAndDistributeOnTensorsPass.cpp b/iree/compiler/Conversion/LinalgToLLVM/LinalgTileAndDistributeOnTensorsPass.cpp
index d156538..fa52276 100644
--- a/iree/compiler/Conversion/LinalgToLLVM/LinalgTileAndDistributeOnTensorsPass.cpp
+++ b/iree/compiler/Conversion/LinalgToLLVM/LinalgTileAndDistributeOnTensorsPass.cpp
@@ -13,9 +13,10 @@
 // limitations under the License.
 
 #include "iree/compiler/Conversion/CodegenUtils/MarkerUtils.h"
-#include "iree/compiler/Conversion/CodegenUtils/MatmulCodegenStrategy.h"
+#include "iree/compiler/Conversion/CodegenUtils/TransformUtils.h"
 #include "iree/compiler/Dialect/HAL/IR/HALDialect.h"
 #include "iree/compiler/Dialect/HAL/IR/HALOps.h"
+#include "mlir/Dialect/Linalg/Transforms/CodegenStrategy.h"
 #include "mlir/Dialect/Linalg/Transforms/Transforms.h"
 #include "mlir/IR/Builders.h"
 #include "mlir/IR/MLIRContext.h"
@@ -50,7 +51,7 @@
     : public linalg::LinalgBaseTilingPattern {
   using Base = linalg::LinalgBaseTilingPattern;
   TileAndDistributeOnTensorsPattern(linalg::LinalgTilingOptions options,
-                                    linalg::LinalgMarker marker,
+                                    linalg::LinalgTransformationFilter marker,
                                     PatternBenefit benefit = 1)
       : Base(options, marker, benefit) {}
 
@@ -108,8 +109,9 @@
     // SPMD loops.
     patterns.insert<TileAndDistributeOnTensorsPattern>(
         linalgTilingOptions,
-        linalg::LinalgMarker(ArrayRef<Identifier>(),
-                             Identifier::get(getWorkgroupMarker(), context)));
+        linalg::LinalgTransformationFilter(
+            ArrayRef<Identifier>(),
+            Identifier::get(getWorkgroupMarker(), context)));
     // Add canonicalization patterns.
     linalg::populateLinalgTilingCanonicalizationPatterns(patterns, context);
     patterns.insert<AffineMinCanonicalizationPattern>(context);
diff --git a/iree/compiler/Conversion/LinalgToLLVM/LinalgTileAndDistributePass.cpp b/iree/compiler/Conversion/LinalgToLLVM/LinalgTileAndDistributePass.cpp
index 547ece7..cd1f2b5 100644
--- a/iree/compiler/Conversion/LinalgToLLVM/LinalgTileAndDistributePass.cpp
+++ b/iree/compiler/Conversion/LinalgToLLVM/LinalgTileAndDistributePass.cpp
@@ -15,12 +15,12 @@
 #include "iree/compiler/Conversion/CodegenUtils/FunctionUtils.h"
 #include "iree/compiler/Conversion/CodegenUtils/GetNumWorkgroups.h"
 #include "iree/compiler/Conversion/CodegenUtils/MarkerUtils.h"
-#include "iree/compiler/Conversion/CodegenUtils/MatmulCodegenStrategy.h"
 #include "iree/compiler/Conversion/Common/Attributes.h"
 #include "iree/compiler/Conversion/Common/Transforms.h"
 #include "iree/compiler/Conversion/LinalgToLLVM/KernelDispatch.h"
 #include "iree/compiler/Dialect/HAL/IR/HALDialect.h"
 #include "iree/compiler/Dialect/HAL/IR/HALOps.h"
+#include "mlir/Dialect/Linalg/Transforms/CodegenStrategy.h"
 #include "mlir/Dialect/Linalg/Transforms/Transforms.h"
 #include "mlir/IR/Builders.h"
 #include "mlir/IR/Matchers.h"
diff --git a/iree/compiler/Conversion/LinalgToLLVM/LinalgTileAndVectorizePass.cpp b/iree/compiler/Conversion/LinalgToLLVM/LinalgTileAndVectorizePass.cpp
index 98777c1..6e6a540 100644
--- a/iree/compiler/Conversion/LinalgToLLVM/LinalgTileAndVectorizePass.cpp
+++ b/iree/compiler/Conversion/LinalgToLLVM/LinalgTileAndVectorizePass.cpp
@@ -13,9 +13,10 @@
 // limitations under the License.
 
 #include "iree/compiler/Conversion/CodegenUtils/MarkerUtils.h"
-#include "iree/compiler/Conversion/CodegenUtils/MatmulCodegenStrategy.h"
+#include "iree/compiler/Conversion/CodegenUtils/TransformUtils.h"
 #include "iree/compiler/Conversion/LinalgToLLVM/KernelDispatch.h"
 #include "mlir/Conversion/StandardToSPIRV/StandardToSPIRV.h"
+#include "mlir/Dialect/Linalg/Transforms/CodegenStrategy.h"
 #include "mlir/Dialect/Linalg/Transforms/Hoisting.h"
 #include "mlir/IR/PatternMatch.h"
 #include "mlir/Pass/Pass.h"
@@ -31,7 +32,8 @@
 struct TileWorkgroups : public linalg::LinalgBaseTilingPattern {
   using Base = linalg::LinalgBaseTilingPattern;
   TileWorkgroups(MLIRContext *context, linalg::LinalgTilingOptions options,
-                 linalg::LinalgMarker marker, PatternBenefit benefit = 1)
+                 linalg::LinalgTransformationFilter marker,
+                 PatternBenefit benefit = 1)
       : Base(LinalgOpTy::getOperationName(), context, options, marker,
              benefit) {}
   LogicalResult matchAndRewrite(Operation *op,
@@ -77,7 +79,7 @@
               return TileSizeFn::get<TilingLevel::Level1Tiles>(
                   cpuKernelDispatch, builder, operation);
             }),
-        linalg::LinalgMarker(
+        linalg::LinalgTransformationFilter(
             Identifier::get(getWorkgroupMarker(), context),
             Identifier::get(getWorkgroupL1TileMarker(), context)));
 
@@ -96,7 +98,7 @@
               return TileSizeFn::get<TilingLevel::Level2Tiles>(
                   cpuKernelDispatch, builder, operation);
             }),
-        linalg::LinalgMarker(
+        linalg::LinalgTransformationFilter(
             Identifier::get(getWorkgroupL1TileMarker(), context),
             Identifier::get(getVectorizeMarker(), context)));
 
@@ -120,8 +122,9 @@
     vectorizationPatterns
         .insert<linalg::LinalgVectorizationPattern<linalg::MatmulOp>,
                 linalg::LinalgVectorizationPattern<linalg::BatchMatmulOp>>(
-            context, linalg::LinalgMarker(
-                         Identifier::get(getVectorizeMarker(), context)));
+            context, linalg::LinalgVectorizationOptions(),
+            linalg::LinalgTransformationFilter(
+                Identifier::get(getVectorizeMarker(), context)));
     applyPatternsAndFoldGreedily(funcOp, std::move(vectorizationPatterns));
   }
 
diff --git a/iree/compiler/Conversion/LinalgToLLVM/LinalgVectorizePass.cpp b/iree/compiler/Conversion/LinalgToLLVM/LinalgVectorizePass.cpp
index 00295e7..ea17d54 100644
--- a/iree/compiler/Conversion/LinalgToLLVM/LinalgVectorizePass.cpp
+++ b/iree/compiler/Conversion/LinalgToLLVM/LinalgVectorizePass.cpp
@@ -62,8 +62,9 @@
         .insert<linalg::LinalgVectorizationPattern<linalg::MatmulOp>,
                 linalg::LinalgVectorizationPattern<linalg::BatchMatmulOp>,
                 linalg::LinalgVectorizationPattern<linalg::GenericOp>>(
-            context, linalg::LinalgMarker(ArrayRef<Identifier>(
-                         Identifier::get(getWorkgroupMarker(), context))));
+            context, linalg::LinalgVectorizationOptions(),
+            linalg::LinalgTransformationFilter(ArrayRef<Identifier>(
+                Identifier::get(getWorkgroupMarker(), context))));
     applyPatternsAndFoldGreedily(funcOp, std::move(vectorizationPatterns));
 
     LLVM_DEBUG({
diff --git a/iree/compiler/Conversion/LinalgToLLVM/PlanConvLoopOrder.cpp b/iree/compiler/Conversion/LinalgToLLVM/PlanConvLoopOrder.cpp
index db898a4..5154426 100644
--- a/iree/compiler/Conversion/LinalgToLLVM/PlanConvLoopOrder.cpp
+++ b/iree/compiler/Conversion/LinalgToLLVM/PlanConvLoopOrder.cpp
@@ -38,10 +38,10 @@
   auto context = funcOp.getContext();
 
   auto marker = Identifier::get("generalized_from_conv", context);
-  linalg::LinalgMarker firstStepMarker(
+  linalg::LinalgTransformationFilter firstStepMarker(
       /*matchDisjunction=*/ArrayRef<Identifier>(),
       /*replacement=*/marker);
-  linalg::LinalgMarker secondStepMarker(
+  linalg::LinalgTransformationFilter secondStepMarker(
       /*matchDisjunction=*/marker,
       /*replacement=*/llvm::None);
 
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/LinalgTileAndFusePass.cpp b/iree/compiler/Conversion/LinalgToSPIRV/LinalgTileAndFusePass.cpp
index 0ee0b0f..735b91f 100644
--- a/iree/compiler/Conversion/LinalgToSPIRV/LinalgTileAndFusePass.cpp
+++ b/iree/compiler/Conversion/LinalgToSPIRV/LinalgTileAndFusePass.cpp
@@ -57,15 +57,15 @@
 //===----------------------------------------------------------------------===//
 
 /// Returns a Linalg marker that replaces existing markers.
-linalg::LinalgMarker getLinalgReplaceMarker(StringRef maker,
-                                            MLIRContext *context) {
-  return linalg::LinalgMarker(ArrayRef<Identifier>(),
-                              Identifier::get(maker, context));
+linalg::LinalgTransformationFilter getLinalgReplaceMarker(
+    StringRef maker, MLIRContext *context) {
+  return linalg::LinalgTransformationFilter(ArrayRef<Identifier>(),
+                                            Identifier::get(maker, context));
 }
 
 /// Returns a Linalg marker that matches any of the `matchMarkers` and replaces
 /// it with `replaceMarker`.
-linalg::LinalgMarker getLinalgMatchAndReplaceMarker(
+linalg::LinalgTransformationFilter getLinalgMatchAndReplaceMarker(
     ArrayRef<StringRef> matchMarkers, StringRef replaceMarker,
     MLIRContext *context) {
   SmallVector<Identifier, 2> markers;
@@ -73,7 +73,8 @@
   for (StringRef marker : matchMarkers) {
     markers.emplace_back(Identifier::get(marker, context));
   }
-  return linalg::LinalgMarker(markers, Identifier::get(replaceMarker, context));
+  return linalg::LinalgTransformationFilter(
+      markers, Identifier::get(replaceMarker, context));
 }
 
 /// Returns the distribution options for operations when targeting workgroups.
@@ -138,7 +139,7 @@
     : public linalg::LinalgPromotionPattern<linalg::MatmulOp> {
   PromoteMatmulSubviewsPattern(MLIRContext *context,
                                linalg::LinalgPromotionOptions options,
-                               linalg::LinalgMarker marker,
+                               linalg::LinalgTransformationFilter marker,
                                PatternBenefit benefit = 1)
       : linalg::LinalgPromotionPattern<linalg::MatmulOp>(
             context,
@@ -163,7 +164,7 @@
     : public linalg::LinalgPromotionPattern<linalg::ConvOp> {
   PromoteConvSubviewsPattern(MLIRContext *context,
                              linalg::LinalgPromotionOptions options,
-                             linalg::LinalgMarker marker,
+                             linalg::LinalgTransformationFilter marker,
                              PatternBenefit benefit = 1)
       : linalg::LinalgPromotionPattern<linalg::ConvOp>(
             context,
@@ -216,7 +217,7 @@
   using Base = linalg::LinalgTilingPattern<linalg::MatmulOp>;
   TileMatmulSubgroupPattern(MLIRContext *context,
                             linalg::LinalgTilingOptions options,
-                            linalg::LinalgMarker marker,
+                            linalg::LinalgTransformationFilter marker,
                             PatternBenefit benefit = 1)
       : Base(context, options, marker, benefit) {}
 };
@@ -329,8 +330,9 @@
                   linalg::LinalgVectorizationPattern<linalg::BatchMatmulOp>,
                   linalg::LinalgVectorizationPattern<linalg::FillOp>,
                   linalg::LinalgVectorizationPattern<linalg::GenericOp>>(
-      context,
-      linalg::LinalgMarker(Identifier::get(getVectorizeMarker(), context)));
+      context, linalg::LinalgVectorizationOptions(),
+      linalg::LinalgTransformationFilter(
+          Identifier::get(getVectorizeMarker(), context)));
 }
 
 //====---------------------------------------------------------------------===//
@@ -387,10 +389,10 @@
 // Patterns to tile convolution window dimensions
 //====---------------------------------------------------------------------===//
 
-static void populateTilingConvFilterPatterns(MLIRContext *context,
-                                             OwningRewritePatternList &patterns,
-                                             const LaunchConfig &launchConfig,
-                                             linalg::LinalgMarker marker) {
+static void populateTilingConvFilterPatterns(
+    MLIRContext *context, OwningRewritePatternList &patterns,
+    const LaunchConfig &launchConfig,
+    linalg::LinalgTransformationFilter marker) {
   auto getTileSizeFn = [&launchConfig](OpBuilder &builder, Operation *op) {
     SmallVector<Value, 4> tileSizes;
     ArrayRef<int64_t> fourthLevel = launchConfig.getTileSizes(op, 3);
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/MatMulVectorizationTest.cpp b/iree/compiler/Conversion/LinalgToSPIRV/MatMulVectorizationTest.cpp
index 9183933..2bd8f94 100644
--- a/iree/compiler/Conversion/LinalgToSPIRV/MatMulVectorizationTest.cpp
+++ b/iree/compiler/Conversion/LinalgToSPIRV/MatMulVectorizationTest.cpp
@@ -11,7 +11,7 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-#include "iree/compiler/Conversion/CodegenUtils/MatmulCodegenStrategy.h"
+#include "mlir/Dialect/Linalg/Transforms/CodegenStrategy.h"
 #include "mlir/IR/Builders.h"
 #include "mlir/Pass/Pass.h"
 #include "mlir/Pass/PassRegistry.h"
@@ -46,7 +46,7 @@
   FuncOp fn = getFunction();
   SmallVector<uint32_t, 3> vUnrollSize(unrollSize.begin(), unrollSize.end());
   if (vUnrollSize.size() != 3) signalPassFailure();
-  MatmulCodegenStrategy strategy;
+  linalg::CodegenStrategy strategy;
   strategy
       .tile<linalg::MatmulOp>(
           linalg::LinalgTilingOptions()
@@ -55,9 +55,11 @@
               //.setLoopType(linalg::LinalgTilingLoopType::ParallelLoops)
               .setTileSizes({wgTileSize, wgTileSize, wgTileSize}))
       .setHoistInvariantCode(enableLICM)
-      .vectorize<linalg::MatmulOp>()
-      .unrollVector<vector::ContractionOp>(
-          {vUnrollSize[0], vUnrollSize[1], vUnrollSize[2]});
+      .vectorize<linalg::LinalgOp>()
+      // TODO upstream to the core CodegenStrategy
+      // .unrollVector<vector::ContractionOp>(
+      //     {vUnrollSize[0], vUnrollSize[1], vUnrollSize[2]})
+      ;
   strategy.transform(fn);
 }
 
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/VectorToGPUPass.cpp b/iree/compiler/Conversion/LinalgToSPIRV/VectorToGPUPass.cpp
index b2d378b..8574774 100644
--- a/iree/compiler/Conversion/LinalgToSPIRV/VectorToGPUPass.cpp
+++ b/iree/compiler/Conversion/LinalgToSPIRV/VectorToGPUPass.cpp
@@ -22,13 +22,14 @@
 
 #include "iree/compiler/Conversion/CodegenUtils/FunctionUtils.h"
 #include "iree/compiler/Conversion/CodegenUtils/MarkerUtils.h"
-#include "iree/compiler/Conversion/CodegenUtils/MatmulCodegenStrategy.h"
+#include "iree/compiler/Conversion/CodegenUtils/TransformUtils.h"
 #include "iree/compiler/Conversion/LinalgToSPIRV/CooperativeMatrixAnalysis.h"
 #include "iree/compiler/Conversion/LinalgToSPIRV/Passes.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/Support/FormatVariadic.h"
 #include "mlir/Dialect/GPU/GPUDialect.h"
 #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
+#include "mlir/Dialect/Linalg/Transforms/CodegenStrategy.h"
 #include "mlir/Dialect/Linalg/Transforms/Transforms.h"
 #include "mlir/Dialect/SCF/SCF.h"
 #include "mlir/Dialect/SPIRV/IR/TargetAndABI.h"
@@ -195,8 +196,9 @@
   OwningRewritePatternList vectorizationPatterns;
   vectorizationPatterns
       .insert<linalg::LinalgVectorizationPattern<linalg::CopyOp>>(
-          context, linalg::LinalgMarker(
-                       Identifier::get(getVectorizeMarker(), context), {}));
+          context, linalg::LinalgVectorizationOptions(),
+          linalg::LinalgTransformationFilter(
+              Identifier::get(getVectorizeMarker(), context), {}));
   applyPatternsAndFoldGreedily(funcOp, std::move(vectorizationPatterns));
 }
 
diff --git a/iree/compiler/Dialect/Flow/Transforms/DispatchLinalgOnTensors.cpp b/iree/compiler/Dialect/Flow/Transforms/DispatchLinalgOnTensors.cpp
index 0fe9904..d001fa8 100644
--- a/iree/compiler/Dialect/Flow/Transforms/DispatchLinalgOnTensors.cpp
+++ b/iree/compiler/Dialect/Flow/Transforms/DispatchLinalgOnTensors.cpp
@@ -234,7 +234,7 @@
     : public linalg::LinalgBaseTilingPattern {
   using Base = linalg::LinalgBaseTilingPattern;
   TileAndDistributeOnTensorsPattern(linalg::LinalgTilingOptions options,
-                                    linalg::LinalgMarker marker,
+                                    linalg::LinalgTransformationFilter marker,
                                     PatternBenefit benefit = 1)
       : Base(options, marker, benefit) {}
 
@@ -418,8 +418,8 @@
   patterns.insert<TileAndDistributeOnTensorsPattern>(
       linalgTilingOptions,
       // TODO(nicolavasilache): use refactored `getWorkgroupMarker()`
-      linalg::LinalgMarker(ArrayRef<Identifier>(),
-                           Identifier::get("workgroup", context)));
+      linalg::LinalgTransformationFilter(
+          ArrayRef<Identifier>(), Identifier::get("workgroup", context)));
 
   // Add canonicalization patterns.
   linalg::populateLinalgTilingCanonicalizationPatterns(patterns, context);
diff --git a/iree/compiler/Dialect/HAL/IR/HALOps.td b/iree/compiler/Dialect/HAL/IR/HALOps.td
index 1b0bcb1..beb1a72 100644
--- a/iree/compiler/Dialect/HAL/IR/HALOps.td
+++ b/iree/compiler/Dialect/HAL/IR/HALOps.td
@@ -2334,7 +2334,7 @@
 
     /// Return the expected rank of each of the`static_offsets`, `static_sizes`
     /// and `static_strides` attributes.
-    std::array<unsigned, 3> getArrayAttrRanks() {
+    std::array<unsigned, 3> getArrayAttrMaxRanks() {
       unsigned resultRank = getResult().getType().cast<ShapedType>().getRank();
       return {resultRank, resultRank, resultRank};
     }
@@ -2412,7 +2412,7 @@
 
     /// Return the expected rank of each of the`static_offsets`, `static_sizes`
     /// and `static_strides` attributes.
-    std::array<unsigned, 3> getArrayAttrRanks() {
+    std::array<unsigned, 3> getArrayAttrMaxRanks() {
       unsigned rank = operand().getType().cast<ShapedType>().getRank();
       return {rank, rank, rank};
     }
diff --git a/third_party/llvm-bazel b/third_party/llvm-bazel
index 5e89086..77871f4 160000
--- a/third_party/llvm-bazel
+++ b/third_party/llvm-bazel
@@ -1 +1 @@
-Subproject commit 5e89086c53b991b0c72bd009e7fe852789967868
+Subproject commit 77871f43e449ad492bf8b94dee453670ac15e158
diff --git a/third_party/llvm-project b/third_party/llvm-project
index c85b6bf..b92a39a 160000
--- a/third_party/llvm-project
+++ b/third_party/llvm-project
@@ -1 +1 @@
-Subproject commit c85b6bf33c473633c9873b600f8a31fa55464e1e
+Subproject commit b92a39ac1319c796777bca19a3af2856acbc69c1
diff --git a/third_party/mlir-hlo b/third_party/mlir-hlo
index 30ce827..2b72ddc 160000
--- a/third_party/mlir-hlo
+++ b/third_party/mlir-hlo
@@ -1 +1 @@
-Subproject commit 30ce82790d4ffef48f70e99a6f96f13ddbe857d8
+Subproject commit 2b72ddc6b2b4d670bcd1ffa3f4652468b419f986
diff --git a/third_party/tensorflow b/third_party/tensorflow
index 9fd861b..16613a7 160000
--- a/third_party/tensorflow
+++ b/third_party/tensorflow
@@ -1 +1 @@
-Subproject commit 9fd861b9a1d777daef138a2e06d7f50b7ea63744
+Subproject commit 16613a70ef36b103e7c1ffa903d541814b62c109