[spirv] Delete experimental matmul vectorization code (#5638)
It is not integrated into the main SPIR-V pipeline and is effectively
unused.
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/BUILD b/iree/compiler/Conversion/LinalgToSPIRV/BUILD
index 7cbc3cc..936f3b6 100644
--- a/iree/compiler/Conversion/LinalgToSPIRV/BUILD
+++ b/iree/compiler/Conversion/LinalgToSPIRV/BUILD
@@ -40,7 +40,6 @@
"FoldGPUProcessorIDUses.cpp",
"KernelDispatchUtils.cpp",
"LinalgTileAndDistributePass.cpp",
- "MatMulVectorizationTest.cpp",
"Passes.cpp",
"SplitDispatchFunctionPass.cpp",
"TileAndVectorizeInOneWorkgroupPass.cpp",
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/CMakeLists.txt b/iree/compiler/Conversion/LinalgToSPIRV/CMakeLists.txt
index ea663f5..c4e1d0d 100644
--- a/iree/compiler/Conversion/LinalgToSPIRV/CMakeLists.txt
+++ b/iree/compiler/Conversion/LinalgToSPIRV/CMakeLists.txt
@@ -37,7 +37,6 @@
"FoldGPUProcessorIDUses.cpp"
"KernelDispatchUtils.cpp"
"LinalgTileAndDistributePass.cpp"
- "MatMulVectorizationTest.cpp"
"Passes.cpp"
"SplitDispatchFunctionPass.cpp"
"TileAndVectorizeInOneWorkgroupPass.cpp"
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/MatMulVectorizationTest.cpp b/iree/compiler/Conversion/LinalgToSPIRV/MatMulVectorizationTest.cpp
deleted file mode 100644
index 295a48e..0000000
--- a/iree/compiler/Conversion/LinalgToSPIRV/MatMulVectorizationTest.cpp
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#include "mlir/Dialect/Linalg/Transforms/CodegenStrategy.h"
-#include "mlir/IR/Builders.h"
-#include "mlir/Pass/Pass.h"
-#include "mlir/Pass/PassRegistry.h"
-
-static llvm::cl::opt<int> wgTileSize(
- "iree-codegen-linalg-to-gpu-wg-tile-size",
- llvm::cl::desc(
- "Specify the size of workgroup tile for matmul vector lowering"),
- llvm::cl::init(32));
-
-static llvm::cl::list<uint32_t> unrollSize(
- "iree-codegen-linalg-to-gpu-unroll-size",
- llvm::cl::desc("Specify the size of the "), llvm::cl::CommaSeparated);
-
-static llvm::cl::opt<bool> enableLICM(
- "iree-codegen-linalg-to-gpu-matmul-licm",
- llvm::cl::desc(
- "If true run LICM and hoisting passes after the staged transforms"),
- llvm::cl::init(true));
-
-namespace mlir {
-namespace iree_compiler {
-
-namespace {
-struct MatMulTileAndVectorizeGPUPass
- : PassWrapper<MatMulTileAndVectorizeGPUPass, FunctionPass> {
- void runOnFunction() override;
-};
-} // namespace
-
-void MatMulTileAndVectorizeGPUPass::runOnFunction() {
- FuncOp fn = getFunction();
- SmallVector<uint32_t, 3> vUnrollSize(unrollSize.begin(), unrollSize.end());
- if (vUnrollSize.size() != 3) signalPassFailure();
- linalg::CodegenStrategy strategy;
- strategy
- .tile<linalg::MatmulOp>(
- linalg::LinalgTilingOptions()
- // TODO(thomasraoux): Enable parallel loops once affine.min
- // canonicalize supports it.
- //.setLoopType(linalg::LinalgTilingLoopType::ParallelLoops)
- .setTileSizes({wgTileSize, wgTileSize, wgTileSize}))
- .setEnableLICM(enableLICM)
- .vectorize<linalg::MatmulOp>()
- // TODO upstream to the core CodegenStrategy
- // .unrollVector<vector::ContractionOp>(
- // {vUnrollSize[0], vUnrollSize[1], vUnrollSize[2]})
- ;
- strategy.transform(fn);
-}
-
-std::unique_ptr<FunctionPass> createMatMulTileAndVectorizeGPUPass() {
- return std::make_unique<MatMulTileAndVectorizeGPUPass>();
-}
-
-static PassRegistration<MatMulTileAndVectorizeGPUPass> pass(
- "iree-codegen-linalg-to-gpu-matmul-vectorization-pass",
- "Tile and vectorize linalg.matmul operation",
- [] { return std::make_unique<MatMulTileAndVectorizeGPUPass>(); });
-
-} // namespace iree_compiler
-} // namespace mlir
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/Passes.h b/iree/compiler/Conversion/LinalgToSPIRV/Passes.h
index 7214ed8..2faa5b8 100644
--- a/iree/compiler/Conversion/LinalgToSPIRV/Passes.h
+++ b/iree/compiler/Conversion/LinalgToSPIRV/Passes.h
@@ -63,9 +63,6 @@
/// cooperative matrix ops when possible.
std::unique_ptr<FunctionPass> createConvertVectorToCooperativeMatrixPass();
-/// Pass to apply tiling and vectorization transformations on linagl::MatMulOp.
-std::unique_ptr<FunctionPass> createMatMulTileAndVectorizeGPUPass();
-
/// Converts memref of scalar to memref of vector of efficent size. This will
/// allow to convert memory accesses to vector load/store in SPIR-V without
/// having pointer bitcast.
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/test/BUILD b/iree/compiler/Conversion/LinalgToSPIRV/test/BUILD
index 2ad8790..01321c5 100644
--- a/iree/compiler/Conversion/LinalgToSPIRV/test/BUILD
+++ b/iree/compiler/Conversion/LinalgToSPIRV/test/BUILD
@@ -36,7 +36,6 @@
"forop_canonicalization.mlir",
"materialize_launch_configuration.mlir",
"materialize_launch_configuration2.mlir",
- "matmul_vectorization_licm.mlir",
"pipeline_matmul_cooperative_matrix.mlir",
"pipeline_matmul_vectorization.mlir",
"promote_workgroup_memory.mlir",
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/test/CMakeLists.txt b/iree/compiler/Conversion/LinalgToSPIRV/test/CMakeLists.txt
index 2d60bba..fc9cf49 100644
--- a/iree/compiler/Conversion/LinalgToSPIRV/test/CMakeLists.txt
+++ b/iree/compiler/Conversion/LinalgToSPIRV/test/CMakeLists.txt
@@ -23,7 +23,6 @@
"forop_canonicalization.mlir"
"materialize_launch_configuration.mlir"
"materialize_launch_configuration2.mlir"
- "matmul_vectorization_licm.mlir"
"pipeline_matmul_cooperative_matrix.mlir"
"pipeline_matmul_vectorization.mlir"
"promote_workgroup_memory.mlir"
diff --git a/iree/compiler/Conversion/LinalgToSPIRV/test/matmul_vectorization_licm.mlir b/iree/compiler/Conversion/LinalgToSPIRV/test/matmul_vectorization_licm.mlir
deleted file mode 100644
index 223fe2b..0000000
--- a/iree/compiler/Conversion/LinalgToSPIRV/test/matmul_vectorization_licm.mlir
+++ /dev/null
@@ -1,21 +0,0 @@
-// RUN: iree-opt --iree-codegen-linalg-to-gpu-matmul-vectorization-pass
-// RUN: -split-input-file %s --iree-codegen-linalg-to-gpu-unroll-size=8,8,32 \
-// RUN: -iree-codegen-linalg-to-gpu-matmul-licm | IreeFileCheck %s
-
-// CHECK-LABEL: func @matmul_128x128x128
-// CHECK-SAME: (%[[ARG0:.+]]: memref<128x128xf32>, %[[ARG1:.+]]: memref<128x128xf32>, %[[ARG2:.+]]: memref<128x128xf32>)
-func @matmul_128x128x128(%arg0 : memref<128x128xf32>, %arg1: memref<128x128xf32>, %arg2: memref<128x128xf32>) {
- linalg.matmul ins(%arg0, %arg1 : memref<128x128xf32>, memref<128x128xf32>) outs(%arg2 : memref<128x128xf32>)
- return
-}
-
-// CHECK-DAG: %[[TILESIZE:.+]] = constant 32 : index
-// CHECK-DAG: %[[MATSIZE:.+]] = constant 128 : index
-// CHECK-DAG: %[[START:.+]] = constant 0 : index
-// CHECK: scf.for %[[IL:.+]] = %[[START]] to %[[MATSIZE]] step %[[TILESIZE]]
-// CHECK: scf.for %[[JL:.+]] = %[[START]] to %[[MATSIZE]] step %[[TILESIZE]]
-// CHECK: %[[SUBVVIEWC:.+]] = memref.subview %[[ARG2]][%[[IL]], %[[JL]]] [32, 32] [1, 1] : memref<128x128xf32> to memref<32x32xf32
-// CHECK: scf.for %[[KL:.+]] = %[[START]] to %[[MATSIZE]] step %[[TILESIZE]]
-// CHECK: %[[SUBVVIEWA:.+]] = memref.subview %[[ARG0]][%[[IL]], %[[KL]]] [32, 32] [1, 1] : memref<128x128xf32> to memref<32x32xf32
-// CHECK: %[[SUBVVIEWB:.+]] = memref.subview %[[ARG1]][%[[KL]], %[[JL]]] [32, 32] [1, 1] : memref<128x128xf32> to memref<32x32xf32
-
diff --git a/iree/compiler/Conversion/init_conversions.h b/iree/compiler/Conversion/init_conversions.h
index 9220eb6..f37e08f 100644
--- a/iree/compiler/Conversion/init_conversions.h
+++ b/iree/compiler/Conversion/init_conversions.h
@@ -70,7 +70,6 @@
createTileAndVectorizeInOneWorkgroupPass(SPIRVCodegenOptions());
createSplitDispatchFunctionPass();
createVectorToGPUPass();
- createMatMulTileAndVectorizeGPUPass();
createVectorizeMemrefLoadStorePass();
return true;
}();