[ModelBuilder] Add benchmark of vector.contract -> LLVM matrix intrinsics
This revision adds a new benchmark for column-major matrix multiplication, expressed as a vector.contract op and lowered through the LLVM matrix intrinsics.
It also disables running the benchmarks as tests, since they are flaky (timeouts may occur).
For this new benchmark I see the following numbers:
```
Run on (72 X 3700 MHz CPU s)
CPU Caches:
  L1 Data 32K (x36)
  L1 Instruction 32K (x36)
  L2 Unified 1024K (x36)
  L3 Unified 25344K (x2)
Load Average: 0.79, 1.02, 0.82
***WARNING*** CPU scaling is enabled, the benchmark real time measurements may be noisy and will incur extra overhead.
----------------------------------------------------------------------------------------------------------
Benchmark                                               Time            CPU   Iterations UserCounters...
----------------------------------------------------------------------------------------------------------
BM_MxMColMajorVectors<1, 1, 1, true, false>          6949471 ns     6942194 ns           99 NumMxM/Iter=1000
BM_MxMColMajorVectors<1, 1, 1, true, true>           6840072 ns     6839872 ns          102 NumMxM/Iter=1000
BM_MxMColMajorVectors<1, 1, 1, false, false>            1745 ns        1745 ns       402472 NumMxM/Iter=1000
BM_MxMColMajorVectors<1, 1, 1, false, true>             1763 ns        1763 ns       401647 NumMxM/Iter=1000
BM_MxMColMajorVectors<2, 2, 2, true, false>          9772526 ns     9771891 ns           72 NumMxM/Iter=1000
BM_MxMColMajorVectors<2, 2, 2, true, true>           8497043 ns     8496864 ns           82 NumMxM/Iter=1000
BM_MxMColMajorVectors<2, 2, 2, false, false>            4005 ns        4005 ns       174149 NumMxM/Iter=1000
BM_MxMColMajorVectors<2, 2, 2, false, true>             2068 ns        2067 ns       339627 NumMxM/Iter=1000
BM_MxMColMajorVectors<4, 4, 4, true, false>         19025263 ns    19023626 ns           37 NumMxM/Iter=1000
BM_MxMColMajorVectors<4, 4, 4, true, true>          12758627 ns    12757435 ns           55 NumMxM/Iter=1000
BM_MxMColMajorVectors<4, 4, 4, false, false>           23200 ns       23198 ns        29839 NumMxM/Iter=1000
BM_MxMColMajorVectors<4, 4, 4, false, true>             5125 ns        5125 ns       136724 NumMxM/Iter=1000
BM_MxMColMajorVectors<8, 8, 8, true, false>         33859980 ns    33856718 ns           21 NumMxM/Iter=1000
BM_MxMColMajorVectors<8, 8, 8, true, true>          32033194 ns    32030923 ns           22 NumMxM/Iter=1000
BM_MxMColMajorVectors<8, 8, 8, false, false>          192196 ns      192182 ns         3631 NumMxM/Iter=1000
BM_MxMColMajorVectors<8, 8, 8, false, true>            21039 ns       21037 ns        33215 NumMxM/Iter=1000
BM_MxMColMajorVectors<16, 16, 16, true, false>     250724875 ns   250700891 ns            3 NumMxM/Iter=1000
BM_MxMColMajorVectors<16, 16, 16, true, true>      162891413 ns   162871163 ns            4 NumMxM/Iter=1000
BM_MxMColMajorVectors<16, 16, 16, false, false>      1993352 ns     1993182 ns          341 NumMxM/Iter=1000
BM_MxMColMajorVectors<16, 16, 16, false, true>        243686 ns      243674 ns         2804 NumMxM/Iter=1000
```
PiperOrigin-RevId: 301858301
diff --git a/experimental/ModelBuilder/BUILD b/experimental/ModelBuilder/BUILD
index 11dab7f..e12c7d6 100644
--- a/experimental/ModelBuilder/BUILD
+++ b/experimental/ModelBuilder/BUILD
@@ -31,6 +31,7 @@
"@llvm-project//mlir:LinalgOps",
"@llvm-project//mlir:LinalgTransforms",
"@llvm-project//mlir:LoopOps",
+ "@llvm-project//mlir:LoopOpsTransforms",
"@llvm-project//mlir:StandardOps",
"@llvm-project//mlir:VectorOps",
],
@@ -62,6 +63,7 @@
"@llvm-project//mlir:TargetLLVMIR",
"@llvm-project//mlir:TransformUtils",
"@llvm-project//mlir:VectorOps",
+ "@llvm-project//mlir:VectorToLLVM",
"@llvm-project//mlir:mlir_c_runner_utils",
],
)
diff --git a/experimental/ModelBuilder/ModelRunner.cpp b/experimental/ModelBuilder/ModelRunner.cpp
index fd26c3b..e78d2f9 100644
--- a/experimental/ModelBuilder/ModelRunner.cpp
+++ b/experimental/ModelBuilder/ModelRunner.cpp
@@ -17,8 +17,8 @@
#include "llvm/Support/TargetSelect.h"
#include "mlir/Conversion/LinalgToLLVM/LinalgToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
+#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
#include "mlir/Dialect/Linalg/Passes.h"
-#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/ExecutionEngine/ExecutionEngine.h"
#include "mlir/ExecutionEngine/OptUtils.h"
#include "mlir/IR/PatternMatch.h"
@@ -35,7 +35,11 @@
};
static LLVMInitializer initializer;
-void mlir::ModelRunner::compile(int llvmOptLevel, int llcOptLevel,
+namespace llvm {
+extern Pass* createLowerMatrixIntrinsicsPass();
+} // end namespace llvm
+
+void mlir::ModelRunner::compile(CompilationOptions compilationOptions,
const std::string& runtime) {
// Lower vector operations progressively into more elementary
// vector operations before running the regular compiler passes.
@@ -43,8 +47,9 @@
OwningRewritePatternList patterns;
vector::populateVectorSlicesLoweringPatterns(patterns,
module->getContext());
- vector::populateVectorContractLoweringPatterns(patterns,
- module->getContext());
+ vector::populateVectorContractLoweringPatterns(
+ patterns, module->getContext(),
+ compilationOptions.vectorTransformsOptions);
mlir::applyPatternsGreedily(*module, patterns);
}
@@ -52,6 +57,7 @@
PassManager manager(module->getContext());
manager.addPass(mlir::createConvertLinalgToLoopsPass());
manager.addPass(mlir::createConvertLinalgToLLVMPass());
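+ // Lower the vector ops that remain after the progressive contract lowering
+ // (e.g. vector.matrix_multiply) into the LLVM dialect.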
+ manager.addPass(mlir::createConvertVectorToLLVMPass());
manager.addPass(mlir::createLowerToLLVMPass());
if (failed(manager.run(*module))) {
llvm::errs() << "conversion to the LLVM IR dialect failed\n";
@@ -67,10 +73,14 @@
if (!tmOrError) llvm::errs() << tmOrError.takeError() << "\n";
assert(tmOrError);
targetMachine = std::move(tmOrError.get());
+ // TODO(ntv): Looking up the pass by name fails quite surprisingly. Just build
+ // the pass to get its ID to look up the PassInfo.
+ const llvm::PassInfo* lowerMatrixIntrinsics = llvm::Pass::lookupPassInfo(
+ llvm::createLowerMatrixIntrinsicsPass()->getPassID());
+ assert(lowerMatrixIntrinsics);
+ SmallVector<const llvm::PassInfo*, 4> llvmPasses{lowerMatrixIntrinsics};
auto transformer = mlir::makeLLVMPassesTransformer(
- /*llvmPasses=*/{},
- llvmOptLevel == -1 ? llvm::Optional<unsigned>() : llvmOptLevel,
- targetMachine.get(),
+ llvmPasses, compilationOptions.llvmOptLevel, targetMachine.get(),
/*optPassesInsertPos=*/0);
// Pass in runtime support library when specified.
@@ -79,7 +89,8 @@
// Obtain the execution engine.
auto created = mlir::ExecutionEngine::create(
- *module, transformer, static_cast<llvm::CodeGenOpt::Level>(llcOptLevel),
+ *module, transformer,
+ static_cast<llvm::CodeGenOpt::Level>(compilationOptions.llcOptLevel),
libs,
/*enableObjectCache=*/true,
/*enableGDBNotificationListener=*/false);
diff --git a/experimental/ModelBuilder/ModelRunner.h b/experimental/ModelBuilder/ModelRunner.h
index 79c3908..52e4c3f 100644
--- a/experimental/ModelBuilder/ModelRunner.h
+++ b/experimental/ModelBuilder/ModelRunner.h
@@ -47,6 +47,7 @@
#ifndef IREE_EXPERIMENTAL_MODELBUILDER_MODELRUNNER_H_
#define IREE_EXPERIMENTAL_MODELBUILDER_MODELRUNNER_H_
+#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/ExecutionEngine/ExecutionEngine.h"
#include "mlir/IR/Module.h"
@@ -58,6 +59,13 @@
class ExecutionEngine;
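+// Options controlling compilation: optimization levels for the LLVM IR and
+// llc stages, and how vector operations (e.g. vector.contract) are lowered.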
+struct CompilationOptions {
+ unsigned llvmOptLevel = 3;
+ unsigned llcOptLevel = 3;
+ vector::VectorTransformsOptions vectorTransformsOptions =
+ vector::VectorTransformsOptions();
+};
+
class ModelRunner {
public:
// Initialize the runner with an OwningModuleRef, typically constructed with
@@ -69,9 +77,11 @@
// Compile the owned `module` into LLVMIR that can be passed to the buffer.
// For now, the MLIR passes and transformations are kept to a minimum and only
- // perform straightforward lowering to LLVMIR. An optional shared runtime
+ // perform straightforward lowering to LLVMIR.
+ // A CompilationOptions object controls which lowering and optimization
+ // passes are run. An optional shared runtime
// support library is passed to the execution engine.
- void compile(int llvmOptLevel, int llcOptLevel,
+ void compile(CompilationOptions compilationOptions,
const std::string &runtime = {});
// Reference to the compiled module.
diff --git a/experimental/ModelBuilder/test/BUILD b/experimental/ModelBuilder/test/BUILD
index b122d24..c3a7f92 100644
--- a/experimental/ModelBuilder/test/BUILD
+++ b/experimental/ModelBuilder/test/BUILD
@@ -23,15 +23,16 @@
iree_lit_test_suite(
name = "lit",
- srcs = glob(["*.cpp"]),
+ srcs = glob(
+ ["*.cpp"],
+ exclude = ["Bench*"],
+ ),
data = [
":runtime-support.so",
# Tests.
":test-mnist-jit",
":test-simple-jit",
":test-simple-mlir",
- # Benchmarks.
- ":bench-matmul-vector-jit",
# FileChecker.
"//iree/tools:IreeFileCheck",
],
@@ -89,7 +90,6 @@
)
# Benchmarks.
-
cc_binary(
name = "bench-matmul-vector-jit",
srcs = ["BenchMatMulVectorJIT.cpp"],
@@ -103,8 +103,23 @@
"@llvm-project//mlir:AllPassesAndDialects",
"@llvm-project//mlir:EDSC",
"@llvm-project//mlir:IR",
- "@llvm-project//mlir:LoopOps",
- "@llvm-project//mlir:LoopOpsTransforms",
+ ],
+)
+
+cc_binary(
+ name = "bench-matmul-vector-column-major-llvm-intrinsics-jit",
+ srcs = ["BenchMatMulVectorColumnMajorLLVMIntrinsicsJIT.cpp"],
+ copts = ["-O3"],
+ tags = [
+ "noga",
+ ],
+ deps = [
+ "//experimental/ModelBuilder",
+ "//experimental/ModelBuilder:ModelRunner",
+ "@com_google_benchmark//:benchmark:benchmark_main",
+ "@llvm-project//mlir:AllPassesAndDialects",
+ "@llvm-project//mlir:EDSC",
+ "@llvm-project//mlir:IR",
],
)
diff --git a/experimental/ModelBuilder/test/BenchMatMulVectorColumnMajorLLVMIntrinsicsJIT.cpp b/experimental/ModelBuilder/test/BenchMatMulVectorColumnMajorLLVMIntrinsicsJIT.cpp
new file mode 100644
index 0000000..30f8a3d
--- /dev/null
+++ b/experimental/ModelBuilder/test/BenchMatMulVectorColumnMajorLLVMIntrinsicsJIT.cpp
@@ -0,0 +1,164 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark/benchmark.h"
+#include "experimental/ModelBuilder/MemRefUtils.h"
+#include "experimental/ModelBuilder/ModelBuilder.h"
+#include "experimental/ModelBuilder/ModelRunner.h"
+
+using namespace mlir; // NOLINT
+
+// Helper method to construct the affine maps of a column-major matmul.
+static SmallVector<AffineMap, 3> makeColumnMajorMatmulMaps(ModelBuilder &mb) {
+ AffineExpr m, n, k;
+ bindDims(mb.getContext(), m, n, k);
+ SmallVector<AffineMap, 3> results;
+ results.push_back(AffineMap::get(3, 0, {k, m}));
+ results.push_back(AffineMap::get(3, 0, {n, k}));
+ results.push_back(AffineMap::get(3, 0, {n, m}));
+ return results;
+}
+
+// Helper method to build matrix-matrix-transposed multiplication.
+template <unsigned M, unsigned N, unsigned K, unsigned I>
+void buildMatMat(ModelBuilder &mb, StringLiteral fn) {
+ auto f32 = mb.f32;
+ auto kmVectorType = mb.getVectorType({K, M}, f32);
+ auto typeA = mb.getMemRefType({}, kmVectorType);
+ auto nkVectorType = mb.getVectorType({N, K}, f32);
+ auto typeB = mb.getMemRefType({}, nkVectorType);
+ auto nmVectorType = mb.getVectorType({N, M}, f32);
+ auto typeC = mb.getMemRefType({}, nmVectorType);
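+ // Each argument is a zero-D memref holding one 2-D vector; the kernel
+ // contracts whole vectors rather than looping over scalars.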
+
+ auto f = mb.makeFunction(fn, {}, {typeA, typeB, typeC});
+ OpBuilder b(&f.getBody());
+ ScopedContext scope(b, f.getLoc());
+
+ // Build the following accesses:
+ // affine_map<(m, n, k) -> (k, m)>,
+ // affine_map<(m, n, k) -> (n, k)>,
+ // affine_map<(m, n, k) -> (n, m)>
+ SmallVector<AffineMap, 4> accesses = makeColumnMajorMatmulMaps(mb);
+
+ // Build the following iterator types:
+ // iterator_types = ["parallel", "parallel", "reduction"]
+ SmallVector<Attribute, 4> iterator_types;
+ iterator_types.push_back(mb.getStringAttr("parallel"));
+ iterator_types.push_back(mb.getStringAttr("parallel"));
+ iterator_types.push_back(mb.getStringAttr("reduction"));
+
+ // Loop I times over the kernel to reduce the JIT's overhead.
+ auto loop =
+ b.create<loop::ForOp>(f.getLoc(), std_constant_index(0),
+ std_constant_index(I), std_constant_index(1));
+
+ OpBuilder bodyBuilder = loop.getBodyBuilder();
+ {
+ edsc::ScopedContext bodyScope(bodyBuilder, f.getLoc());
+ // Compute C += A x B, in column-major form, with LLVM matrix intrinsics.
+ StdIndexedValue A(f.getArgument(0)), B(f.getArgument(1)),
+ C(f.getArgument(2));
+ C() = (vector_contract(*A(), *B(), *C(), mb.getAffineMapArrayAttr(accesses),
+ mb.getArrayAttr(iterator_types)));
+ }
+
+ std_ret();
+}
+
+// Benchmark method.
+template <unsigned M, unsigned N, unsigned K, bool MeasureBuild,
+ bool LowerToLLVMMatrixIntrinsics>
+void BM_MxMColMajorVectors(benchmark::State &state) {
+ constexpr unsigned NumMxMPerIteration = 1000;
+ state.counters["NumMxM/Iter"] = NumMxMPerIteration;
+ // Column major vector types.
+ using TypeLHS = Vector2D<K, M, float>;
+ using TypeRHS = Vector2D<N, K, float>;
+ using TypeRES = Vector2D<N, M, float>;
+ // Prepare arguments beforehand.
+ auto oneInit = [](unsigned idx, TypeLHS *ptr) {
+ float *p = reinterpret_cast<float *>(ptr + idx);
+ for (unsigned i = 0; i < K * M; ++i) p[i] = 1.0f;
+ };
+ auto incInit = [](unsigned idx, TypeRHS *ptr) {
+ float *p = reinterpret_cast<float *>(ptr + idx);
+ for (unsigned i = 0; i < N * K; ++i) p[i] = 1.0f + i;
+ };
+ auto zeroInit = [](unsigned idx, TypeRES *ptr) {
+ float *p = reinterpret_cast<float *>(ptr + idx);
+ for (unsigned i = 0; i < M * N; ++i) p[i] = 0.0f;
+ };
+ auto A = makeInitializedStridedMemRefDescriptor<TypeLHS, 1>({1}, oneInit);
+ auto B = makeInitializedStridedMemRefDescriptor<TypeRHS, 1>({1}, incInit);
+ auto C = makeInitializedStridedMemRefDescriptor<TypeRES, 1>({1}, zeroInit);
+ auto *bufferA = A.get();
+ auto *bufferB = B.get();
+ auto *bufferC = C.get();
+ void *args[3] = {&bufferA, &bufferB, &bufferC};
+ StringLiteral funcName = "matmult_column_major";
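+ // ExecutionEngine::invoke calls the C-interface wrapper that MLIR emits
+ // under the `_mlir_ciface_` prefix, passing an array of pointers to the
+ // memref descriptors.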
+ const std::string kFuncAdapterName =
+ (llvm::Twine("_mlir_ciface_") + funcName).str();
+
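+ // Toggle between the default insert/extract-based contract lowering and
+ // the lowering to LLVM matrix intrinsics.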
+ vector::VectorTransformsOptions vectorTransformsOptions{
+ LowerToLLVMMatrixIntrinsics};
+ CompilationOptions compilationOptions{/*llvmOptLevel=*/3, /*llcOptLevel=*/3,
+ vectorTransformsOptions};
+ if (MeasureBuild) {
+ // If this is a build-time benchmark, build, compile, and execute
+ // the function inside the timed loop, building a fresh new function
+ // in each iteration to get the full JIT time (keep I == 1 here).
+ for (auto _ : state) {
+ ModelBuilder builder;
+ buildMatMat<M, N, K, 1>(builder, funcName);
+ ModelRunner runner(builder.getModuleRef());
+ runner.compile(compilationOptions);
+ auto err = runner.engine->invoke(kFuncAdapterName,
+ MutableArrayRef<void *>{args});
+ if (err) llvm_unreachable("Error compiling/running function.");
+ }
+ } else {
+ // If this is a run-time benchmark, build, compile, and execute
+ // the function once outside the timed loop, then continue running
+ // the same function inside the loop to focus on actual runtime
+ // (set I == NumIterations here to amortize calling overhead).
+ ModelBuilder builder;
+ buildMatMat<M, N, K, NumMxMPerIteration>(builder, funcName);
+ ModelRunner runner(builder.getModuleRef());
+ runner.compile(compilationOptions);
+ auto err =
+ runner.engine->invoke(kFuncAdapterName, MutableArrayRef<void *>{args});
+ if (err) llvm_unreachable("Error compiling/running function.");
+ for (auto _ : state) {
+ auto err_run = runner.engine->invoke(kFuncAdapterName,
+ MutableArrayRef<void *>{args});
+ if (err_run) llvm_unreachable("Error running function.");
+ }
+ }
+}
+
+//
+// Benchmark drivers (build).
+//
+
+#define BENCHMARK_MATMUL_COLUMN_MAJOR(SZ_M, SZ_N, SZ_K) \
+ BENCHMARK_TEMPLATE(BM_MxMColMajorVectors, SZ_M, SZ_N, SZ_K, true, false); \
+ BENCHMARK_TEMPLATE(BM_MxMColMajorVectors, SZ_M, SZ_N, SZ_K, true, true); \
+ BENCHMARK_TEMPLATE(BM_MxMColMajorVectors, SZ_M, SZ_N, SZ_K, false, false); \
+ BENCHMARK_TEMPLATE(BM_MxMColMajorVectors, SZ_M, SZ_N, SZ_K, false, true);
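+
+// Each size instantiates all four {MeasureBuild, LowerToLLVMMatrixIntrinsics}
+// combinations: build-time vs. run-time, default vs. matrix-intrinsics lowering.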
+
+BENCHMARK_MATMUL_COLUMN_MAJOR(1, 1, 1);
+BENCHMARK_MATMUL_COLUMN_MAJOR(2, 2, 2);
+BENCHMARK_MATMUL_COLUMN_MAJOR(4, 4, 4);
+BENCHMARK_MATMUL_COLUMN_MAJOR(8, 8, 8);
+BENCHMARK_MATMUL_COLUMN_MAJOR(16, 16, 16);
diff --git a/experimental/ModelBuilder/test/BenchMatMulVectorJIT.cpp b/experimental/ModelBuilder/test/BenchMatMulVectorJIT.cpp
index b2ae8c9..140d370 100644
--- a/experimental/ModelBuilder/test/BenchMatMulVectorJIT.cpp
+++ b/experimental/ModelBuilder/test/BenchMatMulVectorJIT.cpp
@@ -12,17 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// RUN: bench-matmul-vector-jit --benchmark_filter=all
-
#include "benchmark/benchmark.h"
#include "experimental/ModelBuilder/MemRefUtils.h"
#include "experimental/ModelBuilder/ModelBuilder.h"
#include "experimental/ModelBuilder/ModelRunner.h"
-#include "mlir/Dialect/LoopOps/EDSC/Builders.h"
-#include "mlir/EDSC/Builders.h"
-#include "mlir/EDSC/Intrinsics.h"
-#include "mlir/IR/Function.h"
-#include "mlir/IR/StandardTypes.h"
using namespace mlir; // NOLINT
@@ -121,7 +114,7 @@
ModelBuilder builder;
buildMatMat<M, N, K, 1>(builder, funcName);
ModelRunner runner(builder.getModuleRef());
- runner.compile(/*llvmOptLevel=*/3, /*llcOptLevel=*/3);
+ runner.compile(CompilationOptions());
auto err = runner.engine->invoke(kFuncAdapterName,
MutableArrayRef<void *>{args});
if (err) llvm_unreachable("Error compiling/running function.");
@@ -134,7 +127,7 @@
ModelBuilder builder;
buildMatMat<M, N, K, 1000>(builder, funcName);
ModelRunner runner(builder.getModuleRef());
- runner.compile(/*llvmOptLevel=*/3, /*llcOptLevel=*/3);
+ runner.compile(CompilationOptions());
auto err =
runner.engine->invoke(kFuncAdapterName, MutableArrayRef<void *>{args});
if (err) llvm_unreachable("Error compiling/running function.");
@@ -170,13 +163,10 @@
}
BENCHMARK(BM_Build_MatMul_8_8);
-// TODO(ajcbik): enable when faster
-#if 0
static void BM_Build_MatMul_16_16(benchmark::State &state) {
testMatMulUsingVectors<16, 16, 16>(state, "test_matmul_16_16_16", true);
}
BENCHMARK(BM_Build_MatMul_16_16);
-#endif
//
// Benchmark drivers (run).
diff --git a/experimental/ModelBuilder/test/TestMNISTJIT.cpp b/experimental/ModelBuilder/test/TestMNISTJIT.cpp
index 94712ac..7286d95 100644
--- a/experimental/ModelBuilder/test/TestMNISTJIT.cpp
+++ b/experimental/ModelBuilder/test/TestMNISTJIT.cpp
@@ -19,8 +19,6 @@
#include "experimental/ModelBuilder/ModelRunner.h"
// RunnerUtils.h with iostream needed for printMemRef atm
#include "mlir/ExecutionEngine/RunnerUtils.h"
-#include "mlir/IR/Function.h"
-#include "mlir/IR/StandardTypes.h"
using namespace mlir; // NOLINT
@@ -170,7 +168,7 @@
// 3. Compile the function.
ModelRunner runner(modelBuilder.getModuleRef());
- runner.compile(/*llvmOptLevel=*/3, /*llcOptLevel=*/3);
+ runner.compile(CompilationOptions());
// 4. Allocate data within data structures that interoperate with the MLIR ABI
// conventions used by codegen.
diff --git a/experimental/ModelBuilder/test/TestSimpleJIT.cpp b/experimental/ModelBuilder/test/TestSimpleJIT.cpp
index 5e30e15..d6914c5 100644
--- a/experimental/ModelBuilder/test/TestSimpleJIT.cpp
+++ b/experimental/ModelBuilder/test/TestSimpleJIT.cpp
@@ -24,10 +24,6 @@
#include "experimental/ModelBuilder/ModelRunner.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/InitLLVM.h"
-#include "mlir/EDSC/Builders.h"
-#include "mlir/EDSC/Intrinsics.h"
-#include "mlir/IR/Function.h"
-#include "mlir/IR/StandardTypes.h"
using namespace mlir; // NOLINT
@@ -66,7 +62,7 @@
// 2. Compile the function, pass in runtime support library
// to the execution engine for vector.print.
ModelRunner runner(modelBuilder.getModuleRef());
- runner.compile(/*llvmOptLevel=*/3, /*llcOptLevel=*/3, runtimeSupport);
+ runner.compile(CompilationOptions(), runtimeSupport);
// 3. Allocate data within data structures that interoperate with the MLIR ABI
// conventions used by codegen.
@@ -130,7 +126,7 @@
// 2. Compile the function, pass in runtime support library
// to the execution engine for vector.print.
ModelRunner runner(modelBuilder.getModuleRef());
- runner.compile(/*llvmOptLevel=*/3, /*llcOptLevel=*/3, runtimeSupport);
+ runner.compile(CompilationOptions(), runtimeSupport);
// 3. Allocate data within data structures that interoperate with the MLIR ABI
// conventions used by codegen.