Changes to comply with new LLVM APIs

- Rename the Std dialect EDSC intrinsics to their std_-prefixed forms
  (alloc -> std_alloc, constant_float -> std_constant_float, dealloc ->
  std_dealloc, dim -> std_dim, ret -> std_ret, tanh -> std_tanh).
- Call JIT-compiled functions through their C-compatible wrappers
  (symbols prefixed with _mlir_ciface_), delegating memref descriptor
  unpacking to generated code, and update the binaryop_test CHECKs for
  the expanded memref calling convention.
- Drive GPU-to-SPIR-V kernel lowering via the spv.entry_point_abi
  attribute instead of passing the workgroup size into
  populateGPUToSPIRVPatterns.
- Exclude pw_add.mlir from the LinalgToSPIRV lit test glob.

PiperOrigin-RevId: 294462207
diff --git a/experimental/ModelBuilder/BUILD b/experimental/ModelBuilder/BUILD
index 3389e7d..71e6fd5 100644
--- a/experimental/ModelBuilder/BUILD
+++ b/experimental/ModelBuilder/BUILD
@@ -25,11 +25,12 @@
         "noga",
     ],
     deps = [
-        "@llvm-project//llvm:support",
+        "@llvm-project//mlir:AffineOps",
         "@llvm-project//mlir:EDSC",
         "@llvm-project//mlir:IR",
         "@llvm-project//mlir:LinalgOps",
         "@llvm-project//mlir:LinalgTransforms",
+        "@llvm-project//mlir:StandardOps",
     ],
 )
 
diff --git a/experimental/ModelBuilder/ModelBuilder.cpp b/experimental/ModelBuilder/ModelBuilder.cpp
index b1d4278..8e03ab6 100644
--- a/experimental/ModelBuilder/ModelBuilder.cpp
+++ b/experimental/ModelBuilder/ModelBuilder.cpp
@@ -14,6 +14,7 @@
 
 #include "experimental/ModelBuilder/ModelBuilder.h"
 
+#include "mlir/Dialect/AffineOps/EDSC/Builders.h"
 #include "mlir/IR/StandardTypes.h"
 #include "mlir/IR/TypeUtilities.h"
 
@@ -33,8 +34,8 @@
       f32(FloatType::getF32(&ctx)) {}
 
 Value mlir::ModelBuilder::constant_f32(float v) {
-  return constant_float(llvm::APFloat(v),
-                        FloatType::getF32(ScopedContext::getContext()));
+  return std_constant_float(llvm::APFloat(v),
+                            FloatType::getF32(ScopedContext::getContext()));
 }
 
 FuncOp mlir::ModelBuilder::makeFunction(StringRef name, ArrayRef<Type> results,
@@ -59,10 +60,10 @@
 Value mlir::ModelBuilder::fusedBiasTanh(ValueHandle x, ValueHandle bias) {
   using edsc::op::operator+;
   using edsc::op::operator*;
-  using edsc::intrinsics::tanh;
+  using edsc::intrinsics::std_tanh;
   assert(x.getType().isF32() && bias.getType().isF32() && "f32 expected");
   ValueHandle half(constant_f32(0.5f));
-  return x + half * tanh((x + bias) * half) + half;
+  return x + half * std_tanh((x + bias) * half) + half;
 }
 
 ValueHandle mlir::ModelBuilder::FCBiasTanh(std::array<Value, 3> fcArgs,
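For context, the Std dialect EDSC intrinsics are now spelled with a `std_` prefix. Below is a minimal sketch of the new spelling, assuming an active `edsc::ScopedContext`; the helper is hypothetical and uses only names that appear in this change:

```cpp
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/EDSC/Builders.h"
#include "llvm/ADT/APFloat.h"

using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;

// Hypothetical helper mirroring constant_f32/fusedBiasTanh above; requires an
// active edsc::ScopedContext supplying an insertion point and location.
ValueHandle scaledTanh(ValueHandle x) {
  using edsc::op::operator*;
  // Was: constant_float(...) and tanh(...) before the rename.
  ValueHandle half(std_constant_float(
      llvm::APFloat(0.5f), FloatType::getF32(ScopedContext::getContext())));
  return half * std_tanh(x * half);
}
```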
diff --git a/experimental/ModelBuilder/ModelBuilder.h b/experimental/ModelBuilder/ModelBuilder.h
index e9c0a7b7..8dbfa37 100644
--- a/experimental/ModelBuilder/ModelBuilder.h
+++ b/experimental/ModelBuilder/ModelBuilder.h
@@ -47,6 +47,7 @@
 
 #include "mlir/Dialect/Linalg/EDSC/Builders.h"
 #include "mlir/Dialect/Linalg/EDSC/Intrinsics.h"
+#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
 #include "mlir/EDSC/Builders.h"
 #include "mlir/EDSC/Intrinsics.h"
 
@@ -62,11 +63,11 @@
 // From the Linalg Dialect.
 using edsc::intrinsics::linalg_fill;
 // From the Std Dialect.
-using edsc::intrinsics::alloc;
-using edsc::intrinsics::constant_float;
-using edsc::intrinsics::dealloc;
-using edsc::intrinsics::dim;
-using edsc::intrinsics::ret;
+using edsc::intrinsics::std_alloc;
+using edsc::intrinsics::std_constant_float;
+using edsc::intrinsics::std_dealloc;
+using edsc::intrinsics::std_dim;
+using edsc::intrinsics::std_ret;
 // -----------------------------------------------------------------------------
 
 // Entry point class to build a whole model declaratively with C++ EDSCs.
diff --git a/experimental/ModelBuilder/test/TestMNISTJIT.cpp b/experimental/ModelBuilder/test/TestMNISTJIT.cpp
index 0b582ba..be5ddd3 100644
--- a/experimental/ModelBuilder/test/TestMNISTJIT.cpp
+++ b/experimental/ModelBuilder/test/TestMNISTJIT.cpp
@@ -54,17 +54,17 @@
   OpBuilder b(&func.getBody());
   ScopedContext scope(b, func.getLoc());
   Value input = func.getArgument(0);
-  Value batchSize = dim(input, 0);
-  Value h1Weights = alloc(modelBuilder.getMemRefType({W0, W1}, f32));
-  Value h2Weights = alloc(modelBuilder.getMemRefType({W1, W2}, f32));
-  Value h3Weights = alloc(modelBuilder.getMemRefType({W2, W3}, f32));
-  Value bias1 = alloc(modelBuilder.getMemRefType({W1}, f32));
-  Value bias2 = alloc(modelBuilder.getMemRefType({W2}, f32));
-  Value bias3 = alloc(modelBuilder.getMemRefType({W3}, f32));
+  Value batchSize = std_dim(input, 0);
+  Value h1Weights = std_alloc(modelBuilder.getMemRefType({W0, W1}, f32));
+  Value h2Weights = std_alloc(modelBuilder.getMemRefType({W1, W2}, f32));
+  Value h3Weights = std_alloc(modelBuilder.getMemRefType({W2, W3}, f32));
+  Value bias1 = std_alloc(modelBuilder.getMemRefType({W1}, f32));
+  Value bias2 = std_alloc(modelBuilder.getMemRefType({W2}, f32));
+  Value bias3 = std_alloc(modelBuilder.getMemRefType({W3}, f32));
   Value outputBlock1 =
-      alloc(modelBuilder.getMemRefType({-1, W1}, f32), batchSize);
+      std_alloc(modelBuilder.getMemRefType({-1, W1}, f32), batchSize);
   Value outputBlock2 =
-      alloc(modelBuilder.getMemRefType({-1, W2}, f32), batchSize);
+      std_alloc(modelBuilder.getMemRefType({-1, W2}, f32), batchSize);
   Value outputBlock3 = func.getArgument(1);
 
   ValueHandle zero(modelBuilder.constant_f32(0.0f));
@@ -84,16 +84,16 @@
 
   // TODO(ntv): tensor->buffer, drop all alloc/fill/dealloc.
   // Vexing parses.
-  (dealloc(h1Weights));
-  (dealloc(h2Weights));
-  (dealloc(h3Weights));
-  (dealloc(bias1));
-  (dealloc(bias2));
-  (dealloc(bias3));
-  (dealloc(outputBlock1));
-  (dealloc(outputBlock2));
+  (std_dealloc(h1Weights));
+  (std_dealloc(h2Weights));
+  (std_dealloc(h3Weights));
+  (std_dealloc(bias1));
+  (std_dealloc(bias2));
+  (std_dealloc(bias3));
+  (std_dealloc(outputBlock1));
+  (std_dealloc(outputBlock2));
 
-  (ret());
+  (std_ret());
 }
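An aside on the parenthesized statements above (the "Vexing parses." comment): the EDSC intrinsics are, to the best of my knowledge, type aliases of builder class templates, so an unparenthesized `std_dealloc(h1Weights);` would be parsed as a declaration of a variable named `h1Weights` rather than a call. A generic C++ illustration, with `Builder` as a stand-in rather than an MLIR type:

```cpp
// Generic demonstration of the most vexing parse the comments refer to.
struct Builder {
  explicit Builder(int) {}
};

void demo() {
  int h = 0;
  // Builder(h);  // parsed as `Builder h;`: a (re)declaration, not a call.
  (Builder(h));   // extra parentheses force an expression (a temporary).
}
```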
 
 // Helper function to build a func `funcName` that takes tensors for the input
@@ -140,7 +140,7 @@
   auto outputBlock3 = modelBuilder.FCBiasTanhTensors(
       outputBlock3Type, {outputBlock2, h3Weights}, bias3);
   // Vexing parses.
-  (ret(outputBlock3));
+  (std_ret(outputBlock3));
 }
 
 int main() {
@@ -179,9 +179,13 @@
   ManagedUnrankedMemRefDescriptor outputBuffer =
       makeInitializedUnrankedDescriptor<float>({B, W3}, outputLinearInit);
 
-  // 5. Call the funcOp name `kFuncBuffersName` with arguments.
+  // 5. Call the function named `kFuncBuffersName` with arguments. Invoke the
+  // wrapped C-compatible function rather than the function defined above and
+  // delegate memref descriptor unpacking to generated code.
+  const std::string kFuncAdapterName =
+      (llvm::Twine("_mlir_ciface_") + kFuncBuffersName).str();
   void *args[2] = {&inputBuffer->descriptor, &outputBuffer->descriptor};
-  auto error = runner.engine->invoke(kFuncBuffersName,
+  auto error = runner.engine->invoke(kFuncAdapterName,
                                      llvm::MutableArrayRef<void *>{args});
 
   // 6. Dump content of output buffer for testing with FileCheck.
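For context, a sketch of the calling convention used in step 5: MLIR's LLVM lowering emits a C-compatible wrapper whose symbol is the original name prefixed with `_mlir_ciface_`, taking pointers to memref descriptors so the host passes `&descriptor` and generated code unpacks the fields. `ModelRunner` and `invokeBuffers` below are hypothetical stand-ins; `kFuncBuffersName` and the `invoke` call are as used above:

```cpp
#include <string>

#include "llvm/ADT/Twine.h"
#include "llvm/Support/Error.h"

// Hypothetical driver mirroring step 5; assumes `runner.engine` is an
// mlir::ExecutionEngine and the descriptors were built as in this test.
llvm::Error invokeBuffers(ModelRunner &runner, void *inputDesc,
                          void *outputDesc) {
  // The C-compatible wrapper prefixes the original symbol name.
  const std::string adapterName =
      (llvm::Twine("_mlir_ciface_") + kFuncBuffersName).str();
  void *args[2] = {inputDesc, outputDesc};
  return runner.engine->invoke(adapterName,
                               llvm::MutableArrayRef<void *>{args});
}
```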
diff --git a/iree/compiler/Dialect/HAL/Target/LLVM/test/binaryop_test.mlir b/iree/compiler/Dialect/HAL/Target/LLVM/test/binaryop_test.mlir
index 8be25c2..18e3a26 100644
--- a/iree/compiler/Dialect/HAL/Target/LLVM/test/binaryop_test.mlir
+++ b/iree/compiler/Dialect/HAL/Target/LLVM/test/binaryop_test.mlir
@@ -17,6 +17,7 @@
 // CHECK-SAME:     data = dense
 // CHECK-SAME:     format = 1280071245 : i32} {
 // CHECK-NEXT:     module {
-// CHECK-NEXT:       llvm.func @simpleMath_rgn_dispatch_0(%arg0: !llvm<"{ float*, float*, i64, [1 x i64], [1 x i64] }*">, %arg1: !llvm<"{ float*, float*, i64, [1 x i64], [1 x i64] }*">) attributes {iree.executable.export, iree.executable.workgroup_size = dense<[32, 1, 1]> : vector<3xi32>, iree.executable.workload = dense<[4, 1, 1]> : vector<3xi32>} {
-// CHECK-NEXT:          %0 = llvm.load %arg0 : !llvm<"{ float*, float*, i64, [1 x i64], [1 x i64] }*">
-// CHECK-NEXT:          %1 = llvm.load %arg1 : !llvm<"{ float*, float*, i64, [1 x i64], [1 x i64] }*">
+// CHECK-NEXT:       llvm.func @simpleMath_rgn_dispatch_0(
+// CHECK-SAME: %{{[a-zA-Z0-9_]*}}: !llvm<"float*">, %{{[a-zA-Z0-9_]*}}: !llvm<"float*">, %{{[a-zA-Z0-9_]*}}: !llvm.i64, %{{[a-zA-Z0-9_]*}}: !llvm.i64, %{{[a-zA-Z0-9_]*}}: !llvm.i64,
+// CHECK-SAME: %{{[a-zA-Z0-9_]*}}: !llvm<"float*">, %{{[a-zA-Z0-9_]*}}: !llvm<"float*">, %{{[a-zA-Z0-9_]*}}: !llvm.i64, %{{[a-zA-Z0-9_]*}}: !llvm.i64, %{{[a-zA-Z0-9_]*}}: !llvm.i64)
+// CHECK-SAME: attributes {iree.executable.export, iree.executable.workgroup_size = dense<[32, 1, 1]> : vector<3xi32>, iree.executable.workload = dense<[4, 1, 1]> : vector<3xi32>} {
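The removed CHECK lines matched the old convention, where each memref argument was a single pointer to a descriptor struct; the new lines match the expanded convention, where the descriptor's fields are passed as individual scalars. For a 1-D f32 memref that yields exactly the `float*, float*, i64, i64, i64` run matched above. A host-side sketch of the descriptor layout (field names follow MLIR's `StridedMemRefType` from CRunnerUtils, as best I recall):

```cpp
#include <cstdint>

// Host-side view of a rank-N strided memref descriptor; the expanded calling
// convention flattens these fields into separate kernel arguments.
template <typename T, int N>
struct StridedMemRef {
  T *basePtr;          // allocated pointer
  T *data;             // aligned pointer
  int64_t offset;      // offset into the aligned buffer, in elements
  int64_t sizes[N];    // one extent per dimension
  int64_t strides[N];  // one stride per dimension, in elements
};
```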
diff --git a/iree/compiler/Translation/SPIRV/LinalgToSPIRV/LowerToSPIRV.cpp b/iree/compiler/Translation/SPIRV/LinalgToSPIRV/LowerToSPIRV.cpp
index e84fd48..a0817f1 100644
--- a/iree/compiler/Translation/SPIRV/LinalgToSPIRV/LowerToSPIRV.cpp
+++ b/iree/compiler/Translation/SPIRV/LinalgToSPIRV/LowerToSPIRV.cpp
@@ -196,11 +196,16 @@
         });
     SPIRVTypeConverter typeConverter;
     OwningRewritePatternList patterns;
-    SmallVector<int64_t, 3> workGroupSize;
-    if (failed(getLegacyWorkGroupSize(funcOp, workGroupSize))) {
-      return;
-    }
-    populateGPUToSPIRVPatterns(context, typeConverter, patterns, workGroupSize);
+    SmallVector<int32_t, 3> workGroupSize;
+    if (failed(getLegacyWorkGroupSize(funcOp, workGroupSize))) return;
+
+    // Set spv.entry_point_abi on each kernel function to drive SPIR-V CodeGen.
+    StringRef abiAttrName = spirv::getEntryPointABIAttrName();
+    auto abiAttr = spirv::getEntryPointABIAttr(workGroupSize, context);
+    for (Operation *kernel : kernelModules)
+      kernel->setAttr(abiAttrName, abiAttr);
+
+    populateGPUToSPIRVPatterns(context, typeConverter, patterns);
     populateStandardToSPIRVPatterns(context, typeConverter, patterns);
 
     std::unique_ptr<ConversionTarget> target =
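A condensed recap of the new mechanism: the workgroup size is no longer threaded through populateGPUToSPIRVPatterns; instead it is recorded as an spv.entry_point_abi attribute that SPIR-V CodeGen reads later. The element type changes from int64_t to int32_t to match what spirv::getEntryPointABIAttr takes. `setKernelABI` and `kernelOp` below are illustrative stand-ins; the attribute helpers are the ones used above:

```cpp
// Illustrative: attach a 32x1x1 workgroup size to one kernel operation.
void setKernelABI(Operation *kernelOp, MLIRContext *context) {
  SmallVector<int32_t, 3> workGroupSize = {32, 1, 1};
  auto abiAttr = spirv::getEntryPointABIAttr(workGroupSize, context);
  kernelOp->setAttr(spirv::getEntryPointABIAttrName(), abiAttr);
  // Roughly renders in IR as:
  //   spv.entry_point_abi = {local_size = dense<[32, 1, 1]> : vector<3xi32>}
}
```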
diff --git a/iree/compiler/Translation/SPIRV/LinalgToSPIRV/test/BUILD b/iree/compiler/Translation/SPIRV/LinalgToSPIRV/test/BUILD
index f63a472..84bff16 100644
--- a/iree/compiler/Translation/SPIRV/LinalgToSPIRV/test/BUILD
+++ b/iree/compiler/Translation/SPIRV/LinalgToSPIRV/test/BUILD
@@ -23,7 +23,10 @@
 
 iree_lit_test_suite(
     name = "lit",
-    srcs = glob(["*.mlir"]),
+    srcs = glob(
+        ["*.mlir"],
+        exclude = ["pw_add.mlir"],
+    ),
     data = [
         "//iree/tools:IreeFileCheck",
         "//iree/tools:iree-opt",