Integrate llvm-project @266a5a9cb9daa96c1eeaebc18e10f5a37d638734 (#17911)

Bump llvm-project to
https://github.com/llvm/llvm-project/commit/266a5a9cb9daa96c1eeaebc18e10f5a37d638734

Update torch-mlir in IREE third_party (TODO: bump torch-mlir and update
to the bumped submodule):
- Updated all uses of `linalg::MatmulUnsignedOp` to `linalg::MatmulOp`
with TypeFnAttr

---------

Signed-off-by: aviator19941 <avinash.sharma@amd.com>
Signed-off-by: Max Dawkins <max.dawkins@gmail.com>
Co-authored-by: Max Dawkins <max.dawkins@gmail.com>
diff --git a/compiler/plugins/target/LLVMCPU/LLVMTargetOptions.cpp b/compiler/plugins/target/LLVMCPU/LLVMTargetOptions.cpp
index b8c2513..a9da1c9 100644
--- a/compiler/plugins/target/LLVMCPU/LLVMTargetOptions.cpp
+++ b/compiler/plugins/target/LLVMCPU/LLVMTargetOptions.cpp
@@ -49,11 +49,8 @@
     }
     outCpu = triple.isX86() ? llvm::sys::getHostCPUName().str() : "";
     llvm::SubtargetFeatures features;
-    llvm::StringMap<bool> hostFeatures;
-    if (llvm::sys::getHostCPUFeatures(hostFeatures)) {
-      for (auto &feature : hostFeatures) {
-        features.AddFeature(feature.first(), feature.second);
-      }
+    for (auto &feature : llvm::sys::getHostCPUFeatures()) {
+      features.AddFeature(feature.first(), feature.second);
     }
     outCpuFeatures = features.getString();
   } else {
diff --git a/compiler/src/iree/compiler/GlobalOptimization/OptimizeNumerics.cpp b/compiler/src/iree/compiler/GlobalOptimization/OptimizeNumerics.cpp
index e4758ff..329354e 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/OptimizeNumerics.cpp
+++ b/compiler/src/iree/compiler/GlobalOptimization/OptimizeNumerics.cpp
@@ -239,22 +239,15 @@
         castNumeric(rhsParams->producer, rhsLowPType, isSigned, rewriter);
     Value newAccum =
         castNumeric(accumParams->producer, accumLowPType, isSigned, rewriter);
-    Value newResult;
 
-    if (isSigned) {
-      newResult = rewriter
-                      .create<linalg::MatmulOp>(loc, ValueRange{newLhs, newRhs},
-                                                ValueRange{newAccum})
-                      .getResult(0);
-    } else {
-      newResult = rewriter
-                      .create<linalg::MatmulUnsignedOp>(
-                          loc, ValueRange{newLhs, newRhs}, ValueRange{newAccum})
-                      .getResult(0);
+    auto newMatmulOp = rewriter.create<linalg::MatmulOp>(
+        loc, ValueRange{newLhs, newRhs}, ValueRange{newAccum});
+    if (!isSigned) {
+      newMatmulOp.setCast(linalg::TypeFn::cast_unsigned);
     }
-
     // Cast back.
-    newResult = castNumeric(newResult, origResultType, isSigned, rewriter);
+    Value newResult = castNumeric(newMatmulOp.getResult(0), origResultType,
+                                  isSigned, rewriter);
     rewriter.replaceOp(matmulOp, ValueRange{newResult});
 
     return success();
diff --git a/compiler/src/iree/compiler/GlobalOptimization/RaiseSpecialOps.cpp b/compiler/src/iree/compiler/GlobalOptimization/RaiseSpecialOps.cpp
index 879ec00..8c267e2 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/RaiseSpecialOps.cpp
+++ b/compiler/src/iree/compiler/GlobalOptimization/RaiseSpecialOps.cpp
@@ -13,6 +13,7 @@
 #include "iree/compiler/GlobalOptimization/Passes.h"
 #include "iree/compiler/GlobalOptimization/Utils.h"
 #include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Casting.h"
 #include "mlir/Dialect/Affine/IR/AffineOps.h"
 #include "mlir/Dialect/Arith/IR/Arith.h"
 #include "mlir/Dialect/Linalg/IR/Linalg.h"
@@ -316,8 +317,11 @@
           // Signed operations can only be folded with (implicitly) signed
           // linalg named ops
           if (llvm::isa<arith::ExtSIOp>(*castOp)) {
-            return !llvm::isa<linalg::MatmulUnsignedOp,
-                              linalg::PoolingNhwcMaxUnsignedOp,
+            if (auto matmul =
+                    llvm::dyn_cast<linalg::MatmulOp>(namedOp.getOperation())) {
+              return matmul.getCast() != linalg::TypeFn::cast_unsigned;
+            }
+            return !llvm::isa<linalg::PoolingNhwcMaxUnsignedOp,
                               linalg::PoolingNhwcMinUnsignedOp,
                               linalg::PoolingNwcMaxUnsignedOp,
                               linalg::PoolingNwcMinUnsignedOp>(namedOp);
diff --git a/compiler/src/iree/compiler/GlobalOptimization/test/optimize_numerics.mlir b/compiler/src/iree/compiler/GlobalOptimization/test/optimize_numerics.mlir
index f31bb33..51bc2c3 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/test/optimize_numerics.mlir
+++ b/compiler/src/iree/compiler/GlobalOptimization/test/optimize_numerics.mlir
@@ -8,7 +8,7 @@
   %lhs = util.numeric.optional_narrow %arg0 : tensor<5x3xf32> as ui7 {max_value = 127 : ui7, min_value = 0 : ui7}
   %rhs = util.numeric.optional_narrow %arg1 : tensor<3x1xf32> as ui7 {max_value = 127 : ui7, min_value = 0 : ui7}
   %init = util.numeric.optional_narrow %arg2 : tensor<5x1xf32> as ui0
-  // CHECK: %[[RESULT:.*]] = linalg.matmul_unsigned ins(%[[LHS]], %[[RHS]] : tensor<5x3xi8>, tensor<3x1xi8>) outs(%[[INIT]] : tensor<5x1xi32>)
+  // CHECK: %[[RESULT:.*]] = linalg.matmul {cast = #linalg.type_fn<cast_unsigned>} ins(%[[LHS]], %[[RHS]] : tensor<5x3xi8>, tensor<3x1xi8>) outs(%[[INIT]] : tensor<5x1xi32>)
   %2 = linalg.matmul ins(%lhs, %rhs : tensor<5x3xf32>, tensor<3x1xf32>) outs(%init : tensor<5x1xf32>) -> tensor<5x1xf32>
   // CHECK: arith.uitofp %[[RESULT]] : tensor<5x1xi32> to tensor<5x1xf32>
   util.return %2 : tensor<5x1xf32>
diff --git a/compiler/src/iree/compiler/GlobalOptimization/test/raise_special_ops.mlir b/compiler/src/iree/compiler/GlobalOptimization/test/raise_special_ops.mlir
index 9dc25b7..a1cd2d6 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/test/raise_special_ops.mlir
+++ b/compiler/src/iree/compiler/GlobalOptimization/test/raise_special_ops.mlir
@@ -678,14 +678,14 @@
   %2 = tensor.empty() : tensor<10x40xi32>
   %3 = arith.constant 0 : i32
   %4 = linalg.fill ins(%3 : i32) outs(%2 : tensor<10x40xi32>) -> tensor<10x40xi32>
-  %5 = linalg.matmul_unsigned ins(%arg0, %1 : tensor<10x20xi32>, tensor<20x40xi32>)
+  %5 = linalg.matmul {cast = #linalg.type_fn<cast_unsigned>} ins(%arg0, %1 : tensor<10x20xi32>, tensor<20x40xi32>)
       outs(%4 : tensor<10x40xi32>) -> tensor<10x40xi32>
   util.return %5 : tensor<10x40xi32>
 }
 // CHECK-LABEL: util.func public @unsigned_matmul_extsi
 //  CHECK-SAME:     %[[ARG0:.+]]: tensor<10x20xi32>
 //       CHECK:   %[[GEN:.+]] = linalg.generic
-//       CHECK:   %[[RESULT:.+]] = linalg.matmul_unsigned ins(%[[ARG0]], %[[GEN]]
+//       CHECK:   %[[RESULT:.+]] = linalg.matmul {cast = #linalg.type_fn<cast_unsigned>} ins(%[[ARG0]], %[[GEN]]
 //       CHECK:   util.return %[[RESULT]]
 
 // -----
diff --git a/tests/e2e/stablehlo_ops/BUILD.bazel b/tests/e2e/stablehlo_ops/BUILD.bazel
index 799ca19..4d8a31a 100644
--- a/tests/e2e/stablehlo_ops/BUILD.bazel
+++ b/tests/e2e/stablehlo_ops/BUILD.bazel
@@ -88,6 +88,11 @@
     ],
     driver = "local-task",
     input_type = "stablehlo",
+    tags = [
+        # round_nearest_afz/fmodf fail with a wasm target, just disable all tests there for now
+        #   undefined symbol: round_nearest_afz/fmodf
+        "nowasm",
+    ],
     target_backend = "llvm-cpu",
 )
 
diff --git a/tests/e2e/stablehlo_ops/CMakeLists.txt b/tests/e2e/stablehlo_ops/CMakeLists.txt
index fc8f20e..eeae196 100644
--- a/tests/e2e/stablehlo_ops/CMakeLists.txt
+++ b/tests/e2e/stablehlo_ops/CMakeLists.txt
@@ -83,6 +83,8 @@
     "--iree-input-demote-f64-to-f32"
   INPUT_TYPE
     "stablehlo"
+  LABELS
+    "nowasm"
 )
 
 iree_check_single_backend_test_suite(
diff --git a/third_party/llvm-project b/third_party/llvm-project
index c9f6518..1f11b9f 160000
--- a/third_party/llvm-project
+++ b/third_party/llvm-project
@@ -1 +1 @@
-Subproject commit c9f6518f742c88bda309d5331e0a5d4664387f94
+Subproject commit 1f11b9fed2337ea24d137ff82fec75bddcd85b3c
diff --git a/third_party/torch-mlir b/third_party/torch-mlir
index 5e4f00a..2f239ac 160000
--- a/third_party/torch-mlir
+++ b/third_party/torch-mlir
@@ -1 +1 @@
-Subproject commit 5e4f00acb13f3f849a05e5ac28ee39307a5fdbff
+Subproject commit 2f239acda84814ff94bbe9061789ea151d5d25fd