Integrate LLVM at llvm/llvm-project@8cb234e07d46
Updates LLVM usage to match
[8cb234e07d46](https://github.com/llvm/llvm-project/commit/8cb234e07d46)
PiperOrigin-RevId: 399154862
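
The upstream LLVM change picked up here adds a `packing` flag to `linalg::PadTensorOp::createPadScalarOp`, so each call site in `PadLinalgOps.cpp` now passes `/*packing=*/false` to keep the existing pad-only behavior. Below is a minimal sketch of an updated call site, reusing the names from the diff (`rewriter`, `loc`, `lhs`, `lhsPaddedType`, `lhsPaddingValue`, `createPadding`, `paddingForM`, `paddingForK`); the exact parameter order beyond what the diff shows (low padding before high padding) is an assumption about the signature at this revision, not a definitive statement of the API.

```cpp
// Sketch of a call site after this integrate, assuming the surrounding
// pattern code from PadLinalgOps.cpp is in scope. The only behavioral
// change is the new boolean `packing` argument inserted before loc/rewriter.
Value paddedLhs = linalg::PadTensorOp::createPadScalarOp(
    lhsPaddedType, lhs, lhsPaddingValue,
    /*low=*/createPadding({0, 0}),                          // no leading pad
    /*high=*/createPadding({paddingForM, paddingForK}),     // pad M and K dims
    /*packing=*/false,  // new flag in this LLVM revision; false keeps old semantics
    loc, rewriter);
```
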
diff --git a/SUBMODULE_VERSIONS.txt b/SUBMODULE_VERSIONS.txt
index 1f16377..3b22381 100644
--- a/SUBMODULE_VERSIONS.txt
+++ b/SUBMODULE_VERSIONS.txt
@@ -4,7 +4,7 @@
aa533abfd4232b01f9e57041d70114d5a77e6de0 third_party/googletest
88b845dee001723c4a0db1fe5477de735b6d3bb0 third_party/liburing
acd6f6f014c25e46363e718381e0b35205df2d83 third_party/libyaml
-efb284c07e97776e01933f470afb5215a561db3e third_party/llvm-project
+8cb234e07d467ca96748d1abeadf1a40c22444bc third_party/llvm-project
0009c086a69fdb6d3dcfe3b256db427b4880d1f1 third_party/mlir-hlo
3f701faace7addc75d16dea8a6cd769fa5b3f260 third_party/musl
4c7697dbe973ed01ae6fbec37d186ebd05982e1f third_party/pybind11
diff --git a/iree/compiler/Dialect/Flow/Transforms/PadLinalgOps.cpp b/iree/compiler/Dialect/Flow/Transforms/PadLinalgOps.cpp
index 89380c1..19786fd 100644
--- a/iree/compiler/Dialect/Flow/Transforms/PadLinalgOps.cpp
+++ b/iree/compiler/Dialect/Flow/Transforms/PadLinalgOps.cpp
@@ -81,14 +81,16 @@
(paddingForM > 0 || paddingForK > 0)
? linalg::PadTensorOp::createPadScalarOp(
lhsPaddedType, lhs, lhsPaddingValue, createPadding({0, 0}),
- createPadding({paddingForM, paddingForK}), loc, rewriter)
+ createPadding({paddingForM, paddingForK}), /*packing=*/false,
+ loc, rewriter)
: lhs;
auto paddedrhs =
(paddingForK > 0 || paddingForN > 0)
? linalg::PadTensorOp::createPadScalarOp(
rhsPaddedType, rhs, rhsPaddingValue, createPadding({0, 0}),
- createPadding({paddingForK, paddingForN}), loc, rewriter)
+ createPadding({paddingForK, paddingForN}), /*packing=*/false,
+ loc, rewriter)
: rhs;
// Padding for K-dim only result doesn't change result size.
@@ -105,7 +107,8 @@
loc, rewriter.getZeroAttr(resultType.getElementType()));
Value paddedResult = linalg::PadTensorOp::createPadScalarOp(
newResultType, result, resultPaddingValue, createPadding({0, 0}),
- createPadding({paddingForM, paddingForN}), loc, rewriter);
+ createPadding({paddingForM, paddingForN}), /*packing=*/false, loc,
+ rewriter);
auto paddedMatmulOp =
cast<linalg::LinalgOp>(matmulOp.getOperation())
.clone(rewriter, loc, {newResultType},
diff --git a/third_party/llvm-project b/third_party/llvm-project
index efb284c..8cb234e 160000
--- a/third_party/llvm-project
+++ b/third_party/llvm-project
@@ -1 +1 @@
-Subproject commit efb284c07e97776e01933f470afb5215a561db3e
+Subproject commit 8cb234e07d467ca96748d1abeadf1a40c22444bc