[stablehlo] Update StableHLO to 0b7ecf3e353843746adcbc7763f86348a3d4ed9b (#17166)
Pulled third_party package `stablehlo` to this revision:
https://github.com/openxla/stablehlo/commit/0b7ecf3e353843746adcbc7763f86348a3d4ed9b
Modified a convolution test where the parameters mixed f32 and bf16. The
test now uses f32 for all parameters, since rule C27 of the StableHLO spec:
https://github.com/openxla/stablehlo/blob/main/docs/spec.md#convolution
makes it illegal for the operands of a convolution to have different
element types.
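For reference, a minimal sketch of the legal pattern (shapes taken from the
test change below; the function name is hypothetical): both bf16 operands are
converted to a common element type before the convolution, satisfying C27:

```mlir
// Hypothetical standalone example mirroring the test fix: convert both
// bf16 operands to f32 so the convolution's operands share an element type.
func.func @conv_same_element_type(%lhs: tensor<16x32x256xbf16>,
                                  %rhs: tensor<1x256x256xbf16>) -> tensor<16x32x256xf32> {
  %lhs_f32 = stablehlo.convert %lhs : (tensor<16x32x256xbf16>) -> tensor<16x32x256xf32>
  %rhs_f32 = stablehlo.convert %rhs : (tensor<1x256x256xbf16>) -> tensor<1x256x256xf32>
  // Both operands are now f32; mixing f32 and bf16 here would violate C27.
  %0 = "stablehlo.convolution"(%lhs_f32, %rhs_f32) {
    batch_group_count = 1 : i64,
    dimension_numbers = #stablehlo.conv<[b, 0, f]x[0, i, o]->[b, 0, f]>,
    feature_group_count = 1 : i64,
    rhs_dilation = array<i64: 1>,
    window_strides = array<i64: 1>
  } : (tensor<16x32x256xf32>, tensor<1x256x256xf32>) -> tensor<16x32x256xf32>
  func.return %0 : tensor<16x32x256xf32>
}
```

The update also adapts the StableHLO-to-Linalg conversions to accessor
changes in the new revision: the call sites switch from `getLhsDilationAttr()`
and `getRhsDilationAttr()` to `getLhsDilation()` and `getRhsDilation()`, which
appear to now yield `std::optional<ArrayRef<int64_t>>` rather than a
`DenseI64ArrayAttr`, and the transpose permutation attribute needs a
`dyn_cast_or_null<DenseI64ArrayAttr>` before being passed to
`linalg::TransposeOp`.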
diff --git a/compiler/plugins/input/StableHLO/Conversion/Preprocessing/test/stablehlo_to_stablehlo.mlir b/compiler/plugins/input/StableHLO/Conversion/Preprocessing/test/stablehlo_to_stablehlo.mlir
index 972067f..998d9ee 100644
--- a/compiler/plugins/input/StableHLO/Conversion/Preprocessing/test/stablehlo_to_stablehlo.mlir
+++ b/compiler/plugins/input/StableHLO/Conversion/Preprocessing/test/stablehlo_to_stablehlo.mlir
@@ -344,10 +344,11 @@
// CHECK-LABEL: @convolution
// CHECK-SAME: (%[[ARG0:.+]]: tensor<{{.+}}xbf16>, %[[ARG1:.+]]: tensor<{{.+}}xbf16>)
func.func @convolution(%arg0: tensor<16x32x256xbf16>, %arg1: tensor<1x256x256xbf16>) -> tensor<16x32x256xf32> {
- %cast = stablehlo.convert %arg0 : (tensor<16x32x256xbf16>) -> tensor<16x32x256xf32>
+ %cast0 = stablehlo.convert %arg0 : (tensor<16x32x256xbf16>) -> tensor<16x32x256xf32>
+ %cast1 = stablehlo.convert %arg1 : (tensor<1x256x256xbf16>) -> tensor<1x256x256xf32>
// CHECK: %[[CONV:.+]] = stablehlo.convolution(%[[ARG0]], %[[ARG1]])
// CHECK-SAME: -> tensor<16x32x256xf32>
- %0 = "stablehlo.convolution"(%cast, %arg1) {
+ %0 = "stablehlo.convolution"(%cast0, %cast1) {
batch_group_count = 1 : i64,
dimension_numbers = #stablehlo.conv<[b, 0, f]x[0, i, o]->[b, 0, f]>,
feature_group_count = 1 : i64,
@@ -356,7 +357,7 @@
precision_config = [#stablehlo<precision DEFAULT>, #stablehlo<precision DEFAULT>],
rhs_dilation = array<i64: 1>,
window_strides = array<i64: 1>
- } : (tensor<16x32x256xf32>, tensor<1x256x256xbf16>) -> tensor<16x32x256xf32>
+ } : (tensor<16x32x256xf32>, tensor<1x256x256xf32>) -> tensor<16x32x256xf32>
// CHECK: return %[[CONV]]
func.return %0 : tensor<16x32x256xf32>
}
diff --git a/compiler/plugins/input/StableHLO/Conversion/StableHLOToLinalg.cpp b/compiler/plugins/input/StableHLO/Conversion/StableHLOToLinalg.cpp
index 32b54da..a6dbc11 100644
--- a/compiler/plugins/input/StableHLO/Conversion/StableHLOToLinalg.cpp
+++ b/compiler/plugins/input/StableHLO/Conversion/StableHLOToLinalg.cpp
@@ -868,7 +868,8 @@
Value emptyTensor =
getEmptyTensorFor(rewriter, loc, resultTy, op, adaptor.getOperands());
- auto permutation = op.getPermutationAttr();
+ auto permutation =
+ op.getPermutationAttr().dyn_cast_or_null<DenseI64ArrayAttr>();
rewriter.replaceOpWithNewOp<linalg::TransposeOp>(
op, adaptor.getOperand(), emptyTensor, permutation,
diff --git a/compiler/plugins/input/StableHLO/Conversion/StableHLOToLinalgConvolution.cpp b/compiler/plugins/input/StableHLO/Conversion/StableHLOToLinalgConvolution.cpp
index 04ced37..f51fcd6 100644
--- a/compiler/plugins/input/StableHLO/Conversion/StableHLOToLinalgConvolution.cpp
+++ b/compiler/plugins/input/StableHLO/Conversion/StableHLOToLinalgConvolution.cpp
@@ -19,12 +19,12 @@
/// Apply dilation and padding to the input of a convolution.
Value applyConvolutionPadding(Location loc, Value input,
DenseIntElementsAttr padding,
- DenseI64ArrayAttr lhsDilation,
+ std::optional<ArrayRef<int64_t>> lhsDilation,
llvm::ArrayRef<int64_t> dimMappings,
OpBuilder &rewriter) {
SmallVector<int64_t> lhsDilationValues;
- if (lhsDilation)
- lhsDilationValues = llvm::to_vector(lhsDilation.asArrayRef());
+ if (lhsDilation.has_value())
+ lhsDilationValues = llvm::to_vector(lhsDilation.value());
bool noPadding = !padding || isSplatValue(padding, 0);
bool noDilation = !lhsDilation || hlo::isSplatArray(lhsDilationValues, 1);
if (noPadding && noDilation)
@@ -224,7 +224,7 @@
llvm::SmallVector<int64_t> spatialDimMapping(rank - 2);
std::iota(spatialDimMapping.begin(), spatialDimMapping.end(), 1);
input = applyConvolutionPadding(loc, input, op.getPaddingAttr(),
- op.getLhsDilationAttr(), spatialDimMapping,
+ op.getLhsDilation(), spatialDimMapping,
rewriter);
switch (rank) {
@@ -345,10 +345,10 @@
// Decompose the convolution into an initial padding
Value modifiedLhs = applyConvolutionPadding(
op.getLoc(), adaptor.getLhs(), adaptor.getPaddingAttr(),
- adaptor.getLhsDilationAttr(),
+ adaptor.getLhsDilation(),
op.getDimensionNumbers().getInputSpatialDimensions(), rewriter);
Value modifiedRhs = applyConvolutionPadding(
- op.getLoc(), adaptor.getRhs(), nullptr, adaptor.getRhsDilationAttr(),
+ op.getLoc(), adaptor.getRhs(), nullptr, adaptor.getRhsDilation(),
op.getDimensionNumbers().getKernelSpatialDimensions(), rewriter);
modifiedRhs = applyConvolutionReversal(loc, rewriter, op, modifiedRhs);
@@ -640,7 +640,7 @@
llvm::SmallVector<int64_t> spatialDimMapping(spatialRank);
std::iota(spatialDimMapping.begin(), spatialDimMapping.end(), 1);
input = applyConvolutionPadding(loc, input, op.getPaddingAttr(),
- op.getLhsDilationAttr(), spatialDimMapping,
+ op.getLhsDilation(), spatialDimMapping,
rewriter);
auto filterDims =
diff --git a/third_party/stablehlo b/third_party/stablehlo
index 341e063..0b7ecf3 160000
--- a/third_party/stablehlo
+++ b/third_party/stablehlo
@@ -1 +1 @@
-Subproject commit 341e063f0924fc1350538dc53a92c21ec5e022a3
+Subproject commit 0b7ecf3e353843746adcbc7763f86348a3d4ed9b