Remove mhlo.conv padding preprocessing.
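
This drops the ExtractConvOpPaddingAttributes pattern, its extract-pad-from-conv
pass option, and the corresponding lit test. Before this change, the pattern
rewrote an mhlo.convolution that carried a padding attribute into an explicit
mhlo.pad feeding an unpadded convolution; roughly (abbreviated from the removed
test, with dimension_numbers and other attributes elided for brevity):

  // Input: convolution carrying a padding attribute.
  %0 = "mhlo.convolution"(%inputs, %weights) {
    padding = dense<[[1, 1], [0, 1]]> : tensor<2x2xi64>, ...
  } : (tensor<1x4x5x2xf32>, tensor<3x2x2x1xf32>) -> tensor<1x4x5x1xf32>

  // Rewritten form: padding made explicit on the LHS.
  %zero = arith.constant dense<0.000000e+00> : tensor<f32>
  %padded = "mhlo.pad"(%inputs, %zero) {
    edge_padding_low = dense<[0, 1, 0, 0]> : tensor<4xi64>,
    edge_padding_high = dense<[0, 1, 1, 0]> : tensor<4xi64>,
    interior_padding = dense<0> : tensor<4xi64>
  } : (tensor<1x4x5x2xf32>, tensor<f32>) -> tensor<1x6x6x2xf32>
  %1 = "mhlo.convolution"(%padded, %weights) { ... /* no padding attribute */ }
    : (tensor<1x6x6x2xf32>, tensor<3x2x2x1xf32>) -> tensor<1x4x5x1xf32>

After this change, mhlo.convolution ops keep their padding attribute through
the -iree-mhlo-to-mhlo-preprocessing pass.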
diff --git a/iree/compiler/InputConversion/MHLO/MHLOToMHLOPreprocessing.cpp b/iree/compiler/InputConversion/MHLO/MHLOToMHLOPreprocessing.cpp
index 571ea7f..b20d1b5 100644
--- a/iree/compiler/InputConversion/MHLO/MHLOToMHLOPreprocessing.cpp
+++ b/iree/compiler/InputConversion/MHLO/MHLOToMHLOPreprocessing.cpp
@@ -79,71 +79,6 @@
.getResult();
 }

-class ExtractConvOpPaddingAttributes : public OpRewritePattern<mhlo::ConvOp> {
- public:
- using OpRewritePattern<mhlo::ConvOp>::OpRewritePattern;
-
- LogicalResult matchAndRewrite(mhlo::ConvOp op,
- PatternRewriter &rewriter) const override {
- if (!hasPadding(op)) return failure();
- auto inputType = op.lhs().getType().cast<ShapedType>();
- int rank = inputType.getRank();
-
- // TODO(suderman): Add proper support for padding + dilation for codegen.
- // We can't extract padding if the left hand side has dilation.
- if (op.lhs_dilation().hasValue()) {
- for (auto val : op.lhs_dilation().getValue().getValues<APInt>()) {
- if (val != 1) {
- return failure();
- }
- }
- }
-
- SmallVector<int64_t, 4> paddingLow, paddingHigh, interiorPadding, shape;
- paddingLow.append(rank, 0);
- paddingHigh.append(rank, 0);
- interiorPadding.append(rank, 0);
- for (auto iter :
- llvm::enumerate(op.dimension_numbers().getInputSpatialDimensions())) {
- unsigned idx = iter.index();
- unsigned dim = iter.value();
- paddingLow[dim] = op.paddingAttr().getValues<int64_t>()[{idx, 0}];
- paddingHigh[dim] = op.paddingAttr().getValues<int64_t>()[{idx, 1}];
- }
- for (unsigned i = 0; i < rank; ++i) {
- // mhlo.pad doesn't support dynamic shape.
- if (inputType.isDynamicDim(i)) return failure();
- int size = inputType.getShape()[i];
- shape.push_back(size + paddingLow[i] + paddingHigh[i]);
- }
-
- auto toDenseAttr = [&rewriter](ArrayRef<int64_t> elements) {
- return DenseIntElementsAttr::get(
- RankedTensorType::get(elements.size(), rewriter.getIntegerType(64)),
- elements);
- };
-
- auto loc = op.getLoc();
- auto padResultType =
- RankedTensorType::get(shape, inputType.getElementType());
- Attribute zeroAttr = rewriter.getZeroAttr(
- RankedTensorType::get({}, inputType.getElementType()));
- auto zero = rewriter.create<arith::ConstantOp>(loc, zeroAttr);
- auto padOp = rewriter.create<mhlo::PadOp>(
- loc, padResultType, op.lhs(), zero, toDenseAttr(paddingLow),
- toDenseAttr(paddingHigh), toDenseAttr(interiorPadding));
- auto resultType = op.getResult().getType();
- auto newOp = rewriter.create<mhlo::ConvOp>(
- op.getLoc(), resultType, padOp.getResult(), op.rhs(),
- op.window_stridesAttr(), /*padding=*/nullptr, op.lhs_dilationAttr(),
- op.rhs_dilationAttr(), /*window_reversal=*/nullptr,
- op.dimension_numbersAttr(), op.feature_group_countAttr(),
- op.batch_group_countAttr(), op.precision_configAttr());
- rewriter.replaceOp(op, newOp.getResult());
- return success();
- }
-};
-
// Guarantee that the input dimensions are ordered batch, spatial_dims, feature
// dim.
class ReorderConvOpInputDimensions : public OpRewritePattern<mhlo::ConvOp> {
@@ -987,9 +922,6 @@
ReorderBroadcastInDimOpAndElementwiseOp<mhlo::AndOp>,
ReorderBroadcastInDimOpAndElementwiseOp<mhlo::OrOp>,
ReorderBroadcastInDimOpAndElementwiseOp<mhlo::XorOp>>(context);
- if (extractPadFromConv) {
- patterns.insert<ExtractConvOpPaddingAttributes>(context);
- }
if (orderConvFeatures) {
patterns.insert<ReorderConvOpInputDimensions>(context);
patterns.insert<ReorderConvOpKernelDimensions>(context);
diff --git a/iree/compiler/InputConversion/MHLO/Passes.td b/iree/compiler/InputConversion/MHLO/Passes.td
index aa633a8..4cf16ae 100644
--- a/iree/compiler/InputConversion/MHLO/Passes.td
+++ b/iree/compiler/InputConversion/MHLO/Passes.td
@@ -40,8 +40,6 @@
let summary = "Apply mhlo to mhlo transformations for some mhlo ops";
let constructor = "mlir::iree_compiler::MHLO::createMHLOToMHLOPreprocessingPass()";
let options = [
- Option<"extractPadFromConv", "extract-pad-from-conv", "bool", /*default=*/"true",
- "Extract padding attributes from conv op">,
Option<"orderConvFeatures", "order-conv-features", "bool", /*default=*/"true",
"Guarantees input/output features ordered from conv kernel">
];
diff --git a/iree/compiler/InputConversion/MHLO/test/BUILD b/iree/compiler/InputConversion/MHLO/test/BUILD
index d0fbcbf..8e14a4d 100644
--- a/iree/compiler/InputConversion/MHLO/test/BUILD
+++ b/iree/compiler/InputConversion/MHLO/test/BUILD
@@ -29,7 +29,6 @@
"legalize_input_types.mlir",
"mhlo_to_mhlo_preprocessing.mlir",
"mhlo_to_mhlo_preprocessing_canoncalize_dot_general.mlir",
- "mhlo_to_mhlo_preprocessing_extract_pad_from_conv.mlir",
"missing_legalizations.mlir",
"verify_compiler_mhlo_input_legality.mlir",
],
diff --git a/iree/compiler/InputConversion/MHLO/test/CMakeLists.txt b/iree/compiler/InputConversion/MHLO/test/CMakeLists.txt
index f95c73d..96e60e4 100644
--- a/iree/compiler/InputConversion/MHLO/test/CMakeLists.txt
+++ b/iree/compiler/InputConversion/MHLO/test/CMakeLists.txt
@@ -24,7 +24,6 @@
"legalize_input_types.mlir"
"mhlo_to_mhlo_preprocessing.mlir"
"mhlo_to_mhlo_preprocessing_canoncalize_dot_general.mlir"
- "mhlo_to_mhlo_preprocessing_extract_pad_from_conv.mlir"
"missing_legalizations.mlir"
"verify_compiler_mhlo_input_legality.mlir"
TOOLS
diff --git a/iree/compiler/InputConversion/MHLO/test/mhlo_to_mhlo_preprocessing_extract_pad_from_conv.mlir b/iree/compiler/InputConversion/MHLO/test/mhlo_to_mhlo_preprocessing_extract_pad_from_conv.mlir
deleted file mode 100644
index 427c37f..0000000
--- a/iree/compiler/InputConversion/MHLO/test/mhlo_to_mhlo_preprocessing_extract_pad_from_conv.mlir
+++ /dev/null
@@ -1,29 +0,0 @@
-// RUN: iree-opt -iree-mhlo-to-mhlo-preprocessing %s | FileCheck %s
-
-// CHECK-LABEL: @conv
-// CHECK: mhlo.pad
-// CHECK-SAME: edge_padding_high = dense<[0, 1, 1, 0]>
-// CHECK-SAME: edge_padding_low = dense<[0, 1, 0, 0]>
-// CHECK: mhlo.convolution
-// CHECK-NOT: padding
-func @conv(%inputs: tensor<1x4x5x2xf32>, %weights: tensor<3x2x2x1xf32>) -> tensor<1x4x5x1xf32> {
- %0 = "mhlo.convolution"(%inputs, %weights) {
- batch_group_count = 1 : i64,
- dimension_numbers = #mhlo.conv<raw
- input_batch_dimension = 0,
- input_feature_dimension = 3,
- input_spatial_dimensions = [1, 2],
- kernel_input_feature_dimension = 2,
- kernel_output_feature_dimension = 3,
- kernel_spatial_dimensions = [0, 1],
- output_batch_dimension = 0,
- output_feature_dimension = 3,
- output_spatial_dimensions = [1, 2]
- >,
- feature_group_count = 1 : i64,
- padding = dense<[[1, 1], [0, 1]]> : tensor<2x2xi64>,
- rhs_dilation = dense<1> : tensor<2xi64>,
- window_strides = dense<1> : tensor<2xi64>} :
- (tensor<1x4x5x2xf32>, tensor<3x2x2x1xf32>) -> tensor<1x4x5x1xf32>
- return %0 : tensor<1x4x5x1xf32>
-}