[Codegen][DT] Remove tensor.pad logic entirely from materialization. (#18130)

Materialization now always creates a zero padding value for the generated
`tensor.pack` instead of trying to fold a producer `tensor.pad` into the pack.
The `@pad_gemm` test is removed from `materialize_encoding_into_nop.mlir`
because the special case it exercised no longer exists; we have good test
coverage in other test cases.
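
For illustration, here is a minimal sketch of the affected pattern. The
`#encoding` attribute and maps are copied from the existing tests; the function
name, the `8x4` inner tiles, and the concrete shapes are illustrative,
target-dependent values, not output of any particular backend:

```mlir
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#encoding = #iree_encoding.encoding<operand_index = 0, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2]>
func.func @set_encoding_with_pad(%src : tensor<100x250xf32>) -> tensor<104x252xf32, #encoding> {
  %cst = arith.constant 0.0 : f32
  // Producer pad that materialization used to fold away when
  // `round_dims_to` was absent.
  %padded = tensor.pad %src low[0, 0] high[4, 2] {
    ^bb0(%i : index, %j : index):
      tensor.yield %cst : f32
  } : tensor<100x250xf32> to tensor<104x252xf32>
  %enc = iree_encoding.set_encoding %padded : tensor<104x252xf32> -> tensor<104x252xf32, #encoding>
  return %enc : tensor<104x252xf32, #encoding>
}
```

Before this change, `lowerSetEncodingOpToPackOp` would fold the pad away when
it had zero low padding and a constant padding value: it packed `%src`
directly and reused `%cst` as the `tensor.pack` padding value. Now the
`tensor.pad` is left in place and the `set_encoding` always lowers to
something like:

```mlir
// Pack the padded source with a freshly created zero padding value.
%zero = arith.constant 0.000000e+00 : f32
%dest = tensor.empty() : tensor<13x63x8x4xf32>
%pack = tensor.pack %padded padding_value(%zero : f32)
    inner_dims_pos = [0, 1] inner_tiles = [8, 4]
    into %dest : tensor<104x252xf32> -> tensor<13x63x8x4xf32>
```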

---------

Signed-off-by: hanhanW <hanhan0912@gmail.com>
diff --git a/compiler/src/iree/compiler/Codegen/Common/MaterializeEncodingIntoPackUnPack.cpp b/compiler/src/iree/compiler/Codegen/Common/MaterializeEncodingIntoPackUnPack.cpp
index 2281ca7..c3fec88 100644
--- a/compiler/src/iree/compiler/Codegen/Common/MaterializeEncodingIntoPackUnPack.cpp
+++ b/compiler/src/iree/compiler/Codegen/Common/MaterializeEncodingIntoPackUnPack.cpp
@@ -188,27 +188,9 @@
 // to `pack` and `unpack` operations respectively.
 //===---------------------------------------------------------------------===//
 
-/// Utility method to get the optional padding value to use with pack operation
-/// if source is defined using a `tensor.pad` operation. Note `source` is
-/// passed by reference. It is updated to use the source of the pad operation.
-static std::optional<Value> getPaddingValue(Value &source) {
-  auto padOp = source.getDefiningOp<tensor::PadOp>();
-  if (!padOp || padOp.getNofold() || !padOp.hasZeroLowPad()) {
-    return std::nullopt;
-  }
-
-  Value constantPaddingValue = padOp.getConstantPaddingValue();
-  if (!constantPaddingValue) {
-    return std::nullopt;
-  }
-
-  source = padOp.getSource();
-  return constantPaddingValue;
-}
-
-/// Utility method to convert from `set_encoding` op to `pack` operation.
-/// For now this takes a `paddingValue` as input. The source is also taken
-/// as input so that these could be used with `OpConversionPatterns`.
+/// Utility method to convert a `set_encoding` op to a `pack` operation with
+/// a zero padding value. The source is taken as input so that the method can
+/// be used with `OpConversionPatterns`.
 static FailureOr<tensor::PackOp> lowerSetEncodingOpToPackOp(
     RewriterBase &rewriter, IREE::Encoding::SetEncodingOp encodingOp,
     Value source, MaterializeEncodingFn materializeEncodingFn,
@@ -235,13 +217,8 @@
     return rewriter.notifyMatchFailure(
         encodingOp, "failed to generate runtime tile size query");
   }
-  std::optional<Value> paddingValue;
-  if (encoding.getRoundDimsToArray().empty()) {
-    paddingValue = getPaddingValue(source);
-  } else {
-    paddingValue = rewriter.create<arith::ConstantOp>(
-        loc, rewriter.getZeroAttr(resultType.getElementType()));
-  }
+  Value paddingValue = rewriter.create<arith::ConstantOp>(
+      loc, rewriter.getZeroAttr(resultType.getElementType()));
   SmallVector<OpFoldResult> sourceDims =
       tensor::getMixedSizes(rewriter, loc, source);
   SmallVector<OpFoldResult> resultDims = tensor::PackOp::getResultShape(
diff --git a/compiler/src/iree/compiler/Codegen/Common/test/materialize_encoding_into_nop.mlir b/compiler/src/iree/compiler/Codegen/Common/test/materialize_encoding_into_nop.mlir
index ee54231..2e4a0ce 100644
--- a/compiler/src/iree/compiler/Codegen/Common/test/materialize_encoding_into_nop.mlir
+++ b/compiler/src/iree/compiler/Codegen/Common/test/materialize_encoding_into_nop.mlir
@@ -17,48 +17,6 @@
 #map = affine_map<(d0, d1, d2) -> (d0, d2)>
 #map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
 #map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
-func.func @pad_gemm(%arg0 : tensor<100x250xf32>, %arg1 : tensor<250x500xf32>, %arg2 : tensor<100x500xf32>) -> tensor<100x500xf32> {
-  %pad_value = arith.constant 0.0 : f32
-  %pad_lhs = tensor.pad %arg0 low[0, 0] high[4, 2] {
-    ^bb0(%b0: index, %b1 : index):
-      tensor.yield %pad_value : f32
-    } : tensor<100x250xf32> to tensor<104x252xf32>
-  %lhs = iree_encoding.set_encoding %pad_lhs : tensor<104x252xf32> -> tensor<104x252xf32, #iree_encoding.encoding<operand_index = 0, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2]>>
-  %pad_rhs = tensor.pad %arg1 low[0, 0] high[2, 4] {
-    ^bb0(%b0: index, %b1 : index):
-      tensor.yield %pad_value : f32
-    } : tensor<250x500xf32> to tensor<252x504xf32>
-  %rhs = iree_encoding.set_encoding %pad_rhs : tensor<252x504xf32> -> tensor<252x504xf32, #iree_encoding.encoding<operand_index = 1, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2]>>
-  %pad_output = tensor.pad %arg2 low[0, 0] high[4, 4] {
-    ^bb0(%b0: index, %b1 : index):
-      tensor.yield %pad_value : f32
-    } : tensor<100x500xf32> to tensor<104x504xf32>
-  %output = iree_encoding.set_encoding %pad_output : tensor<104x504xf32> -> tensor<104x504xf32, #iree_encoding.encoding<operand_index = 2, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2]>>
-  %gemm_packed = linalg.matmul ins(%lhs, %rhs : tensor<104x252xf32, #iree_encoding.encoding<operand_index = 0, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2]>>, tensor<252x504xf32, #iree_encoding.encoding<operand_index = 1, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2]>>)
-      outs(%output : tensor<104x504xf32, #iree_encoding.encoding<operand_index = 2, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2]>>) -> tensor<104x504xf32, #iree_encoding.encoding<operand_index = 2, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2]>>
-  %gemm = iree_encoding.unset_encoding %gemm_packed : tensor<104x504xf32, #iree_encoding.encoding<operand_index = 2, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2]>> -> tensor<104x504xf32>
-  %result = tensor.extract_slice %gemm[0, 0] [100, 500] [1, 1] : tensor<104x504xf32> to tensor<100x500xf32>
-  return %result : tensor<100x500xf32>
-}
-//      CHECK: func @pad_gemm(
-// CHECK-SAME:     %[[ARG0:.+]]: tensor<100x250xf32>
-// CHECK-SAME:     %[[ARG1:.+]]: tensor<250x500xf32>
-// CHECK-SAME:     %[[ARG2:.+]]: tensor<100x500xf32>
-//      CHECK:   %[[CST:.+]] = arith.constant 0.0
-//  CHECK-DAG:   %[[LHS:.+]] = tensor.pad %[[ARG0]]
-//  CHECK-DAG:   %[[RHS:.+]] = tensor.pad %[[ARG1]]
-//  CHECK-DAG:   %[[DEST:.+]] = tensor.pad %[[ARG2]]
-//      CHECK:   %[[GEMM:.+]] = linalg.matmul
-// CHECK-SAME:       ins(%[[LHS]], %[[RHS]] :
-// CHECK-SAME:       outs(%[[DEST]] :
-//      CHECK:   %[[RES:.+]] = tensor.extract_slice %[[GEMM]]
-//      CHECK:   return %[[RES]]
-
-// -----
-
-#map = affine_map<(d0, d1, d2) -> (d0, d2)>
-#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
-#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
 func.func @gemm_dynamic(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>, %arg2 : tensor<?x?xf32>) -> tensor<?x?xf32> {
   %0 = iree_encoding.set_encoding %arg0 : tensor<?x?xf32> -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2]>>
   %1 = iree_encoding.set_encoding %arg1 : tensor<?x?xf32> -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2]>>
@@ -103,8 +61,8 @@
 // CHECK-SAME:     %[[RHS:[a-zA-Z0-9]+]]: tensor<?x?xf32>
 //  CHECK-DAG:   %[[C0:.+]] = arith.constant 0 : index
 //  CHECK-DAG:   %[[C1:.+]] = arith.constant 1 : index
-//  CHECK-DAG:   %[[D0:.+]] = tensor.dim %[[ARG0]], %[[C0]]
-//  CHECK-DAG:   %[[D1:.+]] = tensor.dim %[[ARG1]], %[[C1]]
+//  CHECK-DAG:   %[[D0:.+]] = tensor.dim %[[LHS]], %[[C0]]
+//  CHECK-DAG:   %[[D1:.+]] = tensor.dim %[[RHS]], %[[C1]]
 //  CHECK-DAG:   %[[EMPTY:.+]] = tensor.empty(%[[D0]], %[[D1]]) : tensor<?x?xf32>
 //      CHECK:   %[[FILL:.+]] = linalg.fill
 // CHECK-SAME:       outs(%[[EMPTY]] :