NFC: Fix various typos and code style (#9695)

diff --git a/compiler/src/iree/compiler/Dialect/Flow/Transforms/PadLinalgOps.cpp b/compiler/src/iree/compiler/Dialect/Flow/Transforms/PadLinalgOps.cpp
index 493a9ea..0ec209c 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Transforms/PadLinalgOps.cpp
+++ b/compiler/src/iree/compiler/Dialect/Flow/Transforms/PadLinalgOps.cpp
@@ -18,7 +18,7 @@
 namespace Flow {
 
 namespace {
-/// A pattern to pad staticly shaped matmul operands to the next integer
+/// A pattern to pad statically shaped matmul operands to the next integer
 /// multiple of padSize.
 class PadMatmulOp : public OpRewritePattern<linalg::MatmulOp> {
  public:
@@ -39,9 +39,8 @@
 
     if (!lhsType || !rhsType) return failure();
 
-    if (!lhsType.hasStaticShape() || !rhsType.hasStaticShape()) {
+    if (!lhsType.hasStaticShape() || !rhsType.hasStaticShape())
       return failure();
-    }
 
     auto lhsShape = lhsType.getShape();
     auto rhsShape = rhsType.getShape();
@@ -56,9 +55,8 @@
     int paddingForN = newNSize - N;
     int paddingForK = newKSize - K;
 
-    if (paddingForM == 0 && paddingForN == 0 && paddingForK == 0) {
+    if (paddingForM == 0 && paddingForN == 0 && paddingForK == 0)
       return failure();
-    }
 
     auto lhsPaddedType =
         RankedTensorType::get({newMSize, newKSize}, lhsType.getElementType());
@@ -88,7 +86,7 @@
                   loc, rewriter)
             : lhs;
 
-    auto paddedrhs =
+    Value paddedRhs =
         (paddingForK > 0 || paddingForN > 0)
             ? tensor::createPadScalarOp(
                   rhsPaddedType, rhs, rhsPaddingValue, createPadding({0, 0}),
@@ -96,12 +94,12 @@
                   loc, rewriter)
             : rhs;
 
-    // Padding for K-dim only result doesn't change result size.
+    // Padding only the K dimension does not change the result size.
     if (paddingForM == 0 && paddingForN == 0) {
       auto paddedMatmulOp =
           cast<linalg::LinalgOp>(matmulOp.getOperation())
               .clone(rewriter, loc, {resultType},
-                     ArrayRef<Value>{paddedLhs, paddedrhs, result});
+                     ArrayRef<Value>{paddedLhs, paddedRhs, result});
       rewriter.replaceOp(matmulOp, paddedMatmulOp->getResults());
     } else {
       auto newResultType = RankedTensorType::get({newMSize, newNSize},
@@ -115,7 +113,7 @@
       auto paddedMatmulOp =
           cast<linalg::LinalgOp>(matmulOp.getOperation())
               .clone(rewriter, loc, {newResultType},
-                     ArrayRef<Value>{paddedLhs, paddedrhs, paddedResult});
+                     ArrayRef<Value>{paddedLhs, paddedRhs, paddedResult});
 
       SmallVector<OpFoldResult> offsets(2, rewriter.getI64IntegerAttr(0));
       SmallVector<OpFoldResult> strides(2, rewriter.getI64IntegerAttr(1));
@@ -151,6 +149,7 @@
  private:
   int paddingSize;
 };
+
 }  // namespace
 
 std::unique_ptr<Pass> createPadLinalgOpsToIntegerMultiplePass(int paddingSize) {
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Transforms/PadTensorToTensorInsertSlice.cpp b/compiler/src/iree/compiler/Dialect/Flow/Transforms/PadTensorToTensorInsertSlice.cpp
index 4204fd7..df83519 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Transforms/PadTensorToTensorInsertSlice.cpp
+++ b/compiler/src/iree/compiler/Dialect/Flow/Transforms/PadTensorToTensorInsertSlice.cpp
@@ -4,7 +4,7 @@
 // See https://llvm.org/LICENSE.txt for license information.
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 
-//===- PadTensorToInsertSlice.cpp - Pass to legalize linalg.pad_tensor-===//
+//===- PadTensorToInsertSlice.cpp - Pass to legalize linalg.pad_tensor ----===//
 //
 // Pass to convert linalg.pad_tensor to linalg.fill + tensor.insert_slice
 // operations which is the only way Vulkan backend can lower it to a single
diff --git a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/pad_tensor_to_tensor.mlir b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/pad_tensor_to_tensor.mlir
index bda35ad..b47cc20 100644
--- a/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/pad_tensor_to_tensor.mlir
+++ b/compiler/src/iree/compiler/Dialect/Flow/Transforms/test/pad_tensor_to_tensor.mlir
@@ -7,7 +7,7 @@
     %c3 = arith.constant 3 : index
     %0 = tensor.extract %arg1[] : tensor<f32>
     %1 = tensor.pad %arg0 low[%c4, %arg2] high[%arg3, %c3]  {
-    ^bb0(%arg4: index, %arg5: index):  // no predecessors
+    ^bb0(%arg4: index, %arg5: index):
       tensor.yield %0 : f32
     } : tensor<?x?xf32> to tensor<?x?xf32>
     return %1 : tensor<?x?xf32>
@@ -44,7 +44,7 @@
     %c3 = arith.constant 3 : index
     %0 = tensor.extract %arg1[] : tensor<f32>
     %1 = tensor.pad %arg0 low[%c4, %c5] high[%c2, %c3]  {
-    ^bb0(%arg2: index, %arg3: index):  // no predecessors
+    ^bb0(%arg2: index, %arg3: index):
       tensor.yield %0 : f32
     } : tensor<12x4xf32> to tensor<18x12xf32>
     return %1 : tensor<18x12xf32>