Replace tabs with spaces throughout the repo
These were mostly left over from the `// no predecessors` comments generated by MLIR, which now emits spaces instead (https://github.com/llvm/llvm-project/commit/13090ec7dd4cbd54d42fe6f9f7ce7ab88c7c3c6d).
The only remaining tabs are in `.gitmodules`, which is tool-generated, so I'm leaving it alone.
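
Not part of this change, but for posterity: a minimal Python sketch of one way to verify no tabs remain, run from the repo root. The skip lists are assumptions for illustration, not an exhaustive policy.

    # Hypothetical tab checker; directory/file exclusions are assumptions.
    import os
    import sys

    SKIP_DIRS = {".git"}            # assumed: don't scan git internals
    SKIP_FILES = {".gitmodules"}    # tool-generated, intentionally left alone

    found = False
    for root, dirs, files in os.walk("."):
        # Prune excluded directories in place so os.walk skips them.
        dirs[:] = [d for d in dirs if d not in SKIP_DIRS]
        for name in files:
            if name in SKIP_FILES:
                continue
            path = os.path.join(root, name)
            try:
                with open(path, "rb") as f:
                    for lineno, line in enumerate(f, 1):
                        if b"\t" in line:
                            print(f"{path}:{lineno}")
                            found = True
            except OSError:
                pass  # unreadable file; skip it
    sys.exit(1 if found else 0)

An exit status of 0 means the tree is tab-free under these assumptions.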
PiperOrigin-RevId: 311039190
diff --git a/WORKSPACE b/WORKSPACE
index 6332e8e..c573c28 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -27,8 +27,8 @@
sha256 = "cf3b76a90c86c0554c5b10f4b160f05af71d252026b71362c4674e2fb9936cf9",
strip_prefix = "rules_cc-01d4a48911d5e7591ecb1c06d3b8af47fe872371",
urls = [
- "https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_cc/archive/01d4a48911d5e7591ecb1c06d3b8af47fe872371.zip",
- "https://github.com/bazelbuild/rules_cc/archive/01d4a48911d5e7591ecb1c06d3b8af47fe872371.zip",
+ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_cc/archive/01d4a48911d5e7591ecb1c06d3b8af47fe872371.zip",
+ "https://github.com/bazelbuild/rules_cc/archive/01d4a48911d5e7591ecb1c06d3b8af47fe872371.zip",
],
)
diff --git a/iree/compiler/Dialect/VMLA/Conversion/HLOToVMLA/test/conv.mlir b/iree/compiler/Dialect/VMLA/Conversion/HLOToVMLA/test/conv.mlir
index a8d0dae..55c81a1 100644
--- a/iree/compiler/Dialect/VMLA/Conversion/HLOToVMLA/test/conv.mlir
+++ b/iree/compiler/Dialect/VMLA/Conversion/HLOToVMLA/test/conv.mlir
@@ -2,7 +2,7 @@
// CHECK-LABEL: @conv
func @conv(%arg0: tensor<1x4x5x2xf32>, %arg1: tensor<3x2x2x1xf32>) -> tensor<1x2x3x1xf32> attributes { sym_visibility = "private" } {
- // CHECK: vmla.conv
+ // CHECK: vmla.conv
// CHECK-SAME: {batch_group_count = 1 : i32,
// CHECK-SAME: feature_group_count = 1 : i32,
// CHECK-SAME: lhs_dilation = dense<1> : vector<2xi32>,
diff --git a/iree/compiler/Dialect/VMLA/Conversion/HLOToVMLA/test/reduce.mlir b/iree/compiler/Dialect/VMLA/Conversion/HLOToVMLA/test/reduce.mlir
index 6bca682..9f9971c 100644
--- a/iree/compiler/Dialect/VMLA/Conversion/HLOToVMLA/test/reduce.mlir
+++ b/iree/compiler/Dialect/VMLA/Conversion/HLOToVMLA/test/reduce.mlir
@@ -14,7 +14,7 @@
// CHECK-SAME: out %[[DST]](%[[DST_SHAPE]] : !shapex.ranked_shape<[4]>)
// CHECK-SAME: {dimension = 1 : i32} : f32
%0 = "xla_hlo.reduce"(%arg0, %cst) ( {
- ^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>): // no predecessors
+ ^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>): // no predecessors
%1 = xla_hlo.add %arg1, %arg2 : tensor<f32>
"xla_hlo.return"(%1) : (tensor<f32>) -> ()
}) {dimensions = dense<1> : tensor<1xi64>} : (tensor<4x8xf32>, tensor<f32>) -> tensor<4xf32>
diff --git a/iree/compiler/Dialect/VMLA/Conversion/HLOToVMLA/test/reduce_window.mlir b/iree/compiler/Dialect/VMLA/Conversion/HLOToVMLA/test/reduce_window.mlir
index 0e92a70..2e83290 100644
--- a/iree/compiler/Dialect/VMLA/Conversion/HLOToVMLA/test/reduce_window.mlir
+++ b/iree/compiler/Dialect/VMLA/Conversion/HLOToVMLA/test/reduce_window.mlir
@@ -6,7 +6,7 @@
// CHECK: vmla.pooling.max
%cst = constant dense<0.000000e+00> : tensor<f32>
%0 = "xla_hlo.reduce_window"(%arg0, %cst) ( {
- ^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>): // no predecessors
+ ^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>): // no predecessors
%1 = xla_hlo.maximum %arg1, %arg2 : tensor<f32>
"xla_hlo.return"(%1) : (tensor<f32>) -> ()
}) {window_dimensions = dense<[1, 2, 3, 1]> : tensor<4xi64>,
@@ -23,7 +23,7 @@
// CHECK: vmla.pooling.min
%cst = constant dense<0> : tensor<i32>
%0 = "xla_hlo.reduce_window"(%arg0, %cst) ( {
- ^bb0(%arg1: tensor<i32>, %arg2: tensor<i32>): // no predecessors
+ ^bb0(%arg1: tensor<i32>, %arg2: tensor<i32>): // no predecessors
%1 = xla_hlo.minimum %arg1, %arg2 : tensor<i32>
"xla_hlo.return"(%1) : (tensor<i32>) -> ()
}) {window_dimensions = dense<[1, 2, 3, 1]> : tensor<4xi64>,
@@ -40,7 +40,7 @@
// CHECK: vmla.pooling.sum
%cst = constant dense<0.000000e+00> : tensor<f32>
%0 = "xla_hlo.reduce_window"(%arg0, %cst) ( {
- ^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>): // no predecessors
+ ^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>): // no predecessors
%1 = xla_hlo.add %arg1, %arg2 : tensor<f32>
"xla_hlo.return"(%1) : (tensor<f32>) -> ()
}) {window_dimensions = dense<[2, 3]> : tensor<2xi64>,
diff --git a/iree/compiler/Dialect/VMLA/Transforms/test/unroll_reductions.mlir b/iree/compiler/Dialect/VMLA/Transforms/test/unroll_reductions.mlir
index 32f4d8d..39d58fe 100644
--- a/iree/compiler/Dialect/VMLA/Transforms/test/unroll_reductions.mlir
+++ b/iree/compiler/Dialect/VMLA/Transforms/test/unroll_reductions.mlir
@@ -5,12 +5,12 @@
// CHECK-DAG: %[[INITIAL:.+]] = constant dense<0.000000e+00> : tensor<f32>
%cst = constant dense<0.000000e+00> : tensor<f32>
// CHECK-NEXT: %[[TEMP:.+]] = "xla_hlo.reduce"(%arg0, %[[INITIAL]]) ( {
- // CHECK-NEXT: ^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>): // no predecessors
+ // CHECK-NEXT: ^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>): // no predecessors
// CHECK-NEXT: %2 = xla_hlo.add %arg1, %arg2 : tensor<f32>
// CHECK-NEXT: "xla_hlo.return"(%2) : (tensor<f32>) -> ()
// CHECK-NEXT: }) {dimensions = dense<2> : tensor<1xi64>} : (tensor<4x2x8xf32>, tensor<f32>) -> tensor<4x2xf32>
// CHECK-NEXT: %[[RESULT:.+]] = "xla_hlo.reduce"(%[[TEMP]], %[[INITIAL]]) ( {
- // CHECK-NEXT: ^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>): // no predecessors
+ // CHECK-NEXT: ^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>): // no predecessors
// CHECK-NEXT: %2 = xla_hlo.add %arg1, %arg2 : tensor<f32>
// CHECK-NEXT: "xla_hlo.return"(%2) : (tensor<f32>) -> ()
// CHECK-NEXT: }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<4x2xf32>, tensor<f32>) -> tensor<4xf32>
diff --git a/iree/samples/models/mnist.mlir b/iree/samples/models/mnist.mlir
index 7d84ca3..c6ae699 100644
--- a/iree/samples/models/mnist.mlir
+++ b/iree/samples/models/mnist.mlir
@@ -34,14 +34,14 @@
%14 = "xla_hlo.dot"(%13, %7) : (tensor<1x128xf32>, tensor<128x10xf32>) -> tensor<1x10xf32>
%15 = "xla_hlo.add"(%14, %6) {broadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<1x10xf32>, tensor<10xf32>) -> tensor<1x10xf32>
%16 = "xla_hlo.reduce"(%15, %4) ( {
- ^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>): // no predecessors
+ ^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>): // no predecessors
%21 = xla_hlo.maximum %arg1, %arg2 : tensor<f32>
"xla_hlo.return"(%21) : (tensor<f32>) -> ()
}) {dimensions = dense<1> : tensor<1xi64>} : (tensor<1x10xf32>, tensor<f32>) -> tensor<1xf32>
%17 = "xla_hlo.subtract"(%15, %16) {broadcast_dimensions = dense<0> : tensor<1xi64>} : (tensor<1x10xf32>, tensor<1xf32>) -> tensor<1x10xf32>
%18 = "xla_hlo.exponential"(%17) : (tensor<1x10xf32>) -> tensor<1x10xf32>
%19 = "xla_hlo.reduce"(%18, %5) ( {
- ^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>): // no predecessors
+ ^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>): // no predecessors
%21 = xla_hlo.add %arg1, %arg2 : tensor<f32>
"xla_hlo.return"(%21) : (tensor<f32>) -> ()
}) {dimensions = dense<1> : tensor<1xi64>} : (tensor<1x10xf32>, tensor<f32>) -> tensor<1xf32>