[StableHLO][NFC] Port remaining op tests (#13297)
Ops that were not ported over from xla_ops are not yet supported.
Issue: https://github.com/openxla/iree/issues/12678
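
Each ported test keeps the structure of the old xla_ops tests: inputs are
wrapped in util.unfoldable_constant so the op under test cannot be
constant-folded away, and results are verified with check.expect_eq_const
(integers) or check.expect_almost_eq_const (floats). A minimal sketch of the
pattern, using stablehlo.abs purely for illustration (not one of the ops in
this change):

    func.func @abs() {
      // The unfoldable input keeps the op live through compilation.
      %input = util.unfoldable_constant dense<[-1.0, 2.0]> : tensor<2xf32>
      %result = "stablehlo.abs"(%input) : (tensor<2xf32>) -> tensor<2xf32>
      // Float results are compared with a tolerance.
      check.expect_almost_eq_const(%result, dense<[1.0, 2.0]> : tensor<2xf32>) : tensor<2xf32>
      return
    }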
diff --git a/tests/e2e/stablehlo_ops/BUILD.bazel b/tests/e2e/stablehlo_ops/BUILD.bazel
index adefb05..c7a7300 100644
--- a/tests/e2e/stablehlo_ops/BUILD.bazel
+++ b/tests/e2e/stablehlo_ops/BUILD.bazel
@@ -43,9 +43,39 @@
"dot_general.mlir",
"dynamic_slice.mlir",
"dynamic_update_slice.mlir",
+ "exponential.mlir",
+ "exponential_minus_one.mlir",
+ "finite.mlir",
+ "floor.mlir",
+ "iota.mlir",
+ "log.mlir",
+ "log_plus_one.mlir",
+ "maximum.mlir",
+ "minimum.mlir",
+ "multiply.mlir",
+ "negate.mlir",
+ "pad.mlir",
+ "reduce.mlir",
+ "reduce_window.mlir",
+ "remainder.mlir",
+ "reshape.mlir",
+ "rng_uniform.mlir",
+ "round.mlir",
+ "rsqrt.mlir",
+ "select.mlir",
+ "sine.mlir",
+ "slice.mlir",
+ "sqrt.mlir",
+ "subtract.mlir",
+ "tanh.mlir",
+ "torch_index_select.mlir",
+ "transpose.mlir",
+ "while.mlir",
],
include = ["*.mlir"],
- exclude = [],
+ exclude = [
+ "pow.mlir", # TODO(#12678): Investigate this failing on riscv-32.
+ ],
),
compiler_flags = ["--iree-input-type=stablehlo_experimental"],
driver = "local-task",
@@ -75,6 +105,35 @@
"dot_general.mlir",
"dynamic_slice.mlir",
"dynamic_update_slice.mlir",
+ "exponential.mlir",
+ "exponential_minus_one.mlir",
+ "finite.mlir",
+ "floor.mlir",
+ "iota.mlir",
+ "log.mlir",
+ "log_plus_one.mlir",
+ "maximum.mlir",
+ "minimum.mlir",
+ "multiply.mlir",
+ "negate.mlir",
+ "pad.mlir",
+ "pow.mlir",
+ "reduce.mlir",
+ "reduce_window.mlir",
+ "remainder.mlir",
+ "reshape.mlir",
+ "rng_uniform.mlir",
+ "round.mlir",
+ "rsqrt.mlir",
+ "select.mlir",
+ "sine.mlir",
+ "slice.mlir",
+ "sqrt.mlir",
+ "subtract.mlir",
+ "tanh.mlir",
+ "torch_index_select.mlir",
+ "transpose.mlir",
+ "while.mlir",
],
include = ["*.mlir"],
exclude = [],
@@ -105,6 +164,35 @@
"dot_general.mlir",
"dynamic_slice.mlir",
"dynamic_update_slice.mlir",
+ "exponential.mlir",
+ "exponential_minus_one.mlir",
+ "finite.mlir",
+ "floor.mlir",
+ "iota.mlir",
+ "log.mlir",
+ "log_plus_one.mlir",
+ "maximum.mlir",
+ "minimum.mlir",
+ "multiply.mlir",
+ "negate.mlir",
+ "pad.mlir",
+ "pow.mlir",
+ "reduce.mlir",
+ "reduce_window.mlir",
+ "remainder.mlir",
+ "reshape.mlir",
+ "rng_uniform.mlir",
+ "round.mlir",
+ "rsqrt.mlir",
+ "select.mlir",
+ "sine.mlir",
+ "slice.mlir",
+ "sqrt.mlir",
+ "subtract.mlir",
+ "tanh.mlir",
+ "torch_index_select.mlir",
+ "transpose.mlir",
+ "while.mlir",
],
include = ["*.mlir"],
exclude = [
@@ -141,6 +229,35 @@
"dot_general.mlir",
"dynamic_slice.mlir",
"dynamic_update_slice.mlir",
+ "exponential.mlir",
+ "exponential_minus_one.mlir",
+ "finite.mlir",
+ "floor.mlir",
+ "iota.mlir",
+ "log.mlir",
+ "log_plus_one.mlir",
+ "maximum.mlir",
+ "minimum.mlir",
+ "multiply.mlir",
+ "negate.mlir",
+ "pad.mlir",
+ "pow.mlir",
+ "reduce.mlir",
+ "reduce_window.mlir",
+ "remainder.mlir",
+ "reshape.mlir",
+ "rng_uniform.mlir",
+ "round.mlir",
+ "rsqrt.mlir",
+ "select.mlir",
+ "sine.mlir",
+ "slice.mlir",
+ "sqrt.mlir",
+ "subtract.mlir",
+ "tanh.mlir",
+ "torch_index_select.mlir",
+ "transpose.mlir",
+ "while.mlir",
],
include = ["*.mlir"],
exclude = [],
diff --git a/tests/e2e/stablehlo_ops/CMakeLists.txt b/tests/e2e/stablehlo_ops/CMakeLists.txt
index 91ce971..62ab767 100644
--- a/tests/e2e/stablehlo_ops/CMakeLists.txt
+++ b/tests/e2e/stablehlo_ops/CMakeLists.txt
@@ -32,6 +32,34 @@
"dot_general.mlir"
"dynamic_slice.mlir"
"dynamic_update_slice.mlir"
+ "exponential.mlir"
+ "exponential_minus_one.mlir"
+ "finite.mlir"
+ "floor.mlir"
+ "iota.mlir"
+ "log.mlir"
+ "log_plus_one.mlir"
+ "maximum.mlir"
+ "minimum.mlir"
+ "multiply.mlir"
+ "negate.mlir"
+ "pad.mlir"
+ "reduce.mlir"
+ "reduce_window.mlir"
+ "remainder.mlir"
+ "reshape.mlir"
+ "rng_uniform.mlir"
+ "round.mlir"
+ "rsqrt.mlir"
+ "select.mlir"
+ "sine.mlir"
+ "slice.mlir"
+ "sqrt.mlir"
+ "subtract.mlir"
+ "tanh.mlir"
+ "torch_index_select.mlir"
+ "transpose.mlir"
+ "while.mlir"
TARGET_BACKEND
"llvm-cpu"
DRIVER
@@ -62,6 +90,35 @@
"dot_general.mlir"
"dynamic_slice.mlir"
"dynamic_update_slice.mlir"
+ "exponential.mlir"
+ "exponential_minus_one.mlir"
+ "finite.mlir"
+ "floor.mlir"
+ "iota.mlir"
+ "log.mlir"
+ "log_plus_one.mlir"
+ "maximum.mlir"
+ "minimum.mlir"
+ "multiply.mlir"
+ "negate.mlir"
+ "pad.mlir"
+ "pow.mlir"
+ "reduce.mlir"
+ "reduce_window.mlir"
+ "remainder.mlir"
+ "reshape.mlir"
+ "rng_uniform.mlir"
+ "round.mlir"
+ "rsqrt.mlir"
+ "select.mlir"
+ "sine.mlir"
+ "slice.mlir"
+ "sqrt.mlir"
+ "subtract.mlir"
+ "tanh.mlir"
+ "torch_index_select.mlir"
+ "transpose.mlir"
+ "while.mlir"
TARGET_BACKEND
"vmvx"
DRIVER
@@ -90,6 +147,35 @@
"dot_general.mlir"
"dynamic_slice.mlir"
"dynamic_update_slice.mlir"
+ "exponential.mlir"
+ "exponential_minus_one.mlir"
+ "finite.mlir"
+ "floor.mlir"
+ "iota.mlir"
+ "log.mlir"
+ "log_plus_one.mlir"
+ "maximum.mlir"
+ "minimum.mlir"
+ "multiply.mlir"
+ "negate.mlir"
+ "pad.mlir"
+ "pow.mlir"
+ "reduce.mlir"
+ "reduce_window.mlir"
+ "remainder.mlir"
+ "reshape.mlir"
+ "rng_uniform.mlir"
+ "round.mlir"
+ "rsqrt.mlir"
+ "select.mlir"
+ "sine.mlir"
+ "slice.mlir"
+ "sqrt.mlir"
+ "subtract.mlir"
+ "tanh.mlir"
+ "torch_index_select.mlir"
+ "transpose.mlir"
+ "while.mlir"
TARGET_BACKEND
"vulkan-spirv"
DRIVER
@@ -120,6 +206,35 @@
"dot_general.mlir"
"dynamic_slice.mlir"
"dynamic_update_slice.mlir"
+ "exponential.mlir"
+ "exponential_minus_one.mlir"
+ "finite.mlir"
+ "floor.mlir"
+ "iota.mlir"
+ "log.mlir"
+ "log_plus_one.mlir"
+ "maximum.mlir"
+ "minimum.mlir"
+ "multiply.mlir"
+ "negate.mlir"
+ "pad.mlir"
+ "pow.mlir"
+ "reduce.mlir"
+ "reduce_window.mlir"
+ "remainder.mlir"
+ "reshape.mlir"
+ "rng_uniform.mlir"
+ "round.mlir"
+ "rsqrt.mlir"
+ "select.mlir"
+ "sine.mlir"
+ "slice.mlir"
+ "sqrt.mlir"
+ "subtract.mlir"
+ "tanh.mlir"
+ "torch_index_select.mlir"
+ "transpose.mlir"
+ "while.mlir"
TARGET_BACKEND
"llvm-cpu"
DRIVER
diff --git a/tests/e2e/stablehlo_ops/exponential.mlir b/tests/e2e/stablehlo_ops/exponential.mlir
new file mode 100644
index 0000000..b6b2d5b
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/exponential.mlir
@@ -0,0 +1,27 @@
+func.func @tensor() {
+ %input = util.unfoldable_constant dense<[0.0, 1.0, 2.0, 4.0]> : tensor<4xf32>
+ %result = "stablehlo.exponential"(%input) : (tensor<4xf32>) -> tensor<4xf32>
+ check.expect_almost_eq_const(%result, dense<[1.0, 2.7183, 7.3891, 54.5981]> : tensor<4xf32>) : tensor<4xf32>
+ return
+}
+
+func.func @scalar() {
+ %input = util.unfoldable_constant dense<1.0> : tensor<f32>
+ %result = "stablehlo.exponential"(%input) : (tensor<f32>) -> tensor<f32>
+ check.expect_almost_eq_const(%result, dense<2.7183> : tensor<f32>) : tensor<f32>
+ return
+}
+
+func.func @double() {
+ %input = util.unfoldable_constant dense<1.0> : tensor<f64>
+ %result = "stablehlo.exponential"(%input) : (tensor<f64>) -> tensor<f64>
+ check.expect_almost_eq_const(%result, dense<2.7183> : tensor<f64>) : tensor<f64>
+ return
+}
+
+func.func @negative() {
+ %input = util.unfoldable_constant dense<-1.0> : tensor<f32>
+ %result = "stablehlo.exponential"(%input) : (tensor<f32>) -> tensor<f32>
+ check.expect_almost_eq_const(%result, dense<0.367879> : tensor<f32>) : tensor<f32>
+ return
+}
diff --git a/tests/e2e/stablehlo_ops/exponential_minus_one.mlir b/tests/e2e/stablehlo_ops/exponential_minus_one.mlir
new file mode 100644
index 0000000..2a770f2
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/exponential_minus_one.mlir
@@ -0,0 +1,6 @@
+func.func @exponential_minus_one() {
+ %input = util.unfoldable_constant dense<[0.0, 0.5, 1.0, -1.0]> : tensor<4xf32>
+ %result = "stablehlo.exponential_minus_one"(%input) : (tensor<4xf32>) -> tensor<4xf32>
+ check.expect_almost_eq_const(%result, dense<[0.0, 0.6487213, 1.7182818, -0.6321205]> : tensor<4xf32>) : tensor<4xf32>
+ return
+}
diff --git a/tests/e2e/stablehlo_ops/finite.mlir b/tests/e2e/stablehlo_ops/finite.mlir
new file mode 100644
index 0000000..c4abe41
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/finite.mlir
@@ -0,0 +1,11 @@
+func.func @f32() {
+ %0 = util.unfoldable_constant dense<[1.0, 6.0, -6.0, 0.0]> : tensor<4xf32>
+ %1 = util.unfoldable_constant dense<[0.0, 2.0, 3.0, 4.0]> : tensor<4xf32>
+ %2 = "stablehlo.divide"(%0, %1) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
+ %result = "stablehlo.is_finite"(%2) : (tensor<4xf32>) -> tensor<4xi1>
+ %c0 = util.unfoldable_constant dense<0> : tensor<4xi8>
+ %c1 = util.unfoldable_constant dense<1> : tensor<4xi8>
+ %output = "stablehlo.select"(%result, %c1, %c0) : (tensor<4xi1>, tensor<4xi8>, tensor<4xi8>) -> tensor<4xi8>
+ check.expect_eq_const(%output, dense<[0, 1, 1, 1]> : tensor<4xi8>) : tensor<4xi8>
+ return
+}
diff --git a/tests/e2e/stablehlo_ops/floor.mlir b/tests/e2e/stablehlo_ops/floor.mlir
new file mode 100644
index 0000000..658aba0
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/floor.mlir
@@ -0,0 +1,20 @@
+func.func @tensor() {
+ %input = util.unfoldable_constant dense<[0.0, 1.1, 2.5, 4.9]> : tensor<4xf32>
+ %result = "stablehlo.floor"(%input) : (tensor<4xf32>) -> tensor<4xf32>
+  check.expect_almost_eq_const(%result, dense<[0.0, 1.0, 2.0, 4.0]> : tensor<4xf32>) : tensor<4xf32>
+ return
+}
+
+func.func @scalar() {
+ %input = util.unfoldable_constant dense<101.3> : tensor<f32>
+ %result = "stablehlo.floor"(%input) : (tensor<f32>) -> tensor<f32>
+  check.expect_almost_eq_const(%result, dense<101.0> : tensor<f32>) : tensor<f32>
+ return
+}
+
+func.func @negative() {
+ %input = util.unfoldable_constant dense<-1.1> : tensor<f32>
+ %result = "stablehlo.floor"(%input) : (tensor<f32>) -> tensor<f32>
+  check.expect_almost_eq_const(%result, dense<-2.0> : tensor<f32>) : tensor<f32>
+ return
+}
diff --git a/tests/e2e/stablehlo_ops/iota.mlir b/tests/e2e/stablehlo_ops/iota.mlir
new file mode 100644
index 0000000..6109e1b
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/iota.mlir
@@ -0,0 +1,16 @@
+func.func @iota_dim0() {
+ %result = "stablehlo.iota"() {iota_dimension = 0 : i64} : () -> tensor<2x3xf32>
+ check.expect_almost_eq_const(%result, dense<[
+ [0.0, 0.0, 0.0],
+ [1.0, 1.0, 1.0]]> : tensor<2x3xf32>) : tensor<2x3xf32>
+ return
+}
+
+
+func.func @iota_dim1() {
+ %result = "stablehlo.iota"() {iota_dimension = 1 : i64} : () -> tensor<2x3xf32>
+ check.expect_almost_eq_const(%result, dense<[
+ [0.0, 1.0, 2.0],
+ [0.0, 1.0, 2.0]]> : tensor<2x3xf32>) : tensor<2x3xf32>
+ return
+}
diff --git a/tests/e2e/stablehlo_ops/log.mlir b/tests/e2e/stablehlo_ops/log.mlir
new file mode 100644
index 0000000..14969a4
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/log.mlir
@@ -0,0 +1,20 @@
+func.func @tensor() {
+ %input = util.unfoldable_constant dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf32>
+ %result = "stablehlo.log"(%input) : (tensor<4xf32>) -> tensor<4xf32>
+ check.expect_almost_eq_const(%result, dense<[0.0, 0.693147, 1.09861, 1.38629]> : tensor<4xf32>) : tensor<4xf32>
+ return
+}
+
+func.func @scalar() {
+ %input = util.unfoldable_constant dense<4.0> : tensor<f32>
+ %result = "stablehlo.log"(%input) : (tensor<f32>) -> tensor<f32>
+ check.expect_almost_eq_const(%result, dense<1.3863> : tensor<f32>) : tensor<f32>
+ return
+}
+
+func.func @double() {
+ %input = util.unfoldable_constant dense<4.0> : tensor<f64>
+ %result = "stablehlo.log"(%input) : (tensor<f64>) -> tensor<f64>
+ check.expect_almost_eq_const(%result, dense<1.3863> : tensor<f64>) : tensor<f64>
+ return
+}
diff --git a/tests/e2e/stablehlo_ops/log_plus_one.mlir b/tests/e2e/stablehlo_ops/log_plus_one.mlir
new file mode 100644
index 0000000..34a6209
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/log_plus_one.mlir
@@ -0,0 +1,6 @@
+func.func @log_plus_one() {
+ %input = util.unfoldable_constant dense<[0.0, 0.5, 1.0, 5.0]> : tensor<4xf32>
+ %result = "stablehlo.log_plus_one"(%input) : (tensor<4xf32>) -> tensor<4xf32>
+ check.expect_almost_eq_const(%result, dense<[0.0, 0.4054651, 0.6931472, 1.7917595]> : tensor<4xf32>) : tensor<4xf32>
+ return
+}
diff --git a/tests/e2e/stablehlo_ops/maximum.mlir b/tests/e2e/stablehlo_ops/maximum.mlir
new file mode 100644
index 0000000..c4f2254
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/maximum.mlir
@@ -0,0 +1,87 @@
+func.func @tensor_i32() {
+ %lhs = util.unfoldable_constant dense<[1, 6, 7, 8]> : tensor<4xi32>
+ %rhs = util.unfoldable_constant dense<[5, 6, 3, 8]> : tensor<4xi32>
+ %result = "stablehlo.maximum"(%lhs, %rhs) : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32>
+ check.expect_eq_const(%result, dense<[5, 6, 7, 8]> : tensor<4xi32>) : tensor<4xi32>
+ return
+}
+
+func.func @tensor_odd_dim() {
+ %lhs = util.unfoldable_constant dense<[1, 6, 7]> : tensor<3xi32>
+ %rhs = util.unfoldable_constant dense<[5, 6, 3]> : tensor<3xi32>
+ %result = "stablehlo.maximum"(%lhs, %rhs) : (tensor<3xi32>, tensor<3xi32>) -> tensor<3xi32>
+  check.expect_eq_const(%result, dense<[5, 6, 7]> : tensor<3xi32>) : tensor<3xi32>
+ return
+}
+
+func.func @scalar_i32() {
+ %lhs = util.unfoldable_constant dense<1> : tensor<i32>
+ %rhs = util.unfoldable_constant dense<2> : tensor<i32>
+ %result = "stablehlo.maximum"(%lhs, %rhs) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+ check.expect_eq_const(%result, dense<2> : tensor<i32>) : tensor<i32>
+ return
+}
+
+func.func @negative_i32() {
+ %lhs = util.unfoldable_constant dense<1> : tensor<i32>
+ %rhs = util.unfoldable_constant dense<-2> : tensor<i32>
+ %result = "stablehlo.maximum"(%lhs, %rhs) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+ check.expect_eq_const(%result, dense<1> : tensor<i32>) : tensor<i32>
+ return
+}
+
+func.func @i8() {
+ %lhs = util.unfoldable_constant dense<1> : tensor<i8>
+ %rhs = util.unfoldable_constant dense<2> : tensor<i8>
+ %result = "stablehlo.maximum"(%lhs, %rhs) : (tensor<i8>, tensor<i8>) -> tensor<i8>
+ check.expect_eq_const(%result, dense<2> : tensor<i8>) : tensor<i8>
+ return
+}
+
+func.func @i16() {
+ %lhs = util.unfoldable_constant dense<1> : tensor<i16>
+ %rhs = util.unfoldable_constant dense<2> : tensor<i16>
+ %result = "stablehlo.maximum"(%lhs, %rhs) : (tensor<i16>, tensor<i16>) -> tensor<i16>
+ check.expect_eq_const(%result, dense<2> : tensor<i16>) : tensor<i16>
+ return
+}
+
+func.func @i64() {
+ %lhs = util.unfoldable_constant dense<1> : tensor<i64>
+ %rhs = util.unfoldable_constant dense<2> : tensor<i64>
+ %result = "stablehlo.maximum"(%lhs, %rhs) : (tensor<i64>, tensor<i64>) -> tensor<i64>
+ check.expect_eq_const(%result, dense<2> : tensor<i64>) : tensor<i64>
+ return
+}
+
+func.func @tensor_f32() {
+  %lhs = util.unfoldable_constant dense<[1.0, 2.0, 7.0, 4.0]> : tensor<4xf32>
+  %rhs = util.unfoldable_constant dense<[5.0, 2.0, 3.0, 4.0]> : tensor<4xf32>
+  %result = "stablehlo.maximum"(%lhs, %rhs) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
+  check.expect_almost_eq_const(%result, dense<[5.0, 2.0, 7.0, 4.0]> : tensor<4xf32>) : tensor<4xf32>
+  return
+}
+
+func.func @scalar_f32() {
+  %lhs = util.unfoldable_constant dense<1.0> : tensor<f32>
+  %rhs = util.unfoldable_constant dense<2.0> : tensor<f32>
+  %result = "stablehlo.maximum"(%lhs, %rhs) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+  check.expect_almost_eq_const(%result, dense<2.0> : tensor<f32>) : tensor<f32>
+  return
+}
+
+func.func @double() {
+  %lhs = util.unfoldable_constant dense<1.0> : tensor<f64>
+  %rhs = util.unfoldable_constant dense<2.0> : tensor<f64>
+  %result = "stablehlo.maximum"(%lhs, %rhs) : (tensor<f64>, tensor<f64>) -> tensor<f64>
+  check.expect_almost_eq_const(%result, dense<2.0> : tensor<f64>) : tensor<f64>
+  return
+}
+
+func.func @negative_f32() {
+  %lhs = util.unfoldable_constant dense<1.0> : tensor<f32>
+  %rhs = util.unfoldable_constant dense<-2.0> : tensor<f32>
+  %result = "stablehlo.maximum"(%lhs, %rhs) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+  check.expect_almost_eq_const(%result, dense<1.0> : tensor<f32>) : tensor<f32>
+  return
+}
diff --git a/tests/e2e/stablehlo_ops/minimum.mlir b/tests/e2e/stablehlo_ops/minimum.mlir
new file mode 100644
index 0000000..83d2c7d
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/minimum.mlir
@@ -0,0 +1,87 @@
+func.func @tensor_i32() {
+ %lhs = util.unfoldable_constant dense<[1, 2, 7, 4]> : tensor<4xi32>
+ %rhs = util.unfoldable_constant dense<[5, 2, 3, 4]> : tensor<4xi32>
+ %result = "stablehlo.minimum"(%lhs, %rhs) : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32>
+ check.expect_eq_const(%result, dense<[1, 2, 3, 4]> : tensor<4xi32>) : tensor<4xi32>
+ return
+}
+
+func.func @tensor_odd_dim() {
+ %lhs = util.unfoldable_constant dense<[1, 2, 7]> : tensor<3xi32>
+ %rhs = util.unfoldable_constant dense<[5, 2, 3]> : tensor<3xi32>
+ %result = "stablehlo.minimum"(%lhs, %rhs) : (tensor<3xi32>, tensor<3xi32>) -> tensor<3xi32>
+ check.expect_eq_const(%result, dense<[1, 2, 3]> : tensor<3xi32>) : tensor<3xi32>
+ return
+}
+
+func.func @scalar_i32() {
+ %lhs = util.unfoldable_constant dense<1> : tensor<i32>
+ %rhs = util.unfoldable_constant dense<2> : tensor<i32>
+ %result = "stablehlo.minimum"(%lhs, %rhs) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+ check.expect_eq_const(%result, dense<1> : tensor<i32>) : tensor<i32>
+ return
+}
+
+func.func @negative_i32() {
+ %lhs = util.unfoldable_constant dense<1> : tensor<i32>
+ %rhs = util.unfoldable_constant dense<-2> : tensor<i32>
+ %result = "stablehlo.minimum"(%lhs, %rhs) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+ check.expect_eq_const(%result, dense<-2> : tensor<i32>) : tensor<i32>
+ return
+}
+
+func.func @i8() {
+ %lhs = util.unfoldable_constant dense<1> : tensor<i8>
+ %rhs = util.unfoldable_constant dense<2> : tensor<i8>
+ %result = "stablehlo.minimum"(%lhs, %rhs) : (tensor<i8>, tensor<i8>) -> tensor<i8>
+ check.expect_eq_const(%result, dense<1> : tensor<i8>) : tensor<i8>
+ return
+}
+
+func.func @i16() {
+ %lhs = util.unfoldable_constant dense<1> : tensor<i16>
+ %rhs = util.unfoldable_constant dense<2> : tensor<i16>
+ %result = "stablehlo.minimum"(%lhs, %rhs) : (tensor<i16>, tensor<i16>) -> tensor<i16>
+ check.expect_eq_const(%result, dense<1> : tensor<i16>) : tensor<i16>
+ return
+}
+
+func.func @i64() {
+ %lhs = util.unfoldable_constant dense<1> : tensor<i64>
+ %rhs = util.unfoldable_constant dense<2> : tensor<i64>
+ %result = "stablehlo.minimum"(%lhs, %rhs) : (tensor<i64>, tensor<i64>) -> tensor<i64>
+ check.expect_eq_const(%result, dense<1> : tensor<i64>) : tensor<i64>
+ return
+}
+
+func.func @tensor_f32() {
+ %lhs = util.unfoldable_constant dense<[1.0, 2.0, 7.0, 4.0]> : tensor<4xf32>
+ %rhs = util.unfoldable_constant dense<[5.0, 2.0, 3.0, 4.0]> : tensor<4xf32>
+ %result = "stablehlo.minimum"(%lhs, %rhs) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
+ check.expect_almost_eq_const(%result, dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf32>) : tensor<4xf32>
+ return
+}
+
+func.func @scalar_f32() {
+ %lhs = util.unfoldable_constant dense<1.0> : tensor<f32>
+ %rhs = util.unfoldable_constant dense<2.0> : tensor<f32>
+ %result = "stablehlo.minimum"(%lhs, %rhs) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+ check.expect_almost_eq_const(%result, dense<1.0> : tensor<f32>) : tensor<f32>
+ return
+}
+
+func.func @double() {
+ %lhs = util.unfoldable_constant dense<1.0> : tensor<f64>
+ %rhs = util.unfoldable_constant dense<2.0> : tensor<f64>
+ %result = "stablehlo.minimum"(%lhs, %rhs) : (tensor<f64>, tensor<f64>) -> tensor<f64>
+ check.expect_almost_eq_const(%result, dense<1.0> : tensor<f64>) : tensor<f64>
+ return
+}
+
+func.func @negative_f32() {
+ %lhs = util.unfoldable_constant dense<1.0> : tensor<f32>
+ %rhs = util.unfoldable_constant dense<-2.0> : tensor<f32>
+ %result = "stablehlo.minimum"(%lhs, %rhs) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+ check.expect_almost_eq_const(%result, dense<-2.0> : tensor<f32>) : tensor<f32>
+ return
+}
diff --git a/tests/e2e/stablehlo_ops/multiply.mlir b/tests/e2e/stablehlo_ops/multiply.mlir
new file mode 100644
index 0000000..603ceae
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/multiply.mlir
@@ -0,0 +1,6 @@
+func.func @multiply() {
+ %c2 = util.unfoldable_constant dense<2.0> : tensor<f32>
+ %res = "stablehlo.multiply"(%c2, %c2) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+ check.expect_almost_eq_const(%res, dense<4.0> : tensor<f32>) : tensor<f32>
+ return
+}
diff --git a/tests/e2e/stablehlo_ops/negate.mlir b/tests/e2e/stablehlo_ops/negate.mlir
new file mode 100644
index 0000000..e9072a8
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/negate.mlir
@@ -0,0 +1,13 @@
+func.func @tensor() {
+ %input = util.unfoldable_constant dense<[-1.0, -2.0, 3.0, 4.0]> : tensor<4xf32>
+ %result = "stablehlo.negate"(%input) : (tensor<4xf32>) -> tensor<4xf32>
+ check.expect_almost_eq_const(%result, dense<[1.0, 2.0, -3.0, -4.0]> : tensor<4xf32>) : tensor<4xf32>
+ return
+}
+
+func.func @scalar() {
+ %input = util.unfoldable_constant dense<-4.0> : tensor<f32>
+ %result = "stablehlo.negate"(%input) : (tensor<f32>) -> tensor<f32>
+ check.expect_almost_eq_const(%result, dense<4.0> : tensor<f32>) : tensor<f32>
+ return
+}
diff --git a/tests/e2e/stablehlo_ops/pad.mlir b/tests/e2e/stablehlo_ops/pad.mlir
new file mode 100644
index 0000000..9774bbc
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/pad.mlir
@@ -0,0 +1,22 @@
+func.func @pad_test() {
+ %input = util.unfoldable_constant dense<[[1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32>
+ %c0 = arith.constant dense<0> : tensor<i32>
+ %res = "stablehlo.pad"(%input, %c0) {
+ edge_padding_low = dense<[0, 1]> : tensor<2xi64>,
+ edge_padding_high = dense<[1, 5]> : tensor<2xi64>,
+ interior_padding = dense<0> : tensor<2xi64>
+ } : (tensor<2x3xi32>, tensor<i32>) -> tensor<3x9xi32>
+ check.expect_eq_const(%res, dense<[
+ [0, 1, 2, 3, 0, 0, 0, 0, 0],
+ [0, 4, 5, 6, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0]]> : tensor<3x9xi32>) : tensor<3x9xi32>
+ return
+}
+
+func.func @pad_no_op() {
+ %input = util.unfoldable_constant dense<[[1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32>
+ %c0 = arith.constant dense<0> : tensor<i32>
+ %res = "stablehlo.pad"(%input, %c0) {edge_padding_high = dense<[0, 0]> : tensor<2xi64>, edge_padding_low = dense<[0, 0]> : tensor<2xi64>, interior_padding = dense<0> : tensor<2xi64>} : (tensor<2x3xi32>, tensor<i32>) -> tensor<2x3xi32>
+ check.expect_eq(%res, %input) : tensor<2x3xi32>
+ return
+}
diff --git a/tests/e2e/stablehlo_ops/pow.mlir b/tests/e2e/stablehlo_ops/pow.mlir
new file mode 100644
index 0000000..7919281
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/pow.mlir
@@ -0,0 +1,15 @@
+func.func @tensor() {
+ %cst = stablehlo.constant dense<3.0e+00> : tensor<4xf32>
+ %input = util.unfoldable_constant dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf32>
+ %result = "stablehlo.power"(%input, %cst) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
+ check.expect_almost_eq_const(%result, dense<[1.0, 8.0, 27.0, 64.0]> : tensor<4xf32>) : tensor<4xf32>
+ return
+}
+
+func.func @scalar() {
+ %cst = stablehlo.constant dense<2.0e+00> : tensor<f32>
+ %input = util.unfoldable_constant dense<16.0> : tensor<f32>
+ %result = "stablehlo.power"(%input, %cst) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+ check.expect_almost_eq_const(%result, dense<256.0> : tensor<f32>) : tensor<f32>
+ return
+}
diff --git a/tests/e2e/stablehlo_ops/reduce.mlir b/tests/e2e/stablehlo_ops/reduce.mlir
new file mode 100644
index 0000000..fdad897
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/reduce.mlir
@@ -0,0 +1,360 @@
+// Int sum values from [1, 10]
+func.func @reduce_sum_1x10xi32() {
+ %0 = util.unfoldable_constant dense<[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]> : tensor<1x10xi32>
+ %1 = util.unfoldable_constant dense<0> : tensor<i32>
+ %res = "stablehlo.reduce"(%0, %1) ( {
+ ^bb0(%arg0: tensor<i32>, %arg1: tensor<i32>): // no predecessors
+ %3 = "stablehlo.add"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+ "stablehlo.return"(%3) : (tensor<i32>) -> ()
+ }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<1x10xi32>, tensor<i32>) -> tensor<1xi32>
+ check.expect_eq_const(%res, dense<55> : tensor<1xi32>) : tensor<1xi32>
+ return
+}
+
+// Int max values from [1, 10]
+func.func @reduce_max_1x10xi32() {
+ %0 = util.unfoldable_constant dense<[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]> : tensor<1x10xi32>
+ %1 = util.unfoldable_constant dense<0> : tensor<i32>
+ %res = "stablehlo.reduce"(%0, %1) ( {
+ ^bb0(%arg0: tensor<i32>, %arg1: tensor<i32>): // no predecessors
+ %3 = "stablehlo.maximum"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+ "stablehlo.return"(%3) : (tensor<i32>) -> ()
+ }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<1x10xi32>, tensor<i32>) -> tensor<1xi32>
+ check.expect_eq_const(%res, dense<10> : tensor<1xi32>) : tensor<1xi32>
+ return
+}
+
+// Int min values, along multiple dimensions. Expected to just be a reshape in this case.
+func.func @reduce_min_5x1x1xi32() {
+ %0 = util.unfoldable_constant dense<[[[1]],[[2]],[[3]],[[4]],[[5]]]> : tensor<5x1x1xi32>
+ %1 = util.unfoldable_constant dense<999> : tensor<i32>
+ %res = "stablehlo.reduce"(%0, %1) ( {
+ ^bb0(%arg0: tensor<i32>, %arg1: tensor<i32>): // no predecessors
+ %3 = "stablehlo.minimum"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+ "stablehlo.return"(%3) : (tensor<i32>) -> ()
+ }) {dimensions = dense<[1, 2]> : tensor<2xi64>} : (tensor<5x1x1xi32>, tensor<i32>) -> tensor<5xi32>
+ check.expect_eq_const(%res, dense<[1, 2, 3, 4, 5]> : tensor<5xi32>) : tensor<5xi32>
+ return
+}
+
+
+// The following cases match the examples presented at
+// https://www.tensorflow.org/xla/operation_semantics#reduce
+
+func.func @reduce_sum_2x3xi32_dim0() {
+ %0 = util.unfoldable_constant dense<[
+ [1, 2, 3],
+ [4, 5, 6]]> : tensor<2x3xi32>
+ %1 = util.unfoldable_constant dense<0> : tensor<i32>
+ %res = "stablehlo.reduce"(%0, %1) ( {
+ ^bb0(%arg0: tensor<i32>, %arg1: tensor<i32>): // no predecessors
+ %3 = "stablehlo.add"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+ "stablehlo.return"(%3) : (tensor<i32>) -> ()
+ }) {dimensions = dense<0> : tensor<1xi64>} : (tensor<2x3xi32>, tensor<i32>) -> tensor<3xi32>
+ check.expect_eq_const(%res, dense<[5, 7, 9]> : tensor<3xi32>) : tensor<3xi32>
+ return
+}
+
+func.func @reduce_sum_2x3xi32_dim1() {
+ %0 = util.unfoldable_constant dense<[
+ [1, 2, 3],
+ [4, 5, 6]]> : tensor<2x3xi32>
+ %1 = util.unfoldable_constant dense<0> : tensor<i32>
+ %res = "stablehlo.reduce"(%0, %1) ( {
+ ^bb0(%arg0: tensor<i32>, %arg1: tensor<i32>): // no predecessors
+ %3 = "stablehlo.add"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+ "stablehlo.return"(%3) : (tensor<i32>) -> ()
+ }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<2x3xi32>, tensor<i32>) -> tensor<2xi32>
+ check.expect_eq_const(%res, dense<[6, 15]> : tensor<2xi32>) : tensor<2xi32>
+ return
+}
+
+func.func @reduce_sum_4x2x3xi32_dim0() {
+ %0 = util.unfoldable_constant dense<[
+ [[1, 2, 3], [4, 5, 6]],
+ [[1, 2, 3], [4, 5, 6]],
+ [[1, 2, 3], [4, 5, 6]],
+ [[1, 2, 3], [4, 5, 6]]]> : tensor<4x2x3xi32>
+ %1 = util.unfoldable_constant dense<0> : tensor<i32>
+ %res = "stablehlo.reduce"(%0, %1) ( {
+ ^bb0(%arg0: tensor<i32>, %arg1: tensor<i32>): // no predecessors
+ %3 = "stablehlo.add"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+ "stablehlo.return"(%3) : (tensor<i32>) -> ()
+ }) {dimensions = dense<0> : tensor<1xi64>} : (tensor<4x2x3xi32>, tensor<i32>) -> tensor<2x3xi32>
+ check.expect_eq_const(%res, dense<[[4, 8, 12],[16, 20, 24]]> : tensor<2x3xi32>) : tensor<2x3xi32>
+ return
+}
+
+func.func @reduce_sum_4x2x3xi32_dim2() {
+ %0 = util.unfoldable_constant dense<[
+ [[1, 2, 3], [4, 5, 6]],
+ [[1, 2, 3], [4, 5, 6]],
+ [[1, 2, 3], [4, 5, 6]],
+ [[1, 2, 3], [4, 5, 6]]]> : tensor<4x2x3xi32>
+ %1 = util.unfoldable_constant dense<0> : tensor<i32>
+ %res = "stablehlo.reduce"(%0, %1) ( {
+ ^bb0(%arg0: tensor<i32>, %arg1: tensor<i32>): // no predecessors
+ %3 = "stablehlo.add"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+ "stablehlo.return"(%3) : (tensor<i32>) -> ()
+ }) {dimensions = dense<2> : tensor<1xi64>} : (tensor<4x2x3xi32>, tensor<i32>) -> tensor<4x2xi32>
+ check.expect_eq_const(%res, dense<[[6, 15],[6, 15],[6, 15],[6, 15]]> : tensor<4x2xi32>) : tensor<4x2xi32>
+ return
+}
+
+func.func @reduce_sum_4x2x3xi32_dims_0_1() {
+ %0 = util.unfoldable_constant dense<[
+ [[1, 2, 3], [4, 5, 6]],
+ [[1, 2, 3], [4, 5, 6]],
+ [[1, 2, 3], [4, 5, 6]],
+ [[1, 2, 3], [4, 5, 6]]]> : tensor<4x2x3xi32>
+ %1 = util.unfoldable_constant dense<0> : tensor<i32>
+ %res = "stablehlo.reduce"(%0, %1) ( {
+ ^bb0(%arg0: tensor<i32>, %arg1: tensor<i32>): // no predecessors
+ %3 = "stablehlo.add"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+ "stablehlo.return"(%3) : (tensor<i32>) -> ()
+ }) {dimensions = dense<[0, 1]> : tensor<2xi64>} : (tensor<4x2x3xi32>, tensor<i32>) -> tensor<3xi32>
+ check.expect_eq_const(%res, dense<[20, 28, 36]> : tensor<3xi32>) : tensor<3xi32>
+ return
+}
+
+func.func @reduce_sum_4x2x3xi32_dims_0_1_2() {
+ %0 = util.unfoldable_constant dense<[
+ [[1, 2, 3], [4, 5, 6]],
+ [[1, 2, 3], [4, 5, 6]],
+ [[1, 2, 3], [4, 5, 6]],
+ [[1, 2, 3], [4, 5, 6]]]> : tensor<4x2x3xi32>
+ %1 = util.unfoldable_constant dense<0> : tensor<i32>
+ %res = "stablehlo.reduce"(%0, %1) ( {
+ ^bb0(%arg0: tensor<i32>, %arg1: tensor<i32>): // no predecessors
+ %3 = "stablehlo.add"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+ "stablehlo.return"(%3) : (tensor<i32>) -> ()
+ }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<4x2x3xi32>, tensor<i32>) -> tensor<i32>
+ check.expect_eq_const(%res, dense<84> : tensor<i32>) : tensor<i32>
+ return
+}
+
+// Float sum values from [1.0, 10.0]
+func.func @reduce_sum_1x10xf32() {
+ %0 = util.unfoldable_constant dense<[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]]> : tensor<1x10xf32>
+ %1 = util.unfoldable_constant dense<0.0> : tensor<f32>
+ %res = "stablehlo.reduce"(%0, %1) ( {
+ ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors
+ %3 = "stablehlo.add"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+ "stablehlo.return"(%3) : (tensor<f32>) -> ()
+ }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<1x10xf32>, tensor<f32>) -> tensor<1xf32>
+ check.expect_almost_eq_const(%res, dense<55.0> : tensor<1xf32>) : tensor<1xf32>
+ return
+}
+
+// Float max values from [1.0, 10.0]
+func.func @reduce_max_1x10xf32() {
+ %0 = util.unfoldable_constant dense<[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]]> : tensor<1x10xf32>
+ %1 = util.unfoldable_constant dense<0.0> : tensor<f32>
+ %res = "stablehlo.reduce"(%0, %1)
+ ( {
+ ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors
+ %3 = "stablehlo.maximum"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+ "stablehlo.return"(%3) : (tensor<f32>) -> ()
+ })
+ {dimensions = dense<1> : tensor<1xi64>} : (tensor<1x10xf32>, tensor<f32>) -> tensor<1xf32>
+ check.expect_almost_eq_const(%res, dense<10.0> : tensor<1xf32>) : tensor<1xf32>
+ return
+}
+
+// Float min values, along multiple dimensions. Expected to just be a reshape in this case.
+func.func @reduce_min_5x1x1xf32() {
+ %0 = util.unfoldable_constant dense<[[[1.0]],[[2.0]],[[3.0]],[[4.0]],[[5.0]]]> : tensor<5x1x1xf32>
+ %1 = util.unfoldable_constant dense<999.0> : tensor<f32>
+ %res = "stablehlo.reduce"(%0, %1) ( {
+ ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors
+ %3 = "stablehlo.minimum"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+ "stablehlo.return"(%3) : (tensor<f32>) -> ()
+ }) {dimensions = dense<[1, 2]> : tensor<2xi64>} : (tensor<5x1x1xf32>, tensor<f32>) -> tensor<5xf32>
+ check.expect_almost_eq_const(%res, dense<[1.0, 2.0, 3.0, 4.0, 5.0]> : tensor<5xf32>) : tensor<5xf32>
+ return
+}
+
+// The following cases match the examples presented at
+// https://www.tensorflow.org/xla/operation_semantics#reduce
+
+func.func @reduce_sum_2x3xf32_dim0() {
+ %0 = util.unfoldable_constant dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]> : tensor<2x3xf32>
+ %1 = util.unfoldable_constant dense<0.0> : tensor<f32>
+ %res = "stablehlo.reduce"(%0, %1) ( {
+ ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors
+ %3 = "stablehlo.add"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+ "stablehlo.return"(%3) : (tensor<f32>) -> ()
+ }) {dimensions = dense<0> : tensor<1xi64>} : (tensor<2x3xf32>, tensor<f32>) -> tensor<3xf32>
+ check.expect_almost_eq_const(%res, dense<[5.0, 7.0, 9.0]> : tensor<3xf32>) : tensor<3xf32>
+ return
+}
+
+func.func @reduce_sum_2x3xf32_dim1() {
+ %0 = util.unfoldable_constant dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]> : tensor<2x3xf32>
+ %1 = util.unfoldable_constant dense<0.0> : tensor<f32>
+ %res = "stablehlo.reduce"(%0, %1) ( {
+ ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors
+ %3 = "stablehlo.add"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+ "stablehlo.return"(%3) : (tensor<f32>) -> ()
+ }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<2x3xf32>, tensor<f32>) -> tensor<2xf32>
+ check.expect_almost_eq_const(%res, dense<[6.0, 15.0]> : tensor<2xf32>) : tensor<2xf32>
+ return
+}
+
+func.func @reduce_sum_4x2x3xf32_dim0() {
+ %0 = util.unfoldable_constant dense<[
+ [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
+ [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
+ [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
+ [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]> : tensor<4x2x3xf32>
+ %1 = util.unfoldable_constant dense<0.0> : tensor<f32>
+ %res = "stablehlo.reduce"(%0, %1) ( {
+ ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors
+ %3 = "stablehlo.add"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+ "stablehlo.return"(%3) : (tensor<f32>) -> ()
+ }) {dimensions = dense<0> : tensor<1xi64>} : (tensor<4x2x3xf32>, tensor<f32>) -> tensor<2x3xf32>
+ check.expect_almost_eq_const(%res, dense<[[4.0, 8.0, 12.0],[16.0, 20.0, 24.0]]> : tensor<2x3xf32>) : tensor<2x3xf32>
+ return
+}
+
+func.func @reduce_sum_4x2x3xf32_dim1() {
+ %0 = util.unfoldable_constant dense<[
+ [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
+ [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
+ [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
+ [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]> : tensor<4x2x3xf32>
+ %1 = util.unfoldable_constant dense<0.0> : tensor<f32>
+ %res = "stablehlo.reduce"(%0, %1) ( {
+ ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors
+ %3 = "stablehlo.add"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+ "stablehlo.return"(%3) : (tensor<f32>) -> ()
+ }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<4x2x3xf32>, tensor<f32>) -> tensor<4x3xf32>
+ check.expect_almost_eq_const(%res, dense<[
+ [5.0, 7.0, 9.0],
+ [5.0, 7.0, 9.0],
+ [5.0, 7.0, 9.0],
+ [5.0, 7.0, 9.0]]> : tensor<4x3xf32>) : tensor<4x3xf32>
+ return
+}
+
+func.func @reduce_sum_4x2x3xf32_dim2() {
+ %0 = util.unfoldable_constant dense<[
+ [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
+ [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
+ [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
+ [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]> : tensor<4x2x3xf32>
+ %1 = util.unfoldable_constant dense<0.0> : tensor<f32>
+ %res = "stablehlo.reduce"(%0, %1) ( {
+ ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors
+ %3 = "stablehlo.add"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+ "stablehlo.return"(%3) : (tensor<f32>) -> ()
+ }) {dimensions = dense<2> : tensor<1xi64>} : (tensor<4x2x3xf32>, tensor<f32>) -> tensor<4x2xf32>
+ check.expect_almost_eq_const(%res, dense<[
+ [6.0, 15.0],
+ [6.0, 15.0],
+ [6.0, 15.0],
+ [6.0, 15.0]]> : tensor<4x2xf32>) : tensor<4x2xf32>
+ return
+}
+
+func.func @reduce_sum_4x2x3xf32_dims_0_1() {
+ %0 = util.unfoldable_constant dense<[
+ [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
+ [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
+ [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
+ [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]> : tensor<4x2x3xf32>
+ %1 = util.unfoldable_constant dense<0.0> : tensor<f32>
+ %res = "stablehlo.reduce"(%0, %1) ( {
+ ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors
+ %3 = "stablehlo.add"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+ "stablehlo.return"(%3) : (tensor<f32>) -> ()
+ }) {dimensions = dense<[0, 1]> : tensor<2xi64>} : (tensor<4x2x3xf32>, tensor<f32>) -> tensor<3xf32>
+ check.expect_almost_eq_const(%res, dense<[20.0, 28.0, 36.0]> : tensor<3xf32>) : tensor<3xf32>
+ return
+}
+
+func.func @reduce_sum_4x2x3xf32_dims_0_1_2() {
+ %0 = util.unfoldable_constant dense<[
+ [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
+ [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
+ [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
+ [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]> : tensor<4x2x3xf32>
+ %1 = util.unfoldable_constant dense<0.0> : tensor<f32>
+ %res = "stablehlo.reduce"(%0, %1) ( {
+ ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors
+ %3 = "stablehlo.add"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+ "stablehlo.return"(%3) : (tensor<f32>) -> ()
+ }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<4x2x3xf32>, tensor<f32>) -> tensor<f32>
+ check.expect_almost_eq_const(%res, dense<84.0> : tensor<f32>) : tensor<f32>
+ return
+}
+
+func.func @reduce_multi_result() {
+ %cst0 = stablehlo.constant dense<-2147483648> : tensor<i32>
+ %cst1 = stablehlo.constant dense<0> : tensor<i32>
+ %arg0 = util.unfoldable_constant dense<[[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16], [17, 18]]> : tensor<9x2xi32>
+ %arg1 = util.unfoldable_constant dense<[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13], [14, 15], [16, 17]]> : tensor<9x2xi32>
+ %res0, %res1 = "stablehlo.reduce"(%arg0, %arg1, %cst0, %cst1) ( {
+ ^bb0(%arg2: tensor<i32>, %arg3: tensor<i32>, %arg4: tensor<i32>, %arg5: tensor<i32>): // no predecessors
+ %0 = "stablehlo.compare"(%arg2, %arg4) {comparison_direction = #stablehlo<comparison_direction GE>} : (tensor<i32>, tensor<i32>) -> tensor<i1>
+ %1 = "stablehlo.select"(%0, %arg2, %arg4) : (tensor<i1>, tensor<i32>, tensor<i32>) -> tensor<i32>
+ %2 = "stablehlo.compare"(%arg2, %arg4) {comparison_direction = #stablehlo<comparison_direction EQ>} : (tensor<i32>, tensor<i32>) -> tensor<i1>
+ %3 = stablehlo.minimum %arg3, %arg5 : tensor<i32>
+ %4 = "stablehlo.select"(%0, %arg3, %arg5) : (tensor<i1>, tensor<i32>, tensor<i32>) -> tensor<i32>
+ %5 = "stablehlo.select"(%2, %3, %4) : (tensor<i1>, tensor<i32>, tensor<i32>) -> tensor<i32>
+ "stablehlo.return"(%1, %5) : (tensor<i32>, tensor<i32>) -> ()
+ }) {dimensions = dense<0> : tensor<1xi64>} : (tensor<9x2xi32>, tensor<9x2xi32>, tensor<i32>, tensor<i32>) -> (tensor<2xi32>, tensor<2xi32>)
+ check.expect_eq_const(%res0, dense<[17, 18]> : tensor<2xi32>) : tensor<2xi32>
+ check.expect_eq_const(%res1, dense<[16, 17]> : tensor<2xi32>) : tensor<2xi32>
+ return
+}
+
+func.func @reduce_dim_1() {
+ %0 = util.unfoldable_constant dense<[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]> : tensor<2x5xi32>
+ %1 = util.unfoldable_constant dense<10> : tensor<i32>
+ %2 = "stablehlo.reduce"(%0, %1) ( {
+ ^bb0(%arg0 : tensor<i32>, %arg1 : tensor<i32>):
+ %3 = "stablehlo.add"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+ "stablehlo.return"(%3) : (tensor<i32>) -> ()
+ }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<2x5xi32>, tensor<i32>) -> tensor<2xi32>
+ check.expect_eq_const(%2, dense<[25, 50]> : tensor<2xi32>) : tensor<2xi32>
+ return
+}
+
+// Constants get folded into the generated linalg reduction ops. Check to
+// make sure this works as expected.
+func.func @reduce_dim_1_const() {
+ %0 = util.unfoldable_constant dense<[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]> : tensor<2x5xi32>
+ %1 = arith.constant dense<10> : tensor<i32>
+ %2 = "stablehlo.reduce"(%0, %1) ( {
+ ^bb0(%arg0 : tensor<i32>, %arg1 : tensor<i32>):
+ %3 = "stablehlo.add"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+ "stablehlo.return"(%3) : (tensor<i32>) -> ()
+ }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<2x5xi32>, tensor<i32>) -> tensor<2xi32>
+ check.expect_eq_const(%2, dense<[25, 50]> : tensor<2xi32>) : tensor<2xi32>
+ return
+}
+
+func.func @reduce_dim_0() {
+ %0 = util.unfoldable_constant dense<[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]> : tensor<1x10xi32>
+ %1 = util.unfoldable_constant dense<10> : tensor<i32>
+ %2 = "stablehlo.reduce"(%0, %1) ( {
+ ^bb0(%arg0 : tensor<i32>, %arg1 : tensor<i32>):
+ %3 = "stablehlo.add"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+ "stablehlo.return"(%3) : (tensor<i32>) -> ()
+ }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<1x10xi32>, tensor<i32>) -> tensor<1xi32>
+ check.expect_eq_const(%2, dense<[65]> : tensor<1xi32>) : tensor<1xi32>
+ return
+}
+
+func.func @reduce_to_scalar() {
+ %0 = util.unfoldable_constant dense<[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]> : tensor<10xi32>
+ %1 = util.unfoldable_constant dense<10> : tensor<i32>
+ %2 = "stablehlo.reduce"(%0, %1) ( {
+ ^bb0(%arg0 : tensor<i32>, %arg1 : tensor<i32>):
+ %3 = "stablehlo.add"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+ "stablehlo.return"(%3) : (tensor<i32>) -> ()
+ }) {dimensions = dense<0> : tensor<1xi64>} : (tensor<10xi32>, tensor<i32>) -> tensor<i32>
+ check.expect_eq_const(%2, dense<65> : tensor<i32>) : tensor<i32>
+ return
+}
diff --git a/tests/e2e/stablehlo_ops/reduce_window.mlir b/tests/e2e/stablehlo_ops/reduce_window.mlir
new file mode 100644
index 0000000..568d51c
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/reduce_window.mlir
@@ -0,0 +1,98 @@
+func.func @reduce_window_nonoverlapping_1x4x6x1xf32() {
+ %0 = util.unfoldable_constant dense<[[[[ 1.0], [ 2.0], [ 3.0], [ 4.0], [ 5.0], [ 6.0]],
+ [[ 7.0], [ 8.0], [ 9.0], [10.0], [11.0], [12.0]],
+ [[13.0], [14.0], [15.0], [16.0], [17.0], [18.0]],
+ [[19.0], [20.0], [21.0], [22.0], [23.0], [24.0]]]]> : tensor<1x4x6x1xf32>
+ %1 = util.unfoldable_constant dense<0.0> : tensor<f32>
+ %res = "stablehlo.reduce_window"(%0, %1) ( {
+ ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors
+ %3 = "stablehlo.add"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+ "stablehlo.return"(%3) : (tensor<f32>) -> ()
+ }) {window_dimensions = dense<[1, 2, 3, 1]> : tensor<4xi64>,
+ window_strides = dense<[1, 2, 3, 1]> : tensor<4xi64>} : (tensor<1x4x6x1xf32>, tensor<f32>) -> tensor<1x2x2x1xf32>
+ check.expect_eq_const(%res, dense<[[[[30.0], [48.0]],[[102.0], [120.0]]]]> : tensor<1x2x2x1xf32>) : tensor<1x2x2x1xf32>
+ return
+}
+
+func.func @reduce_window_overlapping_4x6xf32() {
+ %0 = util.unfoldable_constant dense<[[[[ 1.0], [ 2.0], [ 3.0], [ 4.0], [ 5.0], [ 6.0]],
+ [[ 7.0], [ 8.0], [ 9.0], [10.0], [11.0], [12.0]],
+ [[13.0], [14.0], [15.0], [16.0], [17.0], [18.0]],
+ [[19.0], [20.0], [21.0], [22.0], [23.0], [24.0]]]]> : tensor<1x4x6x1xf32>
+ %1 = util.unfoldable_constant dense<0.0> : tensor<f32>
+ %res = "stablehlo.reduce_window"(%0, %1) ( {
+ ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors
+ %3 = "stablehlo.add"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+ "stablehlo.return"(%3) : (tensor<f32>) -> ()
+ }) {window_dimensions = dense<[1, 2, 3, 1]> : tensor<4xi64>,
+ window_strides = dense<[1, 1, 1, 1]> : tensor<4xi64>} : (tensor<1x4x6x1xf32>, tensor<f32>) -> tensor<1x3x4x1xf32>
+ check.expect_eq_const(%res, dense<[[
+ [[ 30.0], [ 36.0], [ 42.0], [ 48.0]],
+ [[ 66.0], [ 72.0], [ 78.0], [ 84.0]],
+ [[102.0], [108.0], [114.0], [120.0]]]]> : tensor<1x3x4x1xf32>) : tensor<1x3x4x1xf32>
+ return
+}
+
+func.func @reduce_window_max_4x6xf32() {
+ %0 = util.unfoldable_constant dense<[[[[ 1.0], [ 2.0], [ 3.0], [ 4.0], [ 5.0], [ 6.0]],
+ [[ 7.0], [ 8.0], [ 9.0], [10.0], [11.0], [12.0]],
+ [[13.0], [14.0], [15.0], [16.0], [17.0], [18.0]],
+ [[19.0], [20.0], [21.0], [22.0], [23.0], [24.0]]]]> : tensor<1x4x6x1xf32>
+ %1 = util.unfoldable_constant dense<0.0> : tensor<f32>
+ %res = "stablehlo.reduce_window"(%0, %1) ( {
+ ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors
+ %3 = "stablehlo.maximum"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+ "stablehlo.return"(%3) : (tensor<f32>) -> ()
+ }) {window_dimensions = dense<[1, 2, 3, 1]> : tensor<4xi64>,
+ window_strides = dense<[1, 2, 3, 1]> : tensor<4xi64>} : (tensor<1x4x6x1xf32>, tensor<f32>) -> tensor<1x2x2x1xf32>
+ check.expect_almost_eq_const(%res, dense<[[[[9.0], [12.0]], [[21.0], [24.0]]]]> : tensor<1x2x2x1xf32>) : tensor<1x2x2x1xf32>
+ return
+}
+
+func.func @reduce_window_min_4x6xf32() {
+ %0 = util.unfoldable_constant dense<[[[[ 1.0], [ 2.0], [ 3.0], [ 4.0], [ 5.0], [ 6.0]],
+ [[ 7.0], [ 8.0], [ 9.0], [10.0], [11.0], [12.0]],
+ [[13.0], [14.0], [15.0], [16.0], [17.0], [18.0]],
+ [[19.0], [20.0], [21.0], [22.0], [23.0], [24.0]]]]> : tensor<1x4x6x1xf32>
+ %1 = util.unfoldable_constant dense<14.0> : tensor<f32>
+ %res = "stablehlo.reduce_window"(%0, %1) ( {
+ ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors
+ %3 = "stablehlo.minimum"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+ "stablehlo.return"(%3) : (tensor<f32>) -> ()
+ }) {window_dimensions = dense<[1, 2, 3, 1]> : tensor<4xi64>,
+ window_strides = dense<[1, 2, 3, 1]> : tensor<4xi64>} : (tensor<1x4x6x1xf32>, tensor<f32>) -> tensor<1x2x2x1xf32>
+ check.expect_almost_eq_const(%res, dense<[[[[1.0], [4.0]], [[13.0], [14.0]]]]> : tensor<1x2x2x1xf32>) : tensor<1x2x2x1xf32>
+ return
+}
+
+func.func @reduce_window_max_with_padding_4x6xf32() {
+ %0 = util.unfoldable_constant dense<[[[[ 1.0], [ 2.0], [ 3.0], [ 4.0], [ 5.0], [ 6.0]],
+ [[ 7.0], [ 8.0], [ 9.0], [10.0], [11.0], [12.0]],
+ [[13.0], [14.0], [15.0], [16.0], [17.0], [18.0]],
+ [[19.0], [20.0], [21.0], [22.0], [23.0], [24.0]]]]> : tensor<1x4x6x1xf32>
+ %1 = util.unfoldable_constant dense<0.0> : tensor<f32>
+ %res = "stablehlo.reduce_window"(%0, %1) ( {
+ ^bb0(%arg0: tensor<f32>, %arg1: tensor<f32>): // no predecessors
+ %3 = "stablehlo.maximum"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+ "stablehlo.return"(%3) : (tensor<f32>) -> ()
+ }) {window_dimensions = dense<[1, 2, 3, 1]> : tensor<4xi64>,
+ window_strides = dense<[1, 2, 3, 1]> : tensor<4xi64>,
+ padding = dense<[[0, 0], [1, 1], [0, 0], [0, 0]]> : tensor<4x2xi64>} : (tensor<1x4x6x1xf32>, tensor<f32>) -> tensor<1x3x2x1xf32>
+ check.expect_almost_eq_const(%res, dense<[[[[3.0], [6.0]], [[15.0], [18.0]], [[21.0], [24.0]]]]> : tensor<1x3x2x1xf32>) : tensor<1x3x2x1xf32>
+ return
+}
+
+func.func @cumsum_f32() {
+ %0 = stablehlo.constant dense<0.000000e+00> : tensor<f32>
+ %1 = util.unfoldable_constant dense<1.0> : tensor<2x2x2xf32>
+ %res = "stablehlo.reduce_window"(%1, %0) ({
+ ^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>):
+ %4 = stablehlo.add %arg1, %arg2 : tensor<f32>
+ "stablehlo.return"(%4) : (tensor<f32>) -> ()
+ }) {padding = dense<[[1, 0], [0, 0], [0, 0]]> : tensor<3x2xi64>,
+ window_dimensions = dense<[2, 1, 1]> : tensor<3xi64>,
+ window_strides = dense<1> : tensor<3xi64>
+ } : (tensor<2x2x2xf32>, tensor<f32>) -> tensor<2x2x2xf32>
+ check.expect_almost_eq_const(%res, dense<[[[1.0, 1.0], [1.0, 1.0]], [[2.0, 2.0], [2.0, 2.0]]]> : tensor<2x2x2xf32>) : tensor<2x2x2xf32>
+ return
+}
diff --git a/tests/e2e/stablehlo_ops/remainder.mlir b/tests/e2e/stablehlo_ops/remainder.mlir
new file mode 100644
index 0000000..8ab0ea6
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/remainder.mlir
@@ -0,0 +1,63 @@
+func.func @scalar() {
+ %input1 = util.unfoldable_constant dense<16.0> : tensor<f32>
+ %input2 = util.unfoldable_constant dense<7.0> : tensor<f32>
+ %result = "stablehlo.remainder"(%input1, %input2) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+ check.expect_almost_eq_const(%result, dense<2.0> : tensor<f32>) : tensor<f32>
+ return
+}
+
+func.func @tensor() {
+ %input1 = util.unfoldable_constant dense<[16.0, 17.0, 18.0]> : tensor<3xf32>
+ %input2 = util.unfoldable_constant dense<[7.0, 8.0, 9.0]> : tensor<3xf32>
+ %result = "stablehlo.remainder"(%input1, %input2) : (tensor<3xf32>, tensor<3xf32>) -> tensor<3xf32>
+ check.expect_almost_eq_const(%result, dense<[2.0, 1.0, 0.0]> : tensor<3xf32>) : tensor<3xf32>
+ return
+}
+
+func.func @negative_den() {
+ %input1 = util.unfoldable_constant dense<16.0> : tensor<f32>
+ %input2 = util.unfoldable_constant dense<-7.0> : tensor<f32>
+ %result = "stablehlo.remainder"(%input1, %input2) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+ check.expect_almost_eq_const(%result, dense<2.0> : tensor<f32>) : tensor<f32>
+ return
+}
+
+func.func @negative_num() {
+ %input1 = util.unfoldable_constant dense<-16.0> : tensor<f32>
+ %input2 = util.unfoldable_constant dense<7.0> : tensor<f32>
+ %result = "stablehlo.remainder"(%input1, %input2) : (tensor<f32>, tensor<f32>) -> tensor<f32>
+ check.expect_almost_eq_const(%result, dense<-2.0> : tensor<f32>) : tensor<f32>
+ return
+}
+
+func.func @scalar_int() {
+ %input1 = util.unfoldable_constant dense<16> : tensor<i32>
+ %input2 = util.unfoldable_constant dense<7> : tensor<i32>
+ %result = "stablehlo.remainder"(%input1, %input2) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+ check.expect_eq_const(%result, dense<2> : tensor<i32>) : tensor<i32>
+ return
+}
+
+func.func @tensor_int() {
+ %input1 = util.unfoldable_constant dense<[16, 17, 18]> : tensor<3xi32>
+ %input2 = util.unfoldable_constant dense<[7, 8, 9]> : tensor<3xi32>
+ %result = "stablehlo.remainder"(%input1, %input2) : (tensor<3xi32>, tensor<3xi32>) -> tensor<3xi32>
+ check.expect_eq_const(%result, dense<[2, 1, 0]> : tensor<3xi32>) : tensor<3xi32>
+ return
+}
+
+func.func @negative_den_int() {
+ %input1 = util.unfoldable_constant dense<16> : tensor<i32>
+ %input2 = util.unfoldable_constant dense<-7> : tensor<i32>
+ %result = "stablehlo.remainder"(%input1, %input2) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+ check.expect_eq_const(%result, dense<2> : tensor<i32>) : tensor<i32>
+ return
+}
+
+func.func @negative_num_int() {
+ %input1 = util.unfoldable_constant dense<-16> : tensor<i32>
+ %input2 = util.unfoldable_constant dense<7> : tensor<i32>
+ %result = "stablehlo.remainder"(%input1, %input2) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+ check.expect_eq_const(%result, dense<-2> : tensor<i32>) : tensor<i32>
+ return
+}
diff --git a/tests/e2e/stablehlo_ops/reshape.mlir b/tests/e2e/stablehlo_ops/reshape.mlir
new file mode 100644
index 0000000..4be3122
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/reshape.mlir
@@ -0,0 +1,32 @@
+func.func @reshape_1D_2D() {
+ %input = util.unfoldable_constant dense<[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]> : tensor<12xi32>
+ %result = "stablehlo.reshape"(%input) : (tensor<12xi32>) -> tensor<3x4xi32>
+ check.expect_eq_const(%result, dense<[
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]> : tensor<3x4xi32>) : tensor<3x4xi32>
+ return
+}
+
+// func.func @reshape_1D_3D() {
+// %input = util.unfoldable_constant dense<[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]> : tensor<12xi32>
+// %result = "stablehlo.reshape"(%input) : (tensor<12xi32>) -> tensor<2x2x3xi32>
+// check.expect_eq_const(%result, dense<[
+// [[1, 2, 3], [4, 5, 6]],
+// [[7, 8, 9], [10, 11, 12]]]> : tensor<2x2x3xi32>) : tensor<2x2x3xi32>
+// return
+// }
+
+// func.func @reshape_2D_3D() {
+// %input = util.unfoldable_constant dense<[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]> : tensor<2x6xi32>
+// %result = "stablehlo.reshape"(%input) : (tensor<2x6xi32>) -> tensor<2x1x6xi32>
+// check.expect_eq_const(%result, dense<[[[1, 2, 3, 4, 5, 6]], [[7, 8, 9, 10, 11, 12]]]> : tensor<2x1x6xi32>) : tensor<2x1x6xi32>
+// return
+// }
+
+// func.func @reshape_3D_1D() {
+// %input = util.unfoldable_constant dense<[[[1, 2, 3, 4, 5, 6]], [[7, 8, 9, 10, 11, 12]]]> : tensor<2x1x6xi32>
+// %result = "stablehlo.reshape"(%input) : (tensor<2x1x6xi32>) -> tensor<2x6xi32>
+// check.expect_eq_const(%result, dense<[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]> : tensor<2x6xi32>) : tensor<2x6xi32>
+// return
+// }
diff --git a/tests/e2e/stablehlo_ops/rng_uniform.mlir b/tests/e2e/stablehlo_ops/rng_uniform.mlir
new file mode 100644
index 0000000..e0c3a81
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/rng_uniform.mlir
@@ -0,0 +1,34 @@
+// Note that these tests use a stateless random generator, so they have fixed results.
+func.func @rng_uniform_1d() {
+ %min = util.unfoldable_constant dense<-10.0> : tensor<f32>
+ %max = util.unfoldable_constant dense<10.0> : tensor<f32>
+ %shape = util.unfoldable_constant dense<[10]> : tensor<1xi32>
+ %res = "stablehlo.rng"(%min, %max, %shape) {rng_distribution = #stablehlo<rng_distribution UNIFORM>} : (tensor<f32>, tensor<f32>, tensor<1xi32>) -> tensor<10xf32>
+ check.expect_almost_eq_const(%res, dense<[
+ -9.99994, -4.8613, 0.277344, 5.41599, -9.44537, -4.30673, 0.831918, 5.97056, -8.8908, -3.75215
+ ]> : tensor<10xf32>) : tensor<10xf32>
+ return
+}
+
+func.func @rng_uniform_2d() {
+ %min = util.unfoldable_constant dense<-10.0> : tensor<f32>
+ %max = util.unfoldable_constant dense<10.0> : tensor<f32>
+ %shape = util.unfoldable_constant dense<[3, 3]> : tensor<2xi32>
+ %res = "stablehlo.rng"(%min, %max, %shape) {rng_distribution = #stablehlo<rng_distribution UNIFORM>} : (tensor<f32>, tensor<f32>, tensor<2xi32>) -> tensor<3x3xf32>
+ check.expect_almost_eq_const(%res, dense<[
+ [6.55154, -8.30982, -3.17117],
+ [1.75741, 6.89606, -7.9653],
+ [-3.03671, 2.10193, 7.24057]]> : tensor<3x3xf32>) : tensor<3x3xf32>
+ return
+}
+
+func.func @rng_uniform_3d() {
+ %min = util.unfoldable_constant dense<-10.0> : tensor<f32>
+ %max = util.unfoldable_constant dense<10.0> : tensor<f32>
+ %shape = util.unfoldable_constant dense<[2, 2, 2]> : tensor<3xi32>
+ %res = "stablehlo.rng"(%min, %max, %shape) {rng_distribution = #stablehlo<rng_distribution UNIFORM>} : (tensor<f32>, tensor<f32>, tensor<3xi32>) -> tensor<2x2x2xf32>
+ check.expect_almost_eq_const(%res, dense<[
+ [[3.04814, 8.18679], [-1.74598, 3.39266]],
+ [[-6.91349, -1.77484], [8.29239, -6.56897]]]> : tensor<2x2x2xf32>) : tensor<2x2x2xf32>
+ return
+}
diff --git a/tests/e2e/stablehlo_ops/round.mlir b/tests/e2e/stablehlo_ops/round.mlir
new file mode 100644
index 0000000..aebd659
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/round.mlir
@@ -0,0 +1,7 @@
+func.func @tensor() {
+ %input = util.unfoldable_constant dense<[-0.7, -0.5, -0.2, 0.0, 0.2, 0.5, 0.7]> : tensor<7xf32>
+ %result = "stablehlo.round_nearest_afz"(%input) : (tensor<7xf32>) -> tensor<7xf32>
+ check.expect_almost_eq_const(%result, dense<[-1.0, -1.0, 0.0, 0.0, 0.0, 1.0, 1.0]> : tensor<7xf32>) : tensor<7xf32>
+ return
+}
+
diff --git a/tests/e2e/stablehlo_ops/rsqrt.mlir b/tests/e2e/stablehlo_ops/rsqrt.mlir
new file mode 100644
index 0000000..049ee85
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/rsqrt.mlir
@@ -0,0 +1,13 @@
+func.func @tensor() {
+ %input = util.unfoldable_constant dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf32>
+ %result = "stablehlo.rsqrt"(%input) : (tensor<4xf32>) -> tensor<4xf32>
+ check.expect_almost_eq_const(%result, dense<[1.0, 0.707107, 0.57735, 0.5]> : tensor<4xf32>) : tensor<4xf32>
+ return
+}
+
+func.func @scalar() {
+ %input = util.unfoldable_constant dense<16.0> : tensor<f32>
+ %result = "stablehlo.rsqrt"(%input) : (tensor<f32>) -> tensor<f32>
+ check.expect_almost_eq_const(%result, dense<0.25> : tensor<f32>) : tensor<f32>
+ return
+}
diff --git a/tests/e2e/stablehlo_ops/select.mlir b/tests/e2e/stablehlo_ops/select.mlir
new file mode 100644
index 0000000..f0e435e
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/select.mlir
@@ -0,0 +1,10 @@
+func.func @select() {
+ %input = util.unfoldable_constant dense<[1, 0, 1, 0]> : tensor<4xi1>
+ %zeros = util.unfoldable_constant dense<0> : tensor<4xi1>
+ %cond = "stablehlo.compare"(%input, %zeros) {comparison_direction = #stablehlo<comparison_direction GT>} : (tensor<4xi1>, tensor<4xi1>) -> tensor<4xi1>
+ %lhs = util.unfoldable_constant dense<[1, 2, 3, 4]> : tensor<4xi32>
+ %rhs = util.unfoldable_constant dense<[5, 6, 7, 8]> : tensor<4xi32>
+ %result = "stablehlo.select"(%cond, %lhs, %rhs) : (tensor<4xi1>, tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32>
+  check.expect_eq_const(%result, dense<[1, 6, 3, 8]> : tensor<4xi32>) : tensor<4xi32>
+ return
+}
diff --git a/tests/e2e/stablehlo_ops/sine.mlir b/tests/e2e/stablehlo_ops/sine.mlir
new file mode 100644
index 0000000..2cedaa4
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/sine.mlir
@@ -0,0 +1,13 @@
+func.func @tensor() {
+ %input = util.unfoldable_constant dense<[0.0, 1.0, 1.5, 2.0]> : tensor<4xf32>
+ %result = "stablehlo.sine"(%input) : (tensor<4xf32>) -> tensor<4xf32>
+ check.expect_almost_eq_const(%result, dense<[0.0, 0.8415, 0.9975, 0.9093]> : tensor<4xf32>) : tensor<4xf32>
+ return
+}
+
+func.func @scalar() {
+ %input = util.unfoldable_constant dense<3.0> : tensor<f32>
+ %result = "stablehlo.sine"(%input) : (tensor<f32>) -> tensor<f32>
+ check.expect_almost_eq_const(%result, dense<0.14112> : tensor<f32>) : tensor<f32>
+ return
+}
diff --git a/tests/e2e/stablehlo_ops/slice.mlir b/tests/e2e/stablehlo_ops/slice.mlir
new file mode 100644
index 0000000..2f0120d
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/slice.mlir
@@ -0,0 +1,60 @@
+func.func @slice_whole_buffer() {
+ %input = util.unfoldable_constant dense<[
+ [01, 02, 03, 04],
+ [05, 06, 07, 08],
+ [09, 10, 11, 12]]> : tensor<3x4xi32>
+ %result = "stablehlo.slice"(%input) {
+ start_indices = dense<[0, 0]> : tensor<2xi64>,
+ limit_indices = dense<[3, 4]> : tensor<2xi64>,
+ strides = dense<1> : tensor<2xi64>
+ } : (tensor<3x4xi32>) -> tensor<3x4xi32>
+ check.expect_eq_const(%result, dense<[
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]> : tensor<3x4xi32>) : tensor<3x4xi32>
+ return
+}
+
+func.func @slice_whole_stride() {
+ %input = util.unfoldable_constant dense<[
+ [01, 02, 03, 04],
+ [05, 06, 07, 08],
+ [09, 10, 11, 12]]> : tensor<3x4xi32>
+ %result = "stablehlo.slice"(%input) {
+ start_indices = dense<[1, 0]> : tensor<2xi64>,
+ limit_indices = dense<[2, 4]> : tensor<2xi64>,
+ strides = dense<1> : tensor<2xi64>
+ } : (tensor<3x4xi32>) -> tensor<1x4xi32>
+ check.expect_eq_const(%result, dense<[[5, 6, 7, 8]]> : tensor<1x4xi32>) : tensor<1x4xi32>
+ return
+}
+
+func.func @slice_stride_part() {
+ %input = util.unfoldable_constant dense<[
+ [01, 02, 03, 04],
+ [05, 06, 07, 08],
+ [09, 10, 11, 12]]> : tensor<3x4xi32>
+ %result = "stablehlo.slice"(%input) {
+ start_indices = dense<[1, 1]> : tensor<2xi64>,
+ limit_indices = dense<[2, 3]> : tensor<2xi64>,
+ strides = dense<1> : tensor<2xi64>
+ } : (tensor<3x4xi32>) -> tensor<1x2xi32>
+ check.expect_eq_const(%result, dense<[[6, 7]]> : tensor<1x2xi32>) : tensor<1x2xi32>
+ return
+}
+
+func.func @slice_multi_stride() {
+ %input = util.unfoldable_constant dense<[
+ [01, 02, 03, 04],
+ [05, 06, 07, 08],
+ [09, 10, 11, 12]]> : tensor<3x4xi32>
+ %result = "stablehlo.slice"(%input) {
+ start_indices = dense<[1, 0]> : tensor<2xi64>,
+ limit_indices = dense<[3, 4]> : tensor<2xi64>,
+ strides = dense<1> : tensor<2xi64>
+ } : (tensor<3x4xi32>) -> tensor<2x4xi32>
+ check.expect_eq_const(%result, dense<[
+    [05, 06, 07, 08],
+    [09, 10, 11, 12]]> : tensor<2x4xi32>) : tensor<2x4xi32>
+ return
+}
diff --git a/tests/e2e/stablehlo_ops/sqrt.mlir b/tests/e2e/stablehlo_ops/sqrt.mlir
new file mode 100644
index 0000000..8377316
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/sqrt.mlir
@@ -0,0 +1,13 @@
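+// Expected roots are rounded approximations; expect_almost_eq_const absorbs
+// the rounding error.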
+func.func @tensor() {
+ %input = util.unfoldable_constant dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf32>
+ %result = "stablehlo.sqrt"(%input) : (tensor<4xf32>) -> tensor<4xf32>
+ check.expect_almost_eq_const(%result, dense<[1.0, 1.4142, 1.7321, 2.0]> : tensor<4xf32>) : tensor<4xf32>
+ return
+}
+
+func.func @scalar() {
+ %input = util.unfoldable_constant dense<16.0> : tensor<f32>
+ %result = "stablehlo.sqrt"(%input) : (tensor<f32>) -> tensor<f32>
+ check.expect_almost_eq_const(%result, dense<4.0> : tensor<f32>) : tensor<f32>
+ return
+}
diff --git a/tests/e2e/stablehlo_ops/subtract.mlir b/tests/e2e/stablehlo_ops/subtract.mlir
new file mode 100644
index 0000000..b4cb249
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/subtract.mlir
@@ -0,0 +1,15 @@
+func.func @i32() {
+ %0 = util.unfoldable_constant dense<[5, 6, 3, 4]> : tensor<4xi32>
+ %1 = util.unfoldable_constant dense<[1, 4, 7, 6]> : tensor<4xi32>
+ %result = "stablehlo.subtract"(%0, %1) : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32>
+ check.expect_eq_const(%result, dense<[4, 2, -4, -2]> : tensor<4xi32>) : tensor<4xi32>
+ return
+}
+
+func.func @f32() {
+ %0 = util.unfoldable_constant dense<[5.0, 6.0, 3.0, 4.0]> : tensor<4xf32>
+ %1 = util.unfoldable_constant dense<[1.0, 4.0, 7.0, 6.0]> : tensor<4xf32>
+ %result = "stablehlo.subtract"(%0, %1) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
+ check.expect_almost_eq_const(%result, dense<[4.0, 2.0, -4.0, -2.0]> : tensor<4xf32>) : tensor<4xf32>
+ return
+}
diff --git a/tests/e2e/stablehlo_ops/tanh.mlir b/tests/e2e/stablehlo_ops/tanh.mlir
new file mode 100644
index 0000000..cb77e84
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/tanh.mlir
@@ -0,0 +1,10 @@
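+// tanh saturates for large-magnitude inputs (±100.0 -> ±1.0); the remaining
+// expected values are approximated to four decimal places.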
+func.func @tanh() {
+ %input = util.unfoldable_constant dense<
+ [[-100.0, -5.0, -0.5, 1.0],
+ [ 1.2, 2.0, 3.0, 100.0]]> : tensor<2x4xf32>
+ %result = "stablehlo.tanh"(%input) : (tensor<2x4xf32>) -> tensor<2x4xf32>
+ check.expect_almost_eq_const(%result, dense<
+ [[-1.0000, -0.9999, -0.4622, 0.7616],
+ [ 0.8337, 0.9640, 0.9951, 1.0000]]> : tensor<2x4xf32>) : tensor<2x4xf32>
+ return
+}
diff --git a/tests/e2e/stablehlo_ops/torch_index_select.mlir b/tests/e2e/stablehlo_ops/torch_index_select.mlir
new file mode 100644
index 0000000..499a7d2
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/torch_index_select.mlir
@@ -0,0 +1,45 @@
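+// torch_index_select gathers slices of the input along `dim` using the
+// values in `indices`. A rank-1 index tensor keeps the indexed dimension,
+// while the scalar index in @torch_select_index_2 drops it from the result.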
+func.func @torch_select_index_0() {
+ %input = util.unfoldable_constant dense<[
+ [[01, 02, 03, 04, 05]],
+ [[06, 07, 08, 09, 10]],
+ [[11, 12, 13, 14, 15]],
+ [[16, 17, 18, 19, 20]],
+ [[21, 22, 23, 24, 25]]]> : tensor<5x1x5xi32>
+ %indices = util.unfoldable_constant dense<[0, 2]> : tensor<2xi32>
+ %res = "stablehlo.torch_index_select"(%input, %indices) {
+ dim = 0 : i64,
+ batch_dims = 0 : i64
+ } : (tensor<5x1x5xi32>, tensor<2xi32>) -> tensor<2x1x5xi32>
+ check.expect_eq_const(%res, dense<[[[01, 02, 03, 04, 05]], [[11, 12, 13, 14, 15]]]> : tensor<2x1x5xi32>) : tensor<2x1x5xi32>
+ return
+}
+
+func.func @torch_select_index_1() {
+ %input = util.unfoldable_constant dense<[
+ [[ 1, 2],[ 3, 4]],
+ [[ 5, 6],[ 7, 8]],
+ [[ 9, 10],[11, 12]]]> : tensor<3x2x2xi32>
+ %indices = util.unfoldable_constant dense<[0, 1]> : tensor<2xi32>
+ %res = "stablehlo.torch_index_select"(%input, %indices) {
+ dim = 1 : i64,
+ batch_dims = 0 : i64
+ } : (tensor<3x2x2xi32>, tensor<2xi32>) -> tensor<3x2x2xi32>
+  check.expect_eq_const(%res, dense<[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]]> : tensor<3x2x2xi32>) : tensor<3x2x2xi32>
+ return
+}
+
+func.func @torch_select_index_2() {
+ %input = util.unfoldable_constant dense<[
+ [[01, 02, 03, 04, 05]],
+ [[06, 07, 08, 09, 10]],
+ [[11, 12, 13, 14, 15]],
+ [[16, 17, 18, 19, 20]],
+ [[21, 22, 23, 24, 25]]]> : tensor<5x1x5xi32>
+ %indices = util.unfoldable_constant dense<0> : tensor<i32>
+ %res = "stablehlo.torch_index_select"(%input, %indices) {
+ dim = 0 : i64,
+ batch_dims = 0 : i64
+ } : (tensor<5x1x5xi32>, tensor<i32>) -> tensor<1x5xi32>
+ check.expect_eq_const(%res, dense<[[01, 02, 03, 04, 05]]> : tensor<1x5xi32>) : tensor<1x5xi32>
+ return
+}
diff --git a/tests/e2e/stablehlo_ops/transpose.mlir b/tests/e2e/stablehlo_ops/transpose.mlir
new file mode 100644
index 0000000..d709f2e
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/transpose.mlir
@@ -0,0 +1,29 @@
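+// Dimension d of the transposed result is dimension permutation[d] of the
+// input.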
+func.func @transpose_2d() {
+ %input = util.unfoldable_constant dense<[[1, 2, 3],
+ [4, 5, 6]]> : tensor<2x3xi32>
+ %0 = "stablehlo.transpose"(%input) {
+ permutation = dense<[1, 0]> : tensor<2xi64>
+ } : (tensor<2x3xi32>) -> tensor<3x2xi32>
+ check.expect_eq_const(%0, dense<[[1, 4],
+ [2, 5],
+ [3, 6]]> : tensor<3x2xi32>) : tensor<3x2xi32>
+ return
+}
+
+func.func @transpose_3d() {
+ %input = util.unfoldable_constant dense<[[[ 1, 2, 3],
+ [ 4, 5, 6]],
+ [[ 7, 8, 9],
+ [10, 11, 12]]]> : tensor<2x2x3xi32>
+ %0 = "stablehlo.transpose"(%input) {
+ permutation = dense<[0, 2, 1]> : tensor<3xi64>
+ } : (tensor<2x2x3xi32>) -> tensor<2x3x2xi32>
+ check.expect_eq_const(%0, dense<[
+ [[ 1, 4],
+ [ 2, 5],
+ [ 3, 6]],
+ [[ 7, 10],
+ [ 8, 11],
+ [ 9, 12]]]> : tensor<2x3x2xi32>) : tensor<2x3x2xi32>
+ return
+}
diff --git a/tests/e2e/stablehlo_ops/while.mlir b/tests/e2e/stablehlo_ops/while.mlir
new file mode 100644
index 0000000..691aaf0
--- /dev/null
+++ b/tests/e2e/stablehlo_ops/while.mlir
@@ -0,0 +1,17 @@
+// NOTE: the original stablehlo.while op has already been legalized to CFG
+// form by the TF import tools.
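+// The loop doubles the value while it is < %bound: 1 -> 2 -> 4, so the
+// value reaching ^bb3 is 4.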
+func.func @while() {
+ %start = util.unfoldable_constant dense<1> : tensor<i32>
+ %bound = util.unfoldable_constant dense<3> : tensor<i32>
+ cf.br ^bb1(%start : tensor<i32>)
+^bb1(%2: tensor<i32>):
+ %3 = "stablehlo.compare"(%2, %bound) {comparison_direction = #stablehlo<comparison_direction LT>} : (tensor<i32>, tensor<i32>) -> tensor<i1>
+ %4 = tensor.extract %3[] : tensor<i1>
+ cf.cond_br %4, ^bb2(%2 : tensor<i32>), ^bb3(%2 : tensor<i32>)
+^bb2(%5: tensor<i32>):
+ %6 = stablehlo.add %5, %5 : tensor<i32>
+ cf.br ^bb1(%6 : tensor<i32>)
+^bb3(%7: tensor<i32>):
+ check.expect_eq_const(%7, dense<4> : tensor<i32>) : tensor<i32>
+ return
+}