Fix reshape and rem tests and add llvm-ir backend RUN lines.
Without these splits, the llvm-ir backend fails because it JITs the same module multiple times.
Talking with @Ben, this is expected but will be fixed in a follow-up PR.
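
For reference, the resulting file layout looks roughly like the sketch below (pieced together from the reshape hunks in this diff; the @reshape_1D_2D body is reconstructed and only illustrative). Each // ----- marker splits the file so that each function is compiled and run as its own module, which is what keeps the llvm-ir backend from jitting the same module more than once.

    // RUN: iree-run-mlir -iree-hal-target-backends=vmla %s -input-value="12xf32=[1 2 3 4 5 6 7 8 9 10 11 12]" | IreeFileCheck %s
    // RUN: [[ $IREE_LLVMJIT_DISABLE == 1 ]] || (iree-run-mlir -iree-hal-target-backends=llvm-ir %s -input-value="12xf32=[1 2 3 4 5 6 7 8 9 10 11 12]" | IreeFileCheck %s)

    // CHECK-LABEL: EXEC @reshape_1D_2D
    func @reshape_1D_2D(%arg : tensor<12xf32>) -> tensor<3x4xf32> {
      %result = "xla_hlo.reshape"(%arg) : (tensor<12xf32>) -> tensor<3x4xf32>
      return %result : tensor<3x4xf32>
    }
    // CHECK: 3x4xf32=[1 2 3 4][5 6 7 8][9 10 11 12]

    // Everything below the split marker runs as a separate module.
    // -----

    // CHECK-LABEL: EXEC @reshape_1D_3D
    func @reshape_1D_3D(%arg : tensor<12xf32>) -> tensor<2x2x3xf32> {
      %result = "xla_hlo.reshape"(%arg) : (tensor<12xf32>) -> tensor<2x2x3xf32>
      return %result : tensor<2x2x3xf32>
    }
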
PiperOrigin-RevId: 303805540
diff --git a/iree/test/e2e/xla/rem.mlir b/iree/test/e2e/xla/rem.mlir
index 06ff112..913f469 100644
--- a/iree/test/e2e/xla/rem.mlir
+++ b/iree/test/e2e/xla/rem.mlir
@@ -1,4 +1,5 @@
// RUN: iree-run-mlir -iree-hal-target-backends=vmla %s | IreeFileCheck %s
+// RUN: [[ $IREE_LLVMJIT_DISABLE == 1 ]] || (iree-run-mlir -iree-hal-target-backends=llvm-ir %s | IreeFileCheck %s)
// RUN: [[ $IREE_VULKAN_DISABLE == 1 ]] || (iree-run-mlir -iree-hal-target-backends=vulkan-spirv %s | IreeFileCheck %s)
// RUN: [[ $IREE_VULKAN_DISABLE == 1 ]] || (iree-run-mlir -iree-hal-target-backends=vulkan-spirv -iree-use-linalg-to-spirv-path %s | IreeFileCheck %s)
@@ -11,6 +12,8 @@
}
// CHECK: f32=2
+// -----
+
// CHECK-LABEL: EXEC @tensor
func @tensor() -> tensor<3xf32> {
%input1 = iree.unfoldable_constant dense<[16.0, 17.0, 18.0]> : tensor<3xf32>
@@ -20,6 +23,8 @@
}
// CHECK: f32=2 1 0
+// -----
+
// CHECK-LABEL: EXEC @negative_den
func @negative_den() -> tensor<f32> {
%input1 = iree.unfoldable_constant dense<16.0> : tensor<f32>
@@ -29,6 +34,8 @@
}
// CHECK: f32=2
+// -----
+
// CHECK-LABEL: EXEC @negative_num
func @negative_num() -> tensor<f32> {
%input1 = iree.unfoldable_constant dense<-16.0> : tensor<f32>
@@ -38,6 +45,8 @@
}
// CHECK: f32=-2
+// -----
+
// CHECK-LABEL: EXEC @scalar_int
func @scalar_int() -> tensor<i32> {
%input1 = iree.unfoldable_constant dense<16> : tensor<i32>
@@ -47,6 +56,8 @@
}
// CHECK: i32=2
+// -----
+
// CHECK-LABEL: EXEC @tensor_int
func @tensor_int() -> tensor<3xi32> {
%input1 = iree.unfoldable_constant dense<[16, 17, 18]> : tensor<3xi32>
@@ -56,6 +67,8 @@
}
// CHECK: i32=2 1 0
+// -----
+
// CHECK-LABEL: EXEC @negative_den_int
func @negative_den_int() -> tensor<i32> {
%input1 = iree.unfoldable_constant dense<16> : tensor<i32>
@@ -65,6 +78,8 @@
}
// CHECK: i32=2
+// -----
+
// CHECK-LABEL: EXEC @negative_num_int
func @negative_num_int() -> tensor<i32> {
%input1 = iree.unfoldable_constant dense<-16> : tensor<i32>
diff --git a/iree/test/e2e/xla/reshape.mlir b/iree/test/e2e/xla/reshape.mlir
index d34ee63..e88a315 100644
--- a/iree/test/e2e/xla/reshape.mlir
+++ b/iree/test/e2e/xla/reshape.mlir
@@ -1,4 +1,5 @@
// RUN: iree-run-mlir -iree-hal-target-backends=vmla %s -input-value="12xf32=[1 2 3 4 5 6 7 8 9 10 11 12]" | IreeFileCheck %s
+// RUN: [[ $IREE_LLVMJIT_DISABLE == 1 ]] || (iree-run-mlir -iree-hal-target-backends=llvm-ir %s -input-value="12xf32=[1 2 3 4 5 6 7 8 9 10 11 12]" | IreeFileCheck %s)
// RUN: [[ $IREE_VULKAN_DISABLE == 1 ]] || (iree-run-mlir -iree-hal-target-backends=vulkan-spirv %s -input-value="12xf32=[1 2 3 4 5 6 7 8 9 10 11 12]" | IreeFileCheck %s)
// CHECK-LABEL: EXEC @reshape_1D_2D
@@ -8,6 +9,8 @@
}
// CHECK: 3x4xf32=[1 2 3 4][5 6 7 8][9 10 11 12]
+// -----
+
// CHECK-LABEL: EXEC @reshape_1D_3D
func @reshape_1D_3D(%arg : tensor<12xf32>) -> tensor<2x2x3xf32> {
%result = "xla_hlo.reshape"(%arg) : (tensor<12xf32>) -> tensor<2x2x3xf32>