Populate dynamic-shaped memref descriptor offset value (#4828)
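
Previously the lowering of hal.interface.binding.subspan to LLVM populated only the
allocated and aligned pointers of the memref descriptor and left the offset field
(index 2 of the descriptor struct) undefined. This change initializes the offset to a
constant 0, updates the FileCheck expectations, adds the experimental
linalg-on-tensors RUN lines to the dynamic_abs and dynamic_add e2e tests, and adds a
new dynamic_dot e2e test.

For context, a minimal sketch (not part of the patch) of the descriptor population
that this change completes; it assumes MLIR's MemRefDescriptor helper is in scope and
reuses the names (builder, loc, typeConverter, memRefType, typedPtrValue) from the
surrounding code in ConvertToLLVM.cpp:

  // Build an LLVM memref descriptor for a HAL interface binding.
  auto desc = MemRefDescriptor::undef(
      builder, loc, typeConverter->convertType(memRefType));
  desc.setAllocatedPtr(builder, loc, typedPtrValue);
  desc.setAlignedPtr(builder, loc, typedPtrValue);
  // New in this change: the descriptor starts as llvm.mlir.undef, so without an
  // explicit offset the field at index [2] stays undefined and address
  // computations for dynamically shaped bindings may read an undefined value.
  desc.setConstantOffset(builder, loc, 0);
  // Dynamic sizes and strides are filled in later when the shape is tied
  // (see the shapex.tie_shape expectations in hal_interface_bindings.mlir).
  return desc;
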
diff --git a/iree/compiler/Conversion/LinalgToLLVM/ConvertToLLVM.cpp b/iree/compiler/Conversion/LinalgToLLVM/ConvertToLLVM.cpp
index e1030c4..5849a77 100644
--- a/iree/compiler/Conversion/LinalgToLLVM/ConvertToLLVM.cpp
+++ b/iree/compiler/Conversion/LinalgToLLVM/ConvertToLLVM.cpp
@@ -251,6 +251,7 @@
builder, loc, typeConverter->convertType(memRefType));
desc.setAllocatedPtr(builder, loc, typedPtrValue);
desc.setAlignedPtr(builder, loc, typedPtrValue);
+ desc.setConstantOffset(builder, loc, 0);
return desc;
}
}
diff --git a/iree/compiler/Conversion/LinalgToLLVM/test/hal_interface_bindings.mlir b/iree/compiler/Conversion/LinalgToLLVM/test/hal_interface_bindings.mlir
index b37e652..1d620e3 100644
--- a/iree/compiler/Conversion/LinalgToLLVM/test/hal_interface_bindings.mlir
+++ b/iree/compiler/Conversion/LinalgToLLVM/test/hal_interface_bindings.mlir
@@ -14,8 +14,10 @@
// CHECK: %[[DESC_A:.+]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: %[[DESC_B:.+]] = llvm.insertvalue %[[BUFFER_F32]], %[[DESC_A]][0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: %[[DESC_C:.+]] = llvm.insertvalue %[[BUFFER_F32]], %[[DESC_B]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
+ // CHECK: %[[C0:.+]] = llvm.mlir.constant(0 : index) : i64
+ // CHECK: %[[DESC_D:.+]] = llvm.insertvalue %[[C0]], %[[DESC_C]][2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
%memref = hal.interface.binding.subspan @io::@ret0[%c72] : memref<?xf32>
- // CHECK: "test.sink"(%[[DESC_C]])
+ // CHECK: "test.sink"(%[[DESC_D]])
"test.sink"(%memref) : (memref<?xf32>) -> ()
return
}
@@ -32,24 +34,26 @@
// ...
// CHECK: %[[DYN_MEMREF_T1:.+]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: %[[DYN_MEMREF_T2:.+]] = llvm.insertvalue %{{.+}}, %[[DYN_MEMREF_T1]][0]
- // CHECK: %[[DYN_MEMREF:.+]] = llvm.insertvalue %{{.+}}, %[[DYN_MEMREF_T2]][1]
+ // CHECK: %[[DYN_MEMREF_T3:.+]] = llvm.insertvalue %{{.+}}, %[[DYN_MEMREF_T2]][1]
+ // CHECK: %[[C0:.+]] = llvm.mlir.constant(0 : index) : i64
+ // CHECK: %[[DYN_MEMREF:.+]] = llvm.insertvalue %[[C0]], %[[DYN_MEMREF_T3]][2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
%memref = hal.interface.binding.subspan @io::@ret0[%c72] : memref<?x2xf32>
// ...
- // CHECK: %[[CDIM0_I32:.+]] = llvm.load %14 : !llvm.ptr<i32>
- // CHECK: %[[CDIM0:.+]] = llvm.zext %[[CDIM0_I32]] : i32 to i64
+ // CHECK: %[[CDIM0_I32:.+]] = llvm.load %16 : !llvm.ptr<i32>
+ // CHECK: %[[CDIM0:.+]] = llvm.zext %[[CDIM0_I32]] : i32 to i64
%dim = hal.interface.load.constant offset = 0 : index
%shape = shapex.make_ranked_shape %dim : (index) -> !shapex.ranked_shape<[?,2]>
- // CHECK: %[[MEMREF_T0:.+]] = llvm.insertvalue %[[CDIM0]], %[[DYN_MEMREF]][3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: %[[C2:.+]] = llvm.mlir.constant(2 : index) : i64
- // CHECK: %[[MEMREF_T1:.+]] = llvm.insertvalue %[[C2]], %[[MEMREF_T0]][3, 1]
- // CHECK: %[[C1:.+]] = llvm.mlir.constant(1 : index) : i64
- // CHECK: %[[TIED_MEMREF:.+]] = llvm.insertvalue %[[C1]], %[[MEMREF_T1]][4, 1]
- // CHECK: %[[STRIDE1:.+]] = llvm.extractvalue %[[TIED_MEMREF]][4, 1]
- // CHECK: %[[DIM1:.+]] = llvm.extractvalue %[[TIED_MEMREF]][3, 1]
- // CHECK: %[[STRIDE0:.+]] = llvm.mul %[[STRIDE1]], %[[DIM1]] : i64
- // CHECK: %[[FINAL_MEMREF:.+]] = llvm.insertvalue %[[STRIDE0]], %[[TIED_MEMREF]][4, 0]
+ // CHECK: %[[MEMREF_T0:.+]] = llvm.insertvalue %[[CDIM0]], %[[DYN_MEMREF]][3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+ // CHECK: %[[C2:.+]] = llvm.mlir.constant(2 : index) : i64
+ // CHECK: %[[MEMREF_T1:.+]] = llvm.insertvalue %[[C2]], %[[MEMREF_T0]][3, 1]
+ // CHECK: %[[C1:.+]] = llvm.mlir.constant(1 : index) : i64
+ // CHECK: %[[TIED_MEMREF:.+]] = llvm.insertvalue %[[C1]], %[[MEMREF_T1]][4, 1]
+ // CHECK: %[[STRIDE1:.+]] = llvm.extractvalue %[[TIED_MEMREF]][4, 1]
+ // CHECK: %[[DIM1:.+]] = llvm.extractvalue %[[TIED_MEMREF]][3, 1]
+ // CHECK: %[[STRIDE0:.+]] = llvm.mul %[[STRIDE1]], %[[DIM1]] : i64
+ // CHECK: %[[FINAL_MEMREF:.+]] = llvm.insertvalue %[[STRIDE0]], %[[TIED_MEMREF]][4, 0]
%tied_memref = shapex.tie_shape %memref, %shape : memref<?x2xf32>, !shapex.ranked_shape<[?,2]>
- // CHECK-NEXT: "test.sink"(%[[FINAL_MEMREF]])
+ // CHECK: "test.sink"(%[[FINAL_MEMREF]])
"test.sink"(%tied_memref) : (memref<?x2xf32>) -> ()
return
}
diff --git a/iree/test/e2e/regression/dynamic_abs.mlir b/iree/test/e2e/regression/dynamic_abs.mlir
index 67b3f4c..b87ee1c 100644
--- a/iree/test/e2e/regression/dynamic_abs.mlir
+++ b/iree/test/e2e/regression/dynamic_abs.mlir
@@ -1,5 +1,6 @@
// RUN: iree-run-mlir -export-all -iree-hal-target-backends=vmla %s | IreeFileCheck %s
// RUN: [[ $IREE_LLVMAOT_DISABLE == 1 ]] || (iree-run-mlir -export-all -iree-hal-target-backends=dylib-llvm-aot %s | IreeFileCheck %s)
+// RUN: [[ $IREE_LLVMAOT_DISABLE == 1 ]] || (iree-run-mlir -export-all -iree-hal-target-backends=dylib-llvm-aot -iree-flow-dispatch-linalg-on-tensors -iree-codegen-llvm-experimental-linalg-on-tensors %s | IreeFileCheck %s)
// CHECK-LABEL: EXEC @dynamic_tensor
func @dynamic_tensor() -> tensor<?x?xf32> attributes { iree.module.export } {
diff --git a/iree/test/e2e/regression/dynamic_add.mlir b/iree/test/e2e/regression/dynamic_add.mlir
index 4ba6007..4633a53 100644
--- a/iree/test/e2e/regression/dynamic_add.mlir
+++ b/iree/test/e2e/regression/dynamic_add.mlir
@@ -1,7 +1,7 @@
// RUN: iree-run-mlir -export-all -iree-hal-target-backends=vmla -function-input="2x4xf32=[[1.0, 2.0, 3.0, 4.0], [-1.0, -2.0, -3.0, -4.0]]" -function-input="2x4xf32=[[5.0, 6.0, 7.0, 8.0], [-5.0, -6.0, -7.0, -8.0]]" %s | IreeFileCheck %s
// RUN: [[ $IREE_VULKAN_DISABLE == 1 ]] || (iree-run-mlir -export-all -iree-hal-target-backends=vulkan-spirv -function-input="2x4xf32=[[1.0, 2.0, 3.0, 4.0], [-1.0, -2.0, -3.0, -4.0]]" -function-input="2x4xf32=[[5.0, 6.0, 7.0, 8.0], [-5.0, -6.0, -7.0, -8.0]]" %s | IreeFileCheck %s)
// RUN: [[ $IREE_LLVMAOT_DISABLE == 1 ]] || (iree-run-mlir -export-all -iree-hal-target-backends=dylib-llvm-aot -function-input="2x4xf32=[[1.0, 2.0, 3.0, 4.0], [-1.0, -2.0, -3.0, -4.0]]" -function-input="2x4xf32=[[5.0, 6.0, 7.0, 8.0], [-5.0, -6.0, -7.0, -8.0]]" %s | IreeFileCheck %s)
-
+// RUN: [[ $IREE_LLVMAOT_DISABLE == 1 ]] || (iree-run-mlir -export-all -iree-hal-target-backends=dylib-llvm-aot -iree-flow-dispatch-linalg-on-tensors -iree-codegen-llvm-experimental-linalg-on-tensors -function-input="2x4xf32=[[1.0, 2.0, 3.0, 4.0], [-1.0, -2.0, -3.0, -4.0]]" -function-input="2x4xf32=[[5.0, 6.0, 7.0, 8.0], [-5.0, -6.0, -7.0, -8.0]]" %s | IreeFileCheck %s)
// CHECK: EXEC @main
// CHECK: 2x4xf32=[6 8 10 12][-6 -8 -10 -12]
diff --git a/iree/test/e2e/regression/dynamic_dot.mlir b/iree/test/e2e/regression/dynamic_dot.mlir
new file mode 100644
index 0000000..a7f3418
--- /dev/null
+++ b/iree/test/e2e/regression/dynamic_dot.mlir
@@ -0,0 +1,20 @@
+// RUN: iree-run-mlir -export-all -iree-hal-target-backends=vmla %s | IreeFileCheck %s
+// RUN: [[ $IREE_LLVMAOT_DISABLE == 1 ]] || (iree-run-mlir -export-all -iree-hal-target-backends=dylib-llvm-aot -iree-flow-dispatch-linalg-on-tensors -iree-codegen-llvm-experimental-linalg-on-tensors %s | IreeFileCheck %s)
+
+// CHECK-LABEL: EXEC @dynamic_dot
+func @dynamic_dot() -> tensor<?x?xf32> attributes { iree.module.export } {
+ %lhs = iree.dynamic_shape_constant dense<[
+ [15.0, 14.0, 13.0],
+ [12.0, 11.0, 10.0],
+ [09.0, 08.0, 07.0],
+ [06.0, 05.0, 04.0],
+ [03.0, 02.0, 01.0]]> : tensor<5x3xf32> -> tensor<?x?xf32>
+ %rhs = iree.dynamic_shape_constant dense<[
+ [15.0, 14.0, 13.0, 12.0, 11.0],
+ [10.0, 09.0, 08.0, 07.0, 06.0],
+ [05.0, 04.0, 03.0, 02.0, 01.0]]> : tensor<3x5xf32> -> tensor<?x?xf32>
+ %res = "mhlo.dot"(%lhs, %rhs) : (tensor<?x?xf32>, tensor<?x?xf32>) -> tensor<?x?xf32>
+ return %res : tensor<?x?xf32>
+}
+
+// CHECK: 5x5xf32=[430 388 346 304 262][340 307 274 241 208][250 226 202 178 154][160 145 130 115 100][70 64 58 52 46]
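
For reference (not part of the patch), a standalone C++ sanity check of the expected
5x5 result in the CHECK line above, computing lhs(5x3) * rhs(3x5) with the same
constants as the test:

  #include <cstdio>

  int main() {
    const float lhs[5][3] = {{15, 14, 13}, {12, 11, 10}, {9, 8, 7},
                             {6, 5, 4},    {3, 2, 1}};
    const float rhs[3][5] = {{15, 14, 13, 12, 11},
                             {10, 9, 8, 7, 6},
                             {5, 4, 3, 2, 1}};
    // Print each row in the same [a b c d e] layout as the CHECK line.
    for (int i = 0; i < 5; ++i) {
      std::printf("[");
      for (int j = 0; j < 5; ++j) {
        float acc = 0.f;
        for (int k = 0; k < 3; ++k) acc += lhs[i][k] * rhs[k][j];
        std::printf(j ? " %g" : "%g", acc);
      }
      std::printf("]");
    }
    std::printf("\n");  // Expect: [430 388 346 304 262]...[70 64 58 52 46]
    return 0;
  }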