Work around scalar support bug in webgpu compilation. (#14629)
See https://github.com/openxla/iree/issues/11054#issuecomment-1670260451:
this `multiple_results.mlir` test program uses scalars, and
https://github.com/openxla/iree/pull/13711 changed the code path that
scalar dispatches go down. This alternate code path does not appear to
have the same SPIR-V -> WGSL compatibility patches as the tensor path,
so we see compilation errors with messages like `entry point 'd0'
references multiple variables that use the same resource binding`. We
should fix that (and add tests so it doesn't regress again), but I want
to keep webgpu runtime debugging work unblocked.
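For context, the failure mode named in that error corresponds to WGSL along the lines of the sketch below (hypothetical buffer names and types, not taken from the actual compiled module): two module-scope variables share the same `@group`/`@binding` pair, and the `d0` entry point references both, which WGSL validation rejects.
```wgsl
// Hypothetical sketch of the invalid pattern, not actual compiler output:
// two storage buffers bound to the same @group(0) @binding(0) slot.
@group(0) @binding(0) var<storage, read_write> buffer_a : array<f32>;
@group(0) @binding(0) var<storage, read_write> buffer_b : array<f32>;

@compute @workgroup_size(1)
fn d0() {
  // An entry point that touches both variables trips
  // "references multiple variables that use the same resource binding".
  buffer_a[0] = abs(buffer_b[0]);
}
```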
This modified program exhibits the same runtime-related issue reported in
https://github.com/openxla/iree/issues/13809 /
https://github.com/openxla/iree/pull/14163:
* The first invocation incorrectly returns:
```
2xf32=2.23 2.45
2xf32=0 0
```
* The second invocation correctly returns:
```
2xf32=1.23 1.45
2xf32=2.23 2.45
```
diff --git a/experimental/web/sample_webgpu/index.html b/experimental/web/sample_webgpu/index.html
index 9294f2d..5c6e14b 100644
--- a/experimental/web/sample_webgpu/index.html
+++ b/experimental/web/sample_webgpu/index.html
@@ -355,8 +355,8 @@
} else if (sampleName === "multiple_results") {
functionNameInput.value = "multiple_results";
functionArgumentsInput.value = [
- "f32=-1.23",
- "f32=-4.56",
+ "2xf32=-1.23,-1.45",
+ "2xf32=-2.23,-2.45",
].join("\n");
} else if (sampleName === "fullyconnected") {
functionNameInput.value = "main";
diff --git a/experimental/web/sample_webgpu/multiple_results.mlir b/experimental/web/sample_webgpu/multiple_results.mlir
index ab82f97..0cfb9fc 100644
--- a/experimental/web/sample_webgpu/multiple_results.mlir
+++ b/experimental/web/sample_webgpu/multiple_results.mlir
@@ -1,8 +1,8 @@
func.func @multiple_results(
- %input_0 : tensor<f32>,
- %input_1 : tensor<f32>
-) -> (tensor<f32>, tensor<f32>) {
- %result_0 = math.absf %input_0 : tensor<f32>
- %result_1 = math.absf %input_1 : tensor<f32>
- return %result_0, %result_1 : tensor<f32>, tensor<f32>
+ %input_0 : tensor<2xf32>,
+ %input_1 : tensor<2xf32>
+) -> (tensor<2xf32>, tensor<2xf32>) {
+ %result_0 = math.absf %input_0 : tensor<2xf32>
+ %result_1 = math.absf %input_1 : tensor<2xf32>
+ return %result_0, %result_1 : tensor<2xf32>, tensor<2xf32>
}