Quality of life improvements for experimental/regression_suite. (#16415)
* Capture output in test fixtures.
  * If compilation or execution fails, this will dump stdout/stderr from
    `iree-compile` or `iree-run-module` to the logs. (A sketch of the
    failure path follows these notes.)
  * The formatting isn't pretty, but the output may help with triage.
* Include full (short) summary reports at the end of pytest output (see the
  example after this list).
  * I find it very useful to see which tests ran - both passing and
    failing. The default only shows failures.
  * Docs:
    https://docs.pytest.org/en/stable/how-to/output.html#producing-a-detailed-summary-report
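For reference, `-r` selects which outcome categories appear in pytest's short
summary and `A` shows all of them (the default is `fE`, failures and errors
only). A sample invocation matching the CI change below:

```shell
# Print a short summary line for every test outcome (passed, failed,
# skipped, ...), not just failures.
pytest -rA -s -m "plat_host_cpu and presubmit" experimental/regression_suite
```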
Progress on https://github.com/openxla/iree/issues/16372 (tidying up
`regression_suite` while I evaluate extending it).
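The capture change in `fixtures.py` amounts to swapping `subprocess.check_call`
for `subprocess.run(..., check=True, capture_output=True)`. A minimal sketch of
the dump-on-failure idea, using a hypothetical `_run_and_dump` helper rather
than the actual fixture code:

```python
import subprocess


def _run_and_dump(exec_args, cwd):
    # Hypothetical helper: keep tool output out of the live log unless the
    # tool fails.
    try:
        subprocess.run(exec_args, check=True, capture_output=True, cwd=cwd)
    except subprocess.CalledProcessError as e:
        # With capture_output=True, the exception carries both streams as
        # bytes; dump them so the CI logs have something to triage.
        print("stdout:")
        print(e.stdout.decode())
        print("stderr:")
        print(e.stderr.decode())
        raise
```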
ci-exactly: build_packages, regression_test_cpu,
regression_test_amdgpu_rocm, regression_test_amdgpu_vulkan
diff --git a/.github/workflows/pkgci_regression_test_amdgpu_rocm.yml b/.github/workflows/pkgci_regression_test_amdgpu_rocm.yml
index 0554a16..6919547 100644
--- a/.github/workflows/pkgci_regression_test_amdgpu_rocm.yml
+++ b/.github/workflows/pkgci_regression_test_amdgpu_rocm.yml
@@ -48,5 +48,5 @@
           source $VENV_DIR/bin/activate
           export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/rocm/lib:/opt/rocm/hip/lib
           pytest \
-            -s -m "plat_rdna3_rocm and presubmit" \
+            -rA -s -m "plat_rdna3_rocm and presubmit" \
             experimental/regression_suite
diff --git a/.github/workflows/pkgci_regression_test_amdgpu_vulkan.yml b/.github/workflows/pkgci_regression_test_amdgpu_vulkan.yml
index b994634..152697e 100644
--- a/.github/workflows/pkgci_regression_test_amdgpu_vulkan.yml
+++ b/.github/workflows/pkgci_regression_test_amdgpu_vulkan.yml
@@ -47,5 +47,5 @@
         run: |
           source $VENV_DIR/bin/activate
           pytest \
-            -s -m "plat_rdna3_vulkan and presubmit" \
+            -rA -s -m "plat_rdna3_vulkan and presubmit" \
             experimental/regression_suite
diff --git a/.github/workflows/pkgci_regression_test_cpu.yml b/.github/workflows/pkgci_regression_test_cpu.yml
index 6e0140b..e07a895 100644
--- a/.github/workflows/pkgci_regression_test_cpu.yml
+++ b/.github/workflows/pkgci_regression_test_cpu.yml
@@ -52,5 +52,5 @@
         run: |
           source $VENV_DIR/bin/activate
           pytest \
-            -s -m "plat_host_cpu and presubmit" \
+            -rA -s -m "plat_host_cpu and presubmit" \
             experimental/regression_suite
diff --git a/experimental/regression_suite/ireers/fixtures.py b/experimental/regression_suite/ireers/fixtures.py
index aa0c9b0..543f64e 100644
--- a/experimental/regression_suite/ireers/fixtures.py
+++ b/experimental/regression_suite/ireers/fixtures.py
@@ -43,8 +43,7 @@
     print("**************************************************************")
     print(f"Compiling {source} -> {vmfb_artifact} with flags:")
     print(f" {sep.join(flags)}")
-    start_time = time.time()
-    subprocess.check_call(
+    exec_args = (
         [
             "iree-compile",
             "-o",
@@ -52,8 +51,11 @@
             str(source.path),
         ]
         + IREE_COMPILE_QOL_FLAGS
-        + flags,
-        cwd=str(source.group.directory),
+        + flags
+    )
+    start_time = time.time()
+    subprocess.run(
+        exec_args, check=True, capture_output=True, cwd=source.group.directory
     )
     run_time = time.time() - start_time
     print(f"Compilation succeeded in {run_time}s")
@@ -73,7 +75,7 @@
     exec_args.extend(args)
     print("**************************************************************")
     print("Exec:", " ".join(exec_args))
-    subprocess.check_call(exec_args, cwd=vmfb.group.directory)
+    subprocess.run(exec_args, check=True, capture_output=True, cwd=vmfb.group.directory)
 
 
 def iree_benchmark_module(