Only build required benchmark targets in CI (#13665)
Make `build_e2e_test_artifacts.yml` build only the benchmark targets required
by the selected benchmark presets. This avoids building the large benchmark
suites in presubmit and postsubmit, which consumes significant build time and
GCS storage.
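
For illustration, a local run of the updated script from the repository root,
using the same settings as `benchmark_long.yml`, would look roughly like the
sketch below (the IREE_HOST_BIN_DIR path is only a placeholder for wherever the
host tools were installed):

  # Builds only iree-benchmark-suites-long and iree-e2e-compile-stats-suites-long,
  # skipping the default iree-benchmark-suites target.
  export IREE_HOST_BIN_DIR="$PWD/../iree-build/install/bin"  # placeholder path
  export IREE_BENCHMARK_PRESETS="cuda-long,comp-stats-long"
  export IREE_BUILD_DEFAULT_BENCHMARK_SUITES=0
  build_tools/cmake/build_e2e_test_artifacts.sh build-e2e-test-artifacts
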
diff --git a/.github/workflows/benchmark_long.yml b/.github/workflows/benchmark_long.yml
index 5c39384..54e8717 100644
--- a/.github/workflows/benchmark_long.yml
+++ b/.github/workflows/benchmark_long.yml
@@ -55,7 +55,8 @@
build-dir: ${{ needs.build_all.outputs.build-dir }}
build-dir-archive: ${{ needs.build_all.outputs.build-dir-archive }}
build-dir-gcs-artifact: ${{ needs.build_all.outputs.build-dir-gcs-artifact }}
- benchmark-presets: cuda-long
+ benchmark-presets: cuda-long,comp-stats-long
+ build-default-benchmark-suites: 0
compilation_benchmarks:
needs: [setup, build_e2e_test_artifacts]
diff --git a/.github/workflows/build_e2e_test_artifacts.yml b/.github/workflows/build_e2e_test_artifacts.yml
index b6da8cd..70a1dd5 100644
--- a/.github/workflows/build_e2e_test_artifacts.yml
+++ b/.github/workflows/build_e2e_test_artifacts.yml
@@ -29,8 +29,16 @@
required: true
type: string
benchmark-presets:
+ description: |
+ A comma-separated string of benchmark presets to build.
required: true
type: string
+ build-default-benchmark-suites:
+ description: |
+        If set to 1, build the default benchmark suites for sanity checks and
+        e2e model testing.
+ default: 1
+ type: number
outputs:
e2e-test-artifacts-dir:
description: |
@@ -86,35 +94,19 @@
- name: "Building e2e test artifacts"
id: build
env:
+ IREE_BENCHMARK_PRESETS: ${{ inputs.benchmark-presets }}
+ IREE_BUILD_DEFAULT_BENCHMARK_SUITES: ${{ inputs.build-default-benchmark-suites }}
BUILD_E2E_TEST_ARTIFACTS_DIR: build-e2e-test-artifacts
run: |
build_tools/github_actions/docker_run.sh \
--env "IREE_HOST_BIN_DIR=${HOST_BUILD_DIR}/install/bin" \
+ --env "IREE_BENCHMARK_PRESETS=${IREE_BENCHMARK_PRESETS}" \
+ --env "IREE_BUILD_DEFAULT_BENCHMARK_SUITES=${IREE_BUILD_DEFAULT_BENCHMARK_SUITES}" \
gcr.io/iree-oss/frontends@sha256:da48ff70cc3b956ba22465a3bef285711b66dfcd605ccba605db2278b96dbc5c \
build_tools/cmake/build_e2e_test_artifacts.sh \
"${BUILD_E2E_TEST_ARTIFACTS_DIR}"
echo "e2e-test-artifacts-dir=${BUILD_E2E_TEST_ARTIFACTS_DIR}/e2e_test_artifacts" >> "${GITHUB_OUTPUT}"
echo "e2e-test-artifacts-build-log=${BUILD_E2E_TEST_ARTIFACTS_DIR}/.ninja_log" >> "${GITHUB_OUTPUT}"
- - name: "Exporting benchmark configs"
- id: export
- if: inputs.benchmark-presets != ''
- env:
- COMPILATION_BENCHMARK_CONFIG: ${{ steps.build.outputs.e2e-test-artifacts-dir }}/compilation-benchmark-config.json
- EXECUTION_BENCHMARK_CONFIG: ${{ steps.build.outputs.e2e-test-artifacts-dir }}/execution-benchmark-config.json
- BENCHMARK_FLAG_DUMP: ${{ steps.build.outputs.e2e-test-artifacts-dir }}/benchmark-flag-dump.txt
- BENCHMARK_PRESETS: ${{ inputs.benchmark-presets }}
- run: |
- ./build_tools/benchmarks/export_benchmark_config.py \
- compilation \
- --output="${COMPILATION_BENCHMARK_CONFIG}"
- ./build_tools/benchmarks/export_benchmark_config.py \
- execution \
- --benchmark_presets="${BENCHMARK_PRESETS}" \
- --output="${EXECUTION_BENCHMARK_CONFIG}"
- ./build_tools/benchmarks/benchmark_helper.py dump-cmds \
- --execution_benchmark_config="${EXECUTION_BENCHMARK_CONFIG}" \
- --compilation_benchmark_config="${COMPILATION_BENCHMARK_CONFIG}" \
- > "${BENCHMARK_FLAG_DUMP}"
- name: "Uploading e2e test artifacts"
id: upload
env:
diff --git a/build_tools/benchmarks/export_benchmark_config.py b/build_tools/benchmarks/export_benchmark_config.py
index 0deedc7..9808ed7 100755
--- a/build_tools/benchmarks/export_benchmark_config.py
+++ b/build_tools/benchmarks/export_benchmark_config.py
@@ -68,11 +68,6 @@
(config.target_device_spec.architecture.type == common_definitions.
ArchitectureType.GPU and config.target_device_spec.host_environment.
platform == "android"),
- # Not a preset for execution benchmarks.
- "comp-stats":
- lambda _: False,
- "comp-stats-long":
- lambda _: False,
}
COMPILATION_BENCHMARK_PRESET_MATCHERS: Dict[str, PresetMatcher] = {
diff --git a/build_tools/cmake/build_e2e_test_artifacts.sh b/build_tools/cmake/build_e2e_test_artifacts.sh
index 8ae8bf1..c4d8348 100755
--- a/build_tools/cmake/build_e2e_test_artifacts.sh
+++ b/build_tools/cmake/build_e2e_test_artifacts.sh
@@ -13,7 +13,11 @@
# migrated.
#
# The required IREE_HOST_BIN_DIR environment variable indicates the location
-# of the precompiled IREE binaries.
+# of the precompiled IREE binaries. The IREE_BENCHMARK_PRESETS environment
+# variable can be set to a comma-separated list of benchmark presets; only the
+# artifacts those presets require are built. By default `iree-benchmark-suites`
+# is built for sanity checks and e2e model testing; this default can be
+# disabled with the environment variable `IREE_BUILD_DEFAULT_BENCHMARK_SUITES=0`.
#
# Designed for CI, but can be run locally. The desired build directory can be
# passed as the first argument. Otherwise, it uses the environment variable
@@ -26,10 +30,53 @@
BUILD_DIR="${1:-${IREE_BUILD_E2E_TEST_ARTIFACTS_DIR:-build-e2e-test-artifacts}}"
IREE_HOST_BIN_DIR="$(realpath ${IREE_HOST_BIN_DIR})"
+BENCHMARK_PRESETS="${IREE_BENCHMARK_PRESETS:-}"
+BUILD_DEFAULT_BENCHMARK_SUITES="${IREE_BUILD_DEFAULT_BENCHMARK_SUITES:-1}"
source build_tools/cmake/setup_build.sh
source build_tools/cmake/setup_tf_python.sh
+declare -a BUILD_TARGETS
+
+if (( "${BUILD_DEFAULT_BENCHMARK_SUITES}" == 1 )); then
+ BUILD_TARGETS+=("iree-benchmark-suites")
+fi
+
+# Split the presets into execution and compilation presets so that separate
+# configs can be exported with export_benchmark_config.py.
+COMPILATION_PRESETS=""
+EXECUTION_PRESETS=""
+if [[ -n "${BENCHMARK_PRESETS}" ]]; then
+ IFS=, read -r -a PRESET_ARRAY <<< "${BENCHMARK_PRESETS}"
+ for PRESET in "${PRESET_ARRAY[@]}"; do
+ case "${PRESET}" in
+ comp-stats)
+ BUILD_TARGETS+=(iree-e2e-compile-stats-suites)
+ COMPILATION_PRESETS="${COMPILATION_PRESETS},${PRESET}"
+ ;;
+ comp-stats-long)
+ BUILD_TARGETS+=(iree-e2e-compile-stats-suites-long)
+ COMPILATION_PRESETS="${COMPILATION_PRESETS},${PRESET}"
+ ;;
+ *-long)
+ BUILD_TARGETS+=(iree-benchmark-suites-long)
+ EXECUTION_PRESETS="${EXECUTION_PRESETS},${PRESET}"
+ ;;
+ *)
+        # The build target for the default suite has already been added above.
+ EXECUTION_PRESETS="${EXECUTION_PRESETS},${PRESET}"
+ ;;
+ esac
+ done
+ COMPILATION_PRESETS="${COMPILATION_PRESETS#,}"
+ EXECUTION_PRESETS="${EXECUTION_PRESETS#,}"
+fi
+
+if (( "${#BUILD_TARGETS[@]}" == 0 )); then
+ echo "No target to build."
+ exit 1
+fi
+
echo "Configuring to build e2e test artifacts"
"${CMAKE_BIN}" -B "${BUILD_DIR}" \
-G Ninja \
@@ -44,5 +91,22 @@
echo "Building e2e test artifacts"
"${CMAKE_BIN}" \
--build "${BUILD_DIR}" \
- --target iree-e2e-test-artifacts \
+ --target "${BUILD_TARGETS[@]}" \
-- -k 0
+
+E2E_TEST_ARTIFACTS_DIR="${BUILD_DIR}/e2e_test_artifacts"
+COMPILATION_CONFIG="${E2E_TEST_ARTIFACTS_DIR}/compilation-benchmark-config.json"
+EXECUTION_CONFIG="${E2E_TEST_ARTIFACTS_DIR}/execution-benchmark-config.json"
+FLAG_DUMP="${E2E_TEST_ARTIFACTS_DIR}/benchmark-flag-dump.txt"
+./build_tools/benchmarks/export_benchmark_config.py \
+ compilation \
+ --benchmark_presets="${COMPILATION_PRESETS}" \
+ --output="${COMPILATION_CONFIG}"
+./build_tools/benchmarks/export_benchmark_config.py \
+ execution \
+ --benchmark_presets="${EXECUTION_PRESETS}" \
+ --output="${EXECUTION_CONFIG}"
+./build_tools/benchmarks/benchmark_helper.py dump-cmds \
+ --execution_benchmark_config="${EXECUTION_CONFIG}" \
+ --compilation_benchmark_config="${COMPILATION_CONFIG}" \
+ > "${FLAG_DUMP}"
diff --git a/build_tools/github_actions/configure_ci.py b/build_tools/github_actions/configure_ci.py
index 3f5b6c2..3e68f92 100755
--- a/build_tools/github_actions/configure_ci.py
+++ b/build_tools/github_actions/configure_ci.py
@@ -302,6 +302,11 @@
preset_options.remove(DEFAULT_BENCHMARK_PRESET)
preset_options.update(DEFAULT_BENCHMARK_PRESET_GROUP)
+ if preset_options.intersection(DEFAULT_BENCHMARK_PRESET_GROUP):
+    # This is syntactic sugar to run the compilation benchmarks whenever any
+    # default benchmark preset is present.
+ preset_options.add("comp-stats")
+
preset_options = sorted(preset_options)
for preset_option in preset_options:
if preset_option not in BENCHMARK_PRESET_OPTIONS:
diff --git a/build_tools/github_actions/configure_ci_test.py b/build_tools/github_actions/configure_ci_test.py
index 2f77166..5b33ba6 100644
--- a/build_tools/github_actions/configure_ci_test.py
+++ b/build_tools/github_actions/configure_ci_test.py
@@ -32,7 +32,7 @@
is_pr=True,
is_llvm_integrate_pr=False)
- self.assertEqual(presets_str, "cuda,x86_64")
+ self.assertEqual(presets_str, "comp-stats,cuda,x86_64")
def test_get_benchmark_presets_from_trailers_and_labels(self):
presets_str = configure_ci.get_benchmark_presets(
@@ -41,7 +41,8 @@
is_pr=True,
is_llvm_integrate_pr=False)
- self.assertEqual(presets_str, "android-cpu,android-gpu,cuda,x86_64")
+ self.assertEqual(presets_str,
+ "android-cpu,android-gpu,comp-stats,cuda,x86_64")
def test_get_benchmark_presets_from_default_group(self):
presets_str = configure_ci.get_benchmark_presets(