# blob: 26e60f531855444ee6a80ab9948d28c4139f6566
# Copyright 2022 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# Workflow for execution-benchmark-related jobs. It is designed to be called
# from the main workflow ci.yml. The concurrency of this workflow is controlled
# by the caller's job.
name: Benchmark execution

# This workflow is reusable-only: it runs when called from ci.yml via
# `workflow_call` and receives runner/artifact locations as inputs.
on:
  workflow_call:
    inputs:
      runner-group:
        required: true
        type: string
      runner-env:
        required: true
        type: string
      e2e-test-artifacts-dir:
        required: true
        type: string
      e2e-test-artifacts-gcs-artifact-dir:
        required: true
        type: string
      benchmark-tools-gcs-artifact-dir:
        required: true
        type: string
    outputs:
      benchmark-results-dir:
        description: |
          Local path that stores all benchmark results.
          Empty if no benchmark runs.
        value: ${{ jobs.run_benchmarks.outputs.benchmark-results-dir }}
      benchmark-results-gcs-artifact-dir:
        description: |
          GCS path that stores all benchmark results.
          Empty if no benchmark runs.
        value: ${{ jobs.run_benchmarks.outputs.benchmark-results-gcs-artifact-dir }}

env:
  # This duplicates the variable from ci.yml. The variable needs to be in env
  # instead of the outputs of setup because it contains the run attempt and we
  # want that to be the current attempt, not whatever attempt the setup step
  # last ran in. It therefore can't be passed in via inputs because the env
  # context isn't available there.
  GCS_DIR: gs://iree-github-actions-${{ github.event_name == 'pull_request' && 'presubmit' || 'postsubmit' }}-artifacts/${{ github.run_id }}/${{ github.run_attempt }}
  # Local path and GCS mirror of the execution benchmark config JSON; the two
  # must point at the same file name so download/upload steps stay in sync.
  BENCHMARK_CONFIG: ${{ inputs.e2e-test-artifacts-dir }}/execution-benchmark-config.json
  BENCHMARK_CONFIG_GCS_ARTIFACT: ${{ inputs.e2e-test-artifacts-gcs-artifact-dir }}/execution-benchmark-config.json
jobs:
  # Reads the benchmark config from GCS and flattens it into a JSON list of
  # per-device, per-shard benchmark jobs that run_benchmarks consumes as its
  # matrix.
  generate_matrix:
    runs-on:
      - self-hosted # must come first
      - runner-group=${{ inputs.runner-group }}
      - environment=${{ inputs.runner-env }}
      - cpu
      - os-family=Linux
    outputs:
      benchmark-matrix: ${{ steps.generate.outputs.benchmark-matrix }}
    steps:
      - name: "Checking out repository"
        uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
      - name: "Checking out runtime submodules"
        run: ./build_tools/scripts/git/update_runtime_submodules.sh
      - name: "Generating benchmark matrix"
        id: generate
        run: |
          gcloud storage cp "${BENCHMARK_CONFIG_GCS_ARTIFACT}" "${BENCHMARK_CONFIG}"
          # This jq command takes a benchmark config with the following structure:
          #
          # { "target_device_name" : {
          #     "host_environment" : { ... },
          #     "shards" : [ { ... }, { ... } ]
          #   },
          #   "other_target_device_name" : { ... }
          #
          # and turns it into a flat list of benchmark jobs:
          #
          # [
          #   { "device_name" : "target_device_name",
          #     "host_environment" : { ... },
          #     "shard" : { index: 0, count: 2 }
          #   },
          #   { "device_name" : "target_device_name",
          #     "host_environment" : { ... },
          #     "shard" : { index: 1, count: 2 }
          #   },
          #   { "device_name" : "other_target_device_name",
          #     "host_environment" : { ... },
          #     "shard" : { index: 0, count: N }
          #   },
          #   ...
          # ]
          echo benchmark-matrix="$(jq -c '[ . | to_entries[]
            | .key as $device_name
            | .value.host_environment as $host_environment
            | (.value.shards | length) as $count
            | .value.shards[]
            | {$device_name, $host_environment, shard: {index, $count}}
          ]' "${BENCHMARK_CONFIG}")" >> "${GITHUB_OUTPUT}"
run_benchmarks:
needs: [generate_matrix]
if: needs.generate_matrix.outputs.benchmark-matrix != '[]'
strategy:
# Matrix is dynamically generated by the job generate_matrix. So we only
# runs the benchmarks specified in ${BENCHMARK_CONFIG}.
# All tasks in matrix are seen as a single job in Github CI and the job
# can output a single set of values.
matrix:
benchmark: ${{ fromJSON(needs.generate_matrix.outputs.benchmark-matrix) }}
runs-on:
- self-hosted # must come first
- runner-group=${{ inputs.runner-group }}
- environment=${{ inputs.runner-env }}
- machine-type=${{ matrix.benchmark.device_name }}
env:
DEVICE_NAME: ${{ matrix.benchmark.device_name }}
SHARD_INDEX: ${{ matrix.benchmark.shard.index }}
SHARD_COUNT: ${{ matrix.benchmark.shard.count }}
PLATFORM_ARCH: ${{ matrix.benchmark.host_environment.platform }}-${{ matrix.benchmark.host_environment.architecture }}
E2E_TEST_ARTIFACTS_GCS_ARTIFACT_DIR: ${{ inputs.e2e-test-artifacts-gcs-artifact-dir }}
E2E_TEST_ARTIFACTS_DIR: ${{ inputs.e2e-test-artifacts-dir }}
BENCHMARK_RESULTS_DIR: benchmark-results
outputs:
benchmark-results-dir: ${{ env.BENCHMARK_RESULTS_DIR }}
# Ideally this should be defined in env, so it can be used in the upload
# step. But Github CI doesn't allow us to access env.GCS in env.
benchmark-results-gcs-artifact-dir: ${{ env.GCS_DIR }}/${{ env.BENCHMARK_RESULTS_DIR }}
steps:
- name: "Checking out repository"
uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
- name: "Checking out runtime submodules"
run: ./build_tools/scripts/git/update_runtime_submodules.sh
- name: "Downloading benchmark tools"
id: download-tools
env:
# See `build_benchmark_tools` step in ci.yml for the name format of
# benchmark tools artifacts.
BENCHMARK_TOOLS_ARCHIVE: ${{ env.PLATFORM_ARCH }}-benchmark-tools.tar
BENCHMARK_TOOLS_GCS_ARTIFACT: ${{ inputs.benchmark-tools-gcs-artifact-dir }}/${{ env.PLATFORM_ARCH }}-benchmark-tools.tar
run: |
gcloud storage cp "${BENCHMARK_TOOLS_GCS_ARTIFACT}" "${BENCHMARK_TOOLS_ARCHIVE}"
echo "benchmark-tools-archive=${BENCHMARK_TOOLS_ARCHIVE}" >> "${GITHUB_OUTPUT}"
- name: "Downloading benchmark assets"
id: download-assets
run: |
gcloud storage cp "${BENCHMARK_CONFIG_GCS_ARTIFACT}" "${BENCHMARK_CONFIG}"
mkdir -p "${E2E_TEST_ARTIFACTS_DIR}"
jq -r \
--arg DEVICE_NAME "${DEVICE_NAME}" \
--arg SHARD_INDEX "${SHARD_INDEX}" \
--arg GCS_ARTIFACT_DIR "${E2E_TEST_ARTIFACTS_GCS_ARTIFACT_DIR}" \
'.[$DEVICE_NAME].shards[($SHARD_INDEX | tonumber)] | .module_dir_paths[] | "\($GCS_ARTIFACT_DIR)/\(.)"' \
"${BENCHMARK_CONFIG}" | \
gcloud storage cp -r --read-paths-from-stdin "${E2E_TEST_ARTIFACTS_DIR}"
echo "benchmark-config=${BENCHMARK_CONFIG}" >> "${GITHUB_OUTPUT}"
- name: "Unpacking benchmark tools"
id: unpack-tools
env:
BENCHMARK_TOOLS_ARCHIVE: ${{ steps.download-tools.outputs.benchmark-tools-archive }}
# See `build_benchmark_tools` step in ci.yml for the name format of
# benchmark tools directory.
BENCHMARK_TOOLS_DIR: ${{ env.PLATFORM_ARCH }}-benchmark-tools-dir
run: |
tar -xf ${BENCHMARK_TOOLS_ARCHIVE}
echo "normal-benchmark-tools-dir=${BENCHMARK_TOOLS_DIR}/build/tools" >> "${GITHUB_OUTPUT}"
echo "traced-benchmark-tools-dir=${BENCHMARK_TOOLS_DIR}/build-traced/tools" >> "${GITHUB_OUTPUT}"
echo "tracy-capture-tool=${BENCHMARK_TOOLS_DIR}/build-traced/tracy-capture" >> "${GITHUB_OUTPUT}"
- name: "Determine Shard Suffix"
id: sharding
run: |
if (( SHARD_COUNT > 1 )); then
echo "suffix=$(printf -- "-%02d-of-%02d" "${SHARD_INDEX}" "${SHARD_COUNT}")" >> "${GITHUB_OUTPUT}"
else
echo "suffix=" >> "${GITHUB_OUTPUT}"
fi
- name: "Running benchmarks"
env:
IREE_EXECUTION_BENCHMARK_CONFIG: ${{ steps.download-assets.outputs.benchmark-config }}
IREE_DOCKER_WRAPPER: ./build_tools/github_actions/docker_run.sh
IREE_NORMAL_BENCHMARK_TOOLS_DIR: ${{ steps.unpack-tools.outputs.normal-benchmark-tools-dir }}
IREE_TRACED_BENCHMARK_TOOLS_DIR: ${{ steps.unpack-tools.outputs.traced-benchmark-tools-dir }}
IREE_TRACY_CAPTURE_TOOL: ${{ steps.unpack-tools.outputs.tracy-capture-tool }}
IREE_TARGET_DEVICE_NAME: ${{ env.DEVICE_NAME }}
IREE_SHARD_INDEX: ${{ matrix.benchmark.shard.index }}
IREE_E2E_TEST_ARTIFACTS_DIR: ${{ env.E2E_TEST_ARTIFACTS_DIR }}
IREE_BENCHMARK_RESULTS: ${{ env.BENCHMARK_RESULTS_DIR }}/benchmark-results-${{ matrix.benchmark.device_name }}${{ steps.sharding.outputs.suffix }}.json
IREE_BENCHMARK_TRACES: ${{ env.BENCHMARK_RESULTS_DIR }}/benchmark-traces-${{ matrix.benchmark.device_name }}${{ steps.sharding.outputs.suffix }}.tar.gz
run: |
mkdir -p ${BENCHMARK_RESULTS_DIR}
./build_tools/benchmarks/run_benchmarks.sh
- name: "Uploading benchmark results"
run: gcloud storage cp -r "${BENCHMARK_RESULTS_DIR}" "${GCS_DIR}/"