Create separate benchmark workflow for running large benchmarks (#12890)

Creates a separate workflow for running large benchmarks. Adds ResNet50
at various batch sizes to this workflow. ResNet50 is part of our
progression suite that we will be monitoring closely.

Also rearranges the model_groups to be specific to the target hardware.
diff --git a/.github/workflows/benchmark_compilation.yml b/.github/workflows/benchmark_compilation.yml
new file mode 100644
index 0000000..aaa2e06
--- /dev/null
+++ b/.github/workflows/benchmark_compilation.yml
@@ -0,0 +1,122 @@
+# Copyright 2022 The IREE Authors
+#
+# Licensed under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#
+# Workflow for running compilation benchmarks.
+# It is designed to be called from a parent workflow.
+# The concurrency of this workflow is controlled by the caller's job.
+
+name: Benchmark Compilation
+
+on:
+  workflow_call:
+    inputs:
+      runner-group:
+        required: true
+        type: string
+      runner-env:
+        required: true
+        type: string
+      e2e-test-artifacts-dir:
+        required: true
+        type: string
+      e2e-test-artifacts-gcs-artifact-dir:
+        required: true
+        type: string
+      e2e-test-artifacts-build-log:
+        required: true
+        type: string
+      e2e-test-artifacts-build-log-gcs-artifact:
+        required: true
+        type: string
+    outputs:
+      compile-stats-results:
+        description: |
+          Local path to the compilation benchmark results.
+        value: ${{ jobs.compilation_benchmarks.outputs.compile-stats-results }}
+      compile-stats-results-gcs-artifact:
+        description: |
+          GCS path to the uploaded compilation benchmark results.
+        value: ${{ jobs.compilation_benchmarks.outputs.compile-stats-results-gcs-artifact }}
+
+env:
+  # This duplicates the variable from ci.yml. The variable needs to be in env
+  # instead of the outputs of setup because it contains the run attempt and we
+  # want that to be the current attempt, not whatever attempt the setup step
+  # last ran in. It therefore can't be passed in via inputs because the env
+  # context isn't available there.
+  GCS_DIR: gs://iree-github-actions-${{ github.event_name == 'pull_request' && 'presubmit' || 'postsubmit' }}-artifacts/${{ github.run_id }}/${{ github.run_attempt }}
+
+jobs:
+  compilation_benchmarks:
+    runs-on:
+      - self-hosted # must come first
+      - runner-group=${{ inputs.runner-group }}
+      - environment=${{ inputs.runner-env }}
+      - cpu
+      - os-family=Linux
+    env:
+      E2E_TEST_ARTIFACTS_DIR: ${{ inputs.e2e-test-artifacts-dir }}
+      E2E_TEST_ARTIFACTS_GCS_ARTIFACT_DIR: ${{ inputs.e2e-test-artifacts-gcs-artifact-dir }}
+      E2E_TEST_ARTIFACTS_BUILD_LOG: ${{ inputs.e2e-test-artifacts-build-log }}
+      E2E_TEST_ARTIFACTS_BUILD_LOG_GCS_ARTIFACT: ${{ inputs.e2e-test-artifacts-build-log-gcs-artifact }}
+    outputs:
+      compile-stats-results: ${{ steps.collect.outputs.compile-stats-results }}
+      compile-stats-results-gcs-artifact: ${{ steps.upload.outputs.compile-stats-results-gcs-artifact }}
+    steps:
+      - name: "Checking out repository"
+        uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
+      - name: "Exporting configs"
+        id: "export"
+        env:
+          COMPILATION_CONFIG: "compilation-config.json"
+        run: |
+          ./build_tools/benchmarks/export_benchmark_config.py \
+            compilation \
+            --output="${COMPILATION_CONFIG}"
+          echo "compilation-config=${COMPILATION_CONFIG}" >> "${GITHUB_OUTPUT}"
+      - name: "Downloading assets"
+        id: "download-assets"
+        env:
+          COMPILATION_CONFIG: ${{ steps.export.outputs.compilation-config }}
+        run: |
+          gcloud storage cp \
+            "${E2E_TEST_ARTIFACTS_BUILD_LOG_GCS_ARTIFACT}" \
+            "${E2E_TEST_ARTIFACTS_BUILD_LOG}"
+          mkdir -p "${E2E_TEST_ARTIFACTS_DIR}"
+          jq -r \
+            --arg GCS_ARTIFACT_DIR "${E2E_TEST_ARTIFACTS_GCS_ARTIFACT_DIR}" \
+            '.module_dir_paths | map("\($GCS_ARTIFACT_DIR)/\(.)") | join("\n")' \
+            "${COMPILATION_CONFIG}" | \
+            gcloud storage cp -r --read-paths-from-stdin \
+              "${E2E_TEST_ARTIFACTS_DIR}"
+      - name: "Collecting compilation statistics"
+        id: collect
+        env:
+          COMPILATION_CONFIG: ${{ steps.export.outputs.compilation-config }}
+          GENERATION_CONFIG: generation-config.json
+          COMPILE_STATS_RESULTS: benchmark-results/compile-stats-results.json
+        run: |
+          mkdir -p benchmark-results
+          ./build_tools/benchmarks/collect_compilation_statistics.py alpha \
+            --e2e_test_artifacts_dir="${E2E_TEST_ARTIFACTS_DIR}" \
+            --build_log="${E2E_TEST_ARTIFACTS_BUILD_LOG}" \
+            --compilation_benchmark_config="${COMPILATION_CONFIG}" \
+            --output="${COMPILE_STATS_RESULTS}"
+          echo "compile-stats-results=${COMPILE_STATS_RESULTS}" >> "${GITHUB_OUTPUT}"
+      - name: "Uploading benchmark results"
+        id: upload
+        env:
+          COMPILATION_CONFIG: ${{ steps.export.outputs.compilation-config }}
+          COMPILE_STATS_RESULTS: ${{ steps.collect.outputs.compile-stats-results }}
+          COMPILE_STATS_RESULTS_GCS_ARTIFACT: ${{ env.GCS_DIR }}/${{ steps.collect.outputs.compile-stats-results }}
+        run: |
+          # Upload files with two commands since they go into different GCS
+          # directories.
+          gcloud storage cp "${COMPILATION_CONFIG}" "${GCS_DIR}"
+          gcloud storage cp \
+            "${COMPILE_STATS_RESULTS}" \
+            "${COMPILE_STATS_RESULTS_GCS_ARTIFACT}"
+          echo "compile-stats-results-gcs-artifact=${COMPILE_STATS_RESULTS_GCS_ARTIFACT}" >> "${GITHUB_OUTPUT}"
diff --git a/.github/workflows/benchmark_long.yml b/.github/workflows/benchmark_long.yml
new file mode 100644
index 0000000..704e8d6
--- /dev/null
+++ b/.github/workflows/benchmark_long.yml
@@ -0,0 +1,129 @@
+# Copyright 2023 The IREE Authors
+#
+# Licensed under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#
+# Executes long-running benchmarks that are expected to take multiple hours.
+
+name: Benchmark Long
+
+on:
+  schedule:
+    # Scheduled to run at 09:00 UTC and 21:00 UTC.
+    - cron: '0 09,21 * * *'
+  workflow_dispatch:
+
+env:
+  # This needs to be in env instead of the outputs of setup because it contains
+  # the run attempt and we want that to be the current attempt, not whatever
+  # attempt the setup step last ran in.
+  GCS_DIR: gs://iree-github-actions-${{ github.event_name == 'pull_request' && 'presubmit' || 'postsubmit' }}-artifacts/${{ github.run_id }}/${{ github.run_attempt }}
+
+# Jobs are organized into groups and topologically sorted by dependencies
+jobs:
+  setup:
+    uses: ./.github/workflows/setup.yml
+
+  build_all:
+    needs: setup
+    if: fromJson(needs.setup.outputs.should-run)
+    uses: ./.github/workflows/build_all.yml
+    with:
+      runner-group: ${{ needs.setup.outputs.runner-group }}
+      runner-env: ${{ needs.setup.outputs.runner-env }}
+      write-caches: ${{ needs.setup.outputs.write-caches }}
+
+  build_tf_integrations:
+    needs: setup
+    if: fromJson(needs.setup.outputs.should-run)
+    uses: ./.github/workflows/build_tf_integrations.yml
+    with:
+      runner-group: ${{ needs.setup.outputs.runner-group }}
+      runner-env: ${{ needs.setup.outputs.runner-env }}
+      write-caches: ${{ needs.setup.outputs.write-caches }}
+
+  build_benchmark_tools:
+    needs: [setup, build_all]
+    if: fromJson(needs.setup.outputs.should-run)
+    uses: ./.github/workflows/build_benchmark_tools.yml
+    with:
+      runner-group: ${{ needs.setup.outputs.runner-group }}
+      runner-env: ${{ needs.setup.outputs.runner-env }}
+      build-dir: ${{ needs.build_all.outputs.build-dir }}
+      build-dir-archive: ${{ needs.build_all.outputs.build-dir-archive }}
+      build-dir-gcs-artifact: ${{ needs.build_all.outputs.build-dir-gcs-artifact }}
+
+  build_e2e_test_artifacts:
+    needs: [setup, build_all, build_tf_integrations]
+    if: fromJson(needs.setup.outputs.should-run)
+    uses: ./.github/workflows/build_e2e_test_artifacts.yml
+    with:
+      runner-group: ${{ needs.setup.outputs.runner-group }}
+      runner-env: ${{ needs.setup.outputs.runner-env }}
+      build-dir: ${{ needs.build_all.outputs.build-dir }}
+      build-dir-archive: ${{ needs.build_all.outputs.build-dir-archive }}
+      build-dir-gcs-artifact: ${{ needs.build_all.outputs.build-dir-gcs-artifact }}
+      tf-binaries-dir: ${{ needs.build_tf_integrations.outputs.binaries-dir }}
+      tf-binaries-archive: ${{ needs.build_tf_integrations.outputs.binaries-archive }}
+      tf-binaries-gcs-artifact: ${{ needs.build_tf_integrations.outputs.binaries-gcs-artifact }}
+
+  compilation_benchmarks:
+    needs: [setup, build_e2e_test_artifacts]
+    if: fromJson(needs.setup.outputs.should-run)
+    uses: ./.github/workflows/benchmark_compilation.yml
+    with:
+      runner-group: ${{ needs.setup.outputs.runner-group }}
+      runner-env: ${{ needs.setup.outputs.runner-env }}
+      e2e-test-artifacts-dir: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-dir }}
+      e2e-test-artifacts-gcs-artifact-dir: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-gcs-artifact-dir }}
+      e2e-test-artifacts-build-log: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-build-log }}
+      e2e-test-artifacts-build-log-gcs-artifact: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-build-log-gcs-artifact }}
+
+  execution_benchmarks:
+    needs: [setup, build_benchmark_tools, build_e2e_test_artifacts]
+    if: fromJson(needs.setup.outputs.should-run)
+    uses: ./.github/workflows/benchmark_execution.yml
+    with:
+      # env.GCS_DIR is also duplicated in this workflow. See the note there on
+      # why this is.
+      runner-group: ${{ needs.setup.outputs.runner-group }}
+      runner-env: ${{ needs.setup.outputs.runner-env }}
+      e2e-test-artifacts-dir: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-dir }}
+      e2e-test-artifacts-gcs-artifact-dir: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-gcs-artifact-dir }}
+      benchmark-tools-gcs-artifact-dir: ${{ needs.build_benchmark_tools.outputs.benchmark-tools-gcs-artifact-dir }}
+      benchmark-presets: cuda-long
+
+  summary:
+    # Even if you have an explicit if condition, you still need to override
+    # GitHub's default behavior of not running if any dependencies failed.
+    if: always()
+    runs-on: ubuntu-20.04
+    needs:
+      - setup
+      - build_all
+      - build_tf_integrations
+      - build_benchmark_tools
+      - build_e2e_test_artifacts
+      - compilation_benchmarks
+      - execution_benchmarks
+    steps:
+      - name: Getting failed jobs
+        id: failed_jobs
+        run: |
+          echo '${{ toJson(needs) }}'
+          FAILED_JOBS="$(echo '${{ toJson(needs) }}' \
+            | jq --raw-output \
+            'map_values(select(.result!="success" and .result!="skipped")) | keys | join(",")' \
+          )"
+          echo "failed-jobs=${FAILED_JOBS}" >> $GITHUB_OUTPUT
+          if [[ "${FAILED_JOBS}" != "" ]]; then
+            echo "The following jobs failed: ${FAILED_JOBS}"
+            exit 1
+          fi
+      - name: Posting to Discord
+        uses: sarisia/actions-status-discord@61114b793b460ee85fe38ad3fccc78c7ead38d55 # v1.11.1
+        if: failure() && github.ref_name == 'main'
+        with:
+          webhook: ${{ secrets.DISCORD_WEBHOOK }}
+          description: "The following jobs failed: ${{ steps.failed_jobs.outputs.failed-jobs }}"
diff --git a/.github/workflows/build_all.yml b/.github/workflows/build_all.yml
new file mode 100644
index 0000000..f7a3849
--- /dev/null
+++ b/.github/workflows/build_all.yml
@@ -0,0 +1,108 @@
+# Copyright 2022 The IREE Authors
+#
+# Licensed under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#
+# Workflow for building IREE. It is designed to be called from a parent workflow.
+# The concurrency of this workflow is controlled by the caller's job.
+
+name: Build All
+
+on:
+  workflow_call:
+    inputs:
+      runner-group:
+        required: true
+        type: string
+      runner-env:
+        required: true
+        type: string
+      write-caches:
+        required: true
+        type: string
+    outputs:
+      build-dir:
+        description: |
+          Local path that stores compiled artifacts.
+        value: ${{ jobs.build_all.outputs.build-dir }}
+      build-dir-archive:
+        description: |
+          Local path to the zipped build directory.
+        value: ${{ jobs.build_all.outputs.build-dir-archive }}
+      build-dir-gcs-artifact:
+        description: |
+          GCS path to the uploaded build archive.
+        value: ${{ jobs.build_all.outputs.build-dir-gcs-artifact }}
+
+env:
+  # This duplicates the variable from ci.yml. The variable needs to be in env
+  # instead of the outputs of setup because it contains the run attempt and we
+  # want that to be the current attempt, not whatever attempt the setup step
+  # last ran in. It therefore can't be passed in via inputs because the env
+  # context isn't available there.
+  GCS_DIR: gs://iree-github-actions-${{ github.event_name == 'pull_request' && 'presubmit' || 'postsubmit' }}-artifacts/${{ github.run_id }}/${{ github.run_attempt }}
+
+jobs:
+  build_all:
+    runs-on:
+      - self-hosted # must come first
+      - runner-group=${{ inputs.runner-group }}
+      - environment=${{ inputs.runner-env }}
+      - cpu
+      - os-family=Linux
+    env:
+      BUILD_DIR: full-build-dir
+    outputs:
+      # Pass through the build directory as output so it's available to
+      # dependent jobs.
+      build-dir: ${{ env.BUILD_DIR }}
+      build-dir-archive: ${{ steps.archive.outputs.build-dir-archive }}
+      build-dir-gcs-artifact: ${{ steps.upload.outputs.build-dir-gcs-artifact }}
+    steps:
+      - name: "Checking out repository"
+        uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
+        with:
+          submodules: true
+      - name: "Building IREE"
+        env:
+          IREE_WRITE_REMOTE_CCACHE: ${{ inputs.write-caches }}
+        run: |
+          ./build_tools/github_actions/docker_run.sh \
+            --env "IREE_CCACHE_GCP_TOKEN=$(gcloud auth application-default print-access-token)" \
+            --env "IREE_WRITE_REMOTE_CCACHE=${IREE_WRITE_REMOTE_CCACHE}" \
+            --env "CCACHE_NAMESPACE=gcr.io/iree-oss/base@sha256:24fb5467da30c7b4c0f4c191cdf6124bda63b172d3ae98906e53b3d55ed6ddcb" \
+            gcr.io/iree-oss/base@sha256:24fb5467da30c7b4c0f4c191cdf6124bda63b172d3ae98906e53b3d55ed6ddcb \
+            ./build_tools/cmake/build_all.sh \
+            "${BUILD_DIR}"
+      # The archive step below doesn't include these files. Remove them first to
+      # save disk space.
+      # TODO(#10739): This step can be removed once we enlarge the disk space.
+      - name: "Removing unused files"
+        run: |
+          find "${BUILD_DIR}" -type f \( -name "*.a" -o -name "*.o" \) \
+            -print \
+            -delete
+      # Things get more complicated here than when we're just building the
+      # runtime. The build directory is way bigger. We're also running on our own
+      # runners on GCE. So uploading to GitHub actions artifact storage hosted
+      # on Azure is dirt slow. We drop static libraries and object files, which
+      # aren't needed for testing. Then we do some minimal compression locally
+      # *in parallel* and upload to GCS. This can be further optimized.
+      # Especially decompression is still pretty slow. See #9881.
+      - name: "Creating build dir archive"
+        id: archive
+        env:
+          BUILD_DIR_ARCHIVE: ${{ env.BUILD_DIR }}.tar.zst
+        run: |
+          tar -I 'zstd -T0' \
+            -cf ${BUILD_DIR_ARCHIVE} ${BUILD_DIR}
+          echo "build-dir-archive=${BUILD_DIR_ARCHIVE}" >> "${GITHUB_OUTPUT}"
+      - name: "Uploading build dir archive"
+        id: upload
+        env:
+          BUILD_DIR_ARCHIVE: ${{ steps.archive.outputs.build-dir-archive }}
+          BUILD_DIR_GCS_ARTIFACT: ${{ env.GCS_DIR }}/${{ steps.archive.outputs.build-dir-archive }}
+        run: |
+          gcloud storage cp "${BUILD_DIR_ARCHIVE}" "${BUILD_DIR_GCS_ARTIFACT}"
+          echo "build-dir-gcs-artifact=${BUILD_DIR_GCS_ARTIFACT}" >> "${GITHUB_OUTPUT}"
diff --git a/.github/workflows/build_benchmark_tools.yml b/.github/workflows/build_benchmark_tools.yml
new file mode 100644
index 0000000..4c56c94
--- /dev/null
+++ b/.github/workflows/build_benchmark_tools.yml
@@ -0,0 +1,151 @@
+# Copyright 2022 The IREE Authors
+#
+# Licensed under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#
+# Workflow for building benchmark tools.
+# It is designed to be called from a parent workflow.
+# The concurrency of this workflow is controlled by the caller's job.
+
+name: Build Benchmark Tools
+
+on:
+  workflow_call:
+    inputs:
+      runner-group:
+        required: true
+        type: string
+      runner-env:
+        required: true
+        type: string
+      build-dir:
+        required: true
+        type: string
+      build-dir-archive:
+        required: true
+        type: string
+      build-dir-gcs-artifact:
+        required: true
+        type: string
+    outputs:
+      benchmark-tools-gcs-artifact-dir:
+        description: |
+          GCS path to benchmark tools archive.
+        value: ${{ jobs.build_benchmark_tools.outputs.benchmark-tools-gcs-artifact-dir }}
+
+env:
+  # This duplicates the variable from ci.yml. The variable needs to be in env
+  # instead of the outputs of setup because it contains the run attempt and we
+  # want that to be the current attempt, not whatever attempt the setup step
+  # last ran in. It therefore can't be passed in via inputs because the env
+  # context isn't available there.
+  GCS_DIR: gs://iree-github-actions-${{ github.event_name == 'pull_request' && 'presubmit' || 'postsubmit' }}-artifacts/${{ github.run_id }}/${{ github.run_attempt }}
+
+jobs:
+  build_benchmark_tools:
+    runs-on:
+      - self-hosted # must come first
+      - runner-group=${{ inputs.runner-group }}
+      - environment=${{ inputs.runner-env }}
+      - cpu
+      - os-family=Linux
+    outputs:
+      # We can't collect all outputs from the matrix jobs due to Github's
+      # limitation (https://github.com/orgs/community/discussions/17245).
+      # Therefore, the output is the GCS directory that stores all benchmark
+      # tools archives. The following jobs need to construct the archive names
+      # by themselves and combine with path of GCS directory here to fetch the
+      # archives.
+      benchmark-tools-gcs-artifact-dir: ${{ steps.upload.outputs.benchmark-tools-gcs-artifact-dir }}
+    strategy:
+      matrix:
+        target:
+          - platform: "linux"
+            arch: "x86_64"
+            docker_image: "gcr.io/iree-oss/base-bleeding-edge@sha256:3ea6d37221a452058a7f5a5c25b4f8a82625e4b98c9e638ebdf19bb21917e6fd"
+            # Builds tools on the host and assumes the builder is Linux x86_64.
+            build_script: "./build_tools/cmake/build_runtime.sh"
+            tracy_capture: "gs://iree-shared-files/tracy-capture-linux-x86_64-52b6af88"
+          - platform: "linux"
+            arch: "riscv_64"
+            docker_image: "gcr.io/iree-oss/riscv@sha256:2e71c052d11b2526651af16e64816a30d164efcdfe6fb64623fb4737c37c466a"
+            build_script: "./build_tools/cmake/build_riscv.sh"
+            tracy_capture: "gs://iree-shared-files/tracy-capture-linux-x86_64-52b6af88"
+    env:
+      PLATFORM: ${{ matrix.target.platform }}
+      ARCH: ${{ matrix.target.arch }}
+      DOCKER_IMAGE: ${{ matrix.target.docker_image }}
+      BUILD_SCRIPT: ${{ matrix.target.build_script }}
+      BUILD_TOOLS_DIR: ${{ matrix.target.platform }}-${{ matrix.target.arch }}-benchmark-tools-dir
+      BUILD_DIR: ${{ inputs.build-dir }}
+      BUILD_DIR_ARCHIVE: ${{ inputs.build-dir-archive }}
+      BUILD_DIR_GCS_ARTIFACT: ${{ inputs.build-dir-gcs-artifact }}
+    steps:
+      - name: "Checking out repository"
+        uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
+      - name: "Checking out runtime submodules"
+        run: ./build_tools/scripts/git/update_runtime_submodules.sh
+      - name: "Downloading build dir archive"
+        run: gcloud storage cp "${BUILD_DIR_GCS_ARTIFACT}" "${BUILD_DIR_ARCHIVE}"
+      - name: "Extracting host binaries"
+        run: tar -xf "${BUILD_DIR_ARCHIVE}" "${BUILD_DIR}/install"
+      - name: "Compiling the benchmark tools"
+        id: build
+        run: |
+          ./build_tools/github_actions/docker_run.sh \
+            --env "IREE_TARGET_PLATFORM=${PLATFORM}" \
+            --env "IREE_TARGET_ARCH=${ARCH}" \
+            --env "BUILD_PRESET=benchmark" \
+            --env "IREE_HOST_BIN_DIR=${BUILD_DIR}/install/bin" \
+            "${DOCKER_IMAGE}" "${BUILD_SCRIPT}" "${BUILD_TOOLS_DIR}/build"
+      - name: "Compiling the benchmark tools with tracing"
+        id: build-with-tracing
+        run: |
+          ./build_tools/github_actions/docker_run.sh \
+            --env "IREE_TARGET_PLATFORM=${PLATFORM}" \
+            --env "IREE_TARGET_ARCH=${ARCH}" \
+            --env "BUILD_PRESET=benchmark-with-tracing" \
+            --env "IREE_HOST_BIN_DIR=${BUILD_DIR}/install/bin" \
+            "${DOCKER_IMAGE}" "${BUILD_SCRIPT}" "${BUILD_TOOLS_DIR}/build-traced"
+      - name: "Downloading pre-built tracy capture tool"
+        id: download-tracy-capture
+        env:
+          TRACY_CAPTURE_GCS_ARTIFACT: ${{ matrix.target.tracy_capture }}
+          TRACY_CAPTURE: ${{ env.BUILD_TOOLS_DIR }}/build-traced/tracy-capture
+        run: |
+          gcloud storage cp "${TRACY_CAPTURE_GCS_ARTIFACT}" "${TRACY_CAPTURE}"
+          chmod +x "${TRACY_CAPTURE}"
+          echo "tracy-capture=${TRACY_CAPTURE}" >> "${GITHUB_OUTPUT}"
+      - name: "Creating the benchmark tools archive"
+        # Here we pack a tracy-capture binary (~7MB) into each benchmark tools
+        # archive. This could be wasteful because multiple benchmark tools
+        # archives might pack the same tracy-capture binary. But it simplifies
+        # the process to fetch required tools to run benchmarks and we only have
+        # a few versions of the benchmark tools archives.
+        # Detailed reason: The generated benchmark matrix fetches the benchmark
+        # tools archive based on the target device. However, there is a scenario
+        # that the tracy-capture is running on a different host. E.g. Mobile
+        # benchmarks have the benchmark tool forwarding the data from the phones
+        # to the tracy-capture running on Linux host. Embedding the host of
+        # tracy-capture will overload the benchmark matrix and puts too much
+        # unrelated machine setup in it. So we simply pack everything needed
+        # into each benchmark tools archive.
+        id: archive
+        env:
+          TRACY_CAPTURE: ${{ steps.download-tracy-capture.outputs.tracy-capture }}
+          BENCHMARK_TOOLS_ARCHIVE: ${{ env.PLATFORM }}-${{ env.ARCH }}-benchmark-tools.tar
+        run: |
+          tar -cf "${BENCHMARK_TOOLS_ARCHIVE}" \
+            "${BUILD_TOOLS_DIR}"/*/tools/iree-benchmark-module \
+            "${BUILD_TOOLS_DIR}"/*/tools/build_config.txt \
+            "${TRACY_CAPTURE}"
+          echo "benchmark-tools-archive=${BENCHMARK_TOOLS_ARCHIVE}" >> "${GITHUB_OUTPUT}"
+      - name: "Uploading the benchmark tools archive"
+        id: upload
+        env:
+          BENCHMARK_TOOLS_ARCHIVE: ${{ steps.archive.outputs.benchmark-tools-archive }}
+          BENCHMARK_TOOLS_GCS_ARTIFACT_DIR: ${{ env.GCS_DIR }}/benchmark-tools
+        run: |
+          gcloud storage cp "${BENCHMARK_TOOLS_ARCHIVE}" "${BENCHMARK_TOOLS_GCS_ARTIFACT_DIR}/"
+          echo "benchmark-tools-gcs-artifact-dir=${BENCHMARK_TOOLS_GCS_ARTIFACT_DIR}" >> "${GITHUB_OUTPUT}"
diff --git a/.github/workflows/build_e2e_test_artifacts.yml b/.github/workflows/build_e2e_test_artifacts.yml
new file mode 100644
index 0000000..6c1a66a
--- /dev/null
+++ b/.github/workflows/build_e2e_test_artifacts.yml
@@ -0,0 +1,131 @@
+# Copyright 2022 The IREE Authors
+#
+# Licensed under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#
+# Workflow for building E2E test artifacts.
+# It is designed to be called from a parent workflow.
+# The concurrency of this workflow is controlled by the caller's job.
+
+name: Build E2E Test Artifacts
+
+on:
+  workflow_call:
+    inputs:
+      runner-group:
+        required: true
+        type: string
+      runner-env:
+        required: true
+        type: string
+      build-dir:
+        required: true
+        type: string
+      build-dir-archive:
+        required: true
+        type: string
+      build-dir-gcs-artifact:
+        required: true
+        type: string
+      tf-binaries-dir:
+        required: true
+        type: string
+      tf-binaries-archive:
+        required: true
+        type: string
+      tf-binaries-gcs-artifact:
+        required: true
+        type: string
+    outputs:
+      e2e-test-artifacts-dir:
+        description: |
+          Local path that stores compiled test artifacts.
+        value: ${{ jobs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-dir }}
+      e2e-test-artifacts-gcs-artifact-dir:
+        description: |
+          GCS path to the uploaded test artifacts.
+        value: ${{ jobs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-gcs-artifact-dir }}
+      e2e-test-artifacts-build-log:
+        description: |
+          Local path that contains the build log.
+        value: ${{ jobs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-build-log }}
+      e2e-test-artifacts-build-log-gcs-artifact:
+        description: |
+          GCS path to the uploaded build log.
+        value: ${{ jobs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-build-log-gcs-artifact }}
+
+env:
+  # This duplicates the variable from ci.yml. The variable needs to be in env
+  # instead of the outputs of setup because it contains the run attempt and we
+  # want that to be the current attempt, not whatever attempt the setup step
+  # last ran in. It therefore can't be passed in via inputs because the env
+  # context isn't available there.
+  GCS_DIR: gs://iree-github-actions-${{ github.event_name == 'pull_request' && 'presubmit' || 'postsubmit' }}-artifacts/${{ github.run_id }}/${{ github.run_attempt }}
+
+jobs:
+  build_e2e_test_artifacts:
+    runs-on:
+      - self-hosted # must come first
+      - runner-group=${{ inputs.runner-group }}
+      - environment=${{ inputs.runner-env }}
+      - cpu
+      - os-family=Linux
+    env:
+      HOST_BUILD_DIR: ${{ inputs.build-dir }}
+      HOST_BUILD_DIR_ARCHIVE: ${{ inputs.build-dir-archive }}
+      HOST_BUILD_DIR_GCS_ARTIFACT: ${{ inputs.build-dir-gcs-artifact }}
+      TF_BINARIES_DIR: ${{ inputs.tf-binaries-dir }}
+      TF_BINARIES_ARCHIVE: ${{ inputs.tf-binaries-archive }}
+      TF_BINARIES_GCS_ARTIFACT: ${{ inputs.tf-binaries-gcs-artifact }}
+    outputs:
+      e2e-test-artifacts-dir: ${{ steps.build.outputs.e2e-test-artifacts-dir }}
+      e2e-test-artifacts-gcs-artifact-dir: ${{ steps.upload.outputs.e2e-test-artifacts-gcs-artifact-dir }}
+      e2e-test-artifacts-build-log: ${{ steps.build.outputs.e2e-test-artifacts-build-log }}
+      e2e-test-artifacts-build-log-gcs-artifact: ${{ steps.upload.outputs.e2e-test-artifacts-build-log-gcs-artifact }}
+    steps:
+      - name: "Checking out repository"
+        uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
+      - name: "Checking out runtime submodules"
+        run: ./build_tools/scripts/git/update_runtime_submodules.sh
+      - name: "Downloading build dir archive"
+        run: gcloud storage cp "${HOST_BUILD_DIR_GCS_ARTIFACT}" "${HOST_BUILD_DIR_ARCHIVE}"
+      - name: "Extracting install from build dir archive"
+        run: tar -xf "${HOST_BUILD_DIR_ARCHIVE}" "${HOST_BUILD_DIR}/install"
+      - name: "Downloading TF binaries archive"
+        run: gcloud storage cp "${TF_BINARIES_GCS_ARTIFACT}" "${TF_BINARIES_ARCHIVE}"
+      - name: "Extracting TF binaries archive"
+        run: tar -xf "${TF_BINARIES_ARCHIVE}"
+      - name: "Building e2e test artifacts"
+        id: build
+        env:
+          BUILD_E2E_TEST_ARTIFACTS_DIR: build-e2e-test-artifacts
+        run: |
+          build_tools/github_actions/docker_run.sh \
+            --env "IREE_TF_BINARIES_DIR=${TF_BINARIES_DIR}" \
+            --env "IREE_HOST_BIN_DIR=${HOST_BUILD_DIR}/install/bin" \
+            gcr.io/iree-oss/base@sha256:24fb5467da30c7b4c0f4c191cdf6124bda63b172d3ae98906e53b3d55ed6ddcb \
+            build_tools/cmake/build_e2e_test_artifacts.sh \
+            "${BUILD_E2E_TEST_ARTIFACTS_DIR}"
+          echo "e2e-test-artifacts-dir=${BUILD_E2E_TEST_ARTIFACTS_DIR}/e2e_test_artifacts" >> "${GITHUB_OUTPUT}"
+          echo "e2e-test-artifacts-build-log=${BUILD_E2E_TEST_ARTIFACTS_DIR}/.ninja_log" >> "${GITHUB_OUTPUT}"
+      - name: "Uploading e2e test artifacts"
+        id: upload
+        env:
+          E2E_TEST_ARTIFACTS_DIR: ${{ steps.build.outputs.e2e-test-artifacts-dir }}
+          E2E_TEST_ARTIFACTS_GCS_ARTIFACT_DIR: ${{ env.GCS_DIR }}/e2e-test-artifacts
+          E2E_TEST_ARTIFACTS_BUILD_LOG: ${{ steps.build.outputs.e2e-test-artifacts-build-log }}
+          E2E_TEST_ARTIFACTS_BUILD_LOG_GCS_ARTIFACT: ${{ env.GCS_DIR }}/e2e-test-artifacts/ninja_log
+        run: |
+          # Uploads all IREE artifacts and MLIR files (including the imported
+          # MLIR files and MLIR source models).
+          # Not archiving the directory to allow fetching each file as needed
+          # separately.
+          find "${E2E_TEST_ARTIFACTS_DIR}" -maxdepth 1 \
+            -name "iree_*" -o -name "model_*.mlir" | \
+            gcloud storage cp --read-paths-from-stdin -r \
+              "${E2E_TEST_ARTIFACTS_GCS_ARTIFACT_DIR}"
+          gcloud storage cp "${E2E_TEST_ARTIFACTS_BUILD_LOG}" \
+              "${E2E_TEST_ARTIFACTS_BUILD_LOG_GCS_ARTIFACT}"
+          echo "e2e-test-artifacts-gcs-artifact-dir=${E2E_TEST_ARTIFACTS_GCS_ARTIFACT_DIR}" >> "${GITHUB_OUTPUT}"
+          echo "e2e-test-artifacts-build-log-gcs-artifact=${E2E_TEST_ARTIFACTS_BUILD_LOG_GCS_ARTIFACT}" >> "${GITHUB_OUTPUT}"
diff --git a/.github/workflows/build_tf_integrations.yml b/.github/workflows/build_tf_integrations.yml
new file mode 100644
index 0000000..57d41bb
--- /dev/null
+++ b/.github/workflows/build_tf_integrations.yml
@@ -0,0 +1,91 @@
+# Copyright 2022 The IREE Authors
+#
+# Licensed under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#
+# Workflow for building TensorFlow binaries.
+# It is designed to be called from a parent workflow.
+# The concurrency of this workflow is controlled by the caller's job.
+
+name: Build Tensorflow Integrations
+
+on:
+  workflow_call:
+    inputs:
+      runner-group:
+        required: true
+        type: string
+      runner-env:
+        required: true
+        type: string
+      write-caches:
+        required: true
+        type: string
+    outputs:
+      binaries-dir:
+        description: |
+          Local path that stores compiled TensorFlow binaries.
+        value: ${{ jobs.build_tf_integrations.outputs.binaries-dir }}
+      binaries-archive:
+        description: |
+          Local path to the zipped binary directory.
+        value: ${{ jobs.build_tf_integrations.outputs.binaries-archive }}
+      binaries-gcs-artifact:
+        description: |
+          GCS path to the uploaded binary archive.
+        value: ${{ jobs.build_tf_integrations.outputs.binaries-gcs-artifact }}
+
+env:
+  # This duplicates the variable from ci.yml. The variable needs to be in env
+  # instead of the outputs of setup because it contains the run attempt and we
+  # want that to be the current attempt, not whatever attempt the setup step
+  # last ran in. It therefore can't be passed in via inputs because the env
+  # context isn't available there.
+  GCS_DIR: gs://iree-github-actions-${{ github.event_name == 'pull_request' && 'presubmit' || 'postsubmit' }}-artifacts/${{ github.run_id }}/${{ github.run_attempt }}
+
+jobs:
+  build_tf_integrations:
+    runs-on:
+      - self-hosted # must come first
+      - runner-group=${{ inputs.runner-group }}
+      - environment=${{ inputs.runner-env }}
+      - cpu
+      - os-family=Linux
+    outputs:
+      binaries-dir: ${{ steps.build.outputs.binaries-dir }}
+      binaries-archive: ${{ steps.archive.outputs.binaries-archive }}
+      binaries-gcs-artifact: ${{ steps.upload.outputs.binaries-gcs-artifact }}
+    steps:
+      - name: "Checking out repository"
+        uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
+        with:
+          submodules: true
+      - name: "Building TF binaries"
+        id: build
+        env:
+          IREE_TF_BINARIES_OUTPUT_DIR: iree-tf-binaries
+          IREE_WRITE_REMOTE_BAZEL_CACHE: ${{ inputs.write-caches }}
+        run: |
+          ./build_tools/github_actions/docker_run.sh \
+            --env "IREE_WRITE_REMOTE_BAZEL_CACHE=${IREE_WRITE_REMOTE_BAZEL_CACHE}" \
+            --env "IREE_TF_BINARIES_OUTPUT_DIR=${IREE_TF_BINARIES_OUTPUT_DIR}" \
+            gcr.io/iree-oss/frontends-swiftshader@sha256:800c9bbefc2f396f99b91a29bead233d85b746fa1effa7845a4336c9b6106dd6 \
+            build_tools/cmake/build_tf_binaries.sh
+          echo "binaries-dir=${IREE_TF_BINARIES_OUTPUT_DIR}" >> "${GITHUB_OUTPUT}"
+      - name: "Creating archive of binaries"
+        id: archive
+        env:
+          BINARIES_ARCHIVE: tf-binaries.tar
+          BINARIES_DIR: ${{ steps.build.outputs.binaries-dir }}
+        run: |
+          tar -cf "${BINARIES_ARCHIVE}" "${BINARIES_DIR}"
+          echo "binaries-archive=${BINARIES_ARCHIVE}" >> "${GITHUB_OUTPUT}"
+      - name: "Uploading binaries archive"
+        id: upload
+        env:
+          BINARIES_ARCHIVE: ${{ steps.archive.outputs.binaries-archive }}
+          BINARIES_GCS_ARTIFACT: ${{ env.GCS_DIR }}/${{ steps.archive.outputs.binaries-archive }}
+        run: |
+          gcloud storage cp "${BINARIES_ARCHIVE}" "${BINARIES_GCS_ARTIFACT}"
+          echo "binaries-gcs-artifact=${BINARIES_GCS_ARTIFACT}" >> "${GITHUB_OUTPUT}"
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 203f9a6..1079d63 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -58,72 +58,7 @@
 # Jobs are organized into groups and topologically sorted by dependencies
 jobs:
   setup:
-    runs-on: ubuntu-20.04
-    env:
-      # The commit being checked out is the merge commit for the PR. Its first
-      # parent will be the tip of main.
-      BASE_REF: HEAD^
-    outputs:
-      should-run: ${{ steps.configure.outputs.should-run }}
-      is-pr: ${{ steps.configure.outputs.is-pr }}
-      runner-env: ${{ steps.configure.outputs.runner-env }}
-      runner-group: ${{ steps.configure.outputs.runner-group }}
-      write-caches: ${{ steps.configure.outputs.write-caches }}
-      benchmark-presets: ${{ steps.configure.outputs.benchmark-presets }}
-    steps:
-      - name: "Checking out repository"
-        uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
-        with:
-          # We need the parent commit to do a diff
-          fetch-depth: 2
-      - name: "Fetching PR description"
-        # We fetch the latest pull request data (description, labels, ...) from
-        # API instead of using stale one from pull_request event. This makes it
-        # possible to update the trailers, labels on the pull request and re-run
-        # the workflow to make them take effect.
-        # This is majorly for triggering benchmarks without pushing new commits.
-        # See https://github.com/openxla/iree/issues/10042#issuecomment-1449250094
-        # for more details.
-        id: fetch-pr
-        if: github.event_name == 'pull_request'
-        env:
-          PR_NUMBER: ${{ github.event.number }}
-          PR_JSON: pull_request.json
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        run: |
-          gh api "/repos/${GITHUB_REPOSITORY}/pulls/${PR_NUMBER}" > "${PR_JSON}"
-          # It requires delimiter to pass multiline strings through
-          # GITHUB_OUTPUT. Since these are already escaped JSON strings, pass
-          # the JSON strings and later use fromJSON to decode them.
-          echo "pr-title=$(jq '.title' ${PR_JSON})" >> "${GITHUB_OUTPUT}"
-          echo "pr-body=$(jq '.body' ${PR_JSON})" >> "${GITHUB_OUTPUT}"
-          # Use --compact-output to avoid multiline JSON.
-          echo "pr-labels=$(jq --compact-output '.labels | map(.name)' \
-            ${PR_JSON})" >> "${GITHUB_OUTPUT}"
-      - name: "Configuring CI options"
-        id: configure
-        env:
-          PR_TITLE: ${{ fromJSON(steps.fetch-pr.outputs.pr-title || '""') }}
-          PR_BODY: ${{ fromJSON(steps.fetch-pr.outputs.pr-body || '""') }}
-          PR_LABELS: ${{ steps.fetch-pr.outputs.pr-labels || '[]' }}
-          ORIGINAL_PR_TITLE: ${{ github.event.pull_request.title }}
-          ORIGINAL_PR_BODY: ${{ github.event.pull_request.body }}
-          ORIGINAL_PR_LABELS: ${{ toJSON(github.event.pull_request.labels.*.name) }}
-        run: |
-          # Just informative logging. There should only be two commits in the
-          # history here, but limiting the depth helps when copying from a local
-          # repo instead of using checkout, e.g. with
-          # https://github.com/nektos/act where there will be more.
-          git log --oneline --graph --max-count=3
-
-          ./build_tools/github_actions/configure_ci.py
-
-      - name: "Show benchmark presets"
-        env:
-          BENCHMARK_PRESETS: ${{ steps.configure.outputs.benchmark-presets }}
-        run: |
-          echo ":stopwatch: Enabled benchmarks: \`${BENCHMARK_PRESETS}\`" \
-            >> "${GITHUB_STEP_SUMMARY}"
+    uses: ./.github/workflows/setup.yml
 
   ################################### Basic ####################################
   # Jobs that build all of IREE "normally"
@@ -131,67 +66,11 @@
   build_all:
     needs: setup
     if: fromJson(needs.setup.outputs.should-run)
-    runs-on:
-      - self-hosted # must come first
-      - runner-group=${{ needs.setup.outputs.runner-group }}
-      - environment=${{ needs.setup.outputs.runner-env }}
-      - cpu
-      - os-family=Linux
-    env:
-      BUILD_DIR: full-build-dir
-    outputs:
-      # Pass through the build directory as output so it's available to
-      # dependent jobs.
-      build-dir: ${{ env.BUILD_DIR }}
-      build-dir-archive: ${{ steps.archive.outputs.build-dir-archive }}
-      build-dir-gcs-artifact: ${{ steps.upload.outputs.build-dir-gcs-artifact }}
-    steps:
-      - name: "Checking out repository"
-        uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
-        with:
-          submodules: true
-      - name: "Building IREE"
-        env:
-          IREE_WRITE_REMOTE_CCACHE: ${{ needs.setup.outputs.write-caches }}
-        run: |
-          ./build_tools/github_actions/docker_run.sh \
-            --env "IREE_CCACHE_GCP_TOKEN=$(gcloud auth application-default print-access-token)" \
-            --env "IREE_WRITE_REMOTE_CCACHE=${IREE_WRITE_REMOTE_CCACHE}" \
-            --env "CCACHE_NAMESPACE=gcr.io/iree-oss/base@sha256:24fb5467da30c7b4c0f4c191cdf6124bda63b172d3ae98906e53b3d55ed6ddcb" \
-            gcr.io/iree-oss/base@sha256:24fb5467da30c7b4c0f4c191cdf6124bda63b172d3ae98906e53b3d55ed6ddcb \
-            ./build_tools/cmake/build_all.sh \
-            "${BUILD_DIR}"
-      # The archive step below doesn't include these files. Remove them first to
-      # save disk space.
-      # TODO(#10739): This step can be removed once we enlarge the disk sapce.
-      - name: "Removing unused files"
-        run: |
-          find "${BUILD_DIR}" -type f -name "*.a" -o -type f -name "*.o" \
-            -print \
-            -delete
-      # Things get more complicated here than when we're just building the
-      # runtime. The build directory is way bigger. We're also using on our own
-      # runners on GCE. So uploading to GitHub actions artifact storage hosted
-      # on Azure is dirt slow. We drop static libraries and object files, which
-      # aren't needed for testing. Then we do some minimal compression locally
-      # *in parallel* and upload to GCS. This can be further optimized.
-      # Especially decompression is still pretty slow. See #9881.
-      - name: "Creating build dir archive"
-        id: archive
-        env:
-          BUILD_DIR_ARCHIVE: ${{ env.BUILD_DIR }}.tar.zst
-        run: |
-          tar -I 'zstd -T0' \
-            -cf ${BUILD_DIR_ARCHIVE} ${BUILD_DIR}
-          echo "build-dir-archive=${BUILD_DIR_ARCHIVE}" >> "${GITHUB_OUTPUT}"
-      - name: "Uploading build dir archive"
-        id: upload
-        env:
-          BUILD_DIR_ARCHIVE: ${{ steps.archive.outputs.build-dir-archive }}
-          BUILD_DIR_GCS_ARTIFACT: ${{ env.GCS_DIR }}/${{ steps.archive.outputs.build-dir-archive }}
-        run: |
-          gcloud storage cp "${BUILD_DIR_ARCHIVE}" "${BUILD_DIR_GCS_ARTIFACT}"
-          echo "build-dir-gcs-artifact=${BUILD_DIR_GCS_ARTIFACT}" >> "${GITHUB_OUTPUT}"
+    uses: ./.github/workflows/build_all.yml
+    with:
+      runner-group: ${{ needs.setup.outputs.runner-group }}
+      runner-env: ${{ needs.setup.outputs.runner-env }}
+      write-caches: ${{ needs.setup.outputs.write-caches }}
 
   build_test_all_windows:
     needs: setup
@@ -512,49 +391,11 @@
   build_tf_integrations:
     needs: setup
     if: fromJson(needs.setup.outputs.should-run)
-    runs-on:
-      - self-hosted # must come first
-      - runner-group=${{ needs.setup.outputs.runner-group }}
-      - environment=${{ needs.setup.outputs.runner-env }}
-      - cpu
-      - os-family=Linux
-    outputs:
-      binaries-dir: ${{ steps.build.outputs.binaries-dir }}
-      binaries-archive: ${{ steps.archive.outputs.binaries-archive }}
-      binaries-gcs-artifact: ${{ steps.upload.outputs.binaries-gcs-artifact }}
-    steps:
-      - name: "Checking out repository"
-        uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
-        with:
-          submodules: true
-      - name: "Building TF binaries"
-        id: build
-        env:
-          IREE_TF_BINARIES_OUTPUT_DIR: iree-tf-binaries
-          IREE_WRITE_REMOTE_BAZEL_CACHE: ${{ needs.setup.outputs.write-caches }}
-        run: |
-          ./build_tools/github_actions/docker_run.sh \
-            --env "IREE_WRITE_REMOTE_BAZEL_CACHE=${IREE_WRITE_REMOTE_BAZEL_CACHE}" \
-            --env "IREE_TF_BINARIES_OUTPUT_DIR=${IREE_TF_BINARIES_OUTPUT_DIR}" \
-            gcr.io/iree-oss/frontends-swiftshader@sha256:800c9bbefc2f396f99b91a29bead233d85b746fa1effa7845a4336c9b6106dd6 \
-            build_tools/cmake/build_tf_binaries.sh
-          echo "binaries-dir=${IREE_TF_BINARIES_OUTPUT_DIR}" >> "${GITHUB_OUTPUT}"
-      - name: "Creating archive of binaries"
-        id: archive
-        env:
-          BINARIES_ARCHIVE: tf-binaries.tar
-          BINARIES_DIR: ${{ steps.build.outputs.binaries-dir }}
-        run: |
-          tar -cf "${BINARIES_ARCHIVE}" "${BINARIES_DIR}"
-          echo "binaries-archive=${BINARIES_ARCHIVE}" >> "${GITHUB_OUTPUT}"
-      - name: "Uploading binaries archive"
-        id: upload
-        env:
-          BINARIES_ARCHIVE: ${{ steps.archive.outputs.binaries-archive }}
-          BINARIES_GCS_ARTIFACT: ${{ env.GCS_DIR }}/${{ steps.archive.outputs.binaries-archive }}
-        run: |
-          gcloud storage cp "${BINARIES_ARCHIVE}" "${BINARIES_GCS_ARTIFACT}"
-          echo "binaries-gcs-artifact=${BINARIES_GCS_ARTIFACT}" >> "${GITHUB_OUTPUT}"
+    uses: ./.github/workflows/build_tf_integrations.yml
+    with:
+      runner-group: ${{ needs.setup.outputs.runner-group }}
+      runner-env: ${{ needs.setup.outputs.runner-env }}
+      write-caches: ${{ needs.setup.outputs.write-caches }}
 
   test_tf_integrations:
     needs: [setup, build_all, build_tf_integrations]
@@ -902,252 +743,39 @@
   build_benchmark_tools:
     needs: [setup, build_all]
     if: fromJson(needs.setup.outputs.should-run)
-    runs-on:
-      - self-hosted # must come first
-      - runner-group=${{ needs.setup.outputs.runner-group }}
-      - environment=${{ needs.setup.outputs.runner-env }}
-      - cpu
-      - os-family=Linux
-    outputs:
-      # We can't collect all outputs from the matrix jobs due to Github's
-      # limitation (https://github.com/orgs/community/discussions/17245).
-      # Therefore, the output is the GCS directory that stores all benchmark
-      # tools archives. The following jobs need to construct the archive names
-      # by themselves and combine with path of GCS directory here to fetch the
-      # archives.
-      benchmark-tools-gcs-artifact-dir: ${{ steps.upload.outputs.benchmark-tools-gcs-artifact-dir }}
-    strategy:
-      matrix:
-        target:
-          - platform: "linux"
-            arch: "x86_64"
-            docker_image: "gcr.io/iree-oss/base-bleeding-edge@sha256:3ea6d37221a452058a7f5a5c25b4f8a82625e4b98c9e638ebdf19bb21917e6fd"
-            # Builds tools on the host and assumes the builder is Linux x86_64.
-            build_script: "./build_tools/cmake/build_runtime.sh"
-            tracy_capture: "gs://iree-shared-files/tracy-capture-linux-x86_64-52b6af88"
-          - platform: "linux"
-            arch: "riscv_64"
-            docker_image: "gcr.io/iree-oss/riscv@sha256:2e71c052d11b2526651af16e64816a30d164efcdfe6fb64623fb4737c37c466a"
-            build_script: "./build_tools/cmake/build_riscv.sh"
-            tracy_capture: "gs://iree-shared-files/tracy-capture-linux-x86_64-52b6af88"
-    env:
-      PLATFORM: ${{ matrix.target.platform }}
-      ARCH: ${{ matrix.target.arch }}
-      DOCKER_IMAGE: ${{ matrix.target.docker_image }}
-      BUILD_SCRIPT: ${{ matrix.target.build_script }}
-      BUILD_TOOLS_DIR: ${{ matrix.target.platform }}-${{ matrix.target.arch }}-benchmark-tools-dir
-      BUILD_DIR: ${{ needs.build_all.outputs.build-dir }}
-      BUILD_DIR_ARCHIVE: ${{ needs.build_all.outputs.build-dir-archive }}
-      BUILD_DIR_GCS_ARTIFACT: ${{ needs.build_all.outputs.build-dir-gcs-artifact }}
-    steps:
-      - name: "Checking out repository"
-        uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
-      - name: "Checking out runtime submodules"
-        run: ./build_tools/scripts/git/update_runtime_submodules.sh
-      - name: "Downloading build dir archive"
-        run: gcloud storage cp "${BUILD_DIR_GCS_ARTIFACT}" "${BUILD_DIR_ARCHIVE}"
-      - name: "Extracting host binaries"
-        run: tar -xf "${BUILD_DIR_ARCHIVE}" "${BUILD_DIR}/install"
-      - name: "Compiling the benchmark tools"
-        id: build
-        run: |
-          ./build_tools/github_actions/docker_run.sh \
-            --env "IREE_TARGET_PLATFORM=${PLATFORM}" \
-            --env "IREE_TARGET_ARCH=${ARCH}" \
-            --env "BUILD_PRESET=benchmark" \
-            --env "IREE_HOST_BIN_DIR=${BUILD_DIR}/install/bin" \
-            "${DOCKER_IMAGE}" "${BUILD_SCRIPT}" "${BUILD_TOOLS_DIR}/build"
-      - name: "Compiling the benchmark tools with tracing"
-        id: build-with-tracing
-        run: |
-          ./build_tools/github_actions/docker_run.sh \
-            --env "IREE_TARGET_PLATFORM=${PLATFORM}" \
-            --env "IREE_TARGET_ARCH=${ARCH}" \
-            --env "BUILD_PRESET=benchmark-with-tracing" \
-            --env "IREE_HOST_BIN_DIR=${BUILD_DIR}/install/bin" \
-            "${DOCKER_IMAGE}" "${BUILD_SCRIPT}" "${BUILD_TOOLS_DIR}/build-traced"
-      - name: "Downloading pre-built tracy capture tool"
-        id: download-tracy-capture
-        env:
-          TRACY_CAPTURE_GCS_ARTIFACT: ${{ matrix.target.tracy_capture }}
-          TRACY_CAPTURE: ${{ env.BUILD_TOOLS_DIR }}/build-traced/tracy-capture
-        run: |
-          gcloud storage cp "${TRACY_CAPTURE_GCS_ARTIFACT}" "${TRACY_CAPTURE}"
-          chmod +x "${TRACY_CAPTURE}"
-          echo "tracy-capture=${TRACY_CAPTURE}" >> "${GITHUB_OUTPUT}"
-      - name: "Creating the benchmark tools archive"
-        # Here we pack a tracy-capture binary (~7MB) into each benchmark tools
-        # archive. This could be wasteful because multiple benchmark tools
-        # archives might pack the same tracy-capture binary. But it simplifies
-        # the process to fetch required tools to run benchmarks and we only have
-        # a few versions of the benchmark tools archives.
-        # Detailed reason: The generated benchmark matrix fetches the benchmark
-        # tools archive based on the target device. However, there is a scenario
-        # that the tracy-capture is running on a different host. E.g. Mobile
-        # benchmarks have the benchmark tool forwarding the data from the phones
-        # to the tracy-capture running on Linux host. Embedding the host of
-        # tracy-capture will overloads the benchmark matrix and puts too much
-        # unrelated machine setup in it. So we simply pack everything needed
-        # into each benchmark tools archive.
-        id: archive
-        env:
-          TRACY_CAPTURE: ${{ steps.download-tracy-capture.outputs.tracy-capture }}
-          BENCHMARK_TOOLS_ARCHIVE: ${{ env.PLATFORM }}-${{ env.ARCH }}-benchmark-tools.tar
-        run: |
-          tar -cf "${BENCHMARK_TOOLS_ARCHIVE}" \
-            "${BUILD_TOOLS_DIR}"/*/tools/iree-benchmark-module \
-            "${BUILD_TOOLS_DIR}"/*/tools/build_config.txt \
-            "${TRACY_CAPTURE}"
-          echo "benchmark-tools-archive=${BENCHMARK_TOOLS_ARCHIVE}" >> "${GITHUB_OUTPUT}"
-      - name: "Uploading the benchmark tools archive"
-        id: upload
-        env:
-          BENCHMARK_TOOLS_ARCHIVE: ${{ steps.archive.outputs.benchmark-tools-archive }}
-          BENCHMARK_TOOLS_GCS_ARTIFACT_DIR: ${{ env.GCS_DIR }}/benchmark-tools
-        run: |
-          gcloud storage cp "${BENCHMARK_TOOLS_ARCHIVE}" "${BENCHMARK_TOOLS_GCS_ARTIFACT_DIR}/"
-          echo "benchmark-tools-gcs-artifact-dir=${BENCHMARK_TOOLS_GCS_ARTIFACT_DIR}" >> "${GITHUB_OUTPUT}"
+    uses: ./.github/workflows/build_benchmark_tools.yml
+    with:
+      runner-group: ${{ needs.setup.outputs.runner-group }}
+      runner-env: ${{ needs.setup.outputs.runner-env }}
+      build-dir: ${{ needs.build_all.outputs.build-dir }}
+      build-dir-archive: ${{ needs.build_all.outputs.build-dir-archive }}
+      build-dir-gcs-artifact: ${{ needs.build_all.outputs.build-dir-gcs-artifact }}
 
   build_e2e_test_artifacts:
     needs: [setup, build_all, build_tf_integrations]
     if: fromJson(needs.setup.outputs.should-run)
-    runs-on:
-      - self-hosted # must come first
-      - runner-group=${{ needs.setup.outputs.runner-group }}
-      - environment=${{ needs.setup.outputs.runner-env }}
-      - cpu
-      - os-family=Linux
-    env:
-      HOST_BUILD_DIR: ${{ needs.build_all.outputs.build-dir }}
-      HOST_BUILD_DIR_ARCHIVE: ${{ needs.build_all.outputs.build-dir-archive }}
-      HOST_BUILD_DIR_GCS_ARTIFACT: ${{ needs.build_all.outputs.build-dir-gcs-artifact }}
-      TF_BINARIES_DIR: ${{ needs.build_tf_integrations.outputs.binaries-dir }}
-      TF_BINARIES_ARCHIVE: ${{ needs.build_tf_integrations.outputs.binaries-archive }}
-      TF_BINARIES_GCS_ARTIFACT: ${{ needs.build_tf_integrations.outputs.binaries-gcs-artifact }}
-    outputs:
-      e2e-test-artifacts-dir: ${{ steps.build.outputs.e2e-test-artifacts-dir }}
-      e2e-test-artifacts-gcs-artifact-dir: ${{ steps.upload.outputs.e2e-test-artifacts-gcs-artifact-dir }}
-      e2e-test-artifacts-build-log: ${{ steps.build.outputs.e2e-test-artifacts-build-log }}
-      e2e-test-artifacts-build-log-gcs-artifact: ${{ steps.upload.outputs.e2e-test-artifacts-build-log-gcs-artifact }}
-    steps:
-      - name: "Checking out repository"
-        uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
-      - name: "Checking out runtime submodules"
-        run: ./build_tools/scripts/git/update_runtime_submodules.sh
-      - name: "Downloading build dir archive"
-        run: gcloud storage cp "${HOST_BUILD_DIR_GCS_ARTIFACT}" "${HOST_BUILD_DIR_ARCHIVE}"
-      - name: "Extracting install from build dir archive"
-        run: tar -xf "${HOST_BUILD_DIR_ARCHIVE}" "${HOST_BUILD_DIR}/install"
-      - name: "Downloading TF binaries archive"
-        run: gcloud storage cp "${TF_BINARIES_GCS_ARTIFACT}" "${TF_BINARIES_ARCHIVE}"
-      - name: "Extracting TF binaries archive"
-        run: tar -xf "${TF_BINARIES_ARCHIVE}"
-      - name: "Building e2e test artifacts"
-        id: build
-        env:
-          BUILD_E2E_TEST_ARTIFACTS_DIR: build-e2e-test-artifacts
-        run: |
-          build_tools/github_actions/docker_run.sh \
-            --env "IREE_TF_BINARIES_DIR=${TF_BINARIES_DIR}" \
-            --env "IREE_HOST_BIN_DIR=${HOST_BUILD_DIR}/install/bin" \
-            gcr.io/iree-oss/base@sha256:24fb5467da30c7b4c0f4c191cdf6124bda63b172d3ae98906e53b3d55ed6ddcb \
-            build_tools/cmake/build_e2e_test_artifacts.sh \
-            "${BUILD_E2E_TEST_ARTIFACTS_DIR}"
-          echo "e2e-test-artifacts-dir=${BUILD_E2E_TEST_ARTIFACTS_DIR}/e2e_test_artifacts" >> "${GITHUB_OUTPUT}"
-          echo "e2e-test-artifacts-build-log=${BUILD_E2E_TEST_ARTIFACTS_DIR}/.ninja_log" >> "${GITHUB_OUTPUT}"
-      - name: "Uploading e2e test artifacts"
-        id: upload
-        env:
-          E2E_TEST_ARTIFACTS_DIR: ${{ steps.build.outputs.e2e-test-artifacts-dir }}
-          E2E_TEST_ARTIFACTS_GCS_ARTIFACT_DIR: ${{ env.GCS_DIR }}/e2e-test-artifacts
-          E2E_TEST_ARTIFACTS_BUILD_LOG: ${{ steps.build.outputs.e2e-test-artifacts-build-log }}
-          E2E_TEST_ARTIFACTS_BUILD_LOG_GCS_ARTIFACT: ${{ env.GCS_DIR }}/e2e-test-artifacts/ninja_log
-        run: |
-          # Uploads all IREE artifacts and MLIR files (including the imported
-          # MLIR files and MLIR source models).
-          # Not archiving the directory to allow fetching each file as needed
-          # separately.
-          find "${E2E_TEST_ARTIFACTS_DIR}" -maxdepth 1 \
-            -name "iree_*" -o -name "model_*.mlir" | \
-            gcloud storage cp --read-paths-from-stdin -r \
-              "${E2E_TEST_ARTIFACTS_GCS_ARTIFACT_DIR}"
-          gcloud storage cp "${E2E_TEST_ARTIFACTS_BUILD_LOG}" \
-              "${E2E_TEST_ARTIFACTS_BUILD_LOG_GCS_ARTIFACT}"
-          echo "e2e-test-artifacts-gcs-artifact-dir=${E2E_TEST_ARTIFACTS_GCS_ARTIFACT_DIR}" >> "${GITHUB_OUTPUT}"
-          echo "e2e-test-artifacts-build-log-gcs-artifact=${E2E_TEST_ARTIFACTS_BUILD_LOG_GCS_ARTIFACT}" >> "${GITHUB_OUTPUT}"
+    uses: ./.github/workflows/build_e2e_test_artifacts.yml
+    with:
+      runner-group: ${{ needs.setup.outputs.runner-group }}
+      runner-env: ${{ needs.setup.outputs.runner-env }}
+      build-dir: ${{ needs.build_all.outputs.build-dir }}
+      build-dir-archive: ${{ needs.build_all.outputs.build-dir-archive }}
+      build-dir-gcs-artifact: ${{ needs.build_all.outputs.build-dir-gcs-artifact }}
+      tf-binaries-dir: ${{ needs.build_tf_integrations.outputs.binaries-dir }}
+      tf-binaries-archive: ${{ needs.build_tf_integrations.outputs.binaries-archive }}
+      tf-binaries-gcs-artifact: ${{ needs.build_tf_integrations.outputs.binaries-gcs-artifact }}
 
   compilation_benchmarks:
     needs: [setup, build_e2e_test_artifacts]
     if: fromJson(needs.setup.outputs.should-run) && needs.setup.outputs.benchmark-presets != ''
-    runs-on:
-      - self-hosted # must come first
-      - runner-group=${{ needs.setup.outputs.runner-group }}
-      - environment=${{ needs.setup.outputs.runner-env }}
-      - cpu
-      - os-family=Linux
-    env:
-      E2E_TEST_ARTIFACTS_DIR: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-dir }}
-      E2E_TEST_ARTIFACTS_GCS_ARTIFACT_DIR: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-gcs-artifact-dir }}
-      E2E_TEST_ARTIFACTS_BUILD_LOG: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-build-log }}
-      E2E_TEST_ARTIFACTS_BUILD_LOG_GCS_ARTIFACT: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-build-log-gcs-artifact }}
-    outputs:
-      compile-stats-results: ${{ steps.collect.outputs.compile-stats-results }}
-      compile-stats-results-gcs-artifact: ${{ steps.upload.outputs.compile-stats-results-gcs-artifact }}
-    steps:
-      - name: "Checking out repository"
-        uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
-      - name: "Exporting configs"
-        id: "export"
-        env:
-          COMPILATION_CONFIG: "compilation-config.json"
-        run: |
-          ./build_tools/benchmarks/export_benchmark_config.py \
-            compilation \
-            --output="${COMPILATION_CONFIG}"
-          echo "compilation-config=${COMPILATION_CONFIG}" >> "${GITHUB_OUTPUT}"
-      - name: "Downloading assets"
-        id: "download-assets"
-        env:
-          COMPILATION_CONFIG: ${{ steps.export.outputs.compilation-config }}
-        run: |
-          gcloud storage cp \
-            "${E2E_TEST_ARTIFACTS_BUILD_LOG_GCS_ARTIFACT}" \
-            "${E2E_TEST_ARTIFACTS_BUILD_LOG}"
-          mkdir -p "${E2E_TEST_ARTIFACTS_DIR}"
-          jq -r \
-            --arg GCS_ARTIFACT_DIR "${E2E_TEST_ARTIFACTS_GCS_ARTIFACT_DIR}" \
-            '.module_dir_paths | map("\($GCS_ARTIFACT_DIR)/\(.)") | join("\n")' \
-            "${COMPILATION_CONFIG}" | \
-            gcloud storage cp -r --read-paths-from-stdin \
-              "${E2E_TEST_ARTIFACTS_DIR}"
-      - name: "Collecting compilation statistics"
-        id: collect
-        env:
-          COMPILATION_CONFIG: ${{ steps.export.outputs.compilation-config }}
-          GENERATION_CONFIG: generation-config.json
-          COMPILE_STATS_RESULTS: benchmark-results/compile-stats-results.json
-        run: |
-          mkdir -p benchmark-results
-          ./build_tools/benchmarks/collect_compilation_statistics.py alpha \
-            --e2e_test_artifacts_dir="${E2E_TEST_ARTIFACTS_DIR}" \
-            --build_log="${E2E_TEST_ARTIFACTS_BUILD_LOG}" \
-            --compilation_benchmark_config="${COMPILATION_CONFIG}" \
-            --output="${COMPILE_STATS_RESULTS}"
-          echo "compile-stats-results=${COMPILE_STATS_RESULTS}" >> "${GITHUB_OUTPUT}"
-      - name: "Uploading benchmark results"
-        id: upload
-        env:
-          COMPILATION_CONFIG: ${{ steps.export.outputs.compilation-config }}
-          COMPILE_STATS_RESULTS: ${{ steps.collect.outputs.compile-stats-results }}
-          COMPILE_STATS_RESULTS_GCS_ARTIFACT: ${{ env.GCS_DIR }}/${{ steps.collect.outputs.compile-stats-results }}
-        run: |
-          # Upload files with two commands since they go into different GCS
-          # directories.
-          gcloud storage cp "${COMPILATION_CONFIG}" "${GCS_DIR}"
-          gcloud storage cp \
-            "${COMPILE_STATS_RESULTS}" \
-            "${COMPILE_STATS_RESULTS_GCS_ARTIFACT}"
-          echo "compile-stats-results-gcs-artifact=${COMPILE_STATS_RESULTS_GCS_ARTIFACT}" >> "${GITHUB_OUTPUT}"
+    uses: ./.github/workflows/benchmark_compilation.yml
+    with:
+      runner-group: ${{ needs.setup.outputs.runner-group }}
+      runner-env: ${{ needs.setup.outputs.runner-env }}
+      e2e-test-artifacts-dir: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-dir }}
+      e2e-test-artifacts-gcs-artifact-dir: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-gcs-artifact-dir }}
+      e2e-test-artifacts-build-log: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-build-log }}
+      e2e-test-artifacts-build-log-gcs-artifact: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-build-log-gcs-artifact }}
 
   execution_benchmarks:
     needs: [setup, build_benchmark_tools, build_e2e_test_artifacts]
diff --git a/.github/workflows/setup.yml b/.github/workflows/setup.yml
new file mode 100644
index 0000000..9a3ab91
--- /dev/null
+++ b/.github/workflows/setup.yml
@@ -0,0 +1,117 @@
+# Copyright 2022 The IREE Authors
+#
+# Licensed under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#
+# Sets up a GitHub Actions run: determines CI options (runner group/env, cache writes, benchmark presets).
+# It is designed to be called from a parent workflow.
+# The concurrency of this workflow is controlled by the caller's job.
+
+name: Setup
+
+on:
+  workflow_call:
+    outputs:
+      should-run:
+        description: |
+          Whether CI should run.
+        value: ${{ jobs.setup.outputs.should-run }}
+      is-pr:
+        description: |
+          Whether the workflow has been triggered by a pull request.
+        value: ${{ jobs.setup.outputs.is-pr }}
+      runner-env:
+        description: |
+          The runner environment to use.
+        value: ${{ jobs.setup.outputs.runner-env }}
+      runner-group:
+        description: |
+          The runner group to use.
+        value: ${{ jobs.setup.outputs.runner-group }}
+      write-caches:
+        description: |
+          Whether to write to caches.
+        value: ${{ jobs.setup.outputs.write-caches }}
+      benchmark-presets:
+        description: |
+          A comma-separated string of benchmarks to run.
+        value: ${{ jobs.setup.outputs.benchmark-presets }}
+
+
+env:
+  # This duplicates the variable from ci.yml. The variable needs to be in env
+  # instead of the outputs of setup because it contains the run attempt and we
+  # want that to be the current attempt, not whatever attempt the setup step
+  # last ran in. It therefore can't be passed in via inputs because the env
+  # context isn't available there.
+  GCS_DIR: gs://iree-github-actions-${{ github.event_name == 'pull_request' && 'presubmit' || 'postsubmit' }}-artifacts/${{ github.run_id }}/${{ github.run_attempt }}
+
+jobs:
+  setup:
+    runs-on: ubuntu-20.04
+    env:
+      # The commit being checked out is the merge commit for the PR. Its first
+      # parent will be the tip of main.
+      BASE_REF: HEAD^
+    outputs:
+      should-run: ${{ steps.configure.outputs.should-run }}
+      is-pr: ${{ steps.configure.outputs.is-pr }}
+      runner-env: ${{ steps.configure.outputs.runner-env }}
+      runner-group: ${{ steps.configure.outputs.runner-group }}
+      write-caches: ${{ steps.configure.outputs.write-caches }}
+      benchmark-presets: ${{ steps.configure.outputs.benchmark-presets }}
+    steps:
+      - name: "Checking out repository"
+        uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
+        with:
+          # We need the parent commit to do a diff
+          fetch-depth: 2
+      - name: "Fetching PR description"
+        # We fetch the latest pull request data (description, labels, ...) from
+        # API instead of using stale one from pull_request event. This makes it
+        # possible to update the trailers, labels on the pull request and re-run
+        # the workflow to make them take effect.
+        # This is majorly for triggering benchmarks without pushing new commits.
+        # See https://github.com/openxla/iree/issues/10042#issuecomment-1449250094
+        # for more details.
+        id: fetch-pr
+        if: github.event_name == 'pull_request'
+        env:
+          PR_NUMBER: ${{ github.event.number }}
+          PR_JSON: pull_request.json
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: |
+          gh api "/repos/${GITHUB_REPOSITORY}/pulls/${PR_NUMBER}" > "${PR_JSON}"
+          # It requires delimiter to pass multiline strings through
+          # GITHUB_OUTPUT. Since these are already escaped JSON strings, pass
+          # the JSON strings and later use fromJSON to decode them.
+          echo "pr-title=$(jq '.title' ${PR_JSON})" >> "${GITHUB_OUTPUT}"
+          echo "pr-body=$(jq '.body' ${PR_JSON})" >> "${GITHUB_OUTPUT}"
+          # Use --compact-output to avoid multiline JSON.
+          echo "pr-labels=$(jq --compact-output '.labels | map(.name)' \
+            ${PR_JSON})" >> "${GITHUB_OUTPUT}"
+      - name: "Configuring CI options"
+        id: configure
+        env:
+          PR_TITLE: ${{ fromJSON(steps.fetch-pr.outputs.pr-title || '""') }}
+          PR_BODY: ${{ fromJSON(steps.fetch-pr.outputs.pr-body || '""') }}
+          PR_LABELS: ${{ steps.fetch-pr.outputs.pr-labels || '[]' }}
+          ORIGINAL_PR_TITLE: ${{ github.event.pull_request.title }}
+          ORIGINAL_PR_BODY: ${{ github.event.pull_request.body }}
+          ORIGINAL_PR_LABELS: ${{ toJSON(github.event.pull_request.labels.*.name) }}
+        run: |
+          # Just informative logging. There should only be two commits in the
+          # history here, but limiting the depth helps when copying from a local
+          # repo instead of using checkout, e.g. with
+          # https://github.com/nektos/act where there will be more.
+          git log --oneline --graph --max-count=3
+
+          ./build_tools/github_actions/configure_ci.py
+
+      - name: "Show benchmark presets"
+        env:
+          BENCHMARK_PRESETS: ${{ steps.configure.outputs.benchmark-presets }}
+        run: |
+          echo ":stopwatch: Enabled benchmarks: \`${BENCHMARK_PRESETS}\`" \
+            >> "${GITHUB_STEP_SUMMARY}"
diff --git a/build_tools/benchmarks/export_benchmark_config.py b/build_tools/benchmarks/export_benchmark_config.py
index 315dbdf..a7ff0a1 100755
--- a/build_tools/benchmarks/export_benchmark_config.py
+++ b/build_tools/benchmarks/export_benchmark_config.py
@@ -52,7 +52,10 @@
         "x86_64",
     "cuda":
         lambda config: config.target_device_spec.architecture.architecture ==
-        "cuda",
+        "cuda" and "long-running" not in config.tags,
+    "cuda-long":
+        lambda config: config.target_device_spec.architecture.architecture ==
+        "cuda" and "long-running" in config.tags,
     "android-cpu":
         lambda config:
         (config.target_device_spec.architecture.type == common_definitions.
diff --git a/build_tools/python/benchmark_suites/iree/cuda_benchmarks.py b/build_tools/python/benchmark_suites/iree/cuda_benchmarks.py
index 0d484f7..6acf004 100644
--- a/build_tools/python/benchmark_suites/iree/cuda_benchmarks.py
+++ b/build_tools/python/benchmark_suites/iree/cuda_benchmarks.py
@@ -5,7 +5,7 @@
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 """Defines IREE CUDA benchmarks."""
 
-from typing import List, Tuple
+from typing import List, Tuple, Sequence
 from benchmark_suites.iree import module_execution_configs
 from e2e_test_framework.models import tf_models
 from e2e_test_framework import unique_ids
@@ -27,16 +27,17 @@
       tags=["default-flags"],
       compile_targets=[SM_80_GPU_TARGET])
 
-  def generate(
-      self
+  def _generate_configs(
+      self,
+      models: Sequence[common_definitions.Model],
+      run_tags: Sequence[str] = [],
   ) -> Tuple[List[iree_definitions.ModuleGenerationConfig],
              List[iree_definitions.E2EModelRunConfig]]:
-    """Generates IREE compile and run configs."""
     gen_configs = [
         iree_definitions.ModuleGenerationConfig.build(
             compile_config=self.SM_80_COMPILE_CONFIG,
             imported_model=iree_definitions.ImportedModel.from_model(model))
-        for model in model_groups.CUDA_MODELS
+        for model in models
     ]
     sm80_devices = device_collections.DEFAULT_DEVICE_COLLECTION.query_device_specs(
         architecture=common_definitions.DeviceArchitecture.CUDA_SM80,
@@ -44,6 +45,20 @@
     run_module_configs = benchmark_suites.iree.utils.generate_e2e_model_run_configs(
         module_generation_configs=gen_configs,
         module_execution_configs=[module_execution_configs.CUDA_CONFIG],
-        device_specs=sm80_devices)
+        device_specs=sm80_devices,
+        tags=run_tags)
 
     return (gen_configs, run_module_configs)
+
+  def generate(
+      self
+  ) -> Tuple[List[iree_definitions.ModuleGenerationConfig],
+             List[iree_definitions.E2EModelRunConfig]]:
+    """Generates IREE compile and run configs."""
+    default_gen_configs, default_run_module_configs = self._generate_configs(
+        model_groups.CUDA_MODELS)
+    long_running_gen_configs, long_running_module_configs = self._generate_configs(
+        model_groups.CUDA_MODELS_LONG, run_tags=["long-running"])
+
+    return (default_gen_configs + long_running_gen_configs,
+            default_run_module_configs + long_running_module_configs)
diff --git a/build_tools/python/benchmark_suites/iree/utils.py b/build_tools/python/benchmark_suites/iree/utils.py
index c817083..0b614d1 100644
--- a/build_tools/python/benchmark_suites/iree/utils.py
+++ b/build_tools/python/benchmark_suites/iree/utils.py
@@ -4,7 +4,7 @@
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 
-from typing import List, Sequence
+from typing import List, Sequence, Optional
 import itertools
 
 from e2e_test_framework.definitions import common_definitions, iree_definitions
@@ -15,6 +15,7 @@
         iree_definitions.ModuleGenerationConfig],
     module_execution_configs: Sequence[iree_definitions.ModuleExecutionConfig],
     device_specs: Sequence[common_definitions.DeviceSpec],
+    tags: Optional[Sequence[str]] = None,
     tool: iree_definitions.E2EModelRunTool = iree_definitions.E2EModelRunTool.
     IREE_BENCHMARK_MODULE
 ) -> List[iree_definitions.E2EModelRunConfig]:
@@ -26,7 +27,8 @@
           module_execution_config=module_execution_config,
           target_device_spec=device_spec,
           input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
-          tool=tool) for module_generation_config,
+          tool=tool,
+          tags=tags) for module_generation_config,
       module_execution_config, device_spec in itertools.product(
           module_generation_configs, module_execution_configs, device_specs)
   ]
diff --git a/build_tools/python/e2e_test_framework/definitions/iree_definitions.py b/build_tools/python/e2e_test_framework/definitions/iree_definitions.py
index 798e448..55cd6fd 100644
--- a/build_tools/python/e2e_test_framework/definitions/iree_definitions.py
+++ b/build_tools/python/e2e_test_framework/definitions/iree_definitions.py
@@ -309,6 +309,7 @@
   """Describes an e2e run."""
   composite_id: str
   name: str
+  tags: List[str]
   module_generation_config: ModuleGenerationConfig
   module_execution_config: ModuleExecutionConfig
   target_device_spec: common_definitions.DeviceSpec
@@ -328,11 +329,13 @@
     return utils.substitute_flag_vars(flags=self.run_flags, GPU_ID=gpu_id)
 
   @classmethod
-  def build(cls, module_generation_config: ModuleGenerationConfig,
+  def build(cls,
+            module_generation_config: ModuleGenerationConfig,
             module_execution_config: ModuleExecutionConfig,
             target_device_spec: common_definitions.DeviceSpec,
             input_data: common_definitions.ModelInputData,
-            tool: E2EModelRunTool):
+            tool: E2EModelRunTool,
+            tags: Optional[Sequence[str]] = None):
     composite_id = unique_ids.hash_composite_id([
         module_generation_config.composite_id, module_execution_config.id,
         target_device_spec.id, input_data.id
@@ -344,8 +347,10 @@
         input_data=input_data,
         module_execution_config=module_execution_config,
         gpu_id=r"${GPU_ID}")
+    tags_list = [] if tags is None else list(tags)
     return cls(composite_id=composite_id,
                name=name,
+               tags=tags_list,
                module_generation_config=module_generation_config,
                module_execution_config=module_execution_config,
                target_device_spec=target_device_spec,
diff --git a/build_tools/python/e2e_test_framework/models/model_groups.py b/build_tools/python/e2e_test_framework/models/model_groups.py
index 0a183d5..5a28963 100644
--- a/build_tools/python/e2e_test_framework/models/model_groups.py
+++ b/build_tools/python/e2e_test_framework/models/model_groups.py
@@ -5,8 +5,7 @@
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 """Defines the groups of models."""
 
-from e2e_test_framework.models import tf_models, tflite_models, torch_models
-
+from e2e_test_framework.models import tflite_models, torch_models, tf_models
 
 # A list of models with thread configurations.
 # Note `0` represents sync execution.
@@ -29,7 +28,7 @@
     (tflite_models.MOBILEBERT_INT8, [1, 8]),
     (tf_models.EFFICIENTNET_V2_S_FP32, [1, 8]),
     (tf_models.MINILM_L12_H384_UNCASED_INT32_SEQLEN128, [1, 8]),
-    (tf_models.RESNET50_TF_FP32, [1, 8]),
+    (tf_models.RESNET50_1X3X224X224_FP32_TF, [1, 8]),
     (torch_models.EFFICIENTNET_V2_S_FP32_TORCH, [1, 8]),
     # Large models.
     # TODO: These models should be running at 8, 13, 28 threads but we use 8 for now until new hardware becomes available.
@@ -56,7 +55,7 @@
     (tf_models.EFFICIENTNET_V2_S_FP32, [8]),
     (tf_models.MINILM_L12_H384_UNCASED_INT32_SEQLEN128, [8]),
     # Disabled due to https://github.com/openxla/iree/issues/11174.
-    # (tf_models.RESNET50_TF_FP32, [8]),
+    # (tf_models.RESNET50_1X3X224X224_FP32_TF, [8]),
     # Disabled due to https://github.com/openxla/iree/issues/12772.
     # (torch_models.EFFICIENTNET_V2_S_FP32_TORCH, [8]),
     # Large models.
@@ -89,13 +88,24 @@
     #torch_models.RESNET50_2048X3X224X224_FP32_TORCH,
 ]
 
+RESNET50_TF_BATCHES = [
+    tf_models.RESNET50_1X3X224X224_FP32_TF,
+    tf_models.RESNET50_8X3X224X224_FP32_TF,
+    tf_models.RESNET50_64X3X224X224_FP32_TF,
+    tf_models.RESNET50_128X3X224X224_FP32_TF,
+    tf_models.RESNET50_256X3X224X224_FP32_TF,
+    # Disabled due to https://github.com/openxla/iree/issues/12774.
+    # tf_models.RESNET50_2048X3X224X224_FP32_TF,
+]
+
 CUDA_MODELS = BERT_LARGE_TORCH_BATCHES + RESNET50_TORCH_BATCHES + [
     tf_models.EFFICIENTNET_V2_S_FP32,
     tf_models.MINILM_L12_H384_UNCASED_INT32_SEQLEN128,
-    tf_models.RESNET50_TF_FP32,
     tf_models.BERT_FOR_MASKED_LM_FP32_SEQLEN512,
     tf_models.BERT_LARGE_TF_FP32_SEQLEN384,
     torch_models.MODEL_CLIP_TEXT_SEQLEN64_FP32_TORCH,
     torch_models.MODEL_UNET_2D_FP32_TORCH,
     torch_models.EFFICIENTNET_B7_FP32_TORCH,
 ]
+
+CUDA_MODELS_LONG = RESNET50_TF_BATCHES
diff --git a/build_tools/python/e2e_test_framework/models/tf_models.py b/build_tools/python/e2e_test_framework/models/tf_models.py
index 521dc63..75b23d4 100644
--- a/build_tools/python/e2e_test_framework/models/tf_models.py
+++ b/build_tools/python/e2e_test_framework/models/tf_models.py
@@ -41,17 +41,6 @@
     entry_function="forward",
     input_types=["1x384x384x3xf32"])
 
-RESNET50_TF_FP32 = common_definitions.Model(
-    id=unique_ids.MODEL_RESNET50_TF_FP32,
-    name="Resnet50TF",
-    tags=["fp32"],
-    source_type=common_definitions.ModelSourceType.EXPORTED_TF_V2,
-    # Derived from https://github.com/keras-team/keras/blob/v2.10.0/keras/applications/resnet.py.
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/resnet50-tf-model.tar.gz",
-    entry_function="forward",
-    input_types=["1x224x224x3xf32"])
-
 # This is the model used in the MLPerf Inference Suite.
 BERT_LARGE_TF_FP32_SEQLEN384 = common_definitions.Model(
     id=unique_ids.MODEL_BERT_LARGE_TF_FP32_SEQLEN384,
@@ -64,3 +53,64 @@
     "https://storage.googleapis.com/iree-model-artifacts/bert-large-seq384-tf-model.tar.gz",
     entry_function="serving_default",
     input_types=["1x384xi32", "1x384xi32", "1x384xi32"])
+
+# Converted from https://www.tensorflow.org/api_docs/python/tf/keras/applications/resnet50/ResNet50
+RESNET50_1X3X224X224_FP32_TF = common_definitions.Model(
+    id=unique_ids.MODEL_RESNET50_1X3X224X224_FP32_TF,
+    name="Resnet50TFBatch1",
+    tags=["fp32", "cnn", "batch-1"],
+    source_type=common_definitions.ModelSourceType.EXPORTED_TF_V2,
+    source_url=
+    "https://storage.googleapis.com/iree-model-artifacts/tensorflow/tf_models_2.12.0_1680486104/RESNET50/batch_1/tf-model.tar.gz",
+    entry_function="forward",
+    input_types=["1x224x224x3xf32"])
+
+RESNET50_8X3X224X224_FP32_TF = common_definitions.Model(
+    id=unique_ids.MODEL_RESNET50_8X3X224X224_FP32_TF,
+    name="Resnet50TFBatch8",
+    tags=["fp32", "cnn", "batch-8"],
+    source_type=common_definitions.ModelSourceType.EXPORTED_TF_V2,
+    source_url=
+    "https://storage.googleapis.com/iree-model-artifacts/tensorflow/tf_models_2.12.0_1680486104/RESNET50/batch_8/tf-model.tar.gz",
+    entry_function="forward",
+    input_types=["8x224x224x3xf32"])
+
+RESNET50_64X3X224X224_FP32_TF = common_definitions.Model(
+    id=unique_ids.MODEL_RESNET50_64X3X224X224_FP32_TF,
+    name="Resnet50TFBatch64",
+    tags=["fp32", "cnn", "batch-64"],
+    source_type=common_definitions.ModelSourceType.EXPORTED_TF_V2,
+    source_url=
+    "https://storage.googleapis.com/iree-model-artifacts/tensorflow/tf_models_2.12.0_1680486104/RESNET50/batch_64/tf-model.tar.gz",
+    entry_function="forward",
+    input_types=["64x224x224x3xf32"])
+
+RESNET50_128X3X224X224_FP32_TF = common_definitions.Model(
+    id=unique_ids.MODEL_RESNET50_128X3X224X224_FP32_TF,
+    name="Resnet50TFBatch128",
+    tags=["fp32", "cnn", "batch-128"],
+    source_type=common_definitions.ModelSourceType.EXPORTED_TF_V2,
+    source_url=
+    "https://storage.googleapis.com/iree-model-artifacts/tensorflow/tf_models_2.12.0_1680486104/RESNET50/batch_128/tf-model.tar.gz",
+    entry_function="forward",
+    input_types=["128x224x224x3xf32"])
+
+RESNET50_256X3X224X224_FP32_TF = common_definitions.Model(
+    id=unique_ids.MODEL_RESNET50_256X3X224X224_FP32_TF,
+    name="Resnet50TFBatch256",
+    tags=["fp32", "cnn", "batch-256"],
+    source_type=common_definitions.ModelSourceType.EXPORTED_TF_V2,
+    source_url=
+    "https://storage.googleapis.com/iree-model-artifacts/tensorflow/tf_models_2.12.0_1680486104/RESNET50/batch_256/tf-model.tar.gz",
+    entry_function="forward",
+    input_types=["256x224x224x3xf32"])
+
+RESNET50_2048X3X224X224_FP32_TF = common_definitions.Model(
+    id=unique_ids.MODEL_RESNET50_2048X3X224X224_FP32_TF,
+    name="Resnet50TFBatch2048",
+    tags=["fp32", "cnn", "batch-2048"],
+    source_type=common_definitions.ModelSourceType.EXPORTED_TF_V2,
+    source_url=
+    "https://storage.googleapis.com/iree-model-artifacts/tensorflow/tf_models_2.12.0_1680486104/RESNET50/batch_2048/tf-model.tar.gz",
+    entry_function="forward",
+    input_types=["2048x224x224x3xf32"])
diff --git a/build_tools/python/e2e_test_framework/unique_ids.py b/build_tools/python/e2e_test_framework/unique_ids.py
index 7e01513..9e138ba 100644
--- a/build_tools/python/e2e_test_framework/unique_ids.py
+++ b/build_tools/python/e2e_test_framework/unique_ids.py
@@ -75,13 +75,21 @@
 MODEL_MOBILENET_V3SMALL = "58855e40-eba9-4a71-b878-6b35e3460244"
 MODEL_PERSON_DETECT_INT8 = "bc1338be-e3df-44fd-82e4-40ba9560a073"
 MODEL_EFFICIENTNET_INT8 = "4a6f545e-1b4e-41a5-9236-792aa578184b"
+
 #    Tensorflow.
 MODEL_MINILM_L12_H384_UNCASED_INT32_SEQLEN128 = "ecf5c970-ee97-49f0-a4ed-df1f34e9d493"
 MODEL_BERT_FOR_MASKED_LM_FP32_SEQLEN512_TF = "39d157ad-f0ec-4a76-963b-d783beaed60f"
 MODEL_EFFICIENTNET_V2_S_FP32_TF = "ebe7897f-5613-435b-a330-3cb967704e5e"
-MODEL_RESNET50_TF_FP32 = "c393b4fa-beb4-45d5-982a-c6328aa05d08"
 MODEL_BERT_LARGE_TF_FP32_SEQLEN384 = "8871f602-571c-4eb8-b94d-554cc8ceec5a"
 MODEL_MOBILENET_V2_INT8 = "3dd5a95e-92a9-4486-9062-9a33224f28db"
+
+MODEL_RESNET50_1X3X224X224_FP32_TF = "9a5a8b8c-6e7a-4b51-bb4f-84e738957238-batch-1"
+MODEL_RESNET50_8X3X224X224_FP32_TF = "9a5a8b8c-6e7a-4b51-bb4f-84e738957238-batch-8"
+MODEL_RESNET50_64X3X224X224_FP32_TF = "9a5a8b8c-6e7a-4b51-bb4f-84e738957238-batch-64"
+MODEL_RESNET50_128X3X224X224_FP32_TF = "9a5a8b8c-6e7a-4b51-bb4f-84e738957238-batch-128"
+MODEL_RESNET50_256X3X224X224_FP32_TF = "9a5a8b8c-6e7a-4b51-bb4f-84e738957238-batch-256"
+MODEL_RESNET50_2048X3X224X224_FP32_TF = "9a5a8b8c-6e7a-4b51-bb4f-84e738957238-batch-2048"
+
 #    PyTorch.
 MODEL_CLIP_TEXT_SEQLEN64_FP32_TORCH = "9a9515c7-cb68-4c34-b1d2-0e8c0a3620b8"
 MODEL_UNET_2D_FP32_TORCH = "340553d1-e6fe-41b6-b2c7-687c74ccec56"
diff --git a/tests/e2e/test_artifacts/generated_e2e_test_fetch_models.cmake b/tests/e2e/test_artifacts/generated_e2e_test_fetch_models.cmake
index d54c56e..0cbc96a 100644
--- a/tests/e2e/test_artifacts/generated_e2e_test_fetch_models.cmake
+++ b/tests/e2e/test_artifacts/generated_e2e_test_fetch_models.cmake
@@ -140,11 +140,11 @@
 
 iree_fetch_artifact(
   NAME
-    "model-c393b4fa-beb4-45d5-982a-c6328aa05d08"
+    "model-9a5a8b8c-6e7a-4b51-bb4f-84e738957238-batch-1"
   SOURCE_URL
-    "https://storage.googleapis.com/iree-model-artifacts/resnet50-tf-model.tar.gz"
+    "https://storage.googleapis.com/iree-model-artifacts/tensorflow/tf_models_2.12.0_1680486104/RESNET50/batch_1/tf-model.tar.gz"
   OUTPUT
-    "${ROOT_ARTIFACTS_DIR}/model_c393b4fa-beb4-45d5-982a-c6328aa05d08_Resnet50TF"
+    "${ROOT_ARTIFACTS_DIR}/model_9a5a8b8c-6e7a-4b51-bb4f-84e738957238-batch-1_Resnet50TFBatch1"
   UNPACK
 )
 
@@ -287,3 +287,43 @@
     "${ROOT_ARTIFACTS_DIR}/model_340553d1-e6fe-41b6-b2c7-687c74ccec56_Unet2dPT.mlir"
   UNPACK
 )
+
+iree_fetch_artifact(
+  NAME
+    "model-9a5a8b8c-6e7a-4b51-bb4f-84e738957238-batch-8"
+  SOURCE_URL
+    "https://storage.googleapis.com/iree-model-artifacts/tensorflow/tf_models_2.12.0_1680486104/RESNET50/batch_8/tf-model.tar.gz"
+  OUTPUT
+    "${ROOT_ARTIFACTS_DIR}/model_9a5a8b8c-6e7a-4b51-bb4f-84e738957238-batch-8_Resnet50TFBatch8"
+  UNPACK
+)
+
+iree_fetch_artifact(
+  NAME
+    "model-9a5a8b8c-6e7a-4b51-bb4f-84e738957238-batch-64"
+  SOURCE_URL
+    "https://storage.googleapis.com/iree-model-artifacts/tensorflow/tf_models_2.12.0_1680486104/RESNET50/batch_64/tf-model.tar.gz"
+  OUTPUT
+    "${ROOT_ARTIFACTS_DIR}/model_9a5a8b8c-6e7a-4b51-bb4f-84e738957238-batch-64_Resnet50TFBatch64"
+  UNPACK
+)
+
+iree_fetch_artifact(
+  NAME
+    "model-9a5a8b8c-6e7a-4b51-bb4f-84e738957238-batch-128"
+  SOURCE_URL
+    "https://storage.googleapis.com/iree-model-artifacts/tensorflow/tf_models_2.12.0_1680486104/RESNET50/batch_128/tf-model.tar.gz"
+  OUTPUT
+    "${ROOT_ARTIFACTS_DIR}/model_9a5a8b8c-6e7a-4b51-bb4f-84e738957238-batch-128_Resnet50TFBatch128"
+  UNPACK
+)
+
+iree_fetch_artifact(
+  NAME
+    "model-9a5a8b8c-6e7a-4b51-bb4f-84e738957238-batch-256"
+  SOURCE_URL
+    "https://storage.googleapis.com/iree-model-artifacts/tensorflow/tf_models_2.12.0_1680486104/RESNET50/batch_256/tf-model.tar.gz"
+  OUTPUT
+    "${ROOT_ARTIFACTS_DIR}/model_9a5a8b8c-6e7a-4b51-bb4f-84e738957238-batch-256_Resnet50TFBatch256"
+  UNPACK
+)
diff --git a/tests/e2e/test_artifacts/generated_e2e_test_iree_artifacts.cmake b/tests/e2e/test_artifacts/generated_e2e_test_iree_artifacts.cmake
index 5de07b6..d409a1c 100644
--- a/tests/e2e/test_artifacts/generated_e2e_test_iree_artifacts.cmake
+++ b/tests/e2e/test_artifacts/generated_e2e_test_iree_artifacts.cmake
@@ -158,15 +158,15 @@
 
 iree_import_tf_model(
   TARGET_NAME
-    "${PACKAGE_NAME}_iree-imported-model-a122dabcac56c201a4c98d3474265f15adba14bff88353f421b1a11cadcdea1f"
+    "${PACKAGE_NAME}_iree-imported-model-b697188baed496a2e596578a28633d58a583aa970264235992ca623861483635"
   SOURCE
-    "${ROOT_ARTIFACTS_DIR}/model_c393b4fa-beb4-45d5-982a-c6328aa05d08_Resnet50TF"
+    "${ROOT_ARTIFACTS_DIR}/model_9a5a8b8c-6e7a-4b51-bb4f-84e738957238-batch-1_Resnet50TFBatch1"
   IMPORT_FLAGS
     "--output-format=mlir-bytecode"
     "--tf-import-type=savedmodel_v2"
     "--tf-savedmodel-exported-names=forward"
   OUTPUT_MLIR_FILE
-    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TF_a122dabcac56c201a4c98d3474265f15adba14bff88353f421b1a11cadcdea1f.mlir"
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch1_b697188baed496a2e596578a28633d58a583aa970264235992ca623861483635.mlir"
 )
 
 iree_import_tf_model(
@@ -195,6 +195,58 @@
     "${ROOT_ARTIFACTS_DIR}/iree_BertLargeTF_2494ed4b5c065c4a78b03d46161d4c9cccef27edf9568170c7dd2158281fe697.mlir"
 )
 
+iree_import_tf_model(
+  TARGET_NAME
+    "${PACKAGE_NAME}_iree-imported-model-830d6f2775a6d19551826e6aa0bb4822feef6c5908023c81c8b02ef29fdb7a41"
+  SOURCE
+    "${ROOT_ARTIFACTS_DIR}/model_9a5a8b8c-6e7a-4b51-bb4f-84e738957238-batch-8_Resnet50TFBatch8"
+  IMPORT_FLAGS
+    "--output-format=mlir-bytecode"
+    "--tf-import-type=savedmodel_v2"
+    "--tf-savedmodel-exported-names=forward"
+  OUTPUT_MLIR_FILE
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch8_830d6f2775a6d19551826e6aa0bb4822feef6c5908023c81c8b02ef29fdb7a41.mlir"
+)
+
+iree_import_tf_model(
+  TARGET_NAME
+    "${PACKAGE_NAME}_iree-imported-model-de3ec8610ae583fef77e064e7e93ca0c71ab93c471458e6538fa0f722ce64d25"
+  SOURCE
+    "${ROOT_ARTIFACTS_DIR}/model_9a5a8b8c-6e7a-4b51-bb4f-84e738957238-batch-64_Resnet50TFBatch64"
+  IMPORT_FLAGS
+    "--output-format=mlir-bytecode"
+    "--tf-import-type=savedmodel_v2"
+    "--tf-savedmodel-exported-names=forward"
+  OUTPUT_MLIR_FILE
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch64_de3ec8610ae583fef77e064e7e93ca0c71ab93c471458e6538fa0f722ce64d25.mlir"
+)
+
+iree_import_tf_model(
+  TARGET_NAME
+    "${PACKAGE_NAME}_iree-imported-model-4ddc42daca54a45b7c253bb2eb9020408caff7af1eeac74e66824690358fb618"
+  SOURCE
+    "${ROOT_ARTIFACTS_DIR}/model_9a5a8b8c-6e7a-4b51-bb4f-84e738957238-batch-128_Resnet50TFBatch128"
+  IMPORT_FLAGS
+    "--output-format=mlir-bytecode"
+    "--tf-import-type=savedmodel_v2"
+    "--tf-savedmodel-exported-names=forward"
+  OUTPUT_MLIR_FILE
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch128_4ddc42daca54a45b7c253bb2eb9020408caff7af1eeac74e66824690358fb618.mlir"
+)
+
+iree_import_tf_model(
+  TARGET_NAME
+    "${PACKAGE_NAME}_iree-imported-model-96010ccd86d78b4f59d4cb1ed29d9077700cb3690bdacbd89792cd0645d5975b"
+  SOURCE
+    "${ROOT_ARTIFACTS_DIR}/model_9a5a8b8c-6e7a-4b51-bb4f-84e738957238-batch-256_Resnet50TFBatch256"
+  IMPORT_FLAGS
+    "--output-format=mlir-bytecode"
+    "--tf-import-type=savedmodel_v2"
+    "--tf-savedmodel-exported-names=forward"
+  OUTPUT_MLIR_FILE
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch256_96010ccd86d78b4f59d4cb1ed29d9077700cb3690bdacbd89792cd0645d5975b.mlir"
+)
+
 iree_bytecode_module(
   NAME
     "iree-module-eb56e91246a131fa41bd335c1c072ffb6e7ffe651ecf65f4eeb171b12848b0ed"
@@ -435,18 +487,18 @@
 
 iree_bytecode_module(
   NAME
-    "iree-module-7a0add4835462bc66025022cdb6e87569da79cf103825a809863b8bd57a49055"
+    "iree-module-10a9a8a00ac0f36f45b5e35a032ca177b68621e3137dc57b9d6e08225301441b"
   SRC
-    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TF_a122dabcac56c201a4c98d3474265f15adba14bff88353f421b1a11cadcdea1f.mlir"
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch1_b697188baed496a2e596578a28633d58a583aa970264235992ca623861483635.mlir"
   MODULE_FILE_NAME
-    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TF_module_7a0add4835462bc66025022cdb6e87569da79cf103825a809863b8bd57a49055/module.vmfb"
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch1_module_10a9a8a00ac0f36f45b5e35a032ca177b68621e3137dc57b9d6e08225301441b/module.vmfb"
   FLAGS
     "--iree-hal-target-backends=llvm-cpu"
     "--iree-input-type=mhlo"
     "--iree-llvmcpu-target-triple=x86_64-unknown-linux-gnu"
     "--iree-llvmcpu-target-cpu=cascadelake"
   FRIENDLY_NAME
-    "Resnet50TF(tf_v2) [x86_64-cascadelake-linux_gnu-llvm_cpu][default-flags]"
+    "Resnet50TFBatch1(tf_v2) [x86_64-cascadelake-linux_gnu-llvm_cpu][default-flags]"
   PUBLIC
 )
 
@@ -927,22 +979,6 @@
 
 iree_bytecode_module(
   NAME
-    "iree-module-fd81a89e9f8773bae142040775c7e3c4774f96b64f07f8d9f66b00191864ff40"
-  SRC
-    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TF_a122dabcac56c201a4c98d3474265f15adba14bff88353f421b1a11cadcdea1f.mlir"
-  MODULE_FILE_NAME
-    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TF_module_fd81a89e9f8773bae142040775c7e3c4774f96b64f07f8d9f66b00191864ff40/module.vmfb"
-  FLAGS
-    "--iree-hal-target-backends=cuda"
-    "--iree-input-type=mhlo"
-    "--iree-hal-cuda-llvm-target-arch=sm_80"
-  FRIENDLY_NAME
-    "Resnet50TF(tf_v2) [cuda-sm_80-linux_gnu-cuda][default-flags]"
-  PUBLIC
-)
-
-iree_bytecode_module(
-  NAME
     "iree-module-bdd904cc5614ebf77609c7802a2dfc09f139aee2a247a247d10d320de72b0e28"
   SRC
     "${ROOT_ARTIFACTS_DIR}/iree_BertForMaskedLMTF_a3a701aaac95a47e7e0c1875793fbe88c976864cac611ccdf7d373d43d670225.mlir"
@@ -1023,6 +1059,86 @@
 
 iree_bytecode_module(
   NAME
+    "iree-module-58f67edb580a13aa98d3fd05ad31f207d62462c9de990825656b19d2fbacda3c"
+  SRC
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch1_b697188baed496a2e596578a28633d58a583aa970264235992ca623861483635.mlir"
+  MODULE_FILE_NAME
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch1_module_58f67edb580a13aa98d3fd05ad31f207d62462c9de990825656b19d2fbacda3c/module.vmfb"
+  FLAGS
+    "--iree-hal-target-backends=cuda"
+    "--iree-input-type=mhlo"
+    "--iree-hal-cuda-llvm-target-arch=sm_80"
+  FRIENDLY_NAME
+    "Resnet50TFBatch1(tf_v2) [cuda-sm_80-linux_gnu-cuda][default-flags]"
+  PUBLIC
+)
+
+iree_bytecode_module(
+  NAME
+    "iree-module-1655ee92ee23748886eef280d7d6f96c8a227fae7d6d78abd3466ce8614f16ba"
+  SRC
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch8_830d6f2775a6d19551826e6aa0bb4822feef6c5908023c81c8b02ef29fdb7a41.mlir"
+  MODULE_FILE_NAME
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch8_module_1655ee92ee23748886eef280d7d6f96c8a227fae7d6d78abd3466ce8614f16ba/module.vmfb"
+  FLAGS
+    "--iree-hal-target-backends=cuda"
+    "--iree-input-type=mhlo"
+    "--iree-hal-cuda-llvm-target-arch=sm_80"
+  FRIENDLY_NAME
+    "Resnet50TFBatch8(tf_v2) [cuda-sm_80-linux_gnu-cuda][default-flags]"
+  PUBLIC
+)
+
+iree_bytecode_module(
+  NAME
+    "iree-module-496bdea78d06c9c32d8deae84b088b0eb34f6d1982ac790e2580a50dec4281ae"
+  SRC
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch64_de3ec8610ae583fef77e064e7e93ca0c71ab93c471458e6538fa0f722ce64d25.mlir"
+  MODULE_FILE_NAME
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch64_module_496bdea78d06c9c32d8deae84b088b0eb34f6d1982ac790e2580a50dec4281ae/module.vmfb"
+  FLAGS
+    "--iree-hal-target-backends=cuda"
+    "--iree-input-type=mhlo"
+    "--iree-hal-cuda-llvm-target-arch=sm_80"
+  FRIENDLY_NAME
+    "Resnet50TFBatch64(tf_v2) [cuda-sm_80-linux_gnu-cuda][default-flags]"
+  PUBLIC
+)
+
+iree_bytecode_module(
+  NAME
+    "iree-module-5aa3c57b33d354d5ebfe51626e2dac78485a548aa9bf2fba7a3e7a248f8b909e"
+  SRC
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch128_4ddc42daca54a45b7c253bb2eb9020408caff7af1eeac74e66824690358fb618.mlir"
+  MODULE_FILE_NAME
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch128_module_5aa3c57b33d354d5ebfe51626e2dac78485a548aa9bf2fba7a3e7a248f8b909e/module.vmfb"
+  FLAGS
+    "--iree-hal-target-backends=cuda"
+    "--iree-input-type=mhlo"
+    "--iree-hal-cuda-llvm-target-arch=sm_80"
+  FRIENDLY_NAME
+    "Resnet50TFBatch128(tf_v2) [cuda-sm_80-linux_gnu-cuda][default-flags]"
+  PUBLIC
+)
+
+iree_bytecode_module(
+  NAME
+    "iree-module-a52ef9f5a8c064a4cbf52ea82b694d2c0bb9a0dd43d3576131335ab5453b62a4"
+  SRC
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch256_96010ccd86d78b4f59d4cb1ed29d9077700cb3690bdacbd89792cd0645d5975b.mlir"
+  MODULE_FILE_NAME
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch256_module_a52ef9f5a8c064a4cbf52ea82b694d2c0bb9a0dd43d3576131335ab5453b62a4/module.vmfb"
+  FLAGS
+    "--iree-hal-target-backends=cuda"
+    "--iree-input-type=mhlo"
+    "--iree-hal-cuda-llvm-target-arch=sm_80"
+  FRIENDLY_NAME
+    "Resnet50TFBatch256(tf_v2) [cuda-sm_80-linux_gnu-cuda][default-flags]"
+  PUBLIC
+)
+
+iree_bytecode_module(
+  NAME
     "iree-module-68f0eb37bb72d0d6605ecdf42691c64125960e122844b0beeae350871a445b1c"
   SRC
     "${ROOT_ARTIFACTS_DIR}/iree_DeepLabV3_fp32_05c50f54ffea1fce722d07588e7de026ce10324eccc5d83d1eac2c5a9f5d639d.mlir"
@@ -2598,11 +2714,11 @@
 
 iree_bytecode_module(
   NAME
-    "iree-module-846b19afd4c14b3e71d59087c5a2987edd65753d39db432961ce915688d457ac"
+    "iree-module-ee88f7648c08cb775827b3afa89a2aaccd34eff697306c2b35598ba7e4c78df8"
   SRC
-    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TF_a122dabcac56c201a4c98d3474265f15adba14bff88353f421b1a11cadcdea1f.mlir"
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch1_b697188baed496a2e596578a28633d58a583aa970264235992ca623861483635.mlir"
   MODULE_FILE_NAME
-    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TF_module_846b19afd4c14b3e71d59087c5a2987edd65753d39db432961ce915688d457ac/module.vmfb"
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch1_module_ee88f7648c08cb775827b3afa89a2aaccd34eff697306c2b35598ba7e4c78df8/module.vmfb"
   FLAGS
     "--iree-hal-target-backends=llvm-cpu"
     "--iree-input-type=mhlo"
@@ -2611,9 +2727,9 @@
     "--iree-vm-emit-polyglot-zip=true"
     "--iree-llvmcpu-debug-symbols=false"
     "--iree-scheduling-dump-statistics-format=json"
-    "--iree-scheduling-dump-statistics-file=${ROOT_ARTIFACTS_DIR}/iree_Resnet50TF_module_846b19afd4c14b3e71d59087c5a2987edd65753d39db432961ce915688d457ac/scheduling_stats.json"
+    "--iree-scheduling-dump-statistics-file=${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch1_module_ee88f7648c08cb775827b3afa89a2aaccd34eff697306c2b35598ba7e4c78df8/scheduling_stats.json"
   FRIENDLY_NAME
-    "Resnet50TF(tf_v2) [x86_64-cascadelake-linux_gnu-llvm_cpu][default-flags,compile-stats]"
+    "Resnet50TFBatch1(tf_v2) [x86_64-cascadelake-linux_gnu-llvm_cpu][default-flags,compile-stats]"
   PUBLIC
 )
 
@@ -3202,26 +3318,6 @@
 
 iree_bytecode_module(
   NAME
-    "iree-module-f770b1916e0b7a9a0b4aa9480791d21a46a352002ac1e38dfcea49ec0b63ed4e"
-  SRC
-    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TF_a122dabcac56c201a4c98d3474265f15adba14bff88353f421b1a11cadcdea1f.mlir"
-  MODULE_FILE_NAME
-    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TF_module_f770b1916e0b7a9a0b4aa9480791d21a46a352002ac1e38dfcea49ec0b63ed4e/module.vmfb"
-  FLAGS
-    "--iree-hal-target-backends=cuda"
-    "--iree-input-type=mhlo"
-    "--iree-hal-cuda-llvm-target-arch=sm_80"
-    "--iree-vm-emit-polyglot-zip=true"
-    "--iree-llvmcpu-debug-symbols=false"
-    "--iree-scheduling-dump-statistics-format=json"
-    "--iree-scheduling-dump-statistics-file=${ROOT_ARTIFACTS_DIR}/iree_Resnet50TF_module_f770b1916e0b7a9a0b4aa9480791d21a46a352002ac1e38dfcea49ec0b63ed4e/scheduling_stats.json"
-  FRIENDLY_NAME
-    "Resnet50TF(tf_v2) [cuda-sm_80-linux_gnu-cuda][default-flags,compile-stats]"
-  PUBLIC
-)
-
-iree_bytecode_module(
-  NAME
     "iree-module-8b19868be1c797cb585551c871c4171e78817e0efc49d30d91b9d722be283de9"
   SRC
     "${ROOT_ARTIFACTS_DIR}/iree_BertForMaskedLMTF_a3a701aaac95a47e7e0c1875793fbe88c976864cac611ccdf7d373d43d670225.mlir"
@@ -3322,6 +3418,106 @@
 
 iree_bytecode_module(
   NAME
+    "iree-module-2fe6a4ac93d74732ebabbd87e84e6ec56d2aeb9f6a94f5b595f74f947498c579"
+  SRC
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch1_b697188baed496a2e596578a28633d58a583aa970264235992ca623861483635.mlir"
+  MODULE_FILE_NAME
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch1_module_2fe6a4ac93d74732ebabbd87e84e6ec56d2aeb9f6a94f5b595f74f947498c579/module.vmfb"
+  FLAGS
+    "--iree-hal-target-backends=cuda"
+    "--iree-input-type=mhlo"
+    "--iree-hal-cuda-llvm-target-arch=sm_80"
+    "--iree-vm-emit-polyglot-zip=true"
+    "--iree-llvmcpu-debug-symbols=false"
+    "--iree-scheduling-dump-statistics-format=json"
+    "--iree-scheduling-dump-statistics-file=${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch1_module_2fe6a4ac93d74732ebabbd87e84e6ec56d2aeb9f6a94f5b595f74f947498c579/scheduling_stats.json"
+  FRIENDLY_NAME
+    "Resnet50TFBatch1(tf_v2) [cuda-sm_80-linux_gnu-cuda][default-flags,compile-stats]"
+  PUBLIC
+)
+
+iree_bytecode_module(
+  NAME
+    "iree-module-32dfb79e574ef282dd352a156620d44d545dda0f49ea1ac27394028d2ced46a8"
+  SRC
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch8_830d6f2775a6d19551826e6aa0bb4822feef6c5908023c81c8b02ef29fdb7a41.mlir"
+  MODULE_FILE_NAME
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch8_module_32dfb79e574ef282dd352a156620d44d545dda0f49ea1ac27394028d2ced46a8/module.vmfb"
+  FLAGS
+    "--iree-hal-target-backends=cuda"
+    "--iree-input-type=mhlo"
+    "--iree-hal-cuda-llvm-target-arch=sm_80"
+    "--iree-vm-emit-polyglot-zip=true"
+    "--iree-llvmcpu-debug-symbols=false"
+    "--iree-scheduling-dump-statistics-format=json"
+    "--iree-scheduling-dump-statistics-file=${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch8_module_32dfb79e574ef282dd352a156620d44d545dda0f49ea1ac27394028d2ced46a8/scheduling_stats.json"
+  FRIENDLY_NAME
+    "Resnet50TFBatch8(tf_v2) [cuda-sm_80-linux_gnu-cuda][default-flags,compile-stats]"
+  PUBLIC
+)
+
+iree_bytecode_module(
+  NAME
+    "iree-module-3421bd5d457a3ed62e7aa74559afb7199105c1f60b65090a247ee3e6d9aec69c"
+  SRC
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch64_de3ec8610ae583fef77e064e7e93ca0c71ab93c471458e6538fa0f722ce64d25.mlir"
+  MODULE_FILE_NAME
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch64_module_3421bd5d457a3ed62e7aa74559afb7199105c1f60b65090a247ee3e6d9aec69c/module.vmfb"
+  FLAGS
+    "--iree-hal-target-backends=cuda"
+    "--iree-input-type=mhlo"
+    "--iree-hal-cuda-llvm-target-arch=sm_80"
+    "--iree-vm-emit-polyglot-zip=true"
+    "--iree-llvmcpu-debug-symbols=false"
+    "--iree-scheduling-dump-statistics-format=json"
+    "--iree-scheduling-dump-statistics-file=${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch64_module_3421bd5d457a3ed62e7aa74559afb7199105c1f60b65090a247ee3e6d9aec69c/scheduling_stats.json"
+  FRIENDLY_NAME
+    "Resnet50TFBatch64(tf_v2) [cuda-sm_80-linux_gnu-cuda][default-flags,compile-stats]"
+  PUBLIC
+)
+
+iree_bytecode_module(
+  NAME
+    "iree-module-5663bccd89aa0616691d5b804b337c8989a2ca94c1bf2673d41dbbab9e511f20"
+  SRC
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch128_4ddc42daca54a45b7c253bb2eb9020408caff7af1eeac74e66824690358fb618.mlir"
+  MODULE_FILE_NAME
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch128_module_5663bccd89aa0616691d5b804b337c8989a2ca94c1bf2673d41dbbab9e511f20/module.vmfb"
+  FLAGS
+    "--iree-hal-target-backends=cuda"
+    "--iree-input-type=mhlo"
+    "--iree-hal-cuda-llvm-target-arch=sm_80"
+    "--iree-vm-emit-polyglot-zip=true"
+    "--iree-llvmcpu-debug-symbols=false"
+    "--iree-scheduling-dump-statistics-format=json"
+    "--iree-scheduling-dump-statistics-file=${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch128_module_5663bccd89aa0616691d5b804b337c8989a2ca94c1bf2673d41dbbab9e511f20/scheduling_stats.json"
+  FRIENDLY_NAME
+    "Resnet50TFBatch128(tf_v2) [cuda-sm_80-linux_gnu-cuda][default-flags,compile-stats]"
+  PUBLIC
+)
+
+iree_bytecode_module(
+  NAME
+    "iree-module-353a915204b8eb401d0caac543588920cfab76f826463394cc1e99546cab13bb"
+  SRC
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch256_96010ccd86d78b4f59d4cb1ed29d9077700cb3690bdacbd89792cd0645d5975b.mlir"
+  MODULE_FILE_NAME
+    "${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch256_module_353a915204b8eb401d0caac543588920cfab76f826463394cc1e99546cab13bb/module.vmfb"
+  FLAGS
+    "--iree-hal-target-backends=cuda"
+    "--iree-input-type=mhlo"
+    "--iree-hal-cuda-llvm-target-arch=sm_80"
+    "--iree-vm-emit-polyglot-zip=true"
+    "--iree-llvmcpu-debug-symbols=false"
+    "--iree-scheduling-dump-statistics-format=json"
+    "--iree-scheduling-dump-statistics-file=${ROOT_ARTIFACTS_DIR}/iree_Resnet50TFBatch256_module_353a915204b8eb401d0caac543588920cfab76f826463394cc1e99546cab13bb/scheduling_stats.json"
+  FRIENDLY_NAME
+    "Resnet50TFBatch256(tf_v2) [cuda-sm_80-linux_gnu-cuda][default-flags,compile-stats]"
+  PUBLIC
+)
+
+iree_bytecode_module(
+  NAME
     "iree-module-16b5b80aaf1271b5ad782570340cc0c7c1c97e10b7e6c6cc6e5f3ede8393cb6c"
   SRC
     "${ROOT_ARTIFACTS_DIR}/iree_DeepLabV3_fp32_05c50f54ffea1fce722d07588e7de026ce10324eccc5d83d1eac2c5a9f5d639d.mlir"
@@ -4908,7 +5104,7 @@
   ${PACKAGE_NAME}_iree-imported-model-3bda9f3a5eb6a0fd3adc80187495d7ab840e409f379c70e3fd687934fafdd3b6
   ${PACKAGE_NAME}_iree-imported-model-213fe9a8738a01f2b02b6f0614a40a31c83a2603ca3e3ae0aeab8090fedbe3a0
   ${PACKAGE_NAME}_iree-imported-model-d4a10c6d3e8a11d808baf398822ea8b61be07673517ff9be30fbe199b7fdd960
-  ${PACKAGE_NAME}_iree-imported-model-a122dabcac56c201a4c98d3474265f15adba14bff88353f421b1a11cadcdea1f
+  ${PACKAGE_NAME}_iree-imported-model-b697188baed496a2e596578a28633d58a583aa970264235992ca623861483635
   ${PACKAGE_NAME}_model-cc474102-7d2f-4ec1-92ae-84e83ba0f390
   ${PACKAGE_NAME}_iree-imported-model-a3a701aaac95a47e7e0c1875793fbe88c976864cac611ccdf7d373d43d670225
   ${PACKAGE_NAME}_iree-imported-model-2494ed4b5c065c4a78b03d46161d4c9cccef27edf9568170c7dd2158281fe697
@@ -4923,6 +5119,10 @@
   ${PACKAGE_NAME}_model-fd05da43-5e37-4fa0-88f8-3ceec1682345-batch-8
   ${PACKAGE_NAME}_model-9a9515c7-cb68-4c34-b1d2-0e8c0a3620b8
   ${PACKAGE_NAME}_model-340553d1-e6fe-41b6-b2c7-687c74ccec56
+  ${PACKAGE_NAME}_iree-imported-model-830d6f2775a6d19551826e6aa0bb4822feef6c5908023c81c8b02ef29fdb7a41
+  ${PACKAGE_NAME}_iree-imported-model-de3ec8610ae583fef77e064e7e93ca0c71ab93c471458e6538fa0f722ce64d25
+  ${PACKAGE_NAME}_iree-imported-model-4ddc42daca54a45b7c253bb2eb9020408caff7af1eeac74e66824690358fb618
+  ${PACKAGE_NAME}_iree-imported-model-96010ccd86d78b4f59d4cb1ed29d9077700cb3690bdacbd89792cd0645d5975b
 )
 
 add_dependencies(iree-benchmark-suites
@@ -4940,7 +5140,7 @@
   ${PACKAGE_NAME}_iree-module-78154d58dddac432100d656b22fa9bcb45e4207a9ea2bc371bf089a68bad397a
   ${PACKAGE_NAME}_iree-module-c8949024e2472bec7b18c4e3757412715c248273005ca6f8d5769656ed425a84
   ${PACKAGE_NAME}_iree-module-a30b64a3d7850881ee9db94e8f75c661af3f76f48d10b3342a6912e1c8879252
-  ${PACKAGE_NAME}_iree-module-7a0add4835462bc66025022cdb6e87569da79cf103825a809863b8bd57a49055
+  ${PACKAGE_NAME}_iree-module-10a9a8a00ac0f36f45b5e35a032ca177b68621e3137dc57b9d6e08225301441b
   ${PACKAGE_NAME}_iree-module-047e75c462648b5fe1133f4ffbc3d1c7bdda154081d3eaa3be0b5445725b272b
   ${PACKAGE_NAME}_iree-module-1c7402f88ba881ec6abb39204faa4b5fedb2ffff4a6066555fcff0c7c4b74732
   ${PACKAGE_NAME}_iree-module-9c849d0ccfc89c0bca0740949572db8735832012a43c4c9f15c3a8ef0d9cca04
@@ -4968,12 +5168,16 @@
   ${PACKAGE_NAME}_iree-module-39556c12d84502be71243197b99b0f8c22949093c561c55c01ca906812a86288
   ${PACKAGE_NAME}_iree-module-04ca0a5077b7dd5ace66d803c9b822dff3428b24e7620a61995aff0907af9533
   ${PACKAGE_NAME}_iree-module-deafafd0926321a4b8e4dc73ed4a30b2ed9317d26488246461415be2ee857eb1
-  ${PACKAGE_NAME}_iree-module-fd81a89e9f8773bae142040775c7e3c4774f96b64f07f8d9f66b00191864ff40
   ${PACKAGE_NAME}_iree-module-bdd904cc5614ebf77609c7802a2dfc09f139aee2a247a247d10d320de72b0e28
   ${PACKAGE_NAME}_iree-module-45565cae821666fd34bca97be2e4cce3bd61e71308785728737d89acbb9bc9d2
   ${PACKAGE_NAME}_iree-module-c4b43b31944dbd567e48efacfdb33f707eb248538cf70fa7dcf1085c6c7dbd3f
   ${PACKAGE_NAME}_iree-module-c8ec2db5ee884e0af17814e61b13d7f7f1f2d4f7028e8c1920d0d968c27de2bb
   ${PACKAGE_NAME}_iree-module-9470c46965ea67794da45496454c82eade29b5a519d8037b1314738621e02260
+  ${PACKAGE_NAME}_iree-module-58f67edb580a13aa98d3fd05ad31f207d62462c9de990825656b19d2fbacda3c
+  ${PACKAGE_NAME}_iree-module-1655ee92ee23748886eef280d7d6f96c8a227fae7d6d78abd3466ce8614f16ba
+  ${PACKAGE_NAME}_iree-module-496bdea78d06c9c32d8deae84b088b0eb34f6d1982ac790e2580a50dec4281ae
+  ${PACKAGE_NAME}_iree-module-5aa3c57b33d354d5ebfe51626e2dac78485a548aa9bf2fba7a3e7a248f8b909e
+  ${PACKAGE_NAME}_iree-module-a52ef9f5a8c064a4cbf52ea82b694d2c0bb9a0dd43d3576131335ab5453b62a4
   ${PACKAGE_NAME}_iree-module-68f0eb37bb72d0d6605ecdf42691c64125960e122844b0beeae350871a445b1c
   ${PACKAGE_NAME}_iree-module-a7a1553d0739151f06bbc00a3ef8b67b0606463eab4b6607069aa94ea0bfd92f
   ${PACKAGE_NAME}_iree-module-e80d71ed8e86c0756226b2323e27e2c7c0fff8eddde59ba69e9222d36ee3eef6
@@ -5064,7 +5268,7 @@
   ${PACKAGE_NAME}_iree-module-78511a42a50f705b944437a040e1ee3bb5b2595a3b1d4db788586fe48f9a2453
   ${PACKAGE_NAME}_iree-module-01d35de2a55b9800e05151455eace0bf4493337ac1210fcc4904d630b075599a
   ${PACKAGE_NAME}_iree-module-2957930127e9b01e90ccddb7290e1c4b4abf6373cc36929809040e2c144d3fd7
-  ${PACKAGE_NAME}_iree-module-846b19afd4c14b3e71d59087c5a2987edd65753d39db432961ce915688d457ac
+  ${PACKAGE_NAME}_iree-module-ee88f7648c08cb775827b3afa89a2aaccd34eff697306c2b35598ba7e4c78df8
   ${PACKAGE_NAME}_iree-module-b5078b9d2031b69ec5ce9b775c8701cef73add8ebfb786d9189ca3fb6474cf73
   ${PACKAGE_NAME}_iree-module-ddd1657bc5433ccca5c8ce562f581626457a793670958cd8b4016c426191a9c4
   ${PACKAGE_NAME}_iree-module-8ee3c7b136703472b53bc8a19d8d28945aca93953612ccc65e55cd1b3dfda6c8
@@ -5092,12 +5296,16 @@
   ${PACKAGE_NAME}_iree-module-3b0ae1403ef444d812f0c7b37fda7311e2cc4ea407850ee7b91e6984b9c86100
   ${PACKAGE_NAME}_iree-module-25ad2815eb690276e9c2183aaafaf17a3df734bb6164071ad92dbf1e7faf7509
   ${PACKAGE_NAME}_iree-module-65586f1e5b51439dd951529c35fa9000a928f90039cc6cfb66d5c81d07a6c62b
-  ${PACKAGE_NAME}_iree-module-f770b1916e0b7a9a0b4aa9480791d21a46a352002ac1e38dfcea49ec0b63ed4e
   ${PACKAGE_NAME}_iree-module-8b19868be1c797cb585551c871c4171e78817e0efc49d30d91b9d722be283de9
   ${PACKAGE_NAME}_iree-module-c2085883b1f5c767f37508ab998a4bcd17d169fe6a5197d28e4dca8772c90253
   ${PACKAGE_NAME}_iree-module-88b6b5f712cd2f40d07a136e7f911c05b976c390e07f104c970292dee9a77e9a
   ${PACKAGE_NAME}_iree-module-925cdb19f2aa31a1907c81b5a9e179d91280c77b08a039c1cbf146f71683dde9
   ${PACKAGE_NAME}_iree-module-3c94ab45ad76bd8b2083729b65340b987da3247c854faf7d06431cb05a3b0a23
+  ${PACKAGE_NAME}_iree-module-2fe6a4ac93d74732ebabbd87e84e6ec56d2aeb9f6a94f5b595f74f947498c579
+  ${PACKAGE_NAME}_iree-module-32dfb79e574ef282dd352a156620d44d545dda0f49ea1ac27394028d2ced46a8
+  ${PACKAGE_NAME}_iree-module-3421bd5d457a3ed62e7aa74559afb7199105c1f60b65090a247ee3e6d9aec69c
+  ${PACKAGE_NAME}_iree-module-5663bccd89aa0616691d5b804b337c8989a2ca94c1bf2673d41dbbab9e511f20
+  ${PACKAGE_NAME}_iree-module-353a915204b8eb401d0caac543588920cfab76f826463394cc1e99546cab13bb
   ${PACKAGE_NAME}_iree-module-16b5b80aaf1271b5ad782570340cc0c7c1c97e10b7e6c6cc6e5f3ede8393cb6c
   ${PACKAGE_NAME}_iree-module-65fa033050b916e8143d44b5081ee45db3b1946a5d77de223328a7fe92a1cc66
   ${PACKAGE_NAME}_iree-module-16ef56b6869d10b17e983fec62e9f48e6bb87e9a348ab52a0b2faabca2b03578