Don't post benchmark comment by default (#12260)

diff --git a/.github/workflows/benchmark_execution.yml b/.github/workflows/benchmark_execution.yml
index eff04ac..de0b04d 100644
--- a/.github/workflows/benchmark_execution.yml
+++ b/.github/workflows/benchmark_execution.yml
@@ -33,10 +33,14 @@
         type: string
     outputs:
       benchmark-results-dir:
-        description: "Local path that stores all benchmark results."
+        description: |
+          Local path that stores all benchmark results.
+          Empty if no benchmark runs.
         value: ${{ jobs.run_benchmarks.outputs.benchmark-results-dir }}
       benchmark-results-gcs-artifact-dir:
-        description: "GCS path that stores all benchmark results."
+        description: |
+          GCS path that stores all benchmark results.
+          Empty if no benchmark runs.
         value: ${{ jobs.run_benchmarks.outputs.benchmark-results-gcs-artifact-dir }}
 
 env:
@@ -81,6 +85,7 @@
             >> "${GITHUB_OUTPUT}"
       - name: "Uploading benchmark config"
         id: upload
+        if: steps.export.outputs.benchmark-matrix != '[]'
         env:
           BENCHMARK_CONFIG: ${{ steps.export.outputs.benchmark-config }}
           BENCHMARK_CONFIG_GCS_ARTIFACT: ${{ env.GCS_DIR }}/${{ steps.export.outputs.benchmark-config }}
@@ -92,6 +97,7 @@
 
   run_benchmarks:
     needs: [export_benchmark_config]
+    if: needs.export_benchmark_config.outputs.benchmark-matrix != '[]'
     strategy:
       # Matrix is dynamically generated by the job export_benchmark_config. So
       # we only run the benchmarks specified in inputs.benchmark-presets.
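
For context on why the new != '[]' guards work: the job matrix is exported as a JSON string, and an empty list of benchmark configs serializes to exactly "[]". A minimal sketch (the function name is illustrative, not the actual export step):

```python
# Minimal sketch; export_benchmark_matrix is a made-up name, not the real script's API.
import json

def export_benchmark_matrix(benchmark_configs: list) -> str:
    # The workflow consumes this string with fromJSON(...) to build the job matrix.
    return json.dumps(benchmark_configs)

print(export_benchmark_matrix([]))                    # "[]" -> jobs are skipped
print(export_benchmark_matrix([{"name": "matmul"}]))  # non-empty -> jobs run
```
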
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 713e400..b59ef9b 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -889,7 +889,7 @@
 
   compilation_benchmarks:
     needs: [setup, build_e2e_test_artifacts]
-    if: needs.setup.outputs.should-run == 'true'
+    if: needs.setup.outputs.should-run == 'true' && needs.setup.outputs.benchmark-presets != ''
     runs-on:
       - self-hosted  # must come first
       - runner-group=${{ needs.setup.outputs.runner-group }}
@@ -977,13 +977,7 @@
 
   process_benchmark_results:
     needs: [setup, compilation_benchmarks, execution_benchmarks]
-    # execution_benchmarks is the optional dependency and skipped in presubmit
-    # if no benchmark is specified to run.
-    if: |
-      always() &&
-      needs.setup.outputs.should-run == 'true' &&
-      needs.compilation_benchmarks.result == 'success' &&
-      contains(fromJSON('["success", "skipped"]'), needs.execution_benchmarks.result)
+    if: needs.setup.outputs.should-run == 'true' && needs.setup.outputs.benchmark-presets != ''
     runs-on:
       - self-hosted  # must come first
       - runner-group=${{ needs.setup.outputs.runner-group }}
@@ -993,7 +987,9 @@
     env:
       COMPILE_STATS_RESULTS: ${{ needs.compilation_benchmarks.outputs.compile-stats-results }}
       COMPILE_STATS_RESULTS_GCS_ARTIFACT: ${{ needs.compilation_benchmarks.outputs.compile-stats-results-gcs-artifact }}
+      # Empty if no execution benchmark runs.
       EXECUTION_BENCHMARK_RESULTS_DIR: ${{ needs.execution_benchmarks.outputs.benchmark-results-dir }}
+      # Empty if no execution benchmark runs.
       EXECUTION_BENCHMARK_RESULTS_GCS_ARTIFACT_DIR: ${{ needs.execution_benchmarks.outputs.benchmark-results-gcs-artifact-dir }}
     steps:
       - name: "Checking out repository"
@@ -1008,7 +1004,10 @@
             "${COMPILE_STATS_RESULTS}"
       - name: Downloading execution benchmark results
         id: download-execution-results
-        if: needs.execution_benchmarks.result == 'success'
+        # Skip the download if there are no execution benchmark results (e.g.
+        # no benchmark matches the preset/filter). In that case, no benchmark
+        # job runs in benchmark_execution.yml and the output variables are empty.
+        if: env.EXECUTION_BENCHMARK_RESULTS_GCS_ARTIFACT_DIR != ''
         run: |
           gcloud storage cp -r \
             "${EXECUTION_BENCHMARK_RESULTS_GCS_ARTIFACT_DIR}/benchmark-results-*.json" \
diff --git a/build_tools/benchmarks/export_benchmark_config.py b/build_tools/benchmarks/export_benchmark_config.py
index 0a153a0..315dbdf 100755
--- a/build_tools/benchmarks/export_benchmark_config.py
+++ b/build_tools/benchmarks/export_benchmark_config.py
@@ -63,6 +63,9 @@
         (config.target_device_spec.architecture.type == common_definitions.
          ArchitectureType.GPU and config.target_device_spec.host_environment.
          platform == "android"),
+    # Not a preset for execution benchmarks.
+    "comp-stats":
+        lambda _config: False,
 }
 
 
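To illustrate how the new "comp-stats" entry interacts with the matcher table above: each preset maps to a predicate over an execution benchmark config, and a predicate that always returns False means the preset selects no execution benchmarks at all. A simplified sketch with made-up config fields (the real run-config type and filtering code in export_benchmark_config.py differ):

```python
# Simplified sketch; RunConfig and filter_by_presets are illustrative stand-ins
# for the real definitions in export_benchmark_config.py.
from dataclasses import dataclass
from typing import Callable, Dict, List

@dataclass
class RunConfig:
    name: str
    platform: str

PRESET_MATCHERS: Dict[str, Callable[[RunConfig], bool]] = {
    "cuda": lambda config: config.platform == "cuda",
    "x86_64": lambda config: config.platform == "x86_64",
    # Compile-stats preset: never matches an execution benchmark.
    "comp-stats": lambda _config: False,
}

def filter_by_presets(configs: List[RunConfig],
                      presets: List[str]) -> List[RunConfig]:
    # Keep a config if any requested preset matches it.
    return [c for c in configs
            if any(PRESET_MATCHERS[p](c) for p in presets)]

# Requesting only "comp-stats" yields an empty execution matrix, so
# benchmark_execution.yml skips run_benchmarks via the new guard.
assert filter_by_presets([RunConfig("matmul", "cuda")], ["comp-stats"]) == []
```
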
diff --git a/build_tools/github_actions/configure_ci.py b/build_tools/github_actions/configure_ci.py
index f125401..762ea6e 100755
--- a/build_tools/github_actions/configure_ci.py
+++ b/build_tools/github_actions/configure_ci.py
@@ -61,7 +61,7 @@
 RUNNER_ENV_DEFAULT = "prod"
 RUNNER_ENV_OPTIONS = [RUNNER_ENV_DEFAULT, "testing"]
 
-BENCHMARK_PRESET_OPTIONS = ["all", "cuda", "x86_64"]
+BENCHMARK_PRESET_OPTIONS = ["all", "cuda", "x86_64", "comp-stats"]
 
 
 def skip_path(path: str) -> bool:
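
Finally, a hedged sketch of how the expanded BENCHMARK_PRESET_OPTIONS list might be used to validate a PR's requested presets and produce the benchmark-presets output that ci.yml now checks; the function below is illustrative, not the actual configure_ci.py code:

```python
# Illustrative only; configure_ci.py's real parsing/validation differs.
from typing import List

BENCHMARK_PRESET_OPTIONS = ["all", "cuda", "x86_64", "comp-stats"]

def resolve_benchmark_presets(requested: List[str]) -> str:
    for preset in requested:
        if preset not in BENCHMARK_PRESET_OPTIONS:
            raise ValueError(
                f"Unknown benchmark preset '{preset}', "
                f"expected one of: {', '.join(BENCHMARK_PRESET_OPTIONS)}")
    # An empty string makes ci.yml skip compilation_benchmarks,
    # execution_benchmarks, and process_benchmark_results entirely.
    return ",".join(requested)

print(resolve_benchmark_presets([]))              # "" -> no benchmark jobs
print(resolve_benchmark_presets(["comp-stats"]))  # compile-stats only
```
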