Use Black to format Python files (#14161)

Switch from yapf to Black to better align with the LLVM and broader
Python communities. I decided not to go with Pyink, as it appears far
less popular and its formatting diverges from Black's in more than just
indentation.

- Reformat all Python files outside of `third_party` with Black.
- Update the lint workflow to use Black. The workflow only considers
files modified by the PR (see the sketch after this list).
- Delete old dotfiles.
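
As a rough illustration of the lint-workflow change, the check can be
restricted to Python files touched by the PR along these lines (a minimal
sketch; the exact base ref and step wiring in the actual workflow may differ):

```shell
# Collect Python files changed relative to the base branch and check them with Black.
git diff --name-only --diff-filter=d origin/main... -- '*.py' ':!third_party' \
  | xargs --no-run-if-empty black --check --diff
```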

The command used to reformat all files at once:
```shell
fd -e py --exclude third_party | xargs black
```

To learn more about Black, see: https://black.readthedocs.io/en/stable/
and https://github.com/psf/black.

In the next PR, once the commit SHA of this PR is finalized, I plan to
add this commit to `.git-blame-ignore-revs` to keep the blame history
clean.
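
For anyone viewing blame locally, git can be pointed at that file with
something like the following (assuming the standard `blame.ignoreRevsFile`
setting):

```shell
git config blame.ignoreRevsFile .git-blame-ignore-revs
```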

Issue: https://github.com/openxla/iree/issues/14135
diff --git a/build_tools/bazel_to_cmake/bazel_to_cmake.py b/build_tools/bazel_to_cmake/bazel_to_cmake.py
index 44e2b8a..efb9529 100755
--- a/build_tools/bazel_to_cmake/bazel_to_cmake.py
+++ b/build_tools/bazel_to_cmake/bazel_to_cmake.py
@@ -59,8 +59,8 @@
 repo_cfg = None
 
 EDIT_BLOCKING_PATTERN = re.compile(
-    r"bazel[\s_]*to[\s_]*cmake[\s_]*:?[\s_]*do[\s_]*not[\s_]*edit",
-    flags=re.IGNORECASE)
+    r"bazel[\s_]*to[\s_]*cmake[\s_]*:?[\s_]*do[\s_]*not[\s_]*edit", flags=re.IGNORECASE
+)
 
 PRESERVE_ABOVE_TAG = "### BAZEL_TO_CMAKE_PRESERVES_ALL_CONTENT_ABOVE_THIS_LINE ###"
 PRESERVE_BELOW_TAG = "### BAZEL_TO_CMAKE_PRESERVES_ALL_CONTENT_BELOW_THIS_LINE ###"
@@ -69,274 +69,299 @@
 
 
 class Status(Enum):
-  UPDATED = 1
-  NOOP = 2
-  FAILED = 3
-  SKIPPED = 4
-  NO_BUILD_FILE = 5
+    UPDATED = 1
+    NOOP = 2
+    FAILED = 3
+    SKIPPED = 4
+    NO_BUILD_FILE = 5
 
 
 def parse_arguments():
-  parser = argparse.ArgumentParser(
-      description="Bazel to CMake conversion helper.")
-  parser.add_argument("--preview",
-                      help="Prints results instead of writing files",
-                      action="store_true",
-                      default=False)
-  parser.add_argument(
-      "--allow_partial_conversion",
-      help="Generates partial files, ignoring errors during conversion.",
-      action="store_true",
-      default=False)
-  parser.add_argument(
-      "--verbosity",
-      "-v",
-      type=int,
-      default=0,
-      help="Specify verbosity level where higher verbosity emits more logging."
-      " 0 (default): Only output errors and summary statistics."
-      " 1: Also output the name of each directory as it's being processed and"
-      " whether the directory is skipped."
-      " 2: Also output when conversion was successful.")
+    parser = argparse.ArgumentParser(description="Bazel to CMake conversion helper.")
+    parser.add_argument(
+        "--preview",
+        help="Prints results instead of writing files",
+        action="store_true",
+        default=False,
+    )
+    parser.add_argument(
+        "--allow_partial_conversion",
+        help="Generates partial files, ignoring errors during conversion.",
+        action="store_true",
+        default=False,
+    )
+    parser.add_argument(
+        "--verbosity",
+        "-v",
+        type=int,
+        default=0,
+        help="Specify verbosity level where higher verbosity emits more logging."
+        " 0 (default): Only output errors and summary statistics."
+        " 1: Also output the name of each directory as it's being processed and"
+        " whether the directory is skipped."
+        " 2: Also output when conversion was successful.",
+    )
 
-  # Specify only one of these (defaults to --root_dir=<main source dirs>).
-  group = parser.add_mutually_exclusive_group()
-  group.add_argument("--dir",
-                     help="Converts the BUILD file in the given directory",
-                     default=None)
-  default_root_dirs = (repo_cfg.DEFAULT_ROOT_DIRS if hasattr(
-      repo_cfg, "DEFAULT_ROOT_DIRS") else [])
-  group.add_argument("--root_dir",
-                     nargs="+",
-                     help="Converts all BUILD files under a root directory",
-                     default=default_root_dirs)
+    # Specify only one of these (defaults to --root_dir=<main source dirs>).
+    group = parser.add_mutually_exclusive_group()
+    group.add_argument(
+        "--dir", help="Converts the BUILD file in the given directory", default=None
+    )
+    default_root_dirs = (
+        repo_cfg.DEFAULT_ROOT_DIRS if hasattr(repo_cfg, "DEFAULT_ROOT_DIRS") else []
+    )
+    group.add_argument(
+        "--root_dir",
+        nargs="+",
+        help="Converts all BUILD files under a root directory",
+        default=default_root_dirs,
+    )
 
-  args = parser.parse_args()
+    args = parser.parse_args()
 
-  # --dir takes precedence over --root_dir.
-  # They are mutually exclusive, but the default value is still set.
-  if args.dir:
-    args.root_dir = None
+    # --dir takes precedence over --root_dir.
+    # They are mutually exclusive, but the default value is still set.
+    if args.dir:
+        args.root_dir = None
 
-  return args
+    return args
 
 
 def setup_environment():
-  """Sets up some environment globals."""
-  global repo_root
-  global repo_cfg
+    """Sets up some environment globals."""
+    global repo_root
+    global repo_cfg
 
-  # Scan up the directory tree for a repo config file.
-  check_dir = os.getcwd()
-  while not os.path.exists(os.path.join(check_dir, REPO_CFG_FILE)):
-    new_check_dir = os.path.dirname(check_dir)
-    if not new_check_dir or new_check_dir == check_dir:
-      print(f"ERROR: Could not find {REPO_CFG_FILE} in a parent directory "
-            f"of {os.getcwd()}")
-      sys.exit(1)
-    check_dir = new_check_dir
-  repo_root = check_dir
-  log(f"Using repo root {repo_root}")
+    # Scan up the directory tree for a repo config file.
+    check_dir = os.getcwd()
+    while not os.path.exists(os.path.join(check_dir, REPO_CFG_FILE)):
+        new_check_dir = os.path.dirname(check_dir)
+        if not new_check_dir or new_check_dir == check_dir:
+            print(
+                f"ERROR: Could not find {REPO_CFG_FILE} in a parent directory "
+                f"of {os.getcwd()}"
+            )
+            sys.exit(1)
+        check_dir = new_check_dir
+    repo_root = check_dir
+    log(f"Using repo root {repo_root}")
 
-  # Dynamically load the config file as a module.
-  orig_dont_write_bytecode = sys.dont_write_bytecode
-  sys.dont_write_bytecode = True  # Don't generate __pycache__ dir
-  repo_cfg_path = os.path.join(repo_root, REPO_CFG_FILE)
-  spec = importlib.util.spec_from_file_location(REPO_CFG_MODULE_NAME,
-                                                repo_cfg_path)
-  if spec and spec.loader:
-    repo_cfg = importlib.util.module_from_spec(spec)
-    sys.modules[REPO_CFG_MODULE_NAME] = repo_cfg
-    spec.loader.exec_module(repo_cfg)
-    sys.dont_write_bytecode = orig_dont_write_bytecode
-  else:
-    print(f"INTERNAL ERROR: Could not evaluate {repo_cfg_path} as module")
-    sys.exit(1)
+    # Dynamically load the config file as a module.
+    orig_dont_write_bytecode = sys.dont_write_bytecode
+    sys.dont_write_bytecode = True  # Don't generate __pycache__ dir
+    repo_cfg_path = os.path.join(repo_root, REPO_CFG_FILE)
+    spec = importlib.util.spec_from_file_location(REPO_CFG_MODULE_NAME, repo_cfg_path)
+    if spec and spec.loader:
+        repo_cfg = importlib.util.module_from_spec(spec)
+        sys.modules[REPO_CFG_MODULE_NAME] = repo_cfg
+        spec.loader.exec_module(repo_cfg)
+        sys.dont_write_bytecode = orig_dont_write_bytecode
+    else:
+        print(f"INTERNAL ERROR: Could not evaluate {repo_cfg_path} as module")
+        sys.exit(1)
 
 
 def repo_relpath(path):
-  return os.path.relpath(path, repo_root).replace("\\", "/")
+    return os.path.relpath(path, repo_root).replace("\\", "/")
 
 
 def log(string, *args, indent=0, **kwargs):
-  print(textwrap.indent(string, prefix=(indent * " ")),
-        *args,
-        **kwargs,
-        file=sys.stderr)
+    print(
+        textwrap.indent(string, prefix=(indent * " ")), *args, **kwargs, file=sys.stderr
+    )
 
 
-def convert_directories(directories, write_files, allow_partial_conversion,
-                        verbosity):
-  failure_dirs = []
-  skip_count = 0
-  success_count = 0
-  noop_count = 0
-  for directory in directories:
-    status = convert_directory(
-        directory,
-        write_files=write_files,
-        allow_partial_conversion=allow_partial_conversion,
-        verbosity=verbosity)
-    if status == Status.FAILED:
-      failure_dirs.append(repo_relpath(directory))
-    elif status == Status.SKIPPED:
-      skip_count += 1
-    elif status == Status.UPDATED:
-      success_count += 1
-    elif status == Status.NOOP:
-      noop_count += 1
+def convert_directories(directories, write_files, allow_partial_conversion, verbosity):
+    failure_dirs = []
+    skip_count = 0
+    success_count = 0
+    noop_count = 0
+    for directory in directories:
+        status = convert_directory(
+            directory,
+            write_files=write_files,
+            allow_partial_conversion=allow_partial_conversion,
+            verbosity=verbosity,
+        )
+        if status == Status.FAILED:
+            failure_dirs.append(repo_relpath(directory))
+        elif status == Status.SKIPPED:
+            skip_count += 1
+        elif status == Status.UPDATED:
+            success_count += 1
+        elif status == Status.NOOP:
+            noop_count += 1
 
-  log(f"{success_count} CMakeLists.txt files were updated, {skip_count} were"
-      f" skipped, and {noop_count} required no change.")
-  if failure_dirs:
-    log(f"ERROR: Encountered unexpected errors converting {len(failure_dirs)}"
-        " directories:")
-    log("\n".join(failure_dirs), indent=2)
-    sys.exit(1)
-
-
-def convert_directory(directory_path, write_files, allow_partial_conversion,
-                      verbosity):
-  if not os.path.isdir(directory_path):
-    raise FileNotFoundError(f"Cannot find directory '{directory_path}'")
-
-  rel_dir_path = repo_relpath(directory_path)
-  if verbosity >= 1:
-    log(f"Processing {rel_dir_path}")
-
-  # Scan for a BUILD file.
-  build_file_found = False
-  build_file_basenames = ["BUILD", "BUILD.bazel"]
-  for build_file_basename in build_file_basenames:
-    build_file_path = os.path.join(directory_path, build_file_basename)
-
-    rel_build_file_path = repo_relpath(build_file_path)
-    if os.path.isfile(build_file_path):
-      build_file_found = True
-      break
-  cmakelists_file_path = os.path.join(directory_path, "CMakeLists.txt")
-  rel_cmakelists_file_path = repo_relpath(cmakelists_file_path)
-
-  if not build_file_found:
-    return Status.NO_BUILD_FILE
-
-  autogeneration_tag = f"Autogenerated by {repo_relpath(os.path.abspath(__file__))}"
-
-  header = "\n".join(["#" * 80] + [
-      l.ljust(79) + "#" for l in [
-          f"# {autogeneration_tag} from",
-          f"# {rel_build_file_path}",
-          "#",
-          "# Use iree_cmake_extra_content from iree/build_defs.oss.bzl to add arbitrary",
-          "# CMake-only content.",
-          "#",
-          f"# To disable autogeneration for this file entirely, delete this header.",
-      ]
-  ] + ["#" * 80])
-
-  old_lines = []
-  possible_preserved_header_lines = []
-  preserved_footer_lines = ["\n" + PRESERVE_BELOW_TAG + "\n"]
-
-  # Read CMakeLists.txt and check if it has the auto-generated header.
-  found_preserve_below_tag = False
-  found_preserve_above_tag = False
-  if os.path.isfile(cmakelists_file_path):
-    found_autogeneration_tag = False
-    with open(cmakelists_file_path) as f:
-      old_lines = f.readlines()
-
-    for line in old_lines:
-      if not found_preserve_above_tag:
-        possible_preserved_header_lines.append(line)
-      if not found_autogeneration_tag and autogeneration_tag in line:
-        found_autogeneration_tag = True
-      if not found_preserve_below_tag and PRESERVE_BELOW_TAG in line:
-        found_preserve_below_tag = True
-      elif not found_preserve_above_tag and PRESERVE_ABOVE_TAG in line:
-        found_preserve_above_tag = True
-      elif found_preserve_below_tag:
-        preserved_footer_lines.append(line)
-    if not found_autogeneration_tag:
-      if verbosity >= 1:
-        log(f"Skipped. Did not find autogeneration line.", indent=2)
-      return Status.SKIPPED
-  preserved_header = ("".join(possible_preserved_header_lines)
-                      if found_preserve_above_tag else "")
-  preserved_footer = "".join(preserved_footer_lines)
-
-  # Read the Bazel BUILD file and interpret it.
-  with open(build_file_path, "rt") as build_file:
-    build_file_contents = build_file.read()
-  if "bazel-to-cmake: skip" in build_file_contents:
-    return Status.SKIPPED
-  build_file_code = compile(build_file_contents, build_file_path, "exec")
-  try:
-    converted_build_file = bazel_to_cmake_converter.convert_build_file(
-        build_file_code,
-        repo_cfg=repo_cfg,
-        allow_partial_conversion=allow_partial_conversion)
-  except (NameError, NotImplementedError) as e:
     log(
-        f"ERROR generating {rel_dir_path}.\n"
-        f"Missing a rule handler in bazel_to_cmake_converter.py?\n"
-        f"Reason: `{type(e).__name__}: {e}`",
-        indent=2)
-    return Status.FAILED
-  except KeyError as e:
-    log(
-        f"ERROR generating {rel_dir_path}.\n"
-        f"Missing a conversion in bazel_to_cmake_targets.py?\n"
-        f"Reason: `{type(e).__name__}: {e}`",
-        indent=2)
-    return Status.FAILED
-  converted_content = (preserved_header + header + converted_build_file +
-                       preserved_footer)
-  if write_files:
-    with open(cmakelists_file_path, "wt") as cmakelists_file:
-      cmakelists_file.write(converted_content)
-  else:
-    print(converted_content, end="")
+        f"{success_count} CMakeLists.txt files were updated, {skip_count} were"
+        f" skipped, and {noop_count} required no change."
+    )
+    if failure_dirs:
+        log(
+            f"ERROR: Encountered unexpected errors converting {len(failure_dirs)}"
+            " directories:"
+        )
+        log("\n".join(failure_dirs), indent=2)
+        sys.exit(1)
 
-  if converted_content == "".join(old_lines):
+
+def convert_directory(directory_path, write_files, allow_partial_conversion, verbosity):
+    if not os.path.isdir(directory_path):
+        raise FileNotFoundError(f"Cannot find directory '{directory_path}'")
+
+    rel_dir_path = repo_relpath(directory_path)
+    if verbosity >= 1:
+        log(f"Processing {rel_dir_path}")
+
+    # Scan for a BUILD file.
+    build_file_found = False
+    build_file_basenames = ["BUILD", "BUILD.bazel"]
+    for build_file_basename in build_file_basenames:
+        build_file_path = os.path.join(directory_path, build_file_basename)
+
+        rel_build_file_path = repo_relpath(build_file_path)
+        if os.path.isfile(build_file_path):
+            build_file_found = True
+            break
+    cmakelists_file_path = os.path.join(directory_path, "CMakeLists.txt")
+    rel_cmakelists_file_path = repo_relpath(cmakelists_file_path)
+
+    if not build_file_found:
+        return Status.NO_BUILD_FILE
+
+    autogeneration_tag = f"Autogenerated by {repo_relpath(os.path.abspath(__file__))}"
+
+    header = "\n".join(
+        ["#" * 80]
+        + [
+            l.ljust(79) + "#"
+            for l in [
+                f"# {autogeneration_tag} from",
+                f"# {rel_build_file_path}",
+                "#",
+                "# Use iree_cmake_extra_content from iree/build_defs.oss.bzl to add arbitrary",
+                "# CMake-only content.",
+                "#",
+                f"# To disable autogeneration for this file entirely, delete this header.",
+            ]
+        ]
+        + ["#" * 80]
+    )
+
+    old_lines = []
+    possible_preserved_header_lines = []
+    preserved_footer_lines = ["\n" + PRESERVE_BELOW_TAG + "\n"]
+
+    # Read CMakeLists.txt and check if it has the auto-generated header.
+    found_preserve_below_tag = False
+    found_preserve_above_tag = False
+    if os.path.isfile(cmakelists_file_path):
+        found_autogeneration_tag = False
+        with open(cmakelists_file_path) as f:
+            old_lines = f.readlines()
+
+        for line in old_lines:
+            if not found_preserve_above_tag:
+                possible_preserved_header_lines.append(line)
+            if not found_autogeneration_tag and autogeneration_tag in line:
+                found_autogeneration_tag = True
+            if not found_preserve_below_tag and PRESERVE_BELOW_TAG in line:
+                found_preserve_below_tag = True
+            elif not found_preserve_above_tag and PRESERVE_ABOVE_TAG in line:
+                found_preserve_above_tag = True
+            elif found_preserve_below_tag:
+                preserved_footer_lines.append(line)
+        if not found_autogeneration_tag:
+            if verbosity >= 1:
+                log(f"Skipped. Did not find autogeneration line.", indent=2)
+            return Status.SKIPPED
+    preserved_header = (
+        "".join(possible_preserved_header_lines) if found_preserve_above_tag else ""
+    )
+    preserved_footer = "".join(preserved_footer_lines)
+
+    # Read the Bazel BUILD file and interpret it.
+    with open(build_file_path, "rt") as build_file:
+        build_file_contents = build_file.read()
+    if "bazel-to-cmake: skip" in build_file_contents:
+        return Status.SKIPPED
+    build_file_code = compile(build_file_contents, build_file_path, "exec")
+    try:
+        converted_build_file = bazel_to_cmake_converter.convert_build_file(
+            build_file_code,
+            repo_cfg=repo_cfg,
+            allow_partial_conversion=allow_partial_conversion,
+        )
+    except (NameError, NotImplementedError) as e:
+        log(
+            f"ERROR generating {rel_dir_path}.\n"
+            f"Missing a rule handler in bazel_to_cmake_converter.py?\n"
+            f"Reason: `{type(e).__name__}: {e}`",
+            indent=2,
+        )
+        return Status.FAILED
+    except KeyError as e:
+        log(
+            f"ERROR generating {rel_dir_path}.\n"
+            f"Missing a conversion in bazel_to_cmake_targets.py?\n"
+            f"Reason: `{type(e).__name__}: {e}`",
+            indent=2,
+        )
+        return Status.FAILED
+    converted_content = (
+        preserved_header + header + converted_build_file + preserved_footer
+    )
+    if write_files:
+        with open(cmakelists_file_path, "wt") as cmakelists_file:
+            cmakelists_file.write(converted_content)
+    else:
+        print(converted_content, end="")
+
+    if converted_content == "".join(old_lines):
+        if verbosity >= 2:
+            log(f"{rel_cmakelists_file_path} required no update", indent=2)
+        return Status.NOOP
+
     if verbosity >= 2:
-      log(f"{rel_cmakelists_file_path} required no update", indent=2)
-    return Status.NOOP
-
-  if verbosity >= 2:
-    log(
-        f"Successfly generated {rel_cmakelists_file_path}"
-        f" from {rel_build_file_path}",
-        indent=2)
-  return Status.UPDATED
+        log(
+            f"Successfly generated {rel_cmakelists_file_path}"
+            f" from {rel_build_file_path}",
+            indent=2,
+        )
+    return Status.UPDATED
 
 
 def main(args):
-  """Runs Bazel to CMake conversion."""
-  global repo_root
+    """Runs Bazel to CMake conversion."""
+    global repo_root
 
-  write_files = not args.preview
+    write_files = not args.preview
 
-  if args.root_dir:
-    for root_dir in args.root_dir:
-      root_directory_path = os.path.join(repo_root, root_dir)
-      log(f"Converting directory tree rooted at: {root_directory_path}")
-      convert_directories(
-          (root for root, _, _ in os.walk(root_directory_path)),
-          write_files=write_files,
-          allow_partial_conversion=args.allow_partial_conversion,
-          verbosity=args.verbosity)
-  elif args.dir:
-    convert_directories([os.path.join(repo_root, args.dir)],
-                        write_files=write_files,
-                        allow_partial_conversion=args.allow_partial_conversion,
-                        verbosity=args.verbosity)
-  else:
-    log(f"ERROR: None of --root-dir, --dir arguments or DEFAULT_ROOT_DIRS in "
-        f".bazel_to_cmake.cfg.py: No conversion will be done")
-    sys.exit(1)
+    if args.root_dir:
+        for root_dir in args.root_dir:
+            root_directory_path = os.path.join(repo_root, root_dir)
+            log(f"Converting directory tree rooted at: {root_directory_path}")
+            convert_directories(
+                (root for root, _, _ in os.walk(root_directory_path)),
+                write_files=write_files,
+                allow_partial_conversion=args.allow_partial_conversion,
+                verbosity=args.verbosity,
+            )
+    elif args.dir:
+        convert_directories(
+            [os.path.join(repo_root, args.dir)],
+            write_files=write_files,
+            allow_partial_conversion=args.allow_partial_conversion,
+            verbosity=args.verbosity,
+        )
+    else:
+        log(
+            f"ERROR: None of --root-dir, --dir arguments or DEFAULT_ROOT_DIRS in "
+            f".bazel_to_cmake.cfg.py: No conversion will be done"
+        )
+        sys.exit(1)
 
 
 if __name__ == "__main__":
-  setup_environment()
-  main(parse_arguments())
+    setup_environment()
+    main(parse_arguments())
diff --git a/build_tools/bazel_to_cmake/bazel_to_cmake_converter.py b/build_tools/bazel_to_cmake/bazel_to_cmake_converter.py
index 023dc4d..f6d6cb5 100644
--- a/build_tools/bazel_to_cmake/bazel_to_cmake_converter.py
+++ b/build_tools/bazel_to_cmake/bazel_to_cmake_converter.py
@@ -20,875 +20,921 @@
 
 
 class BuildFileFunctions(object):
-  """Object passed to `exec` that has handlers for BUILD file functions."""
+    """Object passed to `exec` that has handlers for BUILD file functions."""
 
-  def __init__(self, *, converter: "Converter",
-               targets: bazel_to_cmake_targets.TargetConverter):
-    self._converter = converter
-    self._targets = targets
-    self._custom_initialize()
+    def __init__(
+        self, *, converter: "Converter", targets: bazel_to_cmake_targets.TargetConverter
+    ):
+        self._converter = converter
+        self._targets = targets
+        self._custom_initialize()
 
-  def _custom_initialize(self):
-    pass
+    def _custom_initialize(self):
+        pass
 
-  # ------------------------------------------------------------------------- #
-  # Conversion utilities, written to reduce boilerplate and allow for reuse   #
-  # between similar rule conversions (e.g. cc_library and cc_binary).         #
-  # ------------------------------------------------------------------------- #
+    # ------------------------------------------------------------------------- #
+    # Conversion utilities, written to reduce boilerplate and allow for reuse   #
+    # between similar rule conversions (e.g. cc_library and cc_binary).         #
+    # ------------------------------------------------------------------------- #
 
-  def _expand_cmake_var(self, var):
-    return "${" + var + "}"
+    def _expand_cmake_var(self, var):
+        return "${" + var + "}"
 
-  def _convert_string_arg_block(self, name, value, quote=True):
-    #  NAME
-    #    "value"
-    if value is None:
-      return ""
-    if quote:
-      return f'  {name}\n    "{value}"\n'
-    else:
-      return f"  {name}\n    {value}\n"
+    def _convert_string_arg_block(self, name, value, quote=True):
+        #  NAME
+        #    "value"
+        if value is None:
+            return ""
+        if quote:
+            return f'  {name}\n    "{value}"\n'
+        else:
+            return f"  {name}\n    {value}\n"
 
-  # Match Bazel's timeout values
-  # https://docs.bazel.build/versions/main/test-encyclopedia.html
-  _timeout_map = {
-      "short": 60,
-      "moderate": 300,
-      "long": 900,
-      "eternal": 3600,
-  }
+    # Match Bazel's timeout values
+    # https://docs.bazel.build/versions/main/test-encyclopedia.html
+    _timeout_map = {
+        "short": 60,
+        "moderate": 300,
+        "long": 900,
+        "eternal": 3600,
+    }
 
-  def _should_skip_target(self, tags=None, **kwargs):
-    if tags and "skip-bazel_to_cmake" in tags:
-      return True
-    return False
+    def _should_skip_target(self, tags=None, **kwargs):
+        if tags and "skip-bazel_to_cmake" in tags:
+            return True
+        return False
 
-  def _convert_timeout_arg_block(self, name, value):
-    if value is None:
-      return ""
-    value = self._timeout_map[value]
-    return f"  {name}\n    {value}\n"
+    def _convert_timeout_arg_block(self, name, value):
+        if value is None:
+            return ""
+        value = self._timeout_map[value]
+        return f"  {name}\n    {value}\n"
 
-  def _convert_string_list_block(self, name, values, quote=True, sort=False):
-    # Note this deliberately distinguishes between an empty list (argument
-    # explicitly specified) and None (argument left as default).
-    if values is None:
-      return ""
+    def _convert_string_list_block(self, name, values, quote=True, sort=False):
+        # Note this deliberately distinguishes between an empty list (argument
+        # explicitly specified) and None (argument left as default).
+        if values is None:
+            return ""
 
-    if sort:
-      values = sorted(values)
+        if sort:
+            values = sorted(values)
 
-    if quote:
-      values_list = "\n".join([f'    "{v}"' for v in values])
-    else:
-      values_list = "\n".join([f"    {v}" for v in values])
+        if quote:
+            values_list = "\n".join([f'    "{v}"' for v in values])
+        else:
+            values_list = "\n".join([f"    {v}" for v in values])
 
-    return f"  {name}\n{values_list}\n"
+        return f"  {name}\n{values_list}\n"
 
-  def _convert_option_block(self, option, option_value):
-    if option_value:
-      # Note: this is a truthiness check as well as an existence check, e.g.
-      # Bazel `testonly = False` will be handled correctly by this condition.
-      return f"  {option}\n"
-    else:
-      return ""
+    def _convert_option_block(self, option, option_value):
+        if option_value:
+            # Note: this is a truthiness check as well as an existence check, e.g.
+            # Bazel `testonly = False` will be handled correctly by this condition.
+            return f"  {option}\n"
+        else:
+            return ""
 
-  def _convert_target_block(self, name, target):
-    if target is None:
-      return ""
+    def _convert_target_block(self, name, target):
+        if target is None:
+            return ""
 
-    # Convert the target name from its Bazel name to the corresponding CMake name.
-    # The specific conversion pattern depends on the target location. In general,
-    # Bazel targets are fully qualified and use slashes as delimiters, while
-    # targets in CMake are rooted on subtrees and use _ (with :: aliases).
-    cmake_aliases = self._targets.convert_target(target)
-    if len(cmake_aliases) != 1:
-      raise ValueError(
-          f"Expected a CMake alias from {target}. Got {cmake_aliases}")
-    target = cmake_aliases[0]
-    # Replace aliased :: target names with their explicit _ names.
-    target = target.replace("::", "_")
-    return self._convert_string_arg_block(name, target, quote=False)
+        # Convert the target name from its Bazel name to the corresponding CMake name.
+        # The specific conversion pattern depends on the target location. In general,
+        # Bazel targets are fully qualified and use slashes as delimiters, while
+        # targets in CMake are rooted on subtrees and use _ (with :: aliases).
+        cmake_aliases = self._targets.convert_target(target)
+        if len(cmake_aliases) != 1:
+            raise ValueError(
+                f"Expected a CMake alias from {target}. Got {cmake_aliases}"
+            )
+        target = cmake_aliases[0]
+        # Replace aliased :: target names with their explicit _ names.
+        target = target.replace("::", "_")
+        return self._convert_string_arg_block(name, target, quote=False)
 
-  def _convert_srcs_block(self, srcs):
-    if not srcs:
-      return ""
-    # Bazel allows srcs to reference targets in the current package (leading
-    # ':') or in other packages (leading '//'). We map that to paths by:
-    # - dropping any leading ':' as in:
-    #      ':generated.c' -> 'generated.c'
-    # - dropping any leading '//', and internal ':' by '/', as in:
-    #      '//path/to/package:generated.c' ->  'path/to/package/generated.c'
-    srcs = [s.lstrip('//').lstrip(':').replace(':', '/') for s in srcs]
+    def _convert_srcs_block(self, srcs):
+        if not srcs:
+            return ""
+        # Bazel allows srcs to reference targets in the current package (leading
+        # ':') or in other packages (leading '//'). We map that to paths by:
+        # - dropping any leading ':' as in:
+        #      ':generated.c' -> 'generated.c'
+        # - dropping any leading '//', and internal ':' by '/', as in:
+        #      '//path/to/package:generated.c' ->  'path/to/package/generated.c'
+        srcs = [s.lstrip("//").lstrip(":").replace(":", "/") for s in srcs]
 
-    return self._convert_string_list_block("SRCS", srcs, sort=True)
+        return self._convert_string_list_block("SRCS", srcs, sort=True)
 
-  def _convert_td_file_block(self, td_file):
-    if td_file.startswith("//iree"):
-      # TODO: This should be generalized for out of tree.
-      # Bazel `//iree/dir/td_file.td`
-      # -> CMake `${IREE_ROOT_DIR}/iree/dir/td_file.td
-      # Bazel `//iree/dir/IR:td_file.td`
-      # -> CMake `${IREE_ROOT_DIR}/iree/dir/IR/td_file.td
-      td_file = td_file.replace("//iree", "${IREE_ROOT_DIR}/iree")
-      td_file = td_file.replace(":", "/")
-    return self._convert_string_arg_block("TD_FILE", td_file)
+    def _convert_td_file_block(self, td_file):
+        if td_file.startswith("//iree"):
+            # TODO: This should be generalized for out of tree.
+            # Bazel `//iree/dir/td_file.td`
+            # -> CMake `${IREE_ROOT_DIR}/iree/dir/td_file.td
+            # Bazel `//iree/dir/IR:td_file.td`
+            # -> CMake `${IREE_ROOT_DIR}/iree/dir/IR/td_file.td
+            td_file = td_file.replace("//iree", "${IREE_ROOT_DIR}/iree")
+            td_file = td_file.replace(":", "/")
+        return self._convert_string_arg_block("TD_FILE", td_file)
 
-  def _convert_tbl_outs_block(self, tbl_outs):
-    outs_list = "\n".join(
-        [f"    {' '.join(flags)} {value}" for flags, value in tbl_outs])
-    return f"  OUTS\n{outs_list}\n"
-
-  def _convert_tblgen_block(self, tblgen):
-    if tblgen.endswith("iree-tblgen"):
-      return "  TBLGEN\n    IREE\n"
-    else:
-      return ""
-
-  def _convert_target(self, target):
-    """Returns a list of targets that correspond to the specified Bazel target.
-    Note that this must be a list because some targets have a one to many mapping.
-    """
-    return self._targets.convert_target(target)
-
-  def _convert_single_target(self, target):
-    replacement_targets = self._convert_target(target)
-    if len(replacement_targets) != 1:
-      raise RuntimeError(f"Expected single target replacement for {target},"
-                         f" but got multiple: {replacement_targets}")
-    return replacement_targets[0]
-
-  def _convert_single_target_block(self, name, target):
-    mapped_target = self._convert_single_target(target)
-    return self._convert_string_arg_block(name, mapped_target, quote=False)
-
-  def _convert_target_list_block(self, list_name, targets):
-    if targets is None:
-      return ""
-
-    #  DEPS
-    #    package1::target1
-    #    package1::target2
-    #    package2::target
-    targets = [self._convert_target(t) for t in targets]
-    # Flatten lists
-    targets = list(itertools.chain.from_iterable(targets))
-    # Remove duplicates
-    targets = set(targets)
-    # Remove Falsey (None and empty string) values
-    targets = filter(None, targets)
-
-    return self._convert_string_list_block(list_name,
-                                           targets,
-                                           sort=True,
-                                           quote=False)
-
-  def _convert_includes_block(self, includes):
-    if not includes:
-      return ""
-    dirs = []
-    for include in includes:
-      dirs.append("$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/%s>" %
-                  (include,))
-      dirs.append("$<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}/%s>" %
-                  (include,))
-    return self._convert_string_list_block("INCLUDES",
-                                           dirs,
-                                           sort=False,
-                                           quote=True)
-
-  def _convert_unimplemented_function(self, function, details=""):
-    message = f"Unimplemented {function}: {details}"
-    if not self._converter.first_error:
-      self._converter.first_error = NotImplementedError(message)
-    # Avoid submitting the raw results from non-strict runs. These are still
-    # useful but are generally not safe to submit as-is. An upstream check
-    # prevents changes with this phrase from being submitted.
-    # Written as separate literals to avoid the check triggering here.
-    submit_blocker = "DO" + " NOT" + " SUBMIT."
-    self._converter.body += f"# {submit_blocker} {message}\n"
-
-  # ------------------------------------------------------------------------- #
-  # Function handlers that convert BUILD definitions to CMake definitions.    #
-  #                                                                           #
-  # Names and signatures must match 1:1 with those expected in BUILD files    #
-  # except that default values for optional arguments should generally be     #
-  # `None` so we don't set them unnecessarily in the CMakeLists.txt files.    #
-  # Each function that may be found in a BUILD file must be listed here.      #
-  # ------------------------------------------------------------------------- #
-
-  # Functions with no mapping to CMake. Just ignore these.
-  def alias(self, *args, **kwargs):
-    pass
-
-  def bool_flag(self, *args, **kwargs):
-    pass
-
-  def load(self, *args, **kwargs):
-    pass
-
-  def package(self, **kwargs):
-    pass
-
-  def iree_build_test(self, **kwargs):
-    pass
-
-  def test_suite(self, **kwargs):
-    pass
-
-  def config_setting(self, **kwargs):
-    pass
-
-  def exports_files(self, *args, **kwargs):
-    pass
-
-  def iree_td_library(self, *args, **kwargs):
-    pass
-
-  # Technically we could do something with a CMake equivalent but we have no use
-  # case.
-  def py_binary(self, *args, **kwargs):
-    pass
-
-  def filegroup(self, name, **kwargs):
-    # Not implemented, but allowed for Bazel-only uses, such as declaring internal
-    # headers and other kinds of files that Bazel enforces but CMake doesn't care
-    # about. If we ever need to implement this, this might be a no-op, or may
-    # want to evaluate the srcs attribute and pass them along to any targets
-    # that depend on the filegroup.
-    # Cross-package dependencies and complicated globs could be hard to handle.
-    pass
-
-  def sh_binary(self, name, **kwargs):
-    if self._should_skip_target(**kwargs):
-      return
-    self._convert_unimplemented_function("sh_binary", name)
-
-  def enforce_glob(self, files, **kwargs):
-    return files
-
-  def glob(self, include, exclude=None, exclude_directories=1):
-    if exclude_directories != 1:
-      self._convert_unimplemented_function("glob", "with exclude_directories")
-    if exclude is None:
-      exclude = []
-
-    glob_vars = []
-    for pattern in include:
-      if "**" in pattern:
-        # bazel's glob has some specific restrictions about crossing package
-        # boundaries. We have no uses of recursive globs. Rather than try to
-        # emulate them or silently give different behavior, just error out.
-        # See https://docs.bazel.build/versions/master/be/functions.html#glob
-        raise NotImplementedError("Recursive globs not supported")
-      # Bazel `*.mlir` glob -> CMake Variable `_GLOB_X_MLIR`
-      var = "_GLOB_" + pattern.replace("*", "X").replace(".", "_").upper()
-      glob_vars.append(var)
-      self._converter.body += (
-          f"file(GLOB {var} LIST_DIRECTORIES false"
-          f" RELATIVE {self._expand_cmake_var('CMAKE_CURRENT_SOURCE_DIR')}"
-          f" CONFIGURE_DEPENDS {pattern})\n")
-    for pattern in exclude:
-      if "**" in pattern:
-        raise NotImplementedError("Recursive globs not supported")
-      exclude_var = ("_GLOB_" +
-                     pattern.replace("*", "X").replace(".", "_").upper())
-      self._converter.body += (
-          f"file(GLOB {exclude_var} LIST_DIRECTORIES false"
-          f" RELATIVE {self._expand_cmake_var('CMAKE_CURRENT_SOURCE_DIR')}"
-          f" CONFIGURE_DEPENDS {pattern})\n")
-      for glob_var in glob_vars:
-        self._converter.body += (
-            f"list(REMOVE_ITEM {glob_var} {self._expand_cmake_var(exclude_var)})\n"
+    def _convert_tbl_outs_block(self, tbl_outs):
+        outs_list = "\n".join(
+            [f"    {' '.join(flags)} {value}" for flags, value in tbl_outs]
         )
-    return [self._expand_cmake_var(var) for var in glob_vars]
+        return f"  OUTS\n{outs_list}\n"
 
-  # TODO(gcmn) implement these types of functions in a less hard-coded way
-  def platform_trampoline_deps(self, basename, path="base"):
-    return [f"//{path}/internal:{basename}_internal"]
+    def _convert_tblgen_block(self, tblgen):
+        if tblgen.endswith("iree-tblgen"):
+            return "  TBLGEN\n    IREE\n"
+        else:
+            return ""
 
-  def select(self, d):
-    self._convert_unimplemented_function("select", str(d))
-    return d["//conditions:default"]
+    def _convert_target(self, target):
+        """Returns a list of targets that correspond to the specified Bazel target.
+        Note that this must be a list because some targets have a one to many mapping.
+        """
+        return self._targets.convert_target(target)
 
-  def defaulting_select(self, selector):
-    """Defined in build_defs.oss.bzl as a scoped alternative to select."""
-    default_value = selector.get("//conditions:default")
-    if default_value is None:
-      raise ValueError("bazel_to_cmake can only convert selects with a default")
-    return default_value
+    def _convert_single_target(self, target):
+        replacement_targets = self._convert_target(target)
+        if len(replacement_targets) != 1:
+            raise RuntimeError(
+                f"Expected single target replacement for {target},"
+                f" but got multiple: {replacement_targets}"
+            )
+        return replacement_targets[0]
 
-  def cc_library(self,
-                 name,
-                 hdrs=None,
-                 textual_hdrs=None,
-                 srcs=None,
-                 copts=None,
-                 defines=None,
-                 data=None,
-                 deps=None,
-                 testonly=None,
-                 linkopts=None,
-                 includes=None,
-                 **kwargs):
-    if self._should_skip_target(**kwargs):
-      return
-    if linkopts:
-      self._convert_unimplemented_function("linkopts")
-    name_block = self._convert_string_arg_block("NAME", name, quote=False)
-    hdrs_block = self._convert_string_list_block("HDRS", hdrs, sort=True)
-    textual_hdrs_block = self._convert_string_list_block("TEXTUAL_HDRS",
-                                                         textual_hdrs,
-                                                         sort=True)
-    srcs_block = self._convert_srcs_block(srcs)
-    copts_block = self._convert_string_list_block("COPTS", copts, sort=False)
-    defines_block = self._convert_string_list_block("DEFINES", defines)
-    data_block = self._convert_target_list_block("DATA", data)
-    deps_block = self._convert_target_list_block("DEPS", deps)
-    testonly_block = self._convert_option_block("TESTONLY", testonly)
-    includes_block = self._convert_includes_block(includes)
+    def _convert_single_target_block(self, name, target):
+        mapped_target = self._convert_single_target(target)
+        return self._convert_string_arg_block(name, mapped_target, quote=False)
 
-    self._converter.body += (f"iree_cc_library(\n"
-                             f"{name_block}"
-                             f"{copts_block}"
-                             f"{hdrs_block}"
-                             f"{textual_hdrs_block}"
-                             f"{srcs_block}"
-                             f"{data_block}"
-                             f"{deps_block}"
-                             f"{defines_block}"
-                             f"{testonly_block}"
-                             f"{includes_block}"
-                             f"  PUBLIC\n)\n\n")
+    def _convert_target_list_block(self, list_name, targets):
+        if targets is None:
+            return ""
 
-  def iree_compiler_register_plugin(self, plugin_id, target):
-    plugin_id_block = self._convert_string_arg_block("PLUGIN_ID",
-                                                     plugin_id,
-                                                     quote=False)
-    target_block = self._convert_single_target_block("TARGET", target)
-    self._converter.body += (f"iree_compiler_register_plugin(\n"
-                             f"{plugin_id_block}"
-                             f"{target_block}"
-                             f")\n\n")
+        #  DEPS
+        #    package1::target1
+        #    package1::target2
+        #    package2::target
+        targets = [self._convert_target(t) for t in targets]
+        # Flatten lists
+        targets = list(itertools.chain.from_iterable(targets))
+        # Remove duplicates
+        targets = set(targets)
+        # Remove Falsey (None and empty string) values
+        targets = filter(None, targets)
 
-  def cc_test(self,
-              name,
-              hdrs=None,
-              srcs=None,
-              copts=None,
-              defines=None,
-              data=None,
-              deps=None,
-              timeout=None,
-              args=None,
-              tags=None,
-              includes=None,
-              **kwargs):
-    if self._should_skip_target(tags=tags, **kwargs):
-      return
-    name_block = self._convert_string_arg_block("NAME", name, quote=False)
-    hdrs_block = self._convert_string_list_block("HDRS", hdrs, sort=True)
-    srcs_block = self._convert_srcs_block(srcs)
-    copts_block = self._convert_string_list_block("COPTS", copts, sort=False)
-    defines_block = self._convert_string_list_block("DEFINES", defines)
-    data_block = self._convert_target_list_block("DATA", data)
-    deps_block = self._convert_target_list_block("DEPS", deps)
-    args_block = self._convert_string_list_block("ARGS", args)
-    labels_block = self._convert_string_list_block("LABELS", tags)
-    timeout_block = self._convert_timeout_arg_block("TIMEOUT", timeout)
-    includes_block = self._convert_includes_block(includes)
+        return self._convert_string_list_block(
+            list_name, targets, sort=True, quote=False
+        )
 
-    self._converter.body += (f"iree_cc_test(\n"
-                             f"{name_block}"
-                             f"{hdrs_block}"
-                             f"{srcs_block}"
-                             f"{copts_block}"
-                             f"{defines_block}"
-                             f"{data_block}"
-                             f"{deps_block}"
-                             f"{args_block}"
-                             f"{labels_block}"
-                             f"{timeout_block}"
-                             f"{includes_block}"
-                             f")\n\n")
+    def _convert_includes_block(self, includes):
+        if not includes:
+            return ""
+        dirs = []
+        for include in includes:
+            dirs.append(
+                "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/%s>" % (include,)
+            )
+            dirs.append(
+                "$<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}/%s>" % (include,)
+            )
+        return self._convert_string_list_block("INCLUDES", dirs, sort=False, quote=True)
 
-  def cc_binary(self,
-                name,
-                srcs=None,
-                data=None,
-                deps=None,
-                copts=None,
-                defines=None,
-                linkopts=None,
-                testonly=None,
-                includes=None,
-                **kwargs):
-    if self._should_skip_target(**kwargs):
-      return
-    if linkopts:
-      self._convert_unimplemented_function("linkopts")
-    name_block = self._convert_string_arg_block("NAME", name, quote=False)
-    copts_block = self._convert_string_list_block("COPTS", copts, sort=False)
-    defines_block = self._convert_string_list_block("DEFINES", defines)
-    srcs_block = self._convert_srcs_block(srcs)
-    data_block = self._convert_target_list_block("DATA", data)
-    deps_block = self._convert_target_list_block("DEPS", deps)
-    testonly_block = self._convert_option_block("TESTONLY", testonly)
-    includes_block = self._convert_includes_block(includes)
+    def _convert_unimplemented_function(self, function, details=""):
+        message = f"Unimplemented {function}: {details}"
+        if not self._converter.first_error:
+            self._converter.first_error = NotImplementedError(message)
+        # Avoid submitting the raw results from non-strict runs. These are still
+        # useful but are generally not safe to submit as-is. An upstream check
+        # prevents changes with this phrase from being submitted.
+        # Written as separate literals to avoid the check triggering here.
+        submit_blocker = "DO" + " NOT" + " SUBMIT."
+        self._converter.body += f"# {submit_blocker} {message}\n"
 
-    self._converter.body += (f"iree_cc_binary(\n"
-                             f"{name_block}"
-                             f"{srcs_block}"
-                             f"{copts_block}"
-                             f"{defines_block}"
-                             f"{data_block}"
-                             f"{deps_block}"
-                             f"{testonly_block}"
-                             f"{includes_block}"
-                             f")\n\n")
+    # ------------------------------------------------------------------------- #
+    # Function handlers that convert BUILD definitions to CMake definitions.    #
+    #                                                                           #
+    # Names and signatures must match 1:1 with those expected in BUILD files    #
+    # except that default values for optional arguments should generally be     #
+    # `None` so we don't set them unnecessarily in the CMakeLists.txt files.    #
+    # Each function that may be found in a BUILD file must be listed here.      #
+    # ------------------------------------------------------------------------- #
 
-  def c_embed_data(self,
-                   name,
-                   srcs,
-                   c_file_output,
-                   h_file_output,
-                   testonly=None,
-                   strip_prefix=None,
-                   flatten=None,
-                   identifier=None,
-                   deps=None,
-                   **kwargs):
-    if self._should_skip_target(**kwargs):
-      return
-    name_block = self._convert_string_arg_block("NAME", name, quote=False)
-    srcs_block = self._convert_srcs_block(srcs)
-    c_file_output_block = self._convert_string_arg_block(
-        "C_FILE_OUTPUT", c_file_output)
-    h_file_output_block = self._convert_string_arg_block(
-        "H_FILE_OUTPUT", h_file_output)
-    testonly_block = self._convert_option_block("TESTONLY", testonly)
-    identifier_block = self._convert_string_arg_block("IDENTIFIER", identifier)
-    flatten_block = self._convert_option_block("FLATTEN", flatten)
-    deps_block = self._convert_target_list_block("DEPS", deps)
+    # Functions with no mapping to CMake. Just ignore these.
+    def alias(self, *args, **kwargs):
+        pass
 
-    self._converter.body += (f"iree_c_embed_data(\n"
-                             f"{name_block}"
-                             f"{srcs_block}"
-                             f"{deps_block}"
-                             f"{c_file_output_block}"
-                             f"{h_file_output_block}"
-                             f"{identifier_block}"
-                             f"{testonly_block}"
-                             f"{flatten_block}"
-                             f"  PUBLIC\n)\n\n")
+    def bool_flag(self, *args, **kwargs):
+        pass
 
-  def iree_bitcode_library(self,
-                           name,
-                           arch,
-                           srcs,
-                           internal_hdrs=None,
-                           copts=None):
-    name_block = self._convert_string_arg_block("NAME", name, quote=False)
-    arch_block = self._convert_string_arg_block("ARCH", arch, quote=False)
-    srcs_block = self._convert_srcs_block(srcs)
-    copts_block = self._convert_string_list_block("COPTS", copts, sort=False)
+    def load(self, *args, **kwargs):
+        pass
 
-    self._converter.body += (f"iree_bitcode_library(\n"
-                             f"{name_block}"
-                             f"{arch_block}"
-                             f"{srcs_block}"
-                             f"{copts_block}"
-                             f")\n\n")
+    def package(self, **kwargs):
+        pass
 
-  def iree_link_bitcode(self, name, bitcode_files):
-    name_block = self._convert_string_arg_block("NAME", name, quote=False)
-    bitcode_files_block = self._convert_srcs_block(
-        [f.replace(":", "/") for f in bitcode_files])
+    def iree_build_test(self, **kwargs):
+        pass
 
-    self._converter.body += (f"iree_link_bitcode(\n"
-                             f"{name_block}"
-                             f"{bitcode_files_block}"
-                             f"\n)\n\n")
+    def test_suite(self, **kwargs):
+        pass
 
-  def iree_bytecode_module(self,
-                           name,
-                           src,
-                           module_name=None,
-                           flags=None,
-                           compile_tool=None,
-                           c_identifier=None,
-                           static_lib_path=None,
-                           deps=None,
-                           testonly=None):
-    name_block = self._convert_string_arg_block("NAME", name, quote=False)
-    src_block = self._convert_string_arg_block("SRC", src)
-    module_name_block = self._convert_string_arg_block("MODULE_FILE_NAME",
-                                                       module_name)
-    c_identifier_block = self._convert_string_arg_block("C_IDENTIFIER",
-                                                        c_identifier)
-    static_lib_block = self._convert_string_arg_block("STATIC_LIB_PATH",
-                                                      static_lib_path)
-    compile_tool_block = self._convert_target_block("COMPILE_TOOL",
-                                                    compile_tool)
-    flags_block = self._convert_string_list_block("FLAGS", flags)
-    deps_block = self._convert_target_list_block("DEPS", deps)
-    testonly_block = self._convert_option_block("TESTONLY", testonly)
+    def config_setting(self, **kwargs):
+        pass
 
-    self._converter.body += (f"iree_bytecode_module(\n"
-                             f"{name_block}"
-                             f"{src_block}"
-                             f"{module_name_block}"
-                             f"{c_identifier_block}"
-                             f"{compile_tool_block}"
-                             f"{static_lib_block}"
-                             f"{flags_block}"
-                             f"{deps_block}"
-                             f"{testonly_block}"
-                             f"  PUBLIC\n)\n\n")
+    def exports_files(self, *args, **kwargs):
+        pass
 
-  def iree_flatbuffer_c_library(self, name, srcs, flatcc_args=None):
-    name_block = self._convert_string_arg_block("NAME", name, quote=False)
-    srcs_block = self._convert_srcs_block(srcs)
-    flatcc_args_block = self._convert_string_list_block("FLATCC_ARGS",
-                                                        flatcc_args)
+    def iree_td_library(self, *args, **kwargs):
+        pass
 
-    self._converter.body += (f"flatbuffer_c_library(\n"
-                             f"{name_block}"
-                             f"{srcs_block}"
-                             f"{flatcc_args_block}"
-                             f"  PUBLIC\n)\n\n")
+    # Technically we could do something with a CMake equivalent but we have no use
+    # case.
+    def py_binary(self, *args, **kwargs):
+        pass
 
-  def gentbl_cc_library(self,
-                        name,
-                        tblgen,
-                        td_file,
-                        tbl_outs,
-                        td_srcs=None,
-                        deps=None,
-                        includes=None,
-                        strip_include_prefix=None,
-                        test=None):
-    name_block = self._convert_string_arg_block("NAME", name, quote=False)
-    tblgen_block = self._convert_tblgen_block(tblgen)
-    td_file_block = self._convert_td_file_block(td_file)
-    outs_block = self._convert_tbl_outs_block(tbl_outs)
+    def filegroup(self, name, **kwargs):
+        # Not implemented, but allowed for Bazel-only uses, such as declaring internal
+        # headers and other kinds of files that Bazel enforces but CMake doesn't care
+        # about. If we ever need to implement this, this might be a no-op, or may
+        # want to evaluate the srcs attribute and pass them along to any targets
+        # that depend on the filegroup.
+        # Cross-package dependencies and complicated globs could be hard to handle.
+        pass
 
-    self._converter.body += (f"iree_tablegen_library(\n"
-                             f"{name_block}"
-                             f"{td_file_block}"
-                             f"{outs_block}"
-                             f"{tblgen_block}"
-                             f")\n\n")
+    def sh_binary(self, name, **kwargs):
+        if self._should_skip_target(**kwargs):
+            return
+        self._convert_unimplemented_function("sh_binary", name)
 
-  def iree_gentbl_cc_library(self, **kwargs):
-    if self._should_skip_target(**kwargs):
-      return
-    # The bazel version of this rule adds some include directories and defs
-    # that are implicitly handled by the cmake version.
-    self.gentbl_cc_library(**kwargs)
+    def enforce_glob(self, files, **kwargs):
+        return files
 
-  def iree_tablegen_doc(self,
-                        name,
-                        tblgen,
-                        td_file,
-                        tbl_outs,
-                        td_srcs=None,
-                        includes=None,
-                        deps=None,
-                        test=None):
-    name_block = self._convert_string_arg_block("NAME", name, quote=False)
-    tblgen_block = self._convert_tblgen_block(tblgen)
-    td_file_block = self._convert_td_file_block(td_file)
-    outs_block = self._convert_tbl_outs_block(tbl_outs)
+    def glob(self, include, exclude=None, exclude_directories=1):
+        if exclude_directories != 1:
+            self._convert_unimplemented_function("glob", "with exclude_directories")
+        if exclude is None:
+            exclude = []
 
-    self._converter.body += (f"iree_tablegen_doc(\n"
-                             f"{name_block}"
-                             f"{td_file_block}"
-                             f"{outs_block}"
-                             f"{tblgen_block}"
-                             f")\n\n")
+        glob_vars = []
+        for pattern in include:
+            if "**" in pattern:
+                # bazel's glob has some specific restrictions about crossing package
+                # boundaries. We have no uses of recursive globs. Rather than try to
+                # emulate them or silently give different behavior, just error out.
+                # See https://docs.bazel.build/versions/master/be/functions.html#glob
+                raise NotImplementedError("Recursive globs not supported")
+            # Bazel `*.mlir` glob -> CMake Variable `_GLOB_X_MLIR`
+            var = "_GLOB_" + pattern.replace("*", "X").replace(".", "_").upper()
+            glob_vars.append(var)
+            self._converter.body += (
+                f"file(GLOB {var} LIST_DIRECTORIES false"
+                f" RELATIVE {self._expand_cmake_var('CMAKE_CURRENT_SOURCE_DIR')}"
+                f" CONFIGURE_DEPENDS {pattern})\n"
+            )
+        for pattern in exclude:
+            if "**" in pattern:
+                raise NotImplementedError("Recursive globs not supported")
+            exclude_var = "_GLOB_" + pattern.replace("*", "X").replace(".", "_").upper()
+            self._converter.body += (
+                f"file(GLOB {exclude_var} LIST_DIRECTORIES false"
+                f" RELATIVE {self._expand_cmake_var('CMAKE_CURRENT_SOURCE_DIR')}"
+                f" CONFIGURE_DEPENDS {pattern})\n"
+            )
+            for glob_var in glob_vars:
+                self._converter.body += f"list(REMOVE_ITEM {glob_var} {self._expand_cmake_var(exclude_var)})\n"
+        return [self._expand_cmake_var(var) for var in glob_vars]
 
-  def iree_lit_test_suite(self,
-                          name,
-                          srcs,
-                          tools=None,
-                          data=None,
-                          tags=None,
-                          timeout=None,
-                          **kwargs):
-    if self._should_skip_target(tags=tags, **kwargs):
-      return
-    name_block = self._convert_string_arg_block("NAME", name, quote=False)
-    srcs_block = self._convert_srcs_block(srcs)
-    tools_block = self._convert_target_list_block("TOOLS", tools)
-    data_block = self._convert_target_list_block("DATA", data)
-    labels_block = self._convert_string_list_block("LABELS", tags)
-    timeout_block = self._convert_timeout_arg_block("TIMEOUT", timeout)
+    # TODO(gcmn) implement these types of functions in a less hard-coded way
+    def platform_trampoline_deps(self, basename, path="base"):
+        return [f"//{path}/internal:{basename}_internal"]
 
-    self._converter.body += (f"iree_lit_test_suite(\n"
-                             f"{name_block}"
-                             f"{srcs_block}"
-                             f"{tools_block}"
-                             f"{data_block}"
-                             f"{labels_block}"
-                             f"{timeout_block}"
-                             f")\n\n")
+    def select(self, d):
+        self._convert_unimplemented_function("select", str(d))
+        return d["//conditions:default"]
 
-  def iree_check_single_backend_test_suite(self,
-                                           name,
-                                           srcs,
-                                           target_backend,
-                                           driver=None,
-                                           compiler_flags=None,
-                                           target_backends_and_drivers=None,
-                                           runner_args=None,
-                                           tags=None,
-                                           target_cpu_features=None,
-                                           timeout=None,
-                                           **kwargs):
-    if self._should_skip_target(tags=tags, **kwargs):
-      return
-    name_block = self._convert_string_arg_block("NAME", name, quote=False)
-    srcs_block = self._convert_srcs_block(srcs)
-    target_backend_block = self._convert_string_arg_block(
-        "TARGET_BACKEND", target_backend)
-    driver_block = self._convert_string_arg_block("DRIVER", driver)
-    compiler_flags_block = self._convert_string_list_block(
-        "COMPILER_FLAGS", compiler_flags)
-    runner_args_block = self._convert_string_list_block("RUNNER_ARGS",
-                                                        runner_args)
-    labels_block = self._convert_string_list_block("LABELS", tags)
-    target_cpu_features_block = self._convert_string_arg_block(
-        "TARGET_CPU_FEATURES", target_cpu_features)
-    timeout_block = self._convert_timeout_arg_block("TIMEOUT", timeout)
+    def defaulting_select(self, selector):
+        """Defined in build_defs.oss.bzl as a scoped alternative to select."""
+        default_value = selector.get("//conditions:default")
+        if default_value is None:
+            raise ValueError("bazel_to_cmake can only convert selects with a default")
+        return default_value
 
-    self._converter.body += (f"iree_check_single_backend_test_suite(\n"
-                             f"{name_block}"
-                             f"{srcs_block}"
-                             f"{target_backend_block}"
-                             f"{driver_block}"
-                             f"{compiler_flags_block}"
-                             f"{runner_args_block}"
-                             f"{labels_block}"
-                             f"{target_cpu_features_block}"
-                             f"{timeout_block}"
-                             f")\n\n")
+    def cc_library(
+        self,
+        name,
+        hdrs=None,
+        textual_hdrs=None,
+        srcs=None,
+        copts=None,
+        defines=None,
+        data=None,
+        deps=None,
+        testonly=None,
+        linkopts=None,
+        includes=None,
+        **kwargs,
+    ):
+        if self._should_skip_target(**kwargs):
+            return
+        if linkopts:
+            self._convert_unimplemented_function("linkopts")
+        name_block = self._convert_string_arg_block("NAME", name, quote=False)
+        hdrs_block = self._convert_string_list_block("HDRS", hdrs, sort=True)
+        textual_hdrs_block = self._convert_string_list_block(
+            "TEXTUAL_HDRS", textual_hdrs, sort=True
+        )
+        srcs_block = self._convert_srcs_block(srcs)
+        copts_block = self._convert_string_list_block("COPTS", copts, sort=False)
+        defines_block = self._convert_string_list_block("DEFINES", defines)
+        data_block = self._convert_target_list_block("DATA", data)
+        deps_block = self._convert_target_list_block("DEPS", deps)
+        testonly_block = self._convert_option_block("TESTONLY", testonly)
+        includes_block = self._convert_includes_block(includes)
 
-  def iree_check_test_suite(self,
-                            name,
-                            srcs,
-                            target_backends_and_drivers=None,
-                            compiler_flags=None,
-                            runner_args=None,
-                            tags=None,
-                            target_cpu_features_variants=None,
-                            timeout=None,
-                            **kwargs):
-    if self._should_skip_target(tags=tags, **kwargs):
-      return
-    target_backends = None
-    drivers = None
-    if target_backends_and_drivers is not None:
-      target_backends = [it[0] for it in target_backends_and_drivers]
-      drivers = [it[1] for it in target_backends_and_drivers]
+        self._converter.body += (
+            f"iree_cc_library(\n"
+            f"{name_block}"
+            f"{copts_block}"
+            f"{hdrs_block}"
+            f"{textual_hdrs_block}"
+            f"{srcs_block}"
+            f"{data_block}"
+            f"{deps_block}"
+            f"{defines_block}"
+            f"{testonly_block}"
+            f"{includes_block}"
+            f"  PUBLIC\n)\n\n"
+        )
 
-    name_block = self._convert_string_arg_block("NAME", name, quote=False)
-    srcs_block = self._convert_srcs_block(srcs)
-    target_backends_block = self._convert_string_list_block(
-        "TARGET_BACKENDS", target_backends)
-    drivers_block = self._convert_string_list_block("DRIVERS", drivers)
-    compiler_flags_block = self._convert_string_list_block(
-        "COMPILER_FLAGS", compiler_flags)
-    runner_args_block = self._convert_string_list_block("RUNNER_ARGS",
-                                                        runner_args)
-    labels_block = self._convert_string_list_block("LABELS", tags)
-    target_cpu_features_variants_block = self._convert_string_list_block(
-        "TARGET_CPU_FEATURES_VARIANTS", target_cpu_features_variants)
-    timeout_block = self._convert_timeout_arg_block("TIMEOUT", timeout)
+    def iree_compiler_register_plugin(self, plugin_id, target):
+        plugin_id_block = self._convert_string_arg_block(
+            "PLUGIN_ID", plugin_id, quote=False
+        )
+        target_block = self._convert_single_target_block("TARGET", target)
+        self._converter.body += (
+            f"iree_compiler_register_plugin(\n"
+            f"{plugin_id_block}"
+            f"{target_block}"
+            f")\n\n"
+        )
 
-    self._converter.body += (f"iree_check_test_suite(\n"
-                             f"{name_block}"
-                             f"{srcs_block}"
-                             f"{target_backends_block}"
-                             f"{drivers_block}"
-                             f"{compiler_flags_block}"
-                             f"{runner_args_block}"
-                             f"{labels_block}"
-                             f"{target_cpu_features_variants_block}"
-                             f"{timeout_block}"
-                             f")\n\n")
+    def cc_test(
+        self,
+        name,
+        hdrs=None,
+        srcs=None,
+        copts=None,
+        defines=None,
+        data=None,
+        deps=None,
+        timeout=None,
+        args=None,
+        tags=None,
+        includes=None,
+        **kwargs,
+    ):
+        if self._should_skip_target(tags=tags, **kwargs):
+            return
+        name_block = self._convert_string_arg_block("NAME", name, quote=False)
+        hdrs_block = self._convert_string_list_block("HDRS", hdrs, sort=True)
+        srcs_block = self._convert_srcs_block(srcs)
+        copts_block = self._convert_string_list_block("COPTS", copts, sort=False)
+        defines_block = self._convert_string_list_block("DEFINES", defines)
+        data_block = self._convert_target_list_block("DATA", data)
+        deps_block = self._convert_target_list_block("DEPS", deps)
+        args_block = self._convert_string_list_block("ARGS", args)
+        labels_block = self._convert_string_list_block("LABELS", tags)
+        timeout_block = self._convert_timeout_arg_block("TIMEOUT", timeout)
+        includes_block = self._convert_includes_block(includes)
 
-  def iree_generated_trace_runner_test(self,
-                                       name,
-                                       generator,
-                                       generator_args=None,
-                                       trace_runner=None,
-                                       target_backends_and_drivers=None,
-                                       compiler_flags=None,
-                                       runner_args=None,
-                                       tags=None,
-                                       target_cpu_features_variants=None,
-                                       **kwargs):
-    if self._should_skip_target(tags=tags, **kwargs):
-      return
-    target_backends = None
-    drivers = None
-    if target_backends_and_drivers is not None:
-      target_backends = [it[0] for it in target_backends_and_drivers]
-      drivers = [it[1] for it in target_backends_and_drivers]
+        self._converter.body += (
+            f"iree_cc_test(\n"
+            f"{name_block}"
+            f"{hdrs_block}"
+            f"{srcs_block}"
+            f"{copts_block}"
+            f"{defines_block}"
+            f"{data_block}"
+            f"{deps_block}"
+            f"{args_block}"
+            f"{labels_block}"
+            f"{timeout_block}"
+            f"{includes_block}"
+            f")\n\n"
+        )
 
-    name_block = self._convert_string_arg_block("NAME", name, quote=False)
-    # For now we assume that the generator target is a py_binary with a single
-    # source .py file named like it.
-    generator_py = f"{generator.split(':')[-1]}.py"
-    generator_block = self._convert_string_arg_block("GENERATOR",
-                                                     generator_py,
-                                                     quote=True)
-    generator_args_block = self._convert_string_list_block(
-        "GENERATOR_ARGS", generator_args)
-    trace_runner_block = self._convert_target_block("TRACE_RUNNER",
-                                                    trace_runner)
-    target_backends_block = self._convert_string_list_block(
-        "TARGET_BACKENDS", target_backends)
-    drivers_block = self._convert_string_list_block("DRIVERS", drivers)
-    compiler_flags_block = self._convert_string_list_block(
-        "COMPILER_FLAGS", compiler_flags)
-    runner_args_block = self._convert_string_list_block("RUNNER_ARGS",
-                                                        runner_args)
-    labels_block = self._convert_string_list_block("LABELS", tags)
-    target_cpu_features_variants_block = self._convert_string_list_block(
-        "TARGET_CPU_FEATURES_VARIANTS", target_cpu_features_variants)
+    def cc_binary(
+        self,
+        name,
+        srcs=None,
+        data=None,
+        deps=None,
+        copts=None,
+        defines=None,
+        linkopts=None,
+        testonly=None,
+        includes=None,
+        **kwargs,
+    ):
+        if self._should_skip_target(**kwargs):
+            return
+        if linkopts:
+            self._convert_unimplemented_function("linkopts")
+        name_block = self._convert_string_arg_block("NAME", name, quote=False)
+        copts_block = self._convert_string_list_block("COPTS", copts, sort=False)
+        defines_block = self._convert_string_list_block("DEFINES", defines)
+        srcs_block = self._convert_srcs_block(srcs)
+        data_block = self._convert_target_list_block("DATA", data)
+        deps_block = self._convert_target_list_block("DEPS", deps)
+        testonly_block = self._convert_option_block("TESTONLY", testonly)
+        includes_block = self._convert_includes_block(includes)
 
-    self._converter.body += (f"iree_generated_trace_runner_test(\n"
-                             f"{name_block}"
-                             f"{generator_block}"
-                             f"{generator_args_block}"
-                             f"{trace_runner_block}"
-                             f"{target_backends_block}"
-                             f"{drivers_block}"
-                             f"{compiler_flags_block}"
-                             f"{runner_args_block}"
-                             f"{labels_block}"
-                             f"{target_cpu_features_variants_block}"
-                             f")\n\n")
+        self._converter.body += (
+            f"iree_cc_binary(\n"
+            f"{name_block}"
+            f"{srcs_block}"
+            f"{copts_block}"
+            f"{defines_block}"
+            f"{data_block}"
+            f"{deps_block}"
+            f"{testonly_block}"
+            f"{includes_block}"
+            f")\n\n"
+        )
 
-  def native_test(self,
-                  name,
-                  src,
-                  args=None,
-                  data=None,
-                  tags=None,
-                  timeout=None):
-    if self._should_skip_target(tags=tags):
-      return
-    if data is not None:
-      self._convert_unimplemented_function("native_test", name + " has data")
+    def c_embed_data(
+        self,
+        name,
+        srcs,
+        c_file_output,
+        h_file_output,
+        testonly=None,
+        strip_prefix=None,
+        flatten=None,
+        identifier=None,
+        deps=None,
+        **kwargs,
+    ):
+        if self._should_skip_target(**kwargs):
+            return
+        name_block = self._convert_string_arg_block("NAME", name, quote=False)
+        srcs_block = self._convert_srcs_block(srcs)
+        c_file_output_block = self._convert_string_arg_block(
+            "C_FILE_OUTPUT", c_file_output
+        )
+        h_file_output_block = self._convert_string_arg_block(
+            "H_FILE_OUTPUT", h_file_output
+        )
+        testonly_block = self._convert_option_block("TESTONLY", testonly)
+        identifier_block = self._convert_string_arg_block("IDENTIFIER", identifier)
+        flatten_block = self._convert_option_block("FLATTEN", flatten)
+        deps_block = self._convert_target_list_block("DEPS", deps)
 
-    name_block = self._convert_string_arg_block("NAME", name)
-    test_binary_block = self._convert_single_target_block("SRC", src)
-    args_block = self._convert_string_list_block("ARGS", args)
-    labels_block = self._convert_string_list_block("LABELS", tags)
-    timeout_block = self._convert_timeout_arg_block("TIMEOUT", timeout)
+        self._converter.body += (
+            f"iree_c_embed_data(\n"
+            f"{name_block}"
+            f"{srcs_block}"
+            f"{deps_block}"
+            f"{c_file_output_block}"
+            f"{h_file_output_block}"
+            f"{identifier_block}"
+            f"{testonly_block}"
+            f"{flatten_block}"
+            f"  PUBLIC\n)\n\n"
+        )
 
-    self._converter.body += (f"iree_native_test(\n"
-                             f"{name_block}"
-                             f"{args_block}"
-                             f"{test_binary_block}"
-                             f"{labels_block}"
-                             f")\n\n")
+    def iree_bitcode_library(self, name, arch, srcs, internal_hdrs=None, copts=None):
+        name_block = self._convert_string_arg_block("NAME", name, quote=False)
+        arch_block = self._convert_string_arg_block("ARCH", arch, quote=False)
+        srcs_block = self._convert_srcs_block(srcs)
+        copts_block = self._convert_string_list_block("COPTS", copts, sort=False)
 
-  def cc_binary_benchmark(
-      self,
-      name,
-      srcs=None,
-      data=None,
-      deps=None,
-      copts=None,
-      defines=None,
-      linkopts=None,
-      tags=None,
-      testonly=True,
-      # unused
-      size="small",
-      timeout=None):
-    if self._should_skip_target(tags=tags):
-      return
-    name_block = self._convert_string_arg_block("NAME", name, quote=False)
-    srcs_block = self._convert_srcs_block(srcs)
-    data_block = self._convert_target_list_block("DATA", data)
-    deps_block = self._convert_target_list_block("DEPS", deps)
-    copts_block = self._convert_string_list_block("COPTS", copts, sort=False)
-    defines_block = self._convert_string_list_block("DEFINES", defines)
-    defines_block = self._convert_string_list_block("LINKOPTS", linkopts)
-    testonly_block = self._convert_option_block("TESTONLY", testonly)
-    labels_block = self._convert_string_list_block("LABELS", tags)
+        self._converter.body += (
+            f"iree_bitcode_library(\n"
+            f"{name_block}"
+            f"{arch_block}"
+            f"{srcs_block}"
+            f"{copts_block}"
+            f")\n\n"
+        )
 
-    self._converter.body += (f"iree_cc_binary_benchmark(\n"
-                             f"{name_block}"
-                             f"{srcs_block}"
-                             f"{data_block}"
-                             f"{deps_block}"
-                             f"{copts_block}"
-                             f"{defines_block}"
-                             f"{defines_block}"
-                             f"{testonly_block}"
-                             f"{labels_block}"
-                             f")\n\n")
+    def iree_link_bitcode(self, name, bitcode_files):
+        name_block = self._convert_string_arg_block("NAME", name, quote=False)
+        bitcode_files_block = self._convert_srcs_block(
+            [f.replace(":", "/") for f in bitcode_files]
+        )
 
-  def iree_cmake_extra_content(self, content, inline=False):
-    if inline:
-      self._converter.body += (f"\n{content}\n")
-    else:
-      self._converter.header += (f"\n{content}\n")
+        self._converter.body += (
+            f"iree_link_bitcode(\n" f"{name_block}" f"{bitcode_files_block}" f"\n)\n\n"
+        )
+
+    def iree_bytecode_module(
+        self,
+        name,
+        src,
+        module_name=None,
+        flags=None,
+        compile_tool=None,
+        c_identifier=None,
+        static_lib_path=None,
+        deps=None,
+        testonly=None,
+    ):
+        name_block = self._convert_string_arg_block("NAME", name, quote=False)
+        src_block = self._convert_string_arg_block("SRC", src)
+        module_name_block = self._convert_string_arg_block(
+            "MODULE_FILE_NAME", module_name
+        )
+        c_identifier_block = self._convert_string_arg_block(
+            "C_IDENTIFIER", c_identifier
+        )
+        static_lib_block = self._convert_string_arg_block(
+            "STATIC_LIB_PATH", static_lib_path
+        )
+        compile_tool_block = self._convert_target_block("COMPILE_TOOL", compile_tool)
+        flags_block = self._convert_string_list_block("FLAGS", flags)
+        deps_block = self._convert_target_list_block("DEPS", deps)
+        testonly_block = self._convert_option_block("TESTONLY", testonly)
+
+        self._converter.body += (
+            f"iree_bytecode_module(\n"
+            f"{name_block}"
+            f"{src_block}"
+            f"{module_name_block}"
+            f"{c_identifier_block}"
+            f"{compile_tool_block}"
+            f"{static_lib_block}"
+            f"{flags_block}"
+            f"{deps_block}"
+            f"{testonly_block}"
+            f"  PUBLIC\n)\n\n"
+        )
+
+    def iree_flatbuffer_c_library(self, name, srcs, flatcc_args=None):
+        name_block = self._convert_string_arg_block("NAME", name, quote=False)
+        srcs_block = self._convert_srcs_block(srcs)
+        flatcc_args_block = self._convert_string_list_block("FLATCC_ARGS", flatcc_args)
+
+        self._converter.body += (
+            f"flatbuffer_c_library(\n"
+            f"{name_block}"
+            f"{srcs_block}"
+            f"{flatcc_args_block}"
+            f"  PUBLIC\n)\n\n"
+        )
+
+    def gentbl_cc_library(
+        self,
+        name,
+        tblgen,
+        td_file,
+        tbl_outs,
+        td_srcs=None,
+        deps=None,
+        includes=None,
+        strip_include_prefix=None,
+        test=None,
+    ):
+        name_block = self._convert_string_arg_block("NAME", name, quote=False)
+        tblgen_block = self._convert_tblgen_block(tblgen)
+        td_file_block = self._convert_td_file_block(td_file)
+        outs_block = self._convert_tbl_outs_block(tbl_outs)
+
+        self._converter.body += (
+            f"iree_tablegen_library(\n"
+            f"{name_block}"
+            f"{td_file_block}"
+            f"{outs_block}"
+            f"{tblgen_block}"
+            f")\n\n"
+        )
+
+    def iree_gentbl_cc_library(self, **kwargs):
+        if self._should_skip_target(**kwargs):
+            return
+        # The bazel version of this rule adds some include directories and defs
+        # that are implicitly handled by the cmake version.
+        self.gentbl_cc_library(**kwargs)
+
+    def iree_tablegen_doc(
+        self,
+        name,
+        tblgen,
+        td_file,
+        tbl_outs,
+        td_srcs=None,
+        includes=None,
+        deps=None,
+        test=None,
+    ):
+        name_block = self._convert_string_arg_block("NAME", name, quote=False)
+        tblgen_block = self._convert_tblgen_block(tblgen)
+        td_file_block = self._convert_td_file_block(td_file)
+        outs_block = self._convert_tbl_outs_block(tbl_outs)
+
+        self._converter.body += (
+            f"iree_tablegen_doc(\n"
+            f"{name_block}"
+            f"{td_file_block}"
+            f"{outs_block}"
+            f"{tblgen_block}"
+            f")\n\n"
+        )
+
+    def iree_lit_test_suite(
+        self, name, srcs, tools=None, data=None, tags=None, timeout=None, **kwargs
+    ):
+        if self._should_skip_target(tags=tags, **kwargs):
+            return
+        name_block = self._convert_string_arg_block("NAME", name, quote=False)
+        srcs_block = self._convert_srcs_block(srcs)
+        tools_block = self._convert_target_list_block("TOOLS", tools)
+        data_block = self._convert_target_list_block("DATA", data)
+        labels_block = self._convert_string_list_block("LABELS", tags)
+        timeout_block = self._convert_timeout_arg_block("TIMEOUT", timeout)
+
+        self._converter.body += (
+            f"iree_lit_test_suite(\n"
+            f"{name_block}"
+            f"{srcs_block}"
+            f"{tools_block}"
+            f"{data_block}"
+            f"{labels_block}"
+            f"{timeout_block}"
+            f")\n\n"
+        )
+
+    def iree_check_single_backend_test_suite(
+        self,
+        name,
+        srcs,
+        target_backend,
+        driver=None,
+        compiler_flags=None,
+        target_backends_and_drivers=None,
+        runner_args=None,
+        tags=None,
+        target_cpu_features=None,
+        timeout=None,
+        **kwargs,
+    ):
+        if self._should_skip_target(tags=tags, **kwargs):
+            return
+        name_block = self._convert_string_arg_block("NAME", name, quote=False)
+        srcs_block = self._convert_srcs_block(srcs)
+        target_backend_block = self._convert_string_arg_block(
+            "TARGET_BACKEND", target_backend
+        )
+        driver_block = self._convert_string_arg_block("DRIVER", driver)
+        compiler_flags_block = self._convert_string_list_block(
+            "COMPILER_FLAGS", compiler_flags
+        )
+        runner_args_block = self._convert_string_list_block("RUNNER_ARGS", runner_args)
+        labels_block = self._convert_string_list_block("LABELS", tags)
+        target_cpu_features_block = self._convert_string_arg_block(
+            "TARGET_CPU_FEATURES", target_cpu_features
+        )
+        timeout_block = self._convert_timeout_arg_block("TIMEOUT", timeout)
+
+        self._converter.body += (
+            f"iree_check_single_backend_test_suite(\n"
+            f"{name_block}"
+            f"{srcs_block}"
+            f"{target_backend_block}"
+            f"{driver_block}"
+            f"{compiler_flags_block}"
+            f"{runner_args_block}"
+            f"{labels_block}"
+            f"{target_cpu_features_block}"
+            f"{timeout_block}"
+            f")\n\n"
+        )
+
+    def iree_check_test_suite(
+        self,
+        name,
+        srcs,
+        target_backends_and_drivers=None,
+        compiler_flags=None,
+        runner_args=None,
+        tags=None,
+        target_cpu_features_variants=None,
+        timeout=None,
+        **kwargs,
+    ):
+        if self._should_skip_target(tags=tags, **kwargs):
+            return
+        target_backends = None
+        drivers = None
+        if target_backends_and_drivers is not None:
+            target_backends = [it[0] for it in target_backends_and_drivers]
+            drivers = [it[1] for it in target_backends_and_drivers]
+
+        name_block = self._convert_string_arg_block("NAME", name, quote=False)
+        srcs_block = self._convert_srcs_block(srcs)
+        target_backends_block = self._convert_string_list_block(
+            "TARGET_BACKENDS", target_backends
+        )
+        drivers_block = self._convert_string_list_block("DRIVERS", drivers)
+        compiler_flags_block = self._convert_string_list_block(
+            "COMPILER_FLAGS", compiler_flags
+        )
+        runner_args_block = self._convert_string_list_block("RUNNER_ARGS", runner_args)
+        labels_block = self._convert_string_list_block("LABELS", tags)
+        target_cpu_features_variants_block = self._convert_string_list_block(
+            "TARGET_CPU_FEATURES_VARIANTS", target_cpu_features_variants
+        )
+        timeout_block = self._convert_timeout_arg_block("TIMEOUT", timeout)
+
+        self._converter.body += (
+            f"iree_check_test_suite(\n"
+            f"{name_block}"
+            f"{srcs_block}"
+            f"{target_backends_block}"
+            f"{drivers_block}"
+            f"{compiler_flags_block}"
+            f"{runner_args_block}"
+            f"{labels_block}"
+            f"{target_cpu_features_variants_block}"
+            f"{timeout_block}"
+            f")\n\n"
+        )
+
+    def iree_generated_trace_runner_test(
+        self,
+        name,
+        generator,
+        generator_args=None,
+        trace_runner=None,
+        target_backends_and_drivers=None,
+        compiler_flags=None,
+        runner_args=None,
+        tags=None,
+        target_cpu_features_variants=None,
+        **kwargs,
+    ):
+        if self._should_skip_target(tags=tags, **kwargs):
+            return
+        target_backends = None
+        drivers = None
+        if target_backends_and_drivers is not None:
+            target_backends = [it[0] for it in target_backends_and_drivers]
+            drivers = [it[1] for it in target_backends_and_drivers]
+
+        name_block = self._convert_string_arg_block("NAME", name, quote=False)
+        # For now we assume that the generator target is a py_binary with a single
+        # source .py file named after it.
+        generator_py = f"{generator.split(':')[-1]}.py"
+        generator_block = self._convert_string_arg_block(
+            "GENERATOR", generator_py, quote=True
+        )
+        generator_args_block = self._convert_string_list_block(
+            "GENERATOR_ARGS", generator_args
+        )
+        trace_runner_block = self._convert_target_block("TRACE_RUNNER", trace_runner)
+        target_backends_block = self._convert_string_list_block(
+            "TARGET_BACKENDS", target_backends
+        )
+        drivers_block = self._convert_string_list_block("DRIVERS", drivers)
+        compiler_flags_block = self._convert_string_list_block(
+            "COMPILER_FLAGS", compiler_flags
+        )
+        runner_args_block = self._convert_string_list_block("RUNNER_ARGS", runner_args)
+        labels_block = self._convert_string_list_block("LABELS", tags)
+        target_cpu_features_variants_block = self._convert_string_list_block(
+            "TARGET_CPU_FEATURES_VARIANTS", target_cpu_features_variants
+        )
+
+        self._converter.body += (
+            f"iree_generated_trace_runner_test(\n"
+            f"{name_block}"
+            f"{generator_block}"
+            f"{generator_args_block}"
+            f"{trace_runner_block}"
+            f"{target_backends_block}"
+            f"{drivers_block}"
+            f"{compiler_flags_block}"
+            f"{runner_args_block}"
+            f"{labels_block}"
+            f"{target_cpu_features_variants_block}"
+            f")\n\n"
+        )
+
+    def native_test(self, name, src, args=None, data=None, tags=None, timeout=None):
+        if self._should_skip_target(tags=tags):
+            return
+        if data is not None:
+            self._convert_unimplemented_function("native_test", name + " has data")
+
+        name_block = self._convert_string_arg_block("NAME", name)
+        test_binary_block = self._convert_single_target_block("SRC", src)
+        args_block = self._convert_string_list_block("ARGS", args)
+        labels_block = self._convert_string_list_block("LABELS", tags)
+        timeout_block = self._convert_timeout_arg_block("TIMEOUT", timeout)
+
+        self._converter.body += (
+            f"iree_native_test(\n"
+            f"{name_block}"
+            f"{args_block}"
+            f"{test_binary_block}"
+            f"{labels_block}"
+            f")\n\n"
+        )
+
+    def cc_binary_benchmark(
+        self,
+        name,
+        srcs=None,
+        data=None,
+        deps=None,
+        copts=None,
+        defines=None,
+        linkopts=None,
+        tags=None,
+        testonly=True,
+        # unused
+        size="small",
+        timeout=None,
+    ):
+        if self._should_skip_target(tags=tags):
+            return
+        name_block = self._convert_string_arg_block("NAME", name, quote=False)
+        srcs_block = self._convert_srcs_block(srcs)
+        data_block = self._convert_target_list_block("DATA", data)
+        deps_block = self._convert_target_list_block("DEPS", deps)
+        copts_block = self._convert_string_list_block("COPTS", copts, sort=False)
+        defines_block = self._convert_string_list_block("DEFINES", defines)
+        linkopts_block = self._convert_string_list_block("LINKOPTS", linkopts)
+        testonly_block = self._convert_option_block("TESTONLY", testonly)
+        labels_block = self._convert_string_list_block("LABELS", tags)
+
+        self._converter.body += (
+            f"iree_cc_binary_benchmark(\n"
+            f"{name_block}"
+            f"{srcs_block}"
+            f"{data_block}"
+            f"{deps_block}"
+            f"{copts_block}"
+            f"{defines_block}"
+            f"{linkopts_block}"
+            f"{testonly_block}"
+            f"{labels_block}"
+            f")\n\n"
+        )
+
+    def iree_cmake_extra_content(self, content, inline=False):
+        if inline:
+            self._converter.body += f"\n{content}\n"
+        else:
+            self._converter.header += f"\n{content}\n"
 
 
 class Converter(object):
-  """Conversion state tracking and full file template substitution."""
+    """Conversion state tracking and full file template substitution."""
 
-  def __init__(self):
-    # Header appears after the license block but before `iree_add_all_subdirs`.
-    self.header = ""
-    # Body appears after `iree_add_all_subdirs`.
-    self.body = ""
+    def __init__(self):
+        # Header appears after the license block but before `iree_add_all_subdirs`.
+        self.header = ""
+        # Body appears after `iree_add_all_subdirs`.
+        self.body = ""
 
-    self.first_error = None
+        self.first_error = None
 
-  def convert(self):
-    converted_content = (f"{self.header}\n\n"
-                         f"iree_add_all_subdirs()\n\n"
-                         f"{self.body}")
+    def convert(self):
+        converted_content = (
+            f"{self.header}\n\n" f"iree_add_all_subdirs()\n\n" f"{self.body}"
+        )
 
-    # Cleanup newline characters. This is more convenient than ensuring all
-    # conversions are careful with where they insert newlines.
-    converted_content = converted_content.replace("\n\n\n", "\n")
-    converted_content = converted_content.rstrip() + "\n"
+        # Clean up newline characters. This is more convenient than ensuring all
+        # conversions are careful with where they insert newlines.
+        converted_content = converted_content.replace("\n\n\n", "\n")
+        converted_content = converted_content.rstrip() + "\n"
 
-    return converted_content
+        return converted_content
 
 
 def GetDict(obj):
-  ret = {}
-  for k in dir(obj):
-    if not k.startswith("_"):
-      ret[k] = getattr(obj, k)
-  return ret
+    ret = {}
+    for k in dir(obj):
+        if not k.startswith("_"):
+            ret[k] = getattr(obj, k)
+    return ret
 
 
-def convert_build_file(build_file_code,
-                       repo_cfg,
-                       allow_partial_conversion=False):
-  converter = Converter()
-  # Allow overrides of TargetConverter and BuildFileFunctions from repo cfg.
-  repo_map = getattr(repo_cfg, "REPO_MAP", {})
-  target_converter = getattr(
-      repo_cfg, "CustomTargetConverter",
-      bazel_to_cmake_targets.TargetConverter)(repo_map=repo_map)
-  build_file_functions = getattr(repo_cfg, "CustomBuildFileFunctions",
-                                 BuildFileFunctions)(converter=converter,
-                                                     targets=target_converter)
+def convert_build_file(build_file_code, repo_cfg, allow_partial_conversion=False):
+    converter = Converter()
+    # Allow overrides of TargetConverter and BuildFileFunctions from repo cfg.
+    repo_map = getattr(repo_cfg, "REPO_MAP", {})
+    target_converter = getattr(
+        repo_cfg, "CustomTargetConverter", bazel_to_cmake_targets.TargetConverter
+    )(repo_map=repo_map)
+    build_file_functions = getattr(
+        repo_cfg, "CustomBuildFileFunctions", BuildFileFunctions
+    )(converter=converter, targets=target_converter)
 
-  exec(build_file_code, GetDict(build_file_functions))
-  converted_text = converter.convert()
-  if not allow_partial_conversion and converter.first_error:
-    raise converter.first_error  # pylint: disable=raising-bad-type
-  return converted_text
+    exec(build_file_code, GetDict(build_file_functions))
+    converted_text = converter.convert()
+    if not allow_partial_conversion and converter.first_error:
+        raise converter.first_error  # pylint: disable=raising-bad-type
+    return converted_text
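As an aside on the converted code above: the `glob()` handler in `bazel_to_cmake.py` mangles each Bazel glob pattern into a CMake variable name plus a matching `file(GLOB ...)` command, and returns the expanded variables as the srcs list. Below is a minimal, hedged sketch of that mangling; the helper name is hypothetical, and it assumes `_expand_cmake_var` renders `${VAR}`-style references (the real logic lives in `BuildFileFunctions.glob`).

```python
def glob_pattern_to_cmake(pattern: str):
    """Bazel glob pattern -> (CMake variable name, file(GLOB ...) command)."""
    if "**" in pattern:
        # Mirrors the converter: recursive globs are rejected rather than emulated.
        raise NotImplementedError("Recursive globs not supported")
    # Bazel `*.mlir` -> CMake variable `_GLOB_X_MLIR`, as in the converter.
    var = "_GLOB_" + pattern.replace("*", "X").replace(".", "_").upper()
    cmd = (
        f"file(GLOB {var} LIST_DIRECTORIES false"
        f" RELATIVE ${{CMAKE_CURRENT_SOURCE_DIR}}"  # assumed expansion of _expand_cmake_var
        f" CONFIGURE_DEPENDS {pattern})"
    )
    return var, cmd


var, cmd = glob_pattern_to_cmake("*.mlir")
assert var == "_GLOB_X_MLIR"
print(cmd)  # file(GLOB _GLOB_X_MLIR LIST_DIRECTORIES false RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} CONFIGURE_DEPENDS *.mlir)
```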
diff --git a/build_tools/bazel_to_cmake/bazel_to_cmake_targets.py b/build_tools/bazel_to_cmake/bazel_to_cmake_targets.py
index ca6c385..14d56bc 100644
--- a/build_tools/bazel_to_cmake/bazel_to_cmake_targets.py
+++ b/build_tools/bazel_to_cmake/bazel_to_cmake_targets.py
@@ -9,230 +9,228 @@
 
 
 class TargetConverter:
+    def __init__(self, repo_map: Dict[str, str]):
+        self._explicit_target_mapping = {}
+        self._repo_map = repo_map
 
-  def __init__(self, repo_map: Dict[str, str]):
-    self._explicit_target_mapping = {}
-    self._repo_map = repo_map
+        iree_core_repo = self._repo_alias("@iree_core")
+        self._update_target_mappings(
+            {
+                # Internal utilities to emulate various binary/library options.
+                f"{iree_core_repo}//build_tools:default_linkopts": [],
+                f"{iree_core_repo}//build_tools:dl": ["${CMAKE_DL_LIBS}"],
+                f"{iree_core_repo}//compiler/src/iree/compiler/API:CAPI": [
+                    "IREECompilerCAPILib"
+                ],
+                # IREE llvm-external-projects
+                f"{iree_core_repo}//llvm-external-projects/iree-dialects:CAPI": [
+                    "IREEDialectsCAPI"
+                ],
+                # Disable all hard-coded codegen targets (they are expanded dynamically
+                # in CMake).
+                "@llvm-project//llvm:AArch64AsmParser": ["IREELLVMCPUTargetDeps"],
+                "@llvm-project//llvm:AArch64CodeGen": ["IREELLVMCPUTargetDeps"],
+                "@llvm-project//llvm:ARMAsmParser": ["IREELLVMCPUTargetDeps"],
+                "@llvm-project//llvm:ARMCodeGen": ["IREELLVMCPUTargetDeps"],
+                "@llvm-project//llvm:RISCVAsmParser": ["IREELLVMCPUTargetDeps"],
+                "@llvm-project//llvm:RISCVCodeGen": ["IREELLVMCPUTargetDeps"],
+                "@llvm-project//llvm:WebAssemblyAsmParser": ["IREELLVMCPUTargetDeps"],
+                "@llvm-project//llvm:WebAssemblyCodeGen": ["IREELLVMCPUTargetDeps"],
+                "@llvm-project//llvm:X86AsmParser": ["IREELLVMCPUTargetDeps"],
+                "@llvm-project//llvm:X86CodeGen": ["IREELLVMCPUTargetDeps"],
+                # Clang
+                "@llvm-project//clang": ["${IREE_CLANG_TARGET}"],
+                # LLD
+                "@llvm-project//lld": ["${IREE_LLD_TARGET}"],
+                "@llvm-project//lld:COFF": ["lldCOFF"],
+                "@llvm-project//lld:Common": ["lldCommon"],
+                "@llvm-project//lld:ELF": ["lldELF"],
+                "@llvm-project//lld:MachO": ["lldMachO"],
+                "@llvm-project//lld:Wasm": ["lldWasm"],
+                # LLVM
+                "@llvm-project//llvm:config": [],
+                "@llvm-project//llvm:IPO": ["LLVMipo"],
+                "@llvm-project//llvm:FileCheck": ["FileCheck"],
+                "@llvm-project//llvm:not": ["not"],
+                "@llvm-project//llvm:llvm-link": ["${IREE_LLVM_LINK_TARGET}"],
+                "@llvm-project//llvm:NVPTXUtilsAndDesc": [
+                    "LLVMNVPTXDesc",
+                ],
+                # MLIR
+                "@llvm-project//mlir:AllPassesAndDialects": ["MLIRAllDialects"],
+                "@llvm-project//mlir:CommonFolders": [""],
+                "@llvm-project//mlir:DialectUtils": [""],
+                "@llvm-project//mlir:GPUDialect": ["MLIRGPUDialect"],
+                "@llvm-project//mlir:GPUTransforms": ["MLIRGPUTransforms"],
+                "@llvm-project//mlir:LinalgStructuredOpsIncGen": [
+                    "MLIRLinalgStructuredOpsIncGenLib"
+                ],
+                "@llvm-project//mlir:ShapeTransforms": ["MLIRShapeOpsTransforms"],
+                "@llvm-project//mlir:ToLLVMIRTranslation": ["MLIRTargetLLVMIRExport"],
+                "@llvm-project//mlir:mlir-translate": ["mlir-translate"],
+                "@llvm-project//mlir:MlirLspServerLib": ["MLIRLspServerLib"],
+                "@llvm-project//mlir:MlirTableGenMain": ["MLIRTableGen"],
+                "@llvm-project//mlir:MlirOptLib": ["MLIROptLib"],
+                "@llvm-project//mlir:VectorOps": ["MLIRVector"],
+                # StableHLO.
+                "@stablehlo//:chlo_ops": [
+                    "ChloOps",
+                ],
+                "@stablehlo//:stablehlo_ops": [
+                    "StablehloOps",
+                ],
+                "@stablehlo//:broadcast_utils": [
+                    "StablehloBroadcastUtils",
+                ],
+                # NCCL
+                "@nccl//:headers": [
+                    "nccl::headers",
+                ],
+                # Torch-MLIR.
+                "@torch-mlir-dialects//:TorchMLIRTMTensorDialect": [
+                    "TorchMLIRTMTensorDialect"
+                ],
+                # Tracy.
+                "@tracy_client//:runtime": ["tracy_client::runtime"],
+                # Vulkan
+                "@vulkan_headers": ["Vulkan::Headers"],
+                # Misc single targets
+                "@com_google_benchmark//:benchmark": ["benchmark"],
+                "@com_github_dvidelabs_flatcc//:flatcc": ["flatcc"],
+                "@com_github_dvidelabs_flatcc//:parsing": ["flatcc::parsing"],
+                "@com_github_dvidelabs_flatcc//:runtime": ["flatcc::runtime"],
+                "@com_github_yaml_libyaml//:yaml": ["yaml"],
+                "@com_google_googletest//:gtest": ["gmock", "gtest"],
+                "@spirv_cross//:spirv_cross_lib": ["spirv-cross-msl"],
+                "@cpuinfo": ["${IREE_CPUINFO_TARGET}"],
+                "@vulkan_memory_allocator//:impl_header_only": [
+                    "vulkan_memory_allocator"
+                ],
+                "@webgpu_headers": [],
+            }
+        )
 
-    iree_core_repo = self._repo_alias("@iree_core")
-    self._update_target_mappings({
-        # Internal utilities to emulate various binary/library options.
-        f"{iree_core_repo}//build_tools:default_linkopts": [],
-        f"{iree_core_repo}//build_tools:dl": ["${CMAKE_DL_LIBS}"],
-        f"{iree_core_repo}//compiler/src/iree/compiler/API:CAPI": [
-            "IREECompilerCAPILib"
-        ],
+        self._initialize()
 
-        # IREE llvm-external-projects
-        f"{iree_core_repo}//llvm-external-projects/iree-dialects:CAPI": [
-            "IREEDialectsCAPI"
-        ],
+    def _initialize(self):
+        pass
 
-        # Disable all hard-coded codegen targets (they are expanded dynamically
-        # in CMake).
-        "@llvm-project//llvm:AArch64AsmParser": ["IREELLVMCPUTargetDeps"],
-        "@llvm-project//llvm:AArch64CodeGen": ["IREELLVMCPUTargetDeps"],
-        "@llvm-project//llvm:ARMAsmParser": ["IREELLVMCPUTargetDeps"],
-        "@llvm-project//llvm:ARMCodeGen": ["IREELLVMCPUTargetDeps"],
-        "@llvm-project//llvm:RISCVAsmParser": ["IREELLVMCPUTargetDeps"],
-        "@llvm-project//llvm:RISCVCodeGen": ["IREELLVMCPUTargetDeps"],
-        "@llvm-project//llvm:WebAssemblyAsmParser": ["IREELLVMCPUTargetDeps"],
-        "@llvm-project//llvm:WebAssemblyCodeGen": ["IREELLVMCPUTargetDeps"],
-        "@llvm-project//llvm:X86AsmParser": ["IREELLVMCPUTargetDeps"],
-        "@llvm-project//llvm:X86CodeGen": ["IREELLVMCPUTargetDeps"],
+    def _repo_alias(self, repo_name: str) -> str:
+        """Returns the prefix of a repo (i.e. '@iree_core') given the repo map."""
+        return self._repo_map.get(repo_name, repo_name)
 
-        # Clang
-        "@llvm-project//clang": ["${IREE_CLANG_TARGET}"],
+    def _update_target_mappings(self, mappings: Dict[str, List[str]]):
+        self._explicit_target_mapping.update(mappings)
 
-        # LLD
-        "@llvm-project//lld": ["${IREE_LLD_TARGET}"],
-        "@llvm-project//lld:COFF": ["lldCOFF"],
-        "@llvm-project//lld:Common": ["lldCommon"],
-        "@llvm-project//lld:ELF": ["lldELF"],
-        "@llvm-project//lld:MachO": ["lldMachO"],
-        "@llvm-project//lld:Wasm": ["lldWasm"],
+    def _convert_mlir_target(self, target):
+        # Default to a pattern substitution approach.
+        # Take "MLIR" and append the name part of the full target identifier, e.g.
+        #   "@llvm-project//mlir:IR"   -> "MLIRIR"
+        #   "@llvm-project//mlir:Pass" -> "MLIRPass"
+        # MLIR does not have header-only targets apart from the libraries. Here
+        # we redirect any request for a CAPI{Name}Headers to a target within IREE
+        # that sets this up.
+        label = target.rsplit(":")[-1]
+        if label.startswith("CAPI") and label.endswith("Headers"):
+            return [f"IREELLVMIncludeSetup"]
+        else:
+            return [f"MLIR{label}"]
 
-        # LLVM
-        "@llvm-project//llvm:config": [],
-        "@llvm-project//llvm:IPO": ["LLVMipo"],
-        "@llvm-project//llvm:FileCheck": ["FileCheck"],
-        "@llvm-project//llvm:not": ["not"],
-        "@llvm-project//llvm:llvm-link": ["${IREE_LLVM_LINK_TARGET}"],
-        "@llvm-project//llvm:NVPTXUtilsAndDesc": ["LLVMNVPTXDesc",],
+    def _convert_llvm_target(self, target):
+        # Default to a pattern substitution approach.
+        # Prepend "LLVM" to the Bazel target name.
+        #   "@llvm-project//llvm:AsmParser" -> "LLVMAsmParser"
+        #   "@llvm-project//llvm:Core" -> "LLVMCore"
+        return ["LLVM" + target.rsplit(":")[-1]]
 
-        # MLIR
-        "@llvm-project//mlir:AllPassesAndDialects": ["MLIRAllDialects"],
-        "@llvm-project//mlir:CommonFolders": [""],
-        "@llvm-project//mlir:DialectUtils": [""],
-        "@llvm-project//mlir:GPUDialect": ["MLIRGPUDialect"],
-        "@llvm-project//mlir:GPUTransforms": ["MLIRGPUTransforms"],
-        "@llvm-project//mlir:LinalgStructuredOpsIncGen": [
-            "MLIRLinalgStructuredOpsIncGenLib"
-        ],
-        "@llvm-project//mlir:ShapeTransforms": ["MLIRShapeOpsTransforms"],
-        "@llvm-project//mlir:ToLLVMIRTranslation": ["MLIRTargetLLVMIRExport"],
-        "@llvm-project//mlir:mlir-translate": ["mlir-translate"],
-        "@llvm-project//mlir:MlirLspServerLib": ["MLIRLspServerLib"],
-        "@llvm-project//mlir:MlirTableGenMain": ["MLIRTableGen"],
-        "@llvm-project//mlir:MlirOptLib": ["MLIROptLib"],
-        "@llvm-project//mlir:VectorOps": ["MLIRVector"],
+    def _convert_iree_cuda_target(self, target):
+        # Convert like:
+        #   @iree_cuda//:libdevice_embedded -> iree_cuda::libdevice_embedded
+        label = target.rsplit(":")[-1]
+        return [f"iree_cuda::{label}"]
 
-        # StableHLO.
-        "@stablehlo//:chlo_ops": ["ChloOps",],
-        "@stablehlo//:stablehlo_ops": ["StablehloOps",],
-        "@stablehlo//:broadcast_utils": ["StablehloBroadcastUtils",],
+    def _convert_iree_dialects_target(self, target):
+        # Just take the target name as-is.
+        return [target.rsplit(":")[-1]]
 
-        # NCCL
-        "@nccl//:headers": ["nccl::headers",],
+    def _convert_to_cmake_path(self, bazel_path_fragment: str) -> str:
+        cmake_path = bazel_path_fragment
+        # Bazel `//iree/base`     -> CMake `iree::base`
+        # Bazel `//iree/base:foo` -> CMake `iree::base::foo`
+        if cmake_path.startswith("//"):
+            cmake_path = cmake_path[len("//") :]
+        cmake_path = cmake_path.replace(":", "::")  # iree/base::foo or ::foo
+        cmake_path = cmake_path.replace("/", "::")  # iree::base
+        return cmake_path
 
-        # Torch-MLIR.
-        "@torch-mlir-dialects//:TorchMLIRTMTensorDialect": [
-            "TorchMLIRTMTensorDialect"
-        ],
+    def convert_target(self, target):
+        """Converts a Bazel target to a list of CMake targets.
 
-        # Tracy.
-        "@tracy_client//:runtime": ["tracy_client::runtime"],
+        IREE targets are expected to follow a standard form between Bazel and CMake
+        that facilitates conversion. External targets *may* have their own patterns,
+        or they may be purely special cases.
 
-        # Vulkan
-        "@vulkan_headers": ["Vulkan::Headers"],
-        # Misc single targets
-        "@com_google_benchmark//:benchmark": ["benchmark"],
-        "@com_github_dvidelabs_flatcc//:flatcc": ["flatcc"],
-        "@com_github_dvidelabs_flatcc//:parsing": ["flatcc::parsing"],
-        "@com_github_dvidelabs_flatcc//:runtime": ["flatcc::runtime"],
-        "@com_github_yaml_libyaml//:yaml": ["yaml"],
-        "@com_google_googletest//:gtest": ["gmock", "gtest"],
-        "@spirv_cross//:spirv_cross_lib": ["spirv-cross-msl"],
-        "@cpuinfo": ["${IREE_CPUINFO_TARGET}"],
-        "@vulkan_memory_allocator//:impl_header_only": [
-            "vulkan_memory_allocator"
-        ],
-        "@webgpu_headers": [],
-    })
+        Multiple targets in Bazel may map to a single target in CMake, and a Bazel
+        target may map to multiple CMake targets.
 
-    self._initialize()
+        Returns:
+          A list of converted targets if it was successfully converted.
 
-  def _initialize(self):
-    pass
+        Raises:
+          KeyError: No conversion was found for the target.
+        """
+        iree_core_repo = self._repo_alias("@iree_core")
+        if target in self._explicit_target_mapping:
+            return self._explicit_target_mapping[target]
+        if target.startswith("@llvm-project//llvm"):
+            return self._convert_llvm_target(target)
+        if target.startswith("@llvm-project//mlir"):
+            return self._convert_mlir_target(target)
+        if target.startswith("@iree_cuda//"):
+            return self._convert_iree_cuda_target(target)
+        if target.startswith(f"{iree_core_repo}//"):
+            return self._convert_iree_core_target(target)
+        if target.startswith("@"):
+            raise KeyError(f"No conversion found for target '{target}'")
 
-  def _repo_alias(self, repo_name: str) -> str:
-    """Returns the prefix of a repo (i.e. '@iree_core') given the repo map."""
-    return self._repo_map.get(repo_name, repo_name)
+        # Pass through package-relative targets
+        #   :target_name
+        #   file_name.txt
+        if target.startswith(":") or (":" not in target and not target.startswith("/")):
+            return [self._convert_to_cmake_path(target)]
 
-  def _update_target_mappings(self, mappings: Dict[str, List[str]]):
-    self._explicit_target_mapping.update(mappings)
+        return self._convert_unmatched_target(target)
 
-  def _convert_mlir_target(self, target):
-    # Default to a pattern substitution approach.
-    # Take "MLIR" and append the name part of the full target identifier, e.g.
-    #   "@llvm-project//mlir:IR"   -> "MLIRIR"
-    #   "@llvm-project//mlir:Pass" -> "MLIRPass"
-    # MLIR does not have header-only targets apart from the libraries. Here
-    # we redirect any request for a CAPI{Name}Headers to a target within IREE
-    # that sets this up.
-    label = target.rsplit(":")[-1]
-    if label.startswith("CAPI") and label.endswith("Headers"):
-      return [f"IREELLVMIncludeSetup"]
-    else:
-      return [f"MLIR{label}"]
+    def _convert_iree_core_target(self, target):
+        iree_core_repo = self._repo_alias("@iree_core")
+        if target.startswith(f"{iree_core_repo}//llvm-external-projects/iree-dialects"):
+            return self._convert_iree_dialects_target(target)
 
-  def _convert_llvm_target(self, target):
-    # Default to a pattern substitution approach.
-    # Prepend "LLVM" to the Bazel target name.
-    #   "@llvm-project//llvm:AsmParser" -> "LLVMAsmParser"
-    #   "@llvm-project//llvm:Core" -> "LLVMCore"
-    return ["LLVM" + target.rsplit(":")[-1]]
+        # IREE root paths map to package names based on explicit rules.
+        #   * src/iree/ directories (compiler/src/iree/ and runtime/src/iree/)
+        #     create their own root paths by trimming down to just "iree"
+        #   * tools/ uses an empty root, for binary target names like "iree-compile"
+        #   * other top level directories add back an 'iree' prefix
+        # If changing these, make the corresponding change in iree_macros.cmake
+        # (iree_package_ns function).
 
-  def _convert_iree_cuda_target(self, target):
-    # Convert like:
-    #   @iree_cuda//:libdevice_embedded -> iree_cuda::libdevice_embedded
-    label = target.rsplit(":")[-1]
-    return [f"iree_cuda::{label}"]
+        # Map //compiler/src/iree/(.*) -> iree::\1 (i.e. iree::compiler::\1)
+        m = re.match(f"^{iree_core_repo}//compiler/src/iree/(.+)", target)
+        if m:
+            return ["iree::" + self._convert_to_cmake_path(m.group(1))]
 
-  def _convert_iree_dialects_target(self, target):
-    # Just take the target name as-is.
-    return [target.rsplit(":")[-1]]
+        # Map //runtime/src/iree/(.*) -> iree::\1
+        m = re.match(f"^{iree_core_repo}//runtime/src/iree/(.+)", target)
+        if m:
+            return ["iree::" + self._convert_to_cmake_path(m.group(1))]
 
-  def _convert_to_cmake_path(self, bazel_path_fragment: str) -> str:
-    cmake_path = bazel_path_fragment
-    # Bazel `//iree/base`     -> CMake `iree::base`
-    # Bazel `//iree/base:foo` -> CMake `iree::base::foo`
-    if cmake_path.startswith("//"):
-      cmake_path = cmake_path[len("//"):]
-    cmake_path = cmake_path.replace(":", "::")  # iree/base::foo or ::foo
-    cmake_path = cmake_path.replace("/", "::")  # iree::base
-    return cmake_path
+        # Map //tools/(.*) -> \1
+        m = re.match(f"^{iree_core_repo}//tools[/|:](.+)", target)
+        if m:
+            return [self._convert_to_cmake_path(m.group(1))]
 
-  def convert_target(self, target):
-    """Converts a Bazel target to a list of CMake targets.
+        return self._convert_unmatched_target(target)
 
-    IREE targets are expected to follow a standard form between Bazel and CMake
-    that facilitates conversion. External targets *may* have their own patterns,
-    or they may be purely special cases.
-
-    Multiple target in Bazel may map to a single target in CMake and a Bazel
-    target may map to multiple CMake targets.
-
-    Returns:
-      A list of converted targets if it was successfully converted.
-
-    Raises:
-      KeyError: No conversion was found for the target.
-    """
-    iree_core_repo = self._repo_alias("@iree_core")
-    if target in self._explicit_target_mapping:
-      return self._explicit_target_mapping[target]
-    if target.startswith("@llvm-project//llvm"):
-      return self._convert_llvm_target(target)
-    if target.startswith("@llvm-project//mlir"):
-      return self._convert_mlir_target(target)
-    if target.startswith("@iree_cuda//"):
-      return self._convert_iree_cuda_target(target)
-    if target.startswith(f"{iree_core_repo}//"):
-      return self._convert_iree_core_target(target)
-    if target.startswith("@"):
-      raise KeyError(f"No conversion found for target '{target}'")
-
-    # Pass through package-relative targets
-    #   :target_name
-    #   file_name.txt
-    if target.startswith(":") or (":" not in target and
-                                  not target.startswith("/")):
-      return [self._convert_to_cmake_path(target)]
-
-    return self._convert_unmatched_target(target)
-
-  def _convert_iree_core_target(self, target):
-    iree_core_repo = self._repo_alias("@iree_core")
-    if target.startswith(
-        f"{iree_core_repo}//llvm-external-projects/iree-dialects"):
-      return self._convert_iree_dialects_target(target)
-
-    # IREE root paths map to package names based on explicit rules.
-    #   * src/iree/ directories (compiler/src/iree/ and runtime/src/iree/)
-    #     creating their own root paths by trimming down to just "iree"
-    #   * tools/ uses an empty root, for binary targets names like "iree-compile"
-    #   * other top level directories add back an 'iree' prefix
-    # If changing these, make the corresponding change in iree_macros.cmake
-    # (iree_package_ns function).
-
-    # Map //compiler/src/iree/(.*) -> iree::\1 (i.e. iree::compiler::\1)
-    m = re.match(f"^{iree_core_repo}//compiler/src/iree/(.+)", target)
-    if m:
-      return ["iree::" + self._convert_to_cmake_path(m.group(1))]
-
-    # Map //runtime/src/iree/(.*) -> iree::\1
-    m = re.match(f"^{iree_core_repo}//runtime/src/iree/(.+)", target)
-    if m:
-      return ["iree::" + self._convert_to_cmake_path(m.group(1))]
-
-    # Map //tools/(.*) -> \1
-    m = re.match(f"^{iree_core_repo}//tools[/|:](.+)", target)
-    if m:
-      return [self._convert_to_cmake_path(m.group(1))]
-
-    return self._convert_unmatched_target(target)
-
-  def _convert_unmatched_target(self, target: str) -> str:
-    """Converts unmatched targets in a repo specific way."""
-    raise ValueError(f"No target matching for {target}")
+    def _convert_unmatched_target(self, target: str) -> str:
+        """Converts unmatched targets in a repo specific way."""
+        raise ValueError(f"No target matching for {target}")
diff --git a/build_tools/benchmarks/benchmark_helper.py b/build_tools/benchmarks/benchmark_helper.py
index 847c94e..5a5d377 100755
--- a/build_tools/benchmarks/benchmark_helper.py
+++ b/build_tools/benchmarks/benchmark_helper.py
@@ -27,159 +27,179 @@
 
 
 def _convert_to_cmd_string(cmds: Sequence[str]) -> str:
-  if os.name == "nt":
-    # list2cmdline is an undocumented method for Windows command lines. Python
-    # doesn't provide an official method for quoting Windows command lines and
-    # the correct implementation is slightly non-trivial. Use the undocumented
-    # method for now and can be rewritten with our own implementation later.
-    # See https://learn.microsoft.com/en-us/archive/blogs/twistylittlepassagesallalike/everyone-quotes-command-line-arguments-the-wrong-way
-    return subprocess.list2cmdline(cmds)
+    if os.name == "nt":
+        # list2cmdline is an undocumented method for Windows command lines. Python
+        # doesn't provide an official method for quoting Windows command lines and
+        # the correct implementation is slightly non-trivial. Use the undocumented
+        # method for now and can be rewritten with our own implementation later.
+        # See https://learn.microsoft.com/en-us/archive/blogs/twistylittlepassagesallalike/everyone-quotes-command-line-arguments-the-wrong-way
+        return subprocess.list2cmdline(cmds)
 
-  return " ".join(shlex.quote(cmd) for cmd in cmds)
+    return " ".join(shlex.quote(cmd) for cmd in cmds)
 
 
 def _dump_cmds_of_generation_config(
     gen_config: iree_definitions.ModuleGenerationConfig,
-    root_path: pathlib.PurePath = pathlib.PurePath()):
-
-  imported_model = gen_config.imported_model
-  imported_model_path = iree_artifacts.get_imported_model_path(
-      imported_model=imported_model, root_path=root_path)
-  module_dir_path = iree_artifacts.get_module_dir_path(
-      module_generation_config=gen_config, root_path=root_path)
-  module_path = module_dir_path / iree_artifacts.MODULE_FILENAME
-  compile_cmds = [
-      IREE_COMPILER_NAME,
-      str(imported_model_path), "-o",
-      str(module_path)
-  ]
-  compile_cmds += gen_config.materialize_compile_flags(
-      module_dir_path=module_dir_path)
-  compile_cmd_str = _convert_to_cmd_string(compile_cmds)
-
-  if imported_model.import_config.tool == iree_definitions.ImportTool.NONE:
-    import_cmd_str = "# (Source model is already in MLIR)"
-  else:
-    source_model_path = model_artifacts.get_model_path(
-        model=imported_model.model, root_path=root_path)
-    import_cmds = [
-        imported_model.import_config.tool.value,
-        str(source_model_path), "-o",
-        str(imported_model_path)
+    root_path: pathlib.PurePath = pathlib.PurePath(),
+):
+    imported_model = gen_config.imported_model
+    imported_model_path = iree_artifacts.get_imported_model_path(
+        imported_model=imported_model, root_path=root_path
+    )
+    module_dir_path = iree_artifacts.get_module_dir_path(
+        module_generation_config=gen_config, root_path=root_path
+    )
+    module_path = module_dir_path / iree_artifacts.MODULE_FILENAME
+    compile_cmds = [
+        IREE_COMPILER_NAME,
+        str(imported_model_path),
+        "-o",
+        str(module_path),
     ]
-    import_cmds += imported_model.import_config.materialize_import_flags(
-        model=imported_model.model)
-    import_cmd_str = _convert_to_cmd_string(import_cmds)
+    compile_cmds += gen_config.materialize_compile_flags(
+        module_dir_path=module_dir_path
+    )
+    compile_cmd_str = _convert_to_cmd_string(compile_cmds)
 
-  # Insert a blank line after each command to help read with line wrap.
-  return [
-      "Compile Module:", compile_cmd_str, "", "Import Model:", import_cmd_str,
-      ""
-  ]
+    if imported_model.import_config.tool == iree_definitions.ImportTool.NONE:
+        import_cmd_str = "# (Source model is already in MLIR)"
+    else:
+        source_model_path = model_artifacts.get_model_path(
+            model=imported_model.model, root_path=root_path
+        )
+        import_cmds = [
+            imported_model.import_config.tool.value,
+            str(source_model_path),
+            "-o",
+            str(imported_model_path),
+        ]
+        import_cmds += imported_model.import_config.materialize_import_flags(
+            model=imported_model.model
+        )
+        import_cmd_str = _convert_to_cmd_string(import_cmds)
+
+    # Insert a blank line after each command to help read with line wrap.
+    return ["Compile Module:", compile_cmd_str, "", "Import Model:", import_cmd_str, ""]
 
 
 def _dump_cmds_from_run_config(
     run_config: iree_definitions.E2EModelRunConfig,
-    root_path: pathlib.PurePath = pathlib.PurePath()):
+    root_path: pathlib.PurePath = pathlib.PurePath(),
+):
+    gen_config = run_config.module_generation_config
+    module_path = (
+        iree_artifacts.get_module_dir_path(
+            module_generation_config=gen_config, root_path=root_path
+        )
+        / iree_artifacts.MODULE_FILENAME
+    )
 
-  gen_config = run_config.module_generation_config
-  module_path = iree_artifacts.get_module_dir_path(
-      module_generation_config=gen_config,
-      root_path=root_path) / iree_artifacts.MODULE_FILENAME
-
-  run_cmds = [run_config.tool.value, f"--module={module_path}"]
-  run_cmds += run_config.materialize_run_flags()
-  # Insert a blank line after the command to help read with line wrap.
-  lines = ["Run Module:", _convert_to_cmd_string(run_cmds), ""]
-  lines += _dump_cmds_of_generation_config(gen_config=gen_config,
-                                           root_path=root_path)
-  return lines
+    run_cmds = [run_config.tool.value, f"--module={module_path}"]
+    run_cmds += run_config.materialize_run_flags()
+    # Insert a blank line after the command to help read with line wrap.
+    lines = ["Run Module:", _convert_to_cmd_string(run_cmds), ""]
+    lines += _dump_cmds_of_generation_config(gen_config=gen_config, root_path=root_path)
+    return lines
 
 
-def _dump_cmds_handler(e2e_test_artifacts_dir: pathlib.Path,
-                       execution_benchmark_config: Optional[pathlib.Path],
-                       compilation_benchmark_config: Optional[pathlib.Path],
-                       benchmark_id: Optional[str], **_unused_args):
-  lines = []
+def _dump_cmds_handler(
+    e2e_test_artifacts_dir: pathlib.Path,
+    execution_benchmark_config: Optional[pathlib.Path],
+    compilation_benchmark_config: Optional[pathlib.Path],
+    benchmark_id: Optional[str],
+    **_unused_args,
+):
+    lines = []
 
-  if execution_benchmark_config is not None:
-    benchmark_groups = json.loads(execution_benchmark_config.read_text())
-    for target_device, benchmark_group in benchmark_groups.items():
-      run_configs = serialization.unpack_and_deserialize(
-          data=benchmark_group["run_configs"],
-          root_type=List[iree_definitions.E2EModelRunConfig])
-      for run_config in run_configs:
-        if benchmark_id is not None and benchmark_id != run_config.composite_id:
-          continue
+    if execution_benchmark_config is not None:
+        benchmark_groups = json.loads(execution_benchmark_config.read_text())
+        for target_device, benchmark_group in benchmark_groups.items():
+            run_configs = serialization.unpack_and_deserialize(
+                data=benchmark_group["run_configs"],
+                root_type=List[iree_definitions.E2EModelRunConfig],
+            )
+            for run_config in run_configs:
+                if benchmark_id is not None and benchmark_id != run_config.composite_id:
+                    continue
 
-        lines.append("################")
-        lines.append("")
-        lines.append(f"Execution Benchmark ID: {run_config.composite_id}")
-        lines.append(f"Name: {run_config}")
-        lines.append(f"Target Device: {target_device}")
-        lines.append("")
-        lines += _dump_cmds_from_run_config(run_config=run_config,
-                                            root_path=e2e_test_artifacts_dir)
+                lines.append("################")
+                lines.append("")
+                lines.append(f"Execution Benchmark ID: {run_config.composite_id}")
+                lines.append(f"Name: {run_config}")
+                lines.append(f"Target Device: {target_device}")
+                lines.append("")
+                lines += _dump_cmds_from_run_config(
+                    run_config=run_config, root_path=e2e_test_artifacts_dir
+                )
 
-  if compilation_benchmark_config is not None:
-    benchmark_config = json.loads(compilation_benchmark_config.read_text())
-    gen_configs = serialization.unpack_and_deserialize(
-        data=benchmark_config["generation_configs"],
-        root_type=List[iree_definitions.ModuleGenerationConfig])
-    for gen_config in gen_configs:
-      if benchmark_id is not None and benchmark_id != gen_config.composite_id:
-        continue
+    if compilation_benchmark_config is not None:
+        benchmark_config = json.loads(compilation_benchmark_config.read_text())
+        gen_configs = serialization.unpack_and_deserialize(
+            data=benchmark_config["generation_configs"],
+            root_type=List[iree_definitions.ModuleGenerationConfig],
+        )
+        for gen_config in gen_configs:
+            if benchmark_id is not None and benchmark_id != gen_config.composite_id:
+                continue
 
-      lines.append("################")
-      lines.append("")
-      lines.append(f"Compilation Benchmark ID: {gen_config.composite_id}")
-      lines.append(f"Name: {gen_config}")
-      lines.append("")
-      lines += _dump_cmds_of_generation_config(gen_config=gen_config,
-                                               root_path=e2e_test_artifacts_dir)
+            lines.append("################")
+            lines.append("")
+            lines.append(f"Compilation Benchmark ID: {gen_config.composite_id}")
+            lines.append(f"Name: {gen_config}")
+            lines.append("")
+            lines += _dump_cmds_of_generation_config(
+                gen_config=gen_config, root_path=e2e_test_artifacts_dir
+            )
 
-  print(*lines, sep="\n")
+    print(*lines, sep="\n")
 
 
 def _parse_arguments() -> argparse.Namespace:
-  parser = argparse.ArgumentParser(
-      description=
-      "Miscellaneous tool to help work with benchmark suite and benchmark CI.")
+    parser = argparse.ArgumentParser(
+        description="Miscellaneous tool to help work with benchmark suite and benchmark CI."
+    )
 
-  subparser = parser.add_subparsers(required=True, title="operation")
-  dump_cmds_parser = subparser.add_parser(
-      "dump-cmds",
-      help="Dump the commands to compile and run benchmarks manually.")
-  dump_cmds_parser.add_argument(
-      "--e2e_test_artifacts_dir",
-      type=pathlib.PurePath,
-      default=pathlib.Path(),
-      help="E2E test artifacts root path used in the outputs of artifact paths")
-  dump_cmds_parser.add_argument("--benchmark_id",
-                                type=str,
-                                help="Only dump the benchmark with this id")
-  dump_cmds_parser.add_argument(
-      "--execution_benchmark_config",
-      type=pathlib.Path,
-      help="Config file exported from export_benchmark_config.py execution")
-  dump_cmds_parser.add_argument(
-      "--compilation_benchmark_config",
-      type=pathlib.Path,
-      help="Config file exported from export_benchmark_config.py compilation")
-  dump_cmds_parser.set_defaults(handler=_dump_cmds_handler)
+    subparser = parser.add_subparsers(required=True, title="operation")
+    dump_cmds_parser = subparser.add_parser(
+        "dump-cmds", help="Dump the commands to compile and run benchmarks manually."
+    )
+    dump_cmds_parser.add_argument(
+        "--e2e_test_artifacts_dir",
+        type=pathlib.PurePath,
+        default=pathlib.Path(),
+        help="E2E test artifacts root path used in the outputs of artifact paths",
+    )
+    dump_cmds_parser.add_argument(
+        "--benchmark_id", type=str, help="Only dump the benchmark with this id"
+    )
+    dump_cmds_parser.add_argument(
+        "--execution_benchmark_config",
+        type=pathlib.Path,
+        help="Config file exported from export_benchmark_config.py execution",
+    )
+    dump_cmds_parser.add_argument(
+        "--compilation_benchmark_config",
+        type=pathlib.Path,
+        help="Config file exported from export_benchmark_config.py compilation",
+    )
+    dump_cmds_parser.set_defaults(handler=_dump_cmds_handler)
 
-  args = parser.parse_args()
-  if (args.execution_benchmark_config is None and
-      args.compilation_benchmark_config is None):
-    parser.error("At least one of --execution_benchmark_config or "
-                 "--compilation_benchmark_config must be set.")
+    args = parser.parse_args()
+    if (
+        args.execution_benchmark_config is None
+        and args.compilation_benchmark_config is None
+    ):
+        parser.error(
+            "At least one of --execution_benchmark_config or "
+            "--compilation_benchmark_config must be set."
+        )
 
-  return args
+    return args
 
 
 def main(args: argparse.Namespace):
-  args.handler(**vars(args))
+    args.handler(**vars(args))
 
 
 if __name__ == "__main__":
-  main(_parse_arguments())
+    main(_parse_arguments())
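
`_convert_to_cmd_string` above picks a quoting strategy per platform: `shlex.quote` for POSIX shells and the undocumented `subprocess.list2cmdline` for Windows. A small sketch of the difference, using a made-up argument list:

```python
import shlex
import subprocess

cmds = ["iree-compile", "my model.mlir", "-o", "out.vmfb"]  # hypothetical args

# POSIX path: each argument is quoted individually for a Bourne-style shell.
print(" ".join(shlex.quote(c) for c in cmds))
# iree-compile 'my model.mlir' -o out.vmfb

# Windows path: list2cmdline applies MSVC-style double-quoting and escaping.
print(subprocess.list2cmdline(cmds))
# iree-compile "my model.mlir" -o out.vmfb
```
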
diff --git a/build_tools/benchmarks/collect_compilation_statistics.py b/build_tools/benchmarks/collect_compilation_statistics.py
index 19c80e9..057368d 100755
--- a/build_tools/benchmarks/collect_compilation_statistics.py
+++ b/build_tools/benchmarks/collect_compilation_statistics.py
@@ -26,7 +26,13 @@
 from typing import BinaryIO, Dict, List, Optional, TextIO
 
 from common import benchmark_definition
-from common.benchmark_definition import CompilationInfo, CompilationResults, CompilationStatistics, ModuleComponentSizes, get_git_commit_hash
+from common.benchmark_definition import (
+    CompilationInfo,
+    CompilationResults,
+    CompilationStatistics,
+    ModuleComponentSizes,
+    get_git_commit_hash,
+)
 from common import benchmark_config
 from e2e_test_artifacts import iree_artifacts
 from e2e_test_framework import serialization
@@ -52,208 +58,226 @@
 
 @dataclass(frozen=True)
 class ModuleInfo(object):
-  module_path: pathlib.Path
-  stream_stats_path: pathlib.Path
+    module_path: pathlib.Path
+    stream_stats_path: pathlib.Path
 
 
 def match_module_cmake_target(module_path: pathlib.PurePath) -> Optional[str]:
-  if module_path.match(f"{E2E_TEST_ARTIFACTS_REL_PATH}/iree_*/"
-                       f"{iree_artifacts.MODULE_FILENAME}"):
-    # <e2e test artifacts dir>/iree_<module dir>/<module filename>
-    path_parts = module_path.parts[-3:]
-    # Join to get the CMake target name. This is *not* a filesystem path, so we
-    # don't want \ separators on Windows that we would get with os.path.join().
-    return '/'.join(path_parts)
+    if module_path.match(
+        f"{E2E_TEST_ARTIFACTS_REL_PATH}/iree_*/" f"{iree_artifacts.MODULE_FILENAME}"
+    ):
+        # <e2e test artifacts dir>/iree_<module dir>/<module filename>
+        path_parts = module_path.parts[-3:]
+        # Join to get the CMake target name. This is *not* a filesystem path, so we
+        # don't want \ separators on Windows that we would get with os.path.join().
+        return "/".join(path_parts)
 
-  return None
+    return None
 
 
 def parse_compilation_time_from_ninja_log(log: TextIO) -> Dict[str, int]:
-  """Retrieve the compilation time (ms) from the Ninja build log.
+    """Retrieve the compilation time (ms) from the Ninja build log.
 
-  Returns:
-    Map of target name and compilation time in ms.
-  """
+    Returns:
+      Map of target name and compilation time in ms.
+    """
 
-  target_build_time_map = {}
-  header = log.readline()
-  if NINJA_LOG_HEADER not in header:
-    raise NotImplementedError(f"Unsupported ninja log version: {header}")
+    target_build_time_map = {}
+    header = log.readline()
+    if NINJA_LOG_HEADER not in header:
+        raise NotImplementedError(f"Unsupported ninja log version: {header}")
 
-  for line in log:
-    start_time, end_time, _, target, _ = line.strip().split("\t")
-    cmake_target = match_module_cmake_target(pathlib.PurePath(target))
-    if cmake_target is None:
-      continue
+    for line in log:
+        start_time, end_time, _, target, _ = line.strip().split("\t")
+        cmake_target = match_module_cmake_target(pathlib.PurePath(target))
+        if cmake_target is None:
+            continue
 
-    start_time = int(start_time)
-    end_time = int(end_time)
-    target_build_time_map[cmake_target] = end_time - start_time
+        start_time = int(start_time)
+        end_time = int(end_time)
+        target_build_time_map[cmake_target] = end_time - start_time
 
-  return target_build_time_map
+    return target_build_time_map
 
 
-def get_module_component_info(module: BinaryIO,
-                              module_file_bytes: int) -> ModuleComponentSizes:
-  with zipfile.ZipFile(module) as module_zipfile:
-    size_map = dict(
-        (info.filename, info.file_size) for info in module_zipfile.infolist())
+def get_module_component_info(
+    module: BinaryIO, module_file_bytes: int
+) -> ModuleComponentSizes:
+    with zipfile.ZipFile(module) as module_zipfile:
+        size_map = dict(
+            (info.filename, info.file_size) for info in module_zipfile.infolist()
+        )
 
-  identified_names = set()
-  if VM_COMPONENT_NAME in size_map:
-    vm_component_bytes = size_map[VM_COMPONENT_NAME]
-    identified_names.add(VM_COMPONENT_NAME)
-  else:
-    vm_component_bytes = 0
+    identified_names = set()
+    if VM_COMPONENT_NAME in size_map:
+        vm_component_bytes = size_map[VM_COMPONENT_NAME]
+        identified_names.add(VM_COMPONENT_NAME)
+    else:
+        vm_component_bytes = 0
 
-  if CONST_COMPONENT_NAME in size_map:
-    const_component_bytes = size_map[CONST_COMPONENT_NAME]
-    identified_names.add(CONST_COMPONENT_NAME)
-  else:
-    const_component_bytes = 0
+    if CONST_COMPONENT_NAME in size_map:
+        const_component_bytes = size_map[CONST_COMPONENT_NAME]
+        identified_names.add(CONST_COMPONENT_NAME)
+    else:
+        const_component_bytes = 0
 
-  total_dispatch_component_bytes = 0
-  for filename, size in size_map.items():
-    for pattern in DISPATCH_COMPONENT_PATTERNS:
-      if re.match(pattern, filename):
-        total_dispatch_component_bytes += size
-        identified_names.add(filename)
-        break
+    total_dispatch_component_bytes = 0
+    for filename, size in size_map.items():
+        for pattern in DISPATCH_COMPONENT_PATTERNS:
+            if re.match(pattern, filename):
+                total_dispatch_component_bytes += size
+                identified_names.add(filename)
+                break
 
-  if identified_names != set(size_map.keys()):
-    raise RuntimeError(
-        f"Unrecognized components in the module: {size_map.keys()}.")
+    if identified_names != set(size_map.keys()):
+        raise RuntimeError(f"Unrecognized components in the module: {size_map.keys()}.")
 
-  return ModuleComponentSizes(
-      file_bytes=module_file_bytes,
-      vm_component_bytes=vm_component_bytes,
-      const_component_bytes=const_component_bytes,
-      total_dispatch_component_bytes=total_dispatch_component_bytes)
+    return ModuleComponentSizes(
+        file_bytes=module_file_bytes,
+        vm_component_bytes=vm_component_bytes,
+        const_component_bytes=const_component_bytes,
+        total_dispatch_component_bytes=total_dispatch_component_bytes,
+    )
 
 
 def get_module_map_from_compilation_benchmark_config(
-    compilation_benchmark_config_data: TextIO,
-    e2e_test_artifacts_dir: pathlib.PurePath
+    compilation_benchmark_config_data: TextIO, e2e_test_artifacts_dir: pathlib.PurePath
 ) -> Dict[CompilationInfo, ModuleInfo]:
-  benchmark_config = json.load(compilation_benchmark_config_data)
-  gen_configs = serialization.unpack_and_deserialize(
-      data=benchmark_config["generation_configs"],
-      root_type=List[iree_definitions.ModuleGenerationConfig])
-  module_map = {}
-  for gen_config in gen_configs:
-    model = gen_config.imported_model.model
-    compile_config = gen_config.compile_config
-    target_archs = []
-    for compile_target in compile_config.compile_targets:
-      arch = compile_target.target_architecture
-      target_archs.append(
-          (f"{arch.type.value}-{arch.architecture}-{arch.microarchitecture}-"
-           f"{compile_target.target_abi.value}"))
-    compilation_info = CompilationInfo(
-        name=gen_config.name,
-        model_name=model.name,
-        model_tags=tuple(model.tags),
-        model_source=model.source_type.value,
-        target_arch=f"[{','.join(target_archs)}]",
-        compile_tags=tuple(compile_config.tags),
-        gen_config_id=gen_config.composite_id)
-    module_dir_path = pathlib.Path(
-        iree_artifacts.get_module_dir_path(module_generation_config=gen_config,
-                                           root_path=e2e_test_artifacts_dir))
-    module_path = module_dir_path / iree_artifacts.MODULE_FILENAME
-    stream_stats_path = (module_dir_path /
-                         iree_artifacts.SCHEDULING_STATS_FILENAME)
-    module_map[compilation_info] = ModuleInfo(
-        module_path=module_path, stream_stats_path=stream_stats_path)
+    benchmark_config = json.load(compilation_benchmark_config_data)
+    gen_configs = serialization.unpack_and_deserialize(
+        data=benchmark_config["generation_configs"],
+        root_type=List[iree_definitions.ModuleGenerationConfig],
+    )
+    module_map = {}
+    for gen_config in gen_configs:
+        model = gen_config.imported_model.model
+        compile_config = gen_config.compile_config
+        target_archs = []
+        for compile_target in compile_config.compile_targets:
+            arch = compile_target.target_architecture
+            target_archs.append(
+                (
+                    f"{arch.type.value}-{arch.architecture}-{arch.microarchitecture}-"
+                    f"{compile_target.target_abi.value}"
+                )
+            )
+        compilation_info = CompilationInfo(
+            name=gen_config.name,
+            model_name=model.name,
+            model_tags=tuple(model.tags),
+            model_source=model.source_type.value,
+            target_arch=f"[{','.join(target_archs)}]",
+            compile_tags=tuple(compile_config.tags),
+            gen_config_id=gen_config.composite_id,
+        )
+        module_dir_path = pathlib.Path(
+            iree_artifacts.get_module_dir_path(
+                module_generation_config=gen_config, root_path=e2e_test_artifacts_dir
+            )
+        )
+        module_path = module_dir_path / iree_artifacts.MODULE_FILENAME
+        stream_stats_path = module_dir_path / iree_artifacts.SCHEDULING_STATS_FILENAME
+        module_map[compilation_info] = ModuleInfo(
+            module_path=module_path, stream_stats_path=stream_stats_path
+        )
 
-  return module_map
+    return module_map
 
 
 def _check_dir_path(path_str: str) -> pathlib.Path:
-  path = pathlib.Path(path_str)
-  if not path.is_dir():
-    raise argparse.ArgumentTypeError(f"{path} is not a directory.")
-  return path
+    path = pathlib.Path(path_str)
+    if not path.is_dir():
+        raise argparse.ArgumentTypeError(f"{path} is not a directory.")
+    return path
 
 
 def _check_file_path(path_str: str) -> pathlib.Path:
-  path = pathlib.Path(path_str)
-  if not path.is_file():
-    raise argparse.ArgumentTypeError(f"{path} is not a file.")
-  return path
+    path = pathlib.Path(path_str)
+    if not path.is_file():
+        raise argparse.ArgumentTypeError(f"{path} is not a file.")
+    return path
 
 
 def _parse_arguments():
-  """Returns an argument parser with common options."""
+    """Returns an argument parser with common options."""
 
-  parser = argparse.ArgumentParser(
-      description="Collect compilation statistics from benchmark suites.")
-  parser.add_argument(
-      "--compilation_benchmark_config",
-      type=_check_file_path,
-      required=True,
-      help="Exported compilation benchmark config of e2e test artifacts.")
-  parser.add_argument("--build_log",
-                      type=_check_file_path,
-                      required=True,
-                      help="Path to the ninja build log.")
-  parser.add_argument("--e2e_test_artifacts_dir",
-                      type=_check_dir_path,
-                      required=True,
-                      help="Path to the e2e test artifacts directory.")
-  parser.add_argument("--output",
-                      type=pathlib.Path,
-                      help="Path to output JSON file.")
+    parser = argparse.ArgumentParser(
+        description="Collect compilation statistics from benchmark suites."
+    )
+    parser.add_argument(
+        "--compilation_benchmark_config",
+        type=_check_file_path,
+        required=True,
+        help="Exported compilation benchmark config of e2e test artifacts.",
+    )
+    parser.add_argument(
+        "--build_log",
+        type=_check_file_path,
+        required=True,
+        help="Path to the ninja build log.",
+    )
+    parser.add_argument(
+        "--e2e_test_artifacts_dir",
+        type=_check_dir_path,
+        required=True,
+        help="Path to the e2e test artifacts directory.",
+    )
+    parser.add_argument("--output", type=pathlib.Path, help="Path to output JSON file.")
 
-  return parser.parse_args()
+    return parser.parse_args()
 
 
 def main(args: argparse.Namespace):
-  config_data = args.compilation_benchmark_config.open("r")
-  module_map = get_module_map_from_compilation_benchmark_config(
-      compilation_benchmark_config_data=config_data,
-      e2e_test_artifacts_dir=args.e2e_test_artifacts_dir)
-  build_log_path = args.build_log
+    config_data = args.compilation_benchmark_config.open("r")
+    module_map = get_module_map_from_compilation_benchmark_config(
+        compilation_benchmark_config_data=config_data,
+        e2e_test_artifacts_dir=args.e2e_test_artifacts_dir,
+    )
+    build_log_path = args.build_log
 
-  with build_log_path.open("r") as log_file:
-    target_build_time_map = parse_compilation_time_from_ninja_log(log_file)
+    with build_log_path.open("r") as log_file:
+        target_build_time_map = parse_compilation_time_from_ninja_log(log_file)
 
-  compilation_statistics_list = []
-  for compilation_info, module_info in module_map.items():
-    module_path = module_info.module_path
-    with module_path.open("rb") as module_file:
-      module_component_sizes = get_module_component_info(
-          module_file,
-          module_path.stat().st_size)
+    compilation_statistics_list = []
+    for compilation_info, module_info in module_map.items():
+        module_path = module_info.module_path
+        with module_path.open("rb") as module_file:
+            module_component_sizes = get_module_component_info(
+                module_file, module_path.stat().st_size
+            )
 
-    cmake_target = match_module_cmake_target(module_path)
-    if cmake_target is None:
-      raise RuntimeError(
-          f"Module path isn't a module cmake target: {module_path}")
-    compilation_time_ms = target_build_time_map[cmake_target]
+        cmake_target = match_module_cmake_target(module_path)
+        if cmake_target is None:
+            raise RuntimeError(
+                f"Module path isn't a module cmake target: {module_path}"
+            )
+        compilation_time_ms = target_build_time_map[cmake_target]
 
-    stream_stats_json = json.loads(module_info.stream_stats_path.read_text())
-    exec_stats_json = stream_stats_json["stream-aggregate"]["execution"]
-    ir_stats = benchmark_definition.IRStatistics(
-        stream_dispatch_count=exec_stats_json["dispatch-count"])
+        stream_stats_json = json.loads(module_info.stream_stats_path.read_text())
+        exec_stats_json = stream_stats_json["stream-aggregate"]["execution"]
+        ir_stats = benchmark_definition.IRStatistics(
+            stream_dispatch_count=exec_stats_json["dispatch-count"]
+        )
 
-    compilation_statistics = CompilationStatistics(
-        compilation_info=compilation_info,
-        module_component_sizes=module_component_sizes,
-        compilation_time_ms=compilation_time_ms,
-        ir_stats=ir_stats)
-    compilation_statistics_list.append(compilation_statistics)
+        compilation_statistics = CompilationStatistics(
+            compilation_info=compilation_info,
+            module_component_sizes=module_component_sizes,
+            compilation_time_ms=compilation_time_ms,
+            ir_stats=ir_stats,
+        )
+        compilation_statistics_list.append(compilation_statistics)
 
-  commit = get_git_commit_hash("HEAD")
-  compilation_results = CompilationResults(
-      commit=commit, compilation_statistics=compilation_statistics_list)
+    commit = get_git_commit_hash("HEAD")
+    compilation_results = CompilationResults(
+        commit=commit, compilation_statistics=compilation_statistics_list
+    )
 
-  json_output = json.dumps(asdict(compilation_results), indent=2)
-  if args.output is None:
-    print(json_output)
-  else:
-    args.output.write_text(json_output)
+    json_output = json.dumps(asdict(compilation_results), indent=2)
+    if args.output is None:
+        print(json_output)
+    else:
+        args.output.write_text(json_output)
 
 
 if __name__ == "__main__":
-  main(_parse_arguments())
+    main(_parse_arguments())
diff --git a/build_tools/benchmarks/collect_compilation_statistics_test.py b/build_tools/benchmarks/collect_compilation_statistics_test.py
index 6328757..d2aff95 100644
--- a/build_tools/benchmarks/collect_compilation_statistics_test.py
+++ b/build_tools/benchmarks/collect_compilation_statistics_test.py
@@ -12,7 +12,12 @@
 import zipfile
 
 from common.benchmark_definition import ModuleComponentSizes
-from collect_compilation_statistics import CONST_COMPONENT_NAME, VM_COMPONENT_NAME, get_module_component_info, parse_compilation_time_from_ninja_log
+from collect_compilation_statistics import (
+    CONST_COMPONENT_NAME,
+    VM_COMPONENT_NAME,
+    get_module_component_info,
+    parse_compilation_time_from_ninja_log,
+)
 from e2e_test_artifacts import iree_artifacts
 from e2e_test_framework import serialization
 from e2e_test_framework.definitions import common_definitions, iree_definitions
@@ -21,140 +26,161 @@
 
 
 class CollectCompilationStatistics(unittest.TestCase):
+    def test_match_module_cmake_target_with_e2e_test_artifacts(self):
+        target = collect_compilation_statistics.match_module_cmake_target(
+            pathlib.PurePath("e2e_test_artifacts/iree_abcd/module.vmfb")
+        )
 
-  def test_match_module_cmake_target_with_e2e_test_artifacts(self):
-    target = collect_compilation_statistics.match_module_cmake_target(
-        pathlib.PurePath("e2e_test_artifacts/iree_abcd/module.vmfb"))
+        self.assertEqual(target, "e2e_test_artifacts/iree_abcd/module.vmfb")
 
-    self.assertEqual(target, "e2e_test_artifacts/iree_abcd/module.vmfb")
+    def test_match_module_cmake_target_not_match(self):
+        target = collect_compilation_statistics.match_module_cmake_target(
+            pathlib.PurePath("other/target.vmfb")
+        )
 
-  def test_match_module_cmake_target_not_match(self):
-    target = collect_compilation_statistics.match_module_cmake_target(
-        pathlib.PurePath("other/target.vmfb"))
+        self.assertIsNone(target)
 
-    self.assertIsNone(target)
+    def test_parse_compilation_time_from_ninja_log(self):
+        target1 = "e2e_test_artifacts/iree_deeplabv3/module.vmfb"
+        target2 = "e2e_test_artifacts/iree_mobilessd/module.vmfb"
+        ninja_log = StringIO(
+            "# ninja log v5\n"
+            f"0\t100\taaa\tbuild/{target1}\taaa\n"
+            f"130\t200\tbbb\tbuild/{target2}\tbbb\n"
+        )
 
-  def test_parse_compilation_time_from_ninja_log(self):
-    target1 = "e2e_test_artifacts/iree_deeplabv3/module.vmfb"
-    target2 = "e2e_test_artifacts/iree_mobilessd/module.vmfb"
-    ninja_log = StringIO("# ninja log v5\n"
-                         f"0\t100\taaa\tbuild/{target1}\taaa\n"
-                         f"130\t200\tbbb\tbuild/{target2}\tbbb\n")
+        target_map = parse_compilation_time_from_ninja_log(ninja_log)
 
-    target_map = parse_compilation_time_from_ninja_log(ninja_log)
+        self.assertEqual(target_map, {target1: 100, target2: 70})
 
-    self.assertEqual(target_map, {target1: 100, target2: 70})
+    def test_get_module_component_info(self):
+        module_file = BytesIO()
+        with zipfile.ZipFile(module_file, "w") as zip:
+            zip.writestr(VM_COMPONENT_NAME, b"abcd")
+            zip.writestr(CONST_COMPONENT_NAME, b"123")
+            zip.writestr("main_dispatch_0_vulkan_spirv_fb.fb", b"bindata0")
+            zip.writestr("main_dispatch_1_vulkan_spirv_fb.fb", b"bindata1")
+            zip.writestr("predict_dispatch_2_cuda_nvptx_fb.fb", b"bindata2")
+            zip.writestr("dispatch_3_embedded_elf_x86_64.so", b"bindata3")
+        module_file_data = module_file.getvalue()
 
-  def test_get_module_component_info(self):
-    module_file = BytesIO()
-    with zipfile.ZipFile(module_file, "w") as zip:
-      zip.writestr(VM_COMPONENT_NAME, b"abcd")
-      zip.writestr(CONST_COMPONENT_NAME, b"123")
-      zip.writestr("main_dispatch_0_vulkan_spirv_fb.fb", b"bindata0")
-      zip.writestr("main_dispatch_1_vulkan_spirv_fb.fb", b"bindata1")
-      zip.writestr("predict_dispatch_2_cuda_nvptx_fb.fb", b"bindata2")
-      zip.writestr("dispatch_3_embedded_elf_x86_64.so", b"bindata3")
-    module_file_data = module_file.getvalue()
+        component_sizes = get_module_component_info(
+            BytesIO(module_file_data), len(module_file_data)
+        )
 
-    component_sizes = get_module_component_info(BytesIO(module_file_data),
-                                                len(module_file_data))
+        self.assertEqual(
+            component_sizes,
+            ModuleComponentSizes(
+                file_bytes=len(module_file_data),
+                vm_component_bytes=4,
+                const_component_bytes=3,
+                total_dispatch_component_bytes=32,
+            ),
+        )
 
-    self.assertEqual(
-        component_sizes,
-        ModuleComponentSizes(file_bytes=len(module_file_data),
-                             vm_component_bytes=4,
-                             const_component_bytes=3,
-                             total_dispatch_component_bytes=32))
+    def test_get_module_component_info_unknown_components(self):
+        module_file = BytesIO()
+        with zipfile.ZipFile(module_file, "w") as zip:
+            zip.writestr(VM_COMPONENT_NAME, b"abcd")
+            zip.writestr(CONST_COMPONENT_NAME, b"123")
+            zip.writestr("main_dispatch_0_unknown.fb", b"bindata")
+        module_file_data = module_file.getvalue()
 
-  def test_get_module_component_info_unknown_components(self):
-    module_file = BytesIO()
-    with zipfile.ZipFile(module_file, "w") as zip:
-      zip.writestr(VM_COMPONENT_NAME, b"abcd")
-      zip.writestr(CONST_COMPONENT_NAME, b"123")
-      zip.writestr("main_dispatch_0_unknown.fb", b"bindata")
-    module_file_data = module_file.getvalue()
+        self.assertRaises(
+            RuntimeError,
+            lambda: get_module_component_info(
+                BytesIO(module_file_data), len(module_file_data)
+            ),
+        )
 
-    self.assertRaises(
-        RuntimeError, lambda: get_module_component_info(
-            BytesIO(module_file_data), len(module_file_data)))
+    def test_get_module_map_from_compilation_benchmark_config(self):
+        model_a = common_definitions.Model(
+            id="1234",
+            name="tflite_m",
+            tags=[],
+            source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
+            source_url="https://example.com/xyz.tflite",
+            entry_function="main",
+            input_types=["1xf32"],
+        )
+        imported_model_a = iree_definitions.ImportedModel.from_model(model_a)
+        compile_config_a = iree_definitions.CompileConfig.build(
+            id="config_a",
+            tags=["defaults"],
+            compile_targets=[
+                iree_definitions.CompileTarget(
+                    target_architecture=common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
+                    target_backend=iree_definitions.TargetBackend.LLVM_CPU,
+                    target_abi=iree_definitions.TargetABI.LINUX_GNU,
+                )
+            ],
+        )
+        compile_config_b = iree_definitions.CompileConfig.build(
+            id="config_b",
+            tags=["defaults"],
+            compile_targets=[
+                iree_definitions.CompileTarget(
+                    target_architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
+                    target_backend=iree_definitions.TargetBackend.LLVM_CPU,
+                    target_abi=iree_definitions.TargetABI.LINUX_GNU,
+                )
+            ],
+        )
+        gen_config_a = iree_definitions.ModuleGenerationConfig.build(
+            imported_model=imported_model_a, compile_config=compile_config_a
+        )
+        gen_config_b = iree_definitions.ModuleGenerationConfig.build(
+            imported_model=imported_model_a, compile_config=compile_config_b
+        )
+        benchmark_config = dict(
+            generation_configs=serialization.serialize_and_pack(
+                [gen_config_a, gen_config_b]
+            ),
+            module_dir_paths=["a", "b"],
+        )
+        root_dir = pathlib.PurePath("artifacts_dir")
 
-  def test_get_module_map_from_compilation_benchmark_config(self):
-    model_a = common_definitions.Model(
-        id="1234",
-        name="tflite_m",
-        tags=[],
-        source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
-        source_url="https://example.com/xyz.tflite",
-        entry_function="main",
-        input_types=["1xf32"])
-    imported_model_a = iree_definitions.ImportedModel.from_model(model_a)
-    compile_config_a = iree_definitions.CompileConfig.build(
-        id="config_a",
-        tags=["defaults"],
-        compile_targets=[
-            iree_definitions.CompileTarget(
-                target_architecture=common_definitions.DeviceArchitecture.
-                X86_64_CASCADELAKE,
-                target_backend=iree_definitions.TargetBackend.LLVM_CPU,
-                target_abi=iree_definitions.TargetABI.LINUX_GNU)
-        ])
-    compile_config_b = iree_definitions.CompileConfig.build(
-        id="config_b",
-        tags=["defaults"],
-        compile_targets=[
-            iree_definitions.CompileTarget(
-                target_architecture=common_definitions.DeviceArchitecture.
-                RV64_GENERIC,
-                target_backend=iree_definitions.TargetBackend.LLVM_CPU,
-                target_abi=iree_definitions.TargetABI.LINUX_GNU)
-        ])
-    gen_config_a = iree_definitions.ModuleGenerationConfig.build(
-        imported_model=imported_model_a, compile_config=compile_config_a)
-    gen_config_b = iree_definitions.ModuleGenerationConfig.build(
-        imported_model=imported_model_a, compile_config=compile_config_b)
-    benchmark_config = dict(generation_configs=serialization.serialize_and_pack(
-        [gen_config_a, gen_config_b]),
-                            module_dir_paths=["a", "b"])
-    root_dir = pathlib.PurePath("artifacts_dir")
+        module_map = collect_compilation_statistics.get_module_map_from_compilation_benchmark_config(
+            compilation_benchmark_config_data=StringIO(json.dumps(benchmark_config)),
+            e2e_test_artifacts_dir=root_dir,
+        )
 
-    module_map = collect_compilation_statistics.get_module_map_from_compilation_benchmark_config(
-        compilation_benchmark_config_data=StringIO(
-            json.dumps(benchmark_config)),
-        e2e_test_artifacts_dir=root_dir)
-
-    compile_info_a = common.benchmark_definition.CompilationInfo(
-        name=gen_config_a.name,
-        model_name=model_a.name,
-        model_tags=tuple(model_a.tags),
-        model_source=model_a.source_type.value,
-        target_arch=f"[cpu-x86_64-cascadelake-linux-gnu]",
-        compile_tags=tuple(gen_config_a.compile_config.tags),
-        gen_config_id=gen_config_a.composite_id)
-    module_dir_a = pathlib.Path(
-        iree_artifacts.get_module_dir_path(gen_config_a, root_dir))
-    module_info_a = collect_compilation_statistics.ModuleInfo(
-        module_path=module_dir_a / iree_artifacts.MODULE_FILENAME,
-        stream_stats_path=module_dir_a /
-        iree_artifacts.SCHEDULING_STATS_FILENAME)
-    compile_info_b = common.benchmark_definition.CompilationInfo(
-        name=gen_config_b.name,
-        model_name=model_a.name,
-        model_tags=tuple(model_a.tags),
-        model_source=model_a.source_type.value,
-        target_arch=f"[cpu-riscv_64-generic-linux-gnu]",
-        compile_tags=tuple(gen_config_a.compile_config.tags),
-        gen_config_id=gen_config_b.composite_id)
-    module_dir_b = pathlib.Path(
-        iree_artifacts.get_module_dir_path(gen_config_b, root_dir))
-    module_info_b = collect_compilation_statistics.ModuleInfo(
-        module_path=module_dir_b / iree_artifacts.MODULE_FILENAME,
-        stream_stats_path=module_dir_b /
-        iree_artifacts.SCHEDULING_STATS_FILENAME)
-    self.assertEqual(module_map, {
-        compile_info_a: module_info_a,
-        compile_info_b: module_info_b
-    })
+        compile_info_a = common.benchmark_definition.CompilationInfo(
+            name=gen_config_a.name,
+            model_name=model_a.name,
+            model_tags=tuple(model_a.tags),
+            model_source=model_a.source_type.value,
+            target_arch=f"[cpu-x86_64-cascadelake-linux-gnu]",
+            compile_tags=tuple(gen_config_a.compile_config.tags),
+            gen_config_id=gen_config_a.composite_id,
+        )
+        module_dir_a = pathlib.Path(
+            iree_artifacts.get_module_dir_path(gen_config_a, root_dir)
+        )
+        module_info_a = collect_compilation_statistics.ModuleInfo(
+            module_path=module_dir_a / iree_artifacts.MODULE_FILENAME,
+            stream_stats_path=module_dir_a / iree_artifacts.SCHEDULING_STATS_FILENAME,
+        )
+        compile_info_b = common.benchmark_definition.CompilationInfo(
+            name=gen_config_b.name,
+            model_name=model_a.name,
+            model_tags=tuple(model_a.tags),
+            model_source=model_a.source_type.value,
+            target_arch=f"[cpu-riscv_64-generic-linux-gnu]",
+            compile_tags=tuple(gen_config_a.compile_config.tags),
+            gen_config_id=gen_config_b.composite_id,
+        )
+        module_dir_b = pathlib.Path(
+            iree_artifacts.get_module_dir_path(gen_config_b, root_dir)
+        )
+        module_info_b = collect_compilation_statistics.ModuleInfo(
+            module_path=module_dir_b / iree_artifacts.MODULE_FILENAME,
+            stream_stats_path=module_dir_b / iree_artifacts.SCHEDULING_STATS_FILENAME,
+        )
+        self.assertEqual(
+            module_map, {compile_info_a: module_info_a, compile_info_b: module_info_b}
+        )
 
 
 if __name__ == "__main__":
-  unittest.main()
+    unittest.main()
diff --git a/build_tools/benchmarks/common/android_device_utils.py b/build_tools/benchmarks/common/android_device_utils.py
index a0f8604..5149da8 100644
--- a/build_tools/benchmarks/common/android_device_utils.py
+++ b/build_tools/benchmarks/common/android_device_utils.py
@@ -10,59 +10,64 @@
 import re
 
 from typing import Sequence
-from .benchmark_definition import (execute_cmd_and_get_stdout, DeviceInfo,
-                                   PlatformType)
+from .benchmark_definition import execute_cmd_and_get_stdout, DeviceInfo, PlatformType
 
 
 def get_android_device_model(verbose: bool = False) -> str:
-  """Returns the Android device model."""
-  model = execute_cmd_and_get_stdout(
-      ["adb", "shell", "getprop", "ro.product.model"], verbose=verbose)
-  model = re.sub(r"\W+", "-", model)
-  return model
+    """Returns the Android device model."""
+    model = execute_cmd_and_get_stdout(
+        ["adb", "shell", "getprop", "ro.product.model"], verbose=verbose
+    )
+    model = re.sub(r"\W+", "-", model)
+    return model
 
 
 def get_android_cpu_abi(verbose: bool = False) -> str:
-  """Returns the CPU ABI for the Android device."""
-  return execute_cmd_and_get_stdout(
-      ["adb", "shell", "getprop", "ro.product.cpu.abi"], verbose=verbose)
+    """Returns the CPU ABI for the Android device."""
+    return execute_cmd_and_get_stdout(
+        ["adb", "shell", "getprop", "ro.product.cpu.abi"], verbose=verbose
+    )
 
 
 def get_android_cpu_features(verbose: bool = False) -> Sequence[str]:
-  """Returns the CPU features for the Android device."""
-  cpuinfo = execute_cmd_and_get_stdout(["adb", "shell", "cat", "/proc/cpuinfo"],
-                                       verbose=verbose)
-  features = []
-  for line in cpuinfo.splitlines():
-    if line.startswith("Features"):
-      _, features = line.split(":")
-      return features.strip().split()
-  return features
+    """Returns the CPU features for the Android device."""
+    cpuinfo = execute_cmd_and_get_stdout(
+        ["adb", "shell", "cat", "/proc/cpuinfo"], verbose=verbose
+    )
+    features = []
+    for line in cpuinfo.splitlines():
+        if line.startswith("Features"):
+            _, features = line.split(":")
+            return features.strip().split()
+    return features
 
 
 def get_android_gpu_name(verbose: bool = False) -> str:
-  """Returns the GPU name for the Android device."""
-  vkjson = execute_cmd_and_get_stdout(["adb", "shell", "cmd", "gpu", "vkjson"],
-                                      verbose=verbose)
-  vkjson = json.loads(vkjson)
-  name = vkjson["devices"][0]["properties"]["deviceName"]
+    """Returns the GPU name for the Android device."""
+    vkjson = execute_cmd_and_get_stdout(
+        ["adb", "shell", "cmd", "gpu", "vkjson"], verbose=verbose
+    )
+    vkjson = json.loads(vkjson)
+    name = vkjson["devices"][0]["properties"]["deviceName"]
 
-  # Perform some canonicalization:
+    # Perform some canonicalization:
 
-  # - Adreno GPUs have raw names like "Adreno (TM) 650".
-  name = name.replace("(TM)", "")
+    # - Adreno GPUs have raw names like "Adreno (TM) 650".
+    name = name.replace("(TM)", "")
 
-  # Replace all consecutive non-word characters with a single hyphen.
-  name = re.sub(r"\W+", "-", name)
+    # Replace all consecutive non-word characters with a single hyphen.
+    name = re.sub(r"\W+", "-", name)
 
-  return name
+    return name
 
 
 def get_android_device_info(verbose: bool = False) -> DeviceInfo:
-  """Returns device info for the Android device."""
-  return DeviceInfo(platform_type=PlatformType.ANDROID,
-                    model=get_android_device_model(verbose),
-                    cpu_abi=get_android_cpu_abi(verbose),
-                    cpu_uarch=None,
-                    cpu_features=get_android_cpu_features(verbose),
-                    gpu_name=get_android_gpu_name(verbose))
+    """Returns device info for the Android device."""
+    return DeviceInfo(
+        platform_type=PlatformType.ANDROID,
+        model=get_android_device_model(verbose),
+        cpu_abi=get_android_cpu_abi(verbose),
+        cpu_uarch=None,
+        cpu_features=get_android_cpu_features(verbose),
+        gpu_name=get_android_gpu_name(verbose),
+    )
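
`get_android_gpu_name` canonicalizes the raw Vulkan device name with two string passes, as noted in the comments above. A quick illustration with a hypothetical raw value:

```python
import re

raw_name = "Adreno (TM) 650"  # hypothetical value from `adb shell cmd gpu vkjson`

name = raw_name.replace("(TM)", "")  # drop the trademark marker
name = re.sub(r"\W+", "-", name)  # collapse runs of non-word characters to "-"
print(name)  # Adreno-650
```
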
diff --git a/build_tools/benchmarks/common/benchmark_config.py b/build_tools/benchmarks/common/benchmark_config.py
index 2d08d4e..f9fb2dd 100644
--- a/build_tools/benchmarks/common/benchmark_config.py
+++ b/build_tools/benchmarks/common/benchmark_config.py
@@ -16,24 +16,24 @@
 
 @dataclass
 class TraceCaptureConfig:
-  """Represents the settings for capturing traces during benchamrking.
+    """Represents the settings for capturing traces during benchamrking.
 
     traced_benchmark_tool_dir: the path to the tracing-enabled benchmark tool
       directory.
     trace_capture_tool: the path to the tool for collecting captured traces.
     capture_tarball: the path of capture tar archive.
     capture_tmp_dir: the temporary directory to store captured traces.
-  """
+    """
 
-  traced_benchmark_tool_dir: pathlib.Path
-  trace_capture_tool: pathlib.Path
-  capture_tarball: pathlib.Path
-  capture_tmp_dir: pathlib.Path
+    traced_benchmark_tool_dir: pathlib.Path
+    trace_capture_tool: pathlib.Path
+    capture_tarball: pathlib.Path
+    capture_tmp_dir: pathlib.Path
 
 
 @dataclass
 class BenchmarkConfig:
-  """Represents the settings to run benchmarks.
+    """Represents the settings to run benchmarks.
 
     root_benchmark_dir: the root directory containing the built benchmark
       suites.
@@ -56,71 +56,72 @@
       times.
     continue_from_previous: skip the benchmarks if their results are found in
       the benchmark_results_dir.
-  """
-
-  root_benchmark_dir: pathlib.Path
-  benchmark_results_dir: pathlib.Path
-  git_commit_hash: str
-
-  normal_benchmark_tool_dir: Optional[pathlib.Path] = None
-  trace_capture_config: Optional[TraceCaptureConfig] = None
-
-  driver_filter: Optional[str] = None
-  model_name_filter: Optional[str] = None
-  mode_filter: Optional[str] = None
-  use_compatible_filter: bool = False
-
-  keep_going: bool = False
-  benchmark_min_time: float = 0
-  continue_from_previous: bool = False
-
-  @staticmethod
-  def build_from_args(args: Namespace, git_commit_hash: str):
-    """Build config from command arguments.
-
-    Args:
-      args: the command arguments.
-      git_commit_hash: the git commit hash of IREE.
     """
 
-    def real_path_or_none(
-        path: Optional[pathlib.Path]) -> Optional[pathlib.Path]:
-      return path.resolve() if path else None
+    root_benchmark_dir: pathlib.Path
+    benchmark_results_dir: pathlib.Path
+    git_commit_hash: str
 
-    if not args.normal_benchmark_tool_dir and not args.traced_benchmark_tool_dir:
-      raise ValueError(
-          "At least one of --normal_benchmark_tool_dir or --traced_benchmark_tool_dir should be specified."
-      )
-    if not ((args.traced_benchmark_tool_dir is None) ==
-            (args.trace_capture_tool is None) ==
-            (args.capture_tarball is None)):
-      raise ValueError(
-          "The following 3 flags should be simultaneously all specified or all unspecified: --traced_benchmark_tool_dir, --trace_capture_tool, --capture_tarball"
-      )
+    normal_benchmark_tool_dir: Optional[pathlib.Path] = None
+    trace_capture_config: Optional[TraceCaptureConfig] = None
 
-    per_commit_tmp_dir: pathlib.Path = (args.tmp_dir /
-                                        git_commit_hash).resolve()
+    driver_filter: Optional[str] = None
+    model_name_filter: Optional[str] = None
+    mode_filter: Optional[str] = None
+    use_compatible_filter: bool = False
 
-    if args.traced_benchmark_tool_dir is None:
-      trace_capture_config = None
-    else:
-      trace_capture_config = TraceCaptureConfig(
-          traced_benchmark_tool_dir=args.traced_benchmark_tool_dir.resolve(),
-          trace_capture_tool=args.trace_capture_tool.resolve(),
-          capture_tarball=args.capture_tarball.resolve(),
-          capture_tmp_dir=per_commit_tmp_dir / CAPTURES_REL_PATH)
+    keep_going: bool = False
+    benchmark_min_time: float = 0
+    continue_from_previous: bool = False
 
-    return BenchmarkConfig(root_benchmark_dir=args.e2e_test_artifacts_dir,
-                           benchmark_results_dir=per_commit_tmp_dir /
-                           BENCHMARK_RESULTS_REL_PATH,
-                           git_commit_hash=git_commit_hash,
-                           normal_benchmark_tool_dir=real_path_or_none(
-                               args.normal_benchmark_tool_dir),
-                           trace_capture_config=trace_capture_config,
-                           driver_filter=args.driver_filter_regex,
-                           model_name_filter=args.model_name_regex,
-                           mode_filter=args.mode_regex,
-                           use_compatible_filter=args.compatible_only,
-                           keep_going=args.keep_going,
-                           benchmark_min_time=args.benchmark_min_time,
-                           continue_from_previous=args.continue_from_previous)
+    @staticmethod
+    def build_from_args(args: Namespace, git_commit_hash: str):
+        """Build config from command arguments.
+
+        Args:
+          args: the command arguments.
+          git_commit_hash: the git commit hash of IREE.
+        """
+
+        def real_path_or_none(path: Optional[pathlib.Path]) -> Optional[pathlib.Path]:
+            return path.resolve() if path else None
+
+        if not args.normal_benchmark_tool_dir and not args.traced_benchmark_tool_dir:
+            raise ValueError(
+                "At least one of --normal_benchmark_tool_dir or --traced_benchmark_tool_dir should be specified."
+            )
+        if not (
+            (args.traced_benchmark_tool_dir is None)
+            == (args.trace_capture_tool is None)
+            == (args.capture_tarball is None)
+        ):
+            raise ValueError(
+                "The following 3 flags should be simultaneously all specified or all unspecified: --traced_benchmark_tool_dir, --trace_capture_tool, --capture_tarball"
+            )
+
+        per_commit_tmp_dir: pathlib.Path = (args.tmp_dir / git_commit_hash).resolve()
+
+        if args.traced_benchmark_tool_dir is None:
+            trace_capture_config = None
+        else:
+            trace_capture_config = TraceCaptureConfig(
+                traced_benchmark_tool_dir=args.traced_benchmark_tool_dir.resolve(),
+                trace_capture_tool=args.trace_capture_tool.resolve(),
+                capture_tarball=args.capture_tarball.resolve(),
+                capture_tmp_dir=per_commit_tmp_dir / CAPTURES_REL_PATH,
+            )
+
+        return BenchmarkConfig(
+            root_benchmark_dir=args.e2e_test_artifacts_dir,
+            benchmark_results_dir=per_commit_tmp_dir / BENCHMARK_RESULTS_REL_PATH,
+            git_commit_hash=git_commit_hash,
+            normal_benchmark_tool_dir=real_path_or_none(args.normal_benchmark_tool_dir),
+            trace_capture_config=trace_capture_config,
+            driver_filter=args.driver_filter_regex,
+            model_name_filter=args.model_name_regex,
+            mode_filter=args.mode_regex,
+            use_compatible_filter=args.compatible_only,
+            keep_going=args.keep_going,
+            benchmark_min_time=args.benchmark_min_time,
+            continue_from_previous=args.continue_from_previous,
+        )
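
The validation in `BenchmarkConfig.build_from_args` relies on Python's chained comparisons: `(a is None) == (b is None) == (c is None)` is true only when the three trace-capture flags (`--traced_benchmark_tool_dir`, `--trace_capture_tool`, `--capture_tarball`) are all set or all unset. A self-contained sketch of the same all-or-none check (the helper name is ours, not from the script):

```python
def all_or_none(*values) -> bool:
    # True when every value is None or every value is set, mirroring the
    # chained `is None` comparisons in BenchmarkConfig.build_from_args.
    unset = [v is None for v in values]
    return all(unset) or not any(unset)

assert all_or_none(None, None, None)
assert all_or_none("tools/traced", "tracy-capture", "capture.tar")
assert not all_or_none("tools/traced", None, None)
```
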
diff --git a/build_tools/benchmarks/common/benchmark_config_test.py b/build_tools/benchmarks/common/benchmark_config_test.py
index 32d2387..2a446ab 100644
--- a/build_tools/benchmarks/common/benchmark_config_test.py
+++ b/build_tools/benchmarks/common/benchmark_config_test.py
@@ -15,97 +15,109 @@
 
 
 class BenchmarkConfigTest(unittest.TestCase):
+    def setUp(self):
+        self._tmp_dir_manager = tempfile.TemporaryDirectory()
+        self.tmp_dir = pathlib.Path(self._tmp_dir_manager.name).resolve()
+        self._build_dir_manager = tempfile.TemporaryDirectory()
+        self.build_dir = pathlib.Path(self._build_dir_manager.name).resolve()
+        self.e2e_test_artifacts_dir = self.build_dir / "e2e_test_artifacts"
+        self.e2e_test_artifacts_dir.mkdir()
+        self.normal_tool_dir = self.build_dir / "normal_tool"
+        self.normal_tool_dir.mkdir()
+        self.traced_tool_dir = self.build_dir / "traced_tool"
+        self.traced_tool_dir.mkdir()
+        self.trace_capture_tool = self.build_dir / "tracy_capture"
+        # Create capture tool with executable file mode.
+        self.trace_capture_tool.touch(mode=0o755)
+        self.execution_config = self.build_dir / "execution_config.json"
+        self.execution_config.touch()
 
-  def setUp(self):
-    self._tmp_dir_manager = tempfile.TemporaryDirectory()
-    self.tmp_dir = pathlib.Path(self._tmp_dir_manager.name).resolve()
-    self._build_dir_manager = tempfile.TemporaryDirectory()
-    self.build_dir = pathlib.Path(self._build_dir_manager.name).resolve()
-    self.e2e_test_artifacts_dir = self.build_dir / "e2e_test_artifacts"
-    self.e2e_test_artifacts_dir.mkdir()
-    self.normal_tool_dir = self.build_dir / "normal_tool"
-    self.normal_tool_dir.mkdir()
-    self.traced_tool_dir = self.build_dir / "traced_tool"
-    self.traced_tool_dir.mkdir()
-    self.trace_capture_tool = self.build_dir / "tracy_capture"
-    # Create capture tool with executable file mode.
-    self.trace_capture_tool.touch(mode=0o755)
-    self.execution_config = self.build_dir / "execution_config.json"
-    self.execution_config.touch()
+    def tearDown(self):
+        self._build_dir_manager.cleanup()
+        self._tmp_dir_manager.cleanup()
 
-  def tearDown(self):
-    self._build_dir_manager.cleanup()
-    self._tmp_dir_manager.cleanup()
+    def test_build_from_args(self):
+        args = common_arguments.Parser().parse_args(
+            [
+                f"--tmp_dir={self.tmp_dir}",
+                f"--normal_benchmark_tool_dir={self.normal_tool_dir}",
+                f"--traced_benchmark_tool_dir={self.traced_tool_dir}",
+                f"--trace_capture_tool={self.trace_capture_tool}",
+                f"--capture_tarball=capture.tar",
+                f"--driver_filter_regex=a",
+                f"--model_name_regex=b",
+                f"--mode_regex=c",
+                f"--keep_going",
+                f"--benchmark_min_time=10",
+                f"--compatible_only",
+                f"--e2e_test_artifacts_dir={self.e2e_test_artifacts_dir}",
+                f"--execution_benchmark_config={self.execution_config}",
+                "--target_device=test",
+            ]
+        )
 
-  def test_build_from_args(self):
-    args = common_arguments.Parser().parse_args([
-        f"--tmp_dir={self.tmp_dir}",
-        f"--normal_benchmark_tool_dir={self.normal_tool_dir}",
-        f"--traced_benchmark_tool_dir={self.traced_tool_dir}",
-        f"--trace_capture_tool={self.trace_capture_tool}",
-        f"--capture_tarball=capture.tar",
-        f"--driver_filter_regex=a",
-        f"--model_name_regex=b",
-        f"--mode_regex=c",
-        f"--keep_going",
-        f"--benchmark_min_time=10",
-        f"--compatible_only",
-        f"--e2e_test_artifacts_dir={self.e2e_test_artifacts_dir}",
-        f"--execution_benchmark_config={self.execution_config}",
-        "--target_device=test",
-    ])
+        config = benchmark_config.BenchmarkConfig.build_from_args(
+            args=args, git_commit_hash="abcd"
+        )
 
-    config = benchmark_config.BenchmarkConfig.build_from_args(
-        args=args, git_commit_hash="abcd")
+        per_commit_tmp_dir = self.tmp_dir / "abcd"
+        expected_trace_capture_config = benchmark_config.TraceCaptureConfig(
+            traced_benchmark_tool_dir=self.traced_tool_dir,
+            trace_capture_tool=pathlib.Path(self.trace_capture_tool).resolve(),
+            capture_tarball=pathlib.Path("capture.tar").resolve(),
+            capture_tmp_dir=per_commit_tmp_dir / "captures",
+        )
+        expected_config = benchmark_config.BenchmarkConfig(
+            root_benchmark_dir=self.e2e_test_artifacts_dir,
+            benchmark_results_dir=per_commit_tmp_dir / "benchmark-results",
+            git_commit_hash="abcd",
+            normal_benchmark_tool_dir=self.normal_tool_dir,
+            trace_capture_config=expected_trace_capture_config,
+            driver_filter="a",
+            model_name_filter="b",
+            mode_filter="c",
+            keep_going=True,
+            benchmark_min_time=10,
+            use_compatible_filter=True,
+        )
+        self.assertEqual(config, expected_config)
 
-    per_commit_tmp_dir = self.tmp_dir / "abcd"
-    expected_trace_capture_config = benchmark_config.TraceCaptureConfig(
-        traced_benchmark_tool_dir=self.traced_tool_dir,
-        trace_capture_tool=pathlib.Path(self.trace_capture_tool).resolve(),
-        capture_tarball=pathlib.Path("capture.tar").resolve(),
-        capture_tmp_dir=per_commit_tmp_dir / "captures")
-    expected_config = benchmark_config.BenchmarkConfig(
-        root_benchmark_dir=self.e2e_test_artifacts_dir,
-        benchmark_results_dir=per_commit_tmp_dir / "benchmark-results",
-        git_commit_hash="abcd",
-        normal_benchmark_tool_dir=self.normal_tool_dir,
-        trace_capture_config=expected_trace_capture_config,
-        driver_filter="a",
-        model_name_filter="b",
-        mode_filter="c",
-        keep_going=True,
-        benchmark_min_time=10,
-        use_compatible_filter=True)
-    self.assertEqual(config, expected_config)
+    def test_build_from_args_benchmark_only(self):
+        args = common_arguments.Parser().parse_args(
+            [
+                f"--tmp_dir={self.tmp_dir}",
+                f"--normal_benchmark_tool_dir={self.normal_tool_dir}",
+                f"--e2e_test_artifacts_dir={self.e2e_test_artifacts_dir}",
+                f"--execution_benchmark_config={self.execution_config}",
+                "--target_device=test",
+            ]
+        )
 
-  def test_build_from_args_benchmark_only(self):
-    args = common_arguments.Parser().parse_args([
-        f"--tmp_dir={self.tmp_dir}",
-        f"--normal_benchmark_tool_dir={self.normal_tool_dir}",
-        f"--e2e_test_artifacts_dir={self.e2e_test_artifacts_dir}",
-        f"--execution_benchmark_config={self.execution_config}",
-        "--target_device=test",
-    ])
+        config = benchmark_config.BenchmarkConfig.build_from_args(
+            args=args, git_commit_hash="abcd"
+        )
 
-    config = benchmark_config.BenchmarkConfig.build_from_args(
-        args=args, git_commit_hash="abcd")
+        self.assertIsNone(config.trace_capture_config)
 
-    self.assertIsNone(config.trace_capture_config)
+    def test_build_from_args_invalid_capture_args(self):
+        args = common_arguments.Parser().parse_args(
+            [
+                f"--tmp_dir={self.tmp_dir}",
+                f"--normal_benchmark_tool_dir={self.normal_tool_dir}",
+                f"--traced_benchmark_tool_dir={self.traced_tool_dir}",
+                f"--e2e_test_artifacts_dir={self.e2e_test_artifacts_dir}",
+                f"--execution_benchmark_config={self.execution_config}",
+                "--target_device=test",
+            ]
+        )
 
-  def test_build_from_args_invalid_capture_args(self):
-    args = common_arguments.Parser().parse_args([
-        f"--tmp_dir={self.tmp_dir}",
-        f"--normal_benchmark_tool_dir={self.normal_tool_dir}",
-        f"--traced_benchmark_tool_dir={self.traced_tool_dir}",
-        f"--e2e_test_artifacts_dir={self.e2e_test_artifacts_dir}",
-        f"--execution_benchmark_config={self.execution_config}",
-        "--target_device=test",
-    ])
-
-    self.assertRaises(
-        ValueError, lambda: benchmark_config.BenchmarkConfig.build_from_args(
-            args=args, git_commit_hash="abcd"))
+        self.assertRaises(
+            ValueError,
+            lambda: benchmark_config.BenchmarkConfig.build_from_args(
+                args=args, git_commit_hash="abcd"
+            ),
+        )
 
 
 if __name__ == "__main__":
-  unittest.main()
+    unittest.main()
diff --git a/build_tools/benchmarks/common/benchmark_definition.py b/build_tools/benchmarks/common/benchmark_definition.py
index 1bd29ea..bdfd5f7 100644
--- a/build_tools/benchmarks/common/benchmark_definition.py
+++ b/build_tools/benchmarks/common/benchmark_definition.py
@@ -23,68 +23,53 @@
 
 # A map from CPU ABI to IREE's benchmark target architecture.
 CPU_ABI_TO_TARGET_ARCH_MAP = {
-    "arm64-v8a":
-        common_definitions.DeviceArchitecture.ARMV8_2_A_GENERIC,
-    "x86_64-cascadelake":
-        common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
+    "arm64-v8a": common_definitions.DeviceArchitecture.ARMV8_2_A_GENERIC,
+    "x86_64-cascadelake": common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
 }
 
 # A map from GPU name to IREE's benchmark target architecture.
 GPU_NAME_TO_TARGET_ARCH_MAP = {
-    "adreno-640":
-        common_definitions.DeviceArchitecture.QUALCOMM_ADRENO,
-    "adreno-650":
-        common_definitions.DeviceArchitecture.QUALCOMM_ADRENO,
-    "adreno-660":
-        common_definitions.DeviceArchitecture.QUALCOMM_ADRENO,
-    "adreno-730":
-        common_definitions.DeviceArchitecture.QUALCOMM_ADRENO,
-    "mali-g77":
-        common_definitions.DeviceArchitecture.ARM_VALHALL,
-    "mali-g78":
-        common_definitions.DeviceArchitecture.ARM_VALHALL,
-    "tesla-v100-sxm2-16gb":
-        common_definitions.DeviceArchitecture.NVIDIA_PASCAL,
-    "nvidia-a100-sxm4-40gb":
-        common_definitions.DeviceArchitecture.NVIDIA_AMPERE,
-    "nvidia-geforce-rtx-3090":
-        common_definitions.DeviceArchitecture.NVIDIA_AMPERE,
+    "adreno-640": common_definitions.DeviceArchitecture.QUALCOMM_ADRENO,
+    "adreno-650": common_definitions.DeviceArchitecture.QUALCOMM_ADRENO,
+    "adreno-660": common_definitions.DeviceArchitecture.QUALCOMM_ADRENO,
+    "adreno-730": common_definitions.DeviceArchitecture.QUALCOMM_ADRENO,
+    "mali-g77": common_definitions.DeviceArchitecture.ARM_VALHALL,
+    "mali-g78": common_definitions.DeviceArchitecture.ARM_VALHALL,
+    "tesla-v100-sxm2-16gb": common_definitions.DeviceArchitecture.NVIDIA_PASCAL,
+    "nvidia-a100-sxm4-40gb": common_definitions.DeviceArchitecture.NVIDIA_AMPERE,
+    "nvidia-geforce-rtx-3090": common_definitions.DeviceArchitecture.NVIDIA_AMPERE,
 }
 
 
 @dataclasses.dataclass
 class DriverInfo:
-  """An object describing a IREE HAL driver.
+    """An object describing a IREE HAL driver.
 
-  It includes the following characteristics:
-  - pretty_name: the pretty name, e.g., 'IREE-LLVM-CPU'
-  - device_type: the targeted device type, e.g., 'CPU'
-  - driver_name: runtime driver flag, e.g., 'local-task'
-  - loader_name: executable loader name, if used
-  """
+    It includes the following characteristics:
+    - pretty_name: the pretty name, e.g., 'IREE-LLVM-CPU'
+    - device_type: the targeted device type, e.g., 'CPU'
+    - driver_name: runtime driver flag, e.g., 'local-task'
+    - loader_name: executable loader name, if used
+    """
 
-  pretty_name: str
-  device_type: str
-  driver_name: str
-  loader_name: str
+    pretty_name: str
+    device_type: str
+    driver_name: str
+    loader_name: str
 
 
 # A map for IREE driver names. This allows us to normalize driver names like
 # mapping to more friendly ones and detach to keep driver names used in
 # benchmark presentation stable.
 IREE_DRIVERS_INFOS = {
-    "iree-llvm-cpu":
-        DriverInfo("IREE-LLVM-CPU", "CPU", "local-task", "embedded-elf"),
-    "iree-llvm-cpu-sync":
-        DriverInfo("IREE-LLVM-CPU-Sync", "CPU", "local-sync", "embedded-elf"),
-    "iree-vmvx":
-        DriverInfo("IREE-VMVX", "CPU", "local-task", "vmvx-module"),
-    "iree-vmvx-sync":
-        DriverInfo("IREE-VMVX-Sync", "CPU", "local-sync", "vmvx-module"),
-    "iree-vulkan":
-        DriverInfo("IREE-Vulkan", "GPU", "vulkan", ""),
-    "iree-cuda":
-        DriverInfo("IREE-CUDA", "GPU", "cuda", ""),
+    "iree-llvm-cpu": DriverInfo("IREE-LLVM-CPU", "CPU", "local-task", "embedded-elf"),
+    "iree-llvm-cpu-sync": DriverInfo(
+        "IREE-LLVM-CPU-Sync", "CPU", "local-sync", "embedded-elf"
+    ),
+    "iree-vmvx": DriverInfo("IREE-VMVX", "CPU", "local-task", "vmvx-module"),
+    "iree-vmvx-sync": DriverInfo("IREE-VMVX-Sync", "CPU", "local-sync", "vmvx-module"),
+    "iree-vulkan": DriverInfo("IREE-Vulkan", "GPU", "vulkan", ""),
+    "iree-cuda": DriverInfo("IREE-CUDA", "GPU", "cuda", ""),
 }
 
 IREE_PRETTY_NAME_TO_DRIVER_NAME = {
@@ -92,296 +77,318 @@
 }
 
 
-def execute_cmd(args: Sequence[Any],
-                verbose: bool = False,
-                **kwargs) -> subprocess.CompletedProcess:
-  """Executes a command and returns the completed process.
+def execute_cmd(
+    args: Sequence[Any], verbose: bool = False, **kwargs
+) -> subprocess.CompletedProcess:
+    """Executes a command and returns the completed process.
 
-  A thin wrapper around subprocess.run that sets some useful defaults and
-  optionally prints out the command being run.
+    A thin wrapper around subprocess.run that sets some useful defaults and
+    optionally prints out the command being run.
 
-  Raises:
-    CalledProcessError if the command fails.
-  """
-  if verbose:
-    print(f"cmd: {args}")
-  try:
-    return subprocess.run(args, check=True, text=True, **kwargs)
-  except subprocess.CalledProcessError as exc:
-    print((f"\n\nThe following command failed:\n\n{args}"
-           f"\n\nReturn code: {exc.returncode}\n\n"))
-    if exc.stdout:
-      print(f"Stdout:\n\n{exc.stdout}\n\n")
-    if exc.stderr:
-      print(f"Stderr:\n\n{exc.stderr}\n\n")
-    raise exc
+    Raises:
+      CalledProcessError if the command fails.
+    """
+    if verbose:
+        print(f"cmd: {args}")
+    try:
+        return subprocess.run(args, check=True, text=True, **kwargs)
+    except subprocess.CalledProcessError as exc:
+        print(
+            (
+                f"\n\nThe following command failed:\n\n{args}"
+                f"\n\nReturn code: {exc.returncode}\n\n"
+            )
+        )
+        if exc.stdout:
+            print(f"Stdout:\n\n{exc.stdout}\n\n")
+        if exc.stderr:
+            print(f"Stderr:\n\n{exc.stderr}\n\n")
+        raise exc
 
 
-def execute_cmd_and_get_output(args: Sequence[Any],
-                               verbose: bool = False,
-                               **kwargs) -> Tuple[str, str]:
-  """Executes a command and returns its stdout and stderr
+def execute_cmd_and_get_output(
+    args: Sequence[Any], verbose: bool = False, **kwargs
+) -> Tuple[str, str]:
+    """Executes a command and returns its stdout and stderr
 
-  Same as execute_cmd except captures stdout and stderr.
-  """
-  exc = execute_cmd(args,
-                    verbose=verbose,
-                    stdout=subprocess.PIPE,
-                    stderr=subprocess.PIPE,
-                    **kwargs)
-  return exc.stdout.strip(), exc.stderr.strip()
+    Same as execute_cmd except captures stdout and stderr.
+    """
+    exc = execute_cmd(
+        args, verbose=verbose, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs
+    )
+    return exc.stdout.strip(), exc.stderr.strip()
 
 
-def execute_cmd_and_get_stdout(args: Sequence[Any],
-                               verbose: bool = False,
-                               **kwargs) -> str:
-  """Executes a command and returns its stdout.
+def execute_cmd_and_get_stdout(
+    args: Sequence[Any], verbose: bool = False, **kwargs
+) -> str:
+    """Executes a command and returns its stdout.
 
-  Same as execute_cmd except captures stdout (and not stderr).
-  """
-  stdout, _ = execute_cmd_and_get_output(args, verbose=verbose, **kwargs)
-  return stdout
+    Same as execute_cmd except captures stdout (and not stderr).
+    """
+    stdout, _ = execute_cmd_and_get_output(args, verbose=verbose, **kwargs)
+    return stdout
 
 
 def get_git_commit_hash(commit: str) -> str:
-  return execute_cmd_and_get_stdout(['git', 'rev-parse', commit],
-                                    cwd=pathlib.Path(__file__).resolve().parent)
+    return execute_cmd_and_get_stdout(
+        ["git", "rev-parse", commit], cwd=pathlib.Path(__file__).resolve().parent
+    )
 
 
 def get_iree_benchmark_module_arguments(
     results_filename: str,
     driver_info: DriverInfo,
-    benchmark_min_time: Optional[float] = None):
-  """Returns the common arguments to run iree-benchmark-module."""
+    benchmark_min_time: Optional[float] = None,
+):
+    """Returns the common arguments to run iree-benchmark-module."""
 
-  if driver_info.loader_name == "vmvx-module":
-    # VMVX is very unoptimized for now and can take a long time to run.
-    # Decrease the repetition for it until it's reasonably fast.
-    repetitions = 3
-  else:
-    repetitions = 10
+    if driver_info.loader_name == "vmvx-module":
+        # VMVX is very unoptimized for now and can take a long time to run.
+        # Decrease the repetition for it until it's reasonably fast.
+        repetitions = 3
+    else:
+        repetitions = 10
 
-  cmd = [
-      "--time_unit=ns",
-      "--benchmark_format=json",
-      "--benchmark_out_format=json",
-      f"--benchmark_out={results_filename}",
-      "--print_statistics=true",
-  ]
-  if benchmark_min_time:
-    cmd.extend([
-        f"--benchmark_min_time={benchmark_min_time}",
-    ])
-  else:
-    cmd.extend([
-        f"--benchmark_repetitions={repetitions}",
-    ])
+    cmd = [
+        "--time_unit=ns",
+        "--benchmark_format=json",
+        "--benchmark_out_format=json",
+        f"--benchmark_out={results_filename}",
+        "--print_statistics=true",
+    ]
+    if benchmark_min_time:
+        cmd.extend(
+            [
+                f"--benchmark_min_time={benchmark_min_time}",
+            ]
+        )
+    else:
+        cmd.extend(
+            [
+                f"--benchmark_repetitions={repetitions}",
+            ]
+        )
 
-  return cmd
+    return cmd
 
 
-def wait_for_iree_benchmark_module_start(process: subprocess.Popen,
-                                         verbose: bool = False) -> None:
-  """Wait for the start of iree-benchmark module; otherwise will see connection
-  failure when opening the catpure tool."""
+def wait_for_iree_benchmark_module_start(
+    process: subprocess.Popen, verbose: bool = False
+) -> None:
+    """Wait for the start of iree-benchmark module; otherwise will see connection
+    failure when opening the catpure tool."""
 
-  while True:
-    line = process.stdout.readline()  # pytype: disable=attribute-error
-    if line == "" and process.poll() is not None:  # Process completed
-      raise ValueError("Cannot find benchmark result line in the log!")
-    if verbose:
-      print(line.strip())
-    # Result available
-    if re.match(r"^BM_.+/real_time", line) is not None:
-      break
+    while True:
+        line = process.stdout.readline()  # pytype: disable=attribute-error
+        if line == "" and process.poll() is not None:  # Process completed
+            raise ValueError("Cannot find benchmark result line in the log!")
+        if verbose:
+            print(line.strip())
+        # Result available
+        if re.match(r"^BM_.+/real_time", line) is not None:
+            break
 
 
 class PlatformType(Enum):
-  ANDROID = "Android"
-  LINUX = "Linux"
+    ANDROID = "Android"
+    LINUX = "Linux"
 
 
 @dataclasses.dataclass(frozen=True)
 class DeviceInfo:
-  """An object describing a device.
+    """An object describing a device.
 
-  It includes the following characteristics:
-  - platform_type: the OS platform, e.g., 'Android'
-  - model: the product model, e.g., 'Pixel-4'
-  - cpu_abi: the CPU ABI, e.g., 'arm64-v8a', 'x86_64'
-  - cpu_uarch: the CPU microarchitecture, e.g., 'CascadeLake'
-  - cpu_features: the detailed CPU features, e.g., ['fphp', 'sve']
-  - gpu_name: the GPU name, e.g., 'Mali-G77'
-  """
+    It includes the following characteristics:
+    - platform_type: the OS platform, e.g., 'Android'
+    - model: the product model, e.g., 'Pixel-4'
+    - cpu_abi: the CPU ABI, e.g., 'arm64-v8a', 'x86_64'
+    - cpu_uarch: the CPU microarchitecture, e.g., 'CascadeLake'
+    - cpu_features: the detailed CPU features, e.g., ['fphp', 'sve']
+    - gpu_name: the GPU name, e.g., 'Mali-G77'
+    """
 
-  platform_type: PlatformType
-  model: str
-  cpu_abi: str
-  cpu_uarch: Optional[str]
-  cpu_features: Sequence[str]
-  gpu_name: str
+    platform_type: PlatformType
+    model: str
+    cpu_abi: str
+    cpu_uarch: Optional[str]
+    cpu_features: Sequence[str]
+    gpu_name: str
 
-  def __str__(self):
-    features = ", ".join(self.cpu_features)
-    params = [
-        f"model='{self.model}'",
-        f"cpu_abi='{self.cpu_abi}'",
-        f"cpu_uarch='{self.cpu_uarch}'",
-        f"gpu_name='{self.gpu_name}'",
-        f"cpu_features=[{features}]",
-    ]
-    params = ", ".join(params)
-    return f"{self.platform_type.value} device <{params}>"
+    def __str__(self):
+        features = ", ".join(self.cpu_features)
+        params = [
+            f"model='{self.model}'",
+            f"cpu_abi='{self.cpu_abi}'",
+            f"cpu_uarch='{self.cpu_uarch}'",
+            f"gpu_name='{self.gpu_name}'",
+            f"cpu_features=[{features}]",
+        ]
+        params = ", ".join(params)
+        return f"{self.platform_type.value} device <{params}>"
 
-  def get_cpu_arch(self) -> Optional[common_definitions.DeviceArchitecture]:
-    name = self.cpu_abi.lower()
-    if self.cpu_uarch:
-      name += f"-{self.cpu_uarch.lower()}"
+    def get_cpu_arch(self) -> Optional[common_definitions.DeviceArchitecture]:
+        name = self.cpu_abi.lower()
+        if self.cpu_uarch:
+            name += f"-{self.cpu_uarch.lower()}"
 
-    return CPU_ABI_TO_TARGET_ARCH_MAP.get(name)
+        return CPU_ABI_TO_TARGET_ARCH_MAP.get(name)
 
-  def get_gpu_arch(self) -> Optional[common_definitions.DeviceArchitecture]:
-    name = self.gpu_name.lower()
-    return GPU_NAME_TO_TARGET_ARCH_MAP.get(name)
+    def get_gpu_arch(self) -> Optional[common_definitions.DeviceArchitecture]:
+        name = self.gpu_name.lower()
+        return GPU_NAME_TO_TARGET_ARCH_MAP.get(name)
 
-  def get_detailed_cpu_arch_name(self) -> str:
-    """Returns the detailed architecture name."""
+    def get_detailed_cpu_arch_name(self) -> str:
+        """Returns the detailed architecture name."""
 
-    if self.cpu_abi == "arm64-v8a":
-      return self.__get_arm_cpu_arch_revision()
-    if self.cpu_abi == "x86_64":
-      return self.__get_x86_detailed_cpu_arch_name()
-    raise ValueError("Unrecognized CPU ABI; need to update the list")
+        if self.cpu_abi == "arm64-v8a":
+            return self.__get_arm_cpu_arch_revision()
+        if self.cpu_abi == "x86_64":
+            return self.__get_x86_detailed_cpu_arch_name()
+        raise ValueError("Unrecognized CPU ABI; need to update the list")
 
-  def to_json_object(self) -> Dict[str, Any]:
-    return {
-        "platform_type": self.platform_type.value,
-        "model": self.model,
-        "cpu_abi": self.cpu_abi,
-        "cpu_uarch": self.cpu_uarch if self.cpu_uarch else "",
-        "cpu_features": self.cpu_features,
-        "gpu_name": self.gpu_name,
-    }
+    def to_json_object(self) -> Dict[str, Any]:
+        return {
+            "platform_type": self.platform_type.value,
+            "model": self.model,
+            "cpu_abi": self.cpu_abi,
+            "cpu_uarch": self.cpu_uarch if self.cpu_uarch else "",
+            "cpu_features": self.cpu_features,
+            "gpu_name": self.gpu_name,
+        }
 
-  @staticmethod
-  def from_json_object(json_object: Dict[str, Any]):
-    cpu_uarch = json_object.get("cpu_uarch")
-    return DeviceInfo(PlatformType(json_object["platform_type"]),
-                      json_object["model"], json_object["cpu_abi"],
-                      None if cpu_uarch == "" else cpu_uarch,
-                      json_object["cpu_features"], json_object["gpu_name"])
+    @staticmethod
+    def from_json_object(json_object: Dict[str, Any]):
+        cpu_uarch = json_object.get("cpu_uarch")
+        return DeviceInfo(
+            PlatformType(json_object["platform_type"]),
+            json_object["model"],
+            json_object["cpu_abi"],
+            None if cpu_uarch == "" else cpu_uarch,
+            json_object["cpu_features"],
+            json_object["gpu_name"],
+        )
 
-  def __get_x86_detailed_cpu_arch_name(self) -> str:
-    """Returns the x86 architecture with microarchitecture name."""
+    def __get_x86_detailed_cpu_arch_name(self) -> str:
+        """Returns the x86 architecture with microarchitecture name."""
 
-    if not self.cpu_uarch:
-      return self.cpu_abi
+        if not self.cpu_uarch:
+            return self.cpu_abi
 
-    return f"{self.cpu_abi}-{self.cpu_uarch}"
+        return f"{self.cpu_abi}-{self.cpu_uarch}"
 
-  def __get_arm_cpu_arch_revision(self) -> str:
-    """Returns the ARM architecture revision."""
+    def __get_arm_cpu_arch_revision(self) -> str:
+        """Returns the ARM architecture revision."""
 
-    # CPU features for ARMv8 revisions.
-    # From https://en.wikichip.org/wiki/arm/armv8#ARMv8_Extensions_and_Processor_Features
-    rev1_features = ["atomics", "asimdrdm"]
-    rev2_features = [
-        "fphp", "dcpop", "sha3", "sm3", "sm4", "asimddp", "sha512", "sve"
-    ]
+        # CPU features for ARMv8 revisions.
+        # From https://en.wikichip.org/wiki/arm/armv8#ARMv8_Extensions_and_Processor_Features
+        rev1_features = ["atomics", "asimdrdm"]
+        rev2_features = [
+            "fphp",
+            "dcpop",
+            "sha3",
+            "sm3",
+            "sm4",
+            "asimddp",
+            "sha512",
+            "sve",
+        ]
 
-    rev = "ARMv8-A"
-    if any([f in self.cpu_features for f in rev1_features]):
-      rev = "ARMv8.1-A"
-    if any([f in self.cpu_features for f in rev2_features]):
-      rev = "ARMv8.2-A"
-    return rev
+        rev = "ARMv8-A"
+        if any([f in self.cpu_features for f in rev1_features]):
+            rev = "ARMv8.1-A"
+        if any([f in self.cpu_features for f in rev2_features]):
+            rev = "ARMv8.2-A"
+        return rev
 
 
 @dataclasses.dataclass(frozen=True)
 class BenchmarkInfo:
-  """An object describing the current benchmark.
+    """An object describing the current benchmark.
 
-  It includes the following benchmark characteristics:
-  - name: the benchmark name
-  - model_name: the model name, e.g., 'MobileNetV2'
-  - model_tags: a list of tags used to describe additional model information,
-      e.g., ['imagenet']
-  - model_source: the source of the model, e.g., 'TensorFlow'
-  - bench_mode: a list of tags for benchmark mode,
-      e.g., ['1-thread', 'big-core', 'full-inference']
-  - device_info: an DriverInfo object describing the IREE runtime dirver.
-  - device_info: an DeviceInfo object describing the device where benchmarks run
-  - compile_tags: an optional list of tags to describe the compile configs,
-      e.g., ['fuse-padding']
-  - runner: which runner is used for benchmarking, e.g., 'iree_vulkan', 'tflite'
-  - run_config_id: ID of the corresponding iree_definitions.E2EModelRunConfig.
-  """
+    It includes the following benchmark characteristics:
+    - name: the benchmark name
+    - model_name: the model name, e.g., 'MobileNetV2'
+    - model_tags: a list of tags used to describe additional model information,
+        e.g., ['imagenet']
+    - model_source: the source of the model, e.g., 'TensorFlow'
+    - bench_mode: a list of tags for benchmark mode,
+        e.g., ['1-thread', 'big-core', 'full-inference']
+    - driver_info: a DriverInfo object describing the IREE runtime driver.
+    - device_info: a DeviceInfo object describing the device where benchmarks run
+    - compile_tags: an optional list of tags to describe the compile configs,
+        e.g., ['fuse-padding']
+    - runner: which runner is used for benchmarking, e.g., 'iree_vulkan', 'tflite'
+    - run_config_id: ID of the corresponding iree_definitions.E2EModelRunConfig.
+    """
 
-  name: str
-  model_name: str
-  model_tags: Sequence[str]
-  model_source: str
-  bench_mode: Sequence[str]
-  driver_info: DriverInfo
-  device_info: DeviceInfo
-  compile_tags: Optional[Sequence[str]] = None
-  run_config_id: Optional[str] = None
+    name: str
+    model_name: str
+    model_tags: Sequence[str]
+    model_source: str
+    bench_mode: Sequence[str]
+    driver_info: DriverInfo
+    device_info: DeviceInfo
+    compile_tags: Optional[Sequence[str]] = None
+    run_config_id: Optional[str] = None
 
-  def __str__(self):
-    return self.name
+    def __str__(self):
+        return self.name
 
-  def to_json_object(self) -> Dict[str, Any]:
-    return {
-        "name": self.name,
-        "model_name": self.model_name,
-        "model_tags": self.model_tags,
-        "model_source": self.model_source,
-        "bench_mode": self.bench_mode,
-        "compile_tags": self.compile_tags,
-        # Get the "iree-*" driver name from the DriverInfo.
-        "runner": IREE_PRETTY_NAME_TO_DRIVER_NAME[self.driver_info.pretty_name],
-        "device_info": self.device_info.to_json_object(),
-        "run_config_id": self.run_config_id
-    }
+    def to_json_object(self) -> Dict[str, Any]:
+        return {
+            "name": self.name,
+            "model_name": self.model_name,
+            "model_tags": self.model_tags,
+            "model_source": self.model_source,
+            "bench_mode": self.bench_mode,
+            "compile_tags": self.compile_tags,
+            # Get the "iree-*" driver name from the DriverInfo.
+            "runner": IREE_PRETTY_NAME_TO_DRIVER_NAME[self.driver_info.pretty_name],
+            "device_info": self.device_info.to_json_object(),
+            "run_config_id": self.run_config_id,
+        }
 
-  @staticmethod
-  def from_json_object(json_object: Dict[str, Any]):
-    driver_info = IREE_DRIVERS_INFOS.get(json_object["runner"])
-    if not driver_info:
-      raise ValueError(f"Unrecognized runner: {json_object['runner']}")
+    @staticmethod
+    def from_json_object(json_object: Dict[str, Any]):
+        driver_info = IREE_DRIVERS_INFOS.get(json_object["runner"])
+        if not driver_info:
+            raise ValueError(f"Unrecognized runner: {json_object['runner']}")
 
-    return BenchmarkInfo(name=json_object["name"],
-                         model_name=json_object["model_name"],
-                         model_tags=json_object["model_tags"],
-                         model_source=json_object["model_source"],
-                         bench_mode=json_object["bench_mode"],
-                         compile_tags=json_object.get("compile_tags"),
-                         driver_info=driver_info,
-                         device_info=DeviceInfo.from_json_object(
-                             json_object["device_info"]),
-                         run_config_id=json_object.get("run_config_id"))
+        return BenchmarkInfo(
+            name=json_object["name"],
+            model_name=json_object["model_name"],
+            model_tags=json_object["model_tags"],
+            model_source=json_object["model_source"],
+            bench_mode=json_object["bench_mode"],
+            compile_tags=json_object.get("compile_tags"),
+            driver_info=driver_info,
+            device_info=DeviceInfo.from_json_object(json_object["device_info"]),
+            run_config_id=json_object.get("run_config_id"),
+        )
 
 
 @dataclasses.dataclass(frozen=True)
 class BenchmarkLatency:
-  """Stores latency statistics for a benchmark run."""
-  mean: int
-  median: int
-  stddev: int
-  unit: str
+    """Stores latency statistics for a benchmark run."""
 
-  def to_json_object(self) -> Dict[str, Any]:
-    return dataclasses.asdict(self)
+    mean: int
+    median: int
+    stddev: int
+    unit: str
 
-  @staticmethod
-  def from_json_object(json_object: Dict[str, Any]):
-    return BenchmarkLatency(**json_object)
+    def to_json_object(self) -> Dict[str, Any]:
+        return dataclasses.asdict(self)
+
+    @staticmethod
+    def from_json_object(json_object: Dict[str, Any]):
+        return BenchmarkLatency(**json_object)
 
 
 def _get_google_benchmark_latencies(
-    benchmark_json: Dict[str,
-                         Any]) -> Tuple[BenchmarkLatency, BenchmarkLatency]:
-  """Returns the Google Benchmark aggregate latencies.
+    benchmark_json: Dict[str, Any]
+) -> Tuple[BenchmarkLatency, BenchmarkLatency]:
+    """Returns the Google Benchmark aggregate latencies.
 
     Args:
       benchmark_json: The JSON string or object returned by Google Benchmark.
@@ -389,267 +396,276 @@
     Returns:
       Real time and CPU time BenchmarkLatency.
     """
-  real_time_object: Dict[str, Any] = dict(unit="ns")
-  cpu_time_object: Dict[str, Any] = dict(unit="ns")
-  metrics = ["mean", "median", "stddev"]
-  for case in benchmark_json["benchmarks"]:
-    if any(case["name"].endswith(f"real_time_{m}") for m in metrics):
-      if case["time_unit"] != "ns":
-        raise ValueError(f"Expected ns as time unit")
-      metric = case["name"].split("_")[-1]
-      real_time_object[metric] = int(round(case["real_time"]))
-      cpu_time_object[metric] = int(round(case["cpu_time"]))
+    real_time_object: Dict[str, Any] = dict(unit="ns")
+    cpu_time_object: Dict[str, Any] = dict(unit="ns")
+    metrics = ["mean", "median", "stddev"]
+    for case in benchmark_json["benchmarks"]:
+        if any(case["name"].endswith(f"real_time_{m}") for m in metrics):
+            if case["time_unit"] != "ns":
+                raise ValueError(f"Expected ns as time unit")
+            metric = case["name"].split("_")[-1]
+            real_time_object[metric] = int(round(case["real_time"]))
+            cpu_time_object[metric] = int(round(case["cpu_time"]))
 
-  # from_json_object implicitly validates that all metrics were found.
-  real_time = BenchmarkLatency.from_json_object(real_time_object)
-  cpu_time = BenchmarkLatency.from_json_object(cpu_time_object)
-  return real_time, cpu_time
+    # from_json_object implicitly validates that all metrics were found.
+    real_time = BenchmarkLatency.from_json_object(real_time_object)
+    cpu_time = BenchmarkLatency.from_json_object(cpu_time_object)
+    return real_time, cpu_time
 
 
 @dataclasses.dataclass(frozen=True)
 class BenchmarkMemory:
-  """Stores memory statistics for a benchmark run."""
-  peak: int
-  allocated: int
-  freed: int
-  live: int
-  unit: str
+    """Stores memory statistics for a benchmark run."""
 
-  def to_json_object(self) -> Dict[str, int]:
-    return dataclasses.asdict(self)
+    peak: int
+    allocated: int
+    freed: int
+    live: int
+    unit: str
 
-  @staticmethod
-  def from_json_object(json_object: Dict[str, Any]):
-    return BenchmarkMemory(**json_object)
+    def to_json_object(self) -> Dict[str, int]:
+        return dataclasses.asdict(self)
+
+    @staticmethod
+    def from_json_object(json_object: Dict[str, Any]):
+        return BenchmarkMemory(**json_object)
 
 
-def _get_iree_memory_statistics(benchmark_stderr: str,
-                                device: str) -> BenchmarkMemory:
-  """Extracts IREE's memory statistics for a given device."""
-  # The memory statistics for each device are listed on their own line.
-  pattern = (rf"{device}:"
-             r"\s*(?P<peak>\d+)B peak /"
-             r"\s*(?P<allocated>\d+)B allocated /"
-             r"\s*(?P<freed>\d+)B freed /"
-             r"\s*(?P<live>\d+)B live")
-  match = re.search(pattern, benchmark_stderr)
-  if match is None:
-    raise ValueError(
-        f"Unable to find memory statistics in '{benchmark_stderr}'")
-  return BenchmarkMemory(
-      peak=int(match["peak"]),
-      allocated=int(match["allocated"]),
-      freed=int(match["freed"]),
-      live=int(match["live"]),
-      unit="bytes",
-  )
+def _get_iree_memory_statistics(benchmark_stderr: str, device: str) -> BenchmarkMemory:
+    """Extracts IREE's memory statistics for a given device."""
+    # The memory statistics for each device are listed on their own line.
+    pattern = (
+        rf"{device}:"
+        r"\s*(?P<peak>\d+)B peak /"
+        r"\s*(?P<allocated>\d+)B allocated /"
+        r"\s*(?P<freed>\d+)B freed /"
+        r"\s*(?P<live>\d+)B live"
+    )
+    match = re.search(pattern, benchmark_stderr)
+    if match is None:
+        raise ValueError(f"Unable to find memory statistics in '{benchmark_stderr}'")
+    return BenchmarkMemory(
+        peak=int(match["peak"]),
+        allocated=int(match["allocated"]),
+        freed=int(match["freed"]),
+        live=int(match["live"]),
+        unit="bytes",
+    )
 
 
 @dataclasses.dataclass(frozen=True)
 class BenchmarkMetrics(object):
-  """An object describing the results from a single benchmark.
+    """An object describing the results from a single benchmark.
 
-  - real_time: the real time latency statistics returned by the benchmarking
-      framework.
-  - cpu_time: the cpu time latency statistics returned by the benchmarking
-      framework.
-  - host_memory: the host memory statistics returned by the benchmarking
-      framework.
-  - device_memory: the device memory statistics returned by the benchmarking
-      framework.
-  - raw_data: additional JSON-compatible raw results returned by the
-      benchmarking framework.
-  """
-  real_time: BenchmarkLatency
-  cpu_time: BenchmarkLatency
-  host_memory: BenchmarkMemory
-  device_memory: BenchmarkMemory
-  raw_data: Dict[str, Any]
+    - real_time: the real time latency statistics returned by the benchmarking
+        framework.
+    - cpu_time: the cpu time latency statistics returned by the benchmarking
+        framework.
+    - host_memory: the host memory statistics returned by the benchmarking
+        framework.
+    - device_memory: the device memory statistics returned by the benchmarking
+        framework.
+    - raw_data: additional JSON-compatible raw results returned by the
+        benchmarking framework.
+    """
 
-  def to_json_object(self) -> Dict[str, Any]:
-    return {
-        "real_time": self.real_time.to_json_object(),
-        "cpu_time": self.cpu_time.to_json_object(),
-        "host_memory": self.host_memory.to_json_object(),
-        "device_memory": self.device_memory.to_json_object(),
-        "raw_data": self.raw_data,
-    }
+    real_time: BenchmarkLatency
+    cpu_time: BenchmarkLatency
+    host_memory: BenchmarkMemory
+    device_memory: BenchmarkMemory
+    raw_data: Dict[str, Any]
 
-  @staticmethod
-  def from_json_object(json_object: Dict[str, Any]):
+    def to_json_object(self) -> Dict[str, Any]:
+        return {
+            "real_time": self.real_time.to_json_object(),
+            "cpu_time": self.cpu_time.to_json_object(),
+            "host_memory": self.host_memory.to_json_object(),
+            "device_memory": self.device_memory.to_json_object(),
+            "raw_data": self.raw_data,
+        }
+
+    @staticmethod
+    def from_json_object(json_object: Dict[str, Any]):
+        return BenchmarkMetrics(
+            real_time=BenchmarkLatency.from_json_object(json_object["real_time"]),
+            cpu_time=BenchmarkLatency.from_json_object(json_object["cpu_time"]),
+            host_memory=BenchmarkMemory.from_json_object(json_object["host_memory"]),
+            device_memory=BenchmarkMemory.from_json_object(
+                json_object["device_memory"]
+            ),
+            raw_data=json_object["raw_data"],
+        )
+
+
+def parse_iree_benchmark_metrics(
+    benchmark_stdout: str, benchmark_stderr: str
+) -> BenchmarkMetrics:
+    """Extract benchmark metrics from the output of iree-benchmark-module.
+
+    Args:
+      benchmark_stdout: The stdout of iree-benchmark-module with
+        --benchmark_format=json.
+      benchmark_stderr: The stderr of iree-benchmark-module with
+        --print_statistics=true.
+
+    Returns:
+      A populated BenchmarkMetrics dataclass.
+    """
+    benchmark_json = json.loads(benchmark_stdout)
+    real_time, cpu_time = _get_google_benchmark_latencies(benchmark_json)
     return BenchmarkMetrics(
-        real_time=BenchmarkLatency.from_json_object(json_object["real_time"]),
-        cpu_time=BenchmarkLatency.from_json_object(json_object["cpu_time"]),
-        host_memory=BenchmarkMemory.from_json_object(
-            json_object["host_memory"]),
-        device_memory=BenchmarkMemory.from_json_object(
-            json_object["device_memory"]),
-        raw_data=json_object["raw_data"],
+        real_time=real_time,
+        cpu_time=cpu_time,
+        host_memory=_get_iree_memory_statistics(benchmark_stderr, "HOST_LOCAL"),
+        device_memory=_get_iree_memory_statistics(benchmark_stderr, "DEVICE_LOCAL"),
+        raw_data=benchmark_json,
     )
 
 
-def parse_iree_benchmark_metrics(benchmark_stdout: str,
-                                 benchmark_stderr: str) -> BenchmarkMetrics:
-  """Extract benchmark metrics from the output of iree-benchmark-module.
-
-  Args:
-    benchmark_stdout: The stdout of iree-benchmark-module with
-      --benchmark_format=json.
-    benchmark_stdout: The stderr of iree-benchmark-module with
-      --print_statistics=true.
-
-  Returns:
-    A populated BenchmarkMetrics dataclass.
-  """
-  benchmark_json = json.loads(benchmark_stdout)
-  real_time, cpu_time = _get_google_benchmark_latencies(benchmark_json)
-  return BenchmarkMetrics(
-      real_time=real_time,
-      cpu_time=cpu_time,
-      host_memory=_get_iree_memory_statistics(benchmark_stderr, "HOST_LOCAL"),
-      device_memory=_get_iree_memory_statistics(benchmark_stderr,
-                                                "DEVICE_LOCAL"),
-      raw_data=benchmark_json,
-  )
-
-
 @dataclasses.dataclass(frozen=True)
 class BenchmarkRun(object):
-  """An object describing a single run of the benchmark binary.
+    """An object describing a single run of the benchmark binary.
 
-  - info: a BenchmarkInfo object describing the benchmark setup.
-  - metrics: a BenchmarkMetrics object containing the results of the benchmark.
-  """
-  info: BenchmarkInfo
-  metrics: BenchmarkMetrics
+    - info: a BenchmarkInfo object describing the benchmark setup.
+    - metrics: a BenchmarkMetrics object containing the results of the benchmark.
+    """
 
-  def to_json_object(self) -> Dict[str, Any]:
-    return {
-        "info": self.info.to_json_object(),
-        "metrics": self.metrics.to_json_object(),
-    }
+    info: BenchmarkInfo
+    metrics: BenchmarkMetrics
 
-  @staticmethod
-  def from_json_object(json_object: Dict[str, Any]):
-    return BenchmarkRun(
-        BenchmarkInfo.from_json_object(json_object["info"]),
-        BenchmarkMetrics.from_json_object(json_object["metrics"]),
-    )
+    def to_json_object(self) -> Dict[str, Any]:
+        return {
+            "info": self.info.to_json_object(),
+            "metrics": self.metrics.to_json_object(),
+        }
+
+    @staticmethod
+    def from_json_object(json_object: Dict[str, Any]):
+        return BenchmarkRun(
+            BenchmarkInfo.from_json_object(json_object["info"]),
+            BenchmarkMetrics.from_json_object(json_object["metrics"]),
+        )
 
 
 class BenchmarkResults(object):
-  """An object describing a set of benchmarks for one particular commit.
+    """An object describing a set of benchmarks for one particular commit.
 
     It contains the following fields:
     - commit: the commit SHA for this set of benchmarks.
     - benchmarks: a list of BenchmarkRun objects
     """
 
-  def __init__(self):
-    self.commit: str = "<unknown>"
-    self.benchmarks: List[BenchmarkRun] = []
+    def __init__(self):
+        self.commit: str = "<unknown>"
+        self.benchmarks: List[BenchmarkRun] = []
 
-  def set_commit(self, commit: str):
-    self.commit = commit
+    def set_commit(self, commit: str):
+        self.commit = commit
 
-  def merge(self, other):
-    if self.commit != other.commit:
-      raise ValueError("Inconsistent pull request commit")
-    self.benchmarks.extend(other.benchmarks)
+    def merge(self, other):
+        if self.commit != other.commit:
+            raise ValueError("Inconsistent pull request commit")
+        self.benchmarks.extend(other.benchmarks)
 
-  def to_json_str(self) -> str:
-    json_object = {"commit": self.commit, "benchmarks": []}
-    json_object["benchmarks"] = [b.to_json_object() for b in self.benchmarks]
-    return json.dumps(json_object, indent=2)
+    def to_json_str(self) -> str:
+        json_object = {"commit": self.commit, "benchmarks": []}
+        json_object["benchmarks"] = [b.to_json_object() for b in self.benchmarks]
+        return json.dumps(json_object, indent=2)
 
-  @staticmethod
-  def from_json_str(json_str: str):
-    json_object = json.loads(json_str)
-    results = BenchmarkResults()
-    results.set_commit(json_object["commit"])
-    results.benchmarks = [
-        BenchmarkRun.from_json_object(b) for b in json_object["benchmarks"]
-    ]
-    return results
+    @staticmethod
+    def from_json_str(json_str: str):
+        json_object = json.loads(json_str)
+        results = BenchmarkResults()
+        results.set_commit(json_object["commit"])
+        results.benchmarks = [
+            BenchmarkRun.from_json_object(b) for b in json_object["benchmarks"]
+        ]
+        return results
 
 
 @dataclasses.dataclass(frozen=True)
 class CompilationInfo(object):
-  name: str
-  model_name: str
-  model_tags: Tuple[str]
-  model_source: str
-  target_arch: str
-  compile_tags: Tuple[str]
-  gen_config_id: Optional[str] = None
+    name: str
+    model_name: str
+    model_tags: Tuple[str]
+    model_source: str
+    target_arch: str
+    compile_tags: Tuple[str]
+    gen_config_id: Optional[str] = None
 
-  def __str__(self):
-    return self.name
+    def __str__(self):
+        return self.name
 
-  @staticmethod
-  def from_json_object(json_object: Dict[str, Any]):
-    return CompilationInfo(name=json_object["name"],
-                           model_name=json_object["model_name"],
-                           model_tags=tuple(json_object["model_tags"]),
-                           model_source=json_object["model_source"],
-                           target_arch=json_object["target_arch"],
-                           compile_tags=tuple(json_object["compile_tags"]),
-                           gen_config_id=json_object.get("gen_config_id"))
+    @staticmethod
+    def from_json_object(json_object: Dict[str, Any]):
+        return CompilationInfo(
+            name=json_object["name"],
+            model_name=json_object["model_name"],
+            model_tags=tuple(json_object["model_tags"]),
+            model_source=json_object["model_source"],
+            target_arch=json_object["target_arch"],
+            compile_tags=tuple(json_object["compile_tags"]),
+            gen_config_id=json_object.get("gen_config_id"),
+        )
 
 
 @dataclasses.dataclass(frozen=True)
 class ModuleComponentSizes(object):
-  file_bytes: int
-  vm_component_bytes: int
-  const_component_bytes: int
-  total_dispatch_component_bytes: int
+    file_bytes: int
+    vm_component_bytes: int
+    const_component_bytes: int
+    total_dispatch_component_bytes: int
 
-  @staticmethod
-  def from_json_object(json_object: Dict[str, Any]):
-    return ModuleComponentSizes(**json_object)
+    @staticmethod
+    def from_json_object(json_object: Dict[str, Any]):
+        return ModuleComponentSizes(**json_object)
 
 
 @dataclasses.dataclass(frozen=True)
 class IRStatistics(object):
-  # Number of cmd.dispatch ops in IR.
-  stream_dispatch_count: int
+    # Number of cmd.dispatch ops in IR.
+    stream_dispatch_count: int
 
-  @staticmethod
-  def from_json_object(json_object: Dict[str, Any]):
-    return IRStatistics(**json_object)
+    @staticmethod
+    def from_json_object(json_object: Dict[str, Any]):
+        return IRStatistics(**json_object)
 
 
 @dataclasses.dataclass(frozen=True)
 class CompilationStatistics(object):
-  compilation_info: CompilationInfo
-  # Module file and component sizes.
-  module_component_sizes: ModuleComponentSizes
-  # Module compilation time in ms.
-  compilation_time_ms: int
-  # IR-level statistics
-  ir_stats: IRStatistics
+    compilation_info: CompilationInfo
+    # Module file and component sizes.
+    module_component_sizes: ModuleComponentSizes
+    # Module compilation time in ms.
+    compilation_time_ms: int
+    # IR-level statistics
+    ir_stats: IRStatistics
 
-  @staticmethod
-  def from_json_object(json_object: Dict[str, Any]):
-    return CompilationStatistics(
-        compilation_info=CompilationInfo.from_json_object(
-            json_object["compilation_info"]),
-        module_component_sizes=ModuleComponentSizes.from_json_object(
-            json_object["module_component_sizes"]),
-        compilation_time_ms=json_object["compilation_time_ms"],
-        ir_stats=IRStatistics.from_json_object(json_object["ir_stats"]))
+    @staticmethod
+    def from_json_object(json_object: Dict[str, Any]):
+        return CompilationStatistics(
+            compilation_info=CompilationInfo.from_json_object(
+                json_object["compilation_info"]
+            ),
+            module_component_sizes=ModuleComponentSizes.from_json_object(
+                json_object["module_component_sizes"]
+            ),
+            compilation_time_ms=json_object["compilation_time_ms"],
+            ir_stats=IRStatistics.from_json_object(json_object["ir_stats"]),
+        )
 
 
 @dataclasses.dataclass(frozen=True)
 class CompilationResults(object):
-  commit: str
-  compilation_statistics: Sequence[CompilationStatistics]
+    commit: str
+    compilation_statistics: Sequence[CompilationStatistics]
 
-  @staticmethod
-  def from_json_object(json_object: Dict[str, Any]):
-    return CompilationResults(
-        commit=json_object["commit"],
-        compilation_statistics=[
-            CompilationStatistics.from_json_object(obj)
-            for obj in json_object["compilation_statistics"]
-        ])
+    @staticmethod
+    def from_json_object(json_object: Dict[str, Any]):
+        return CompilationResults(
+            commit=json_object["commit"],
+            compilation_statistics=[
+                CompilationStatistics.from_json_object(obj)
+                for obj in json_object["compilation_statistics"]
+            ],
+        )
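
To illustrate how the helpers above fit together (a sketch under assumptions, not part of this diff; the `--module`/`--device` flags passed to iree-benchmark-module are examples and not taken from this change):

```python
# Hypothetical end-to-end use of the benchmark_definition helpers.
from common.benchmark_definition import (
    IREE_DRIVERS_INFOS,
    execute_cmd_and_get_output,
    get_iree_benchmark_module_arguments,
    parse_iree_benchmark_metrics,
)

driver_info = IREE_DRIVERS_INFOS["iree-llvm-cpu"]
cmd = [
    "iree-benchmark-module",
    "--module=model.vmfb",  # assumed module path
    f"--device={driver_info.driver_name}",
] + get_iree_benchmark_module_arguments(
    results_filename="results.json", driver_info=driver_info
)
# --print_statistics=true (added by the helper above) makes the memory
# statistics appear on stderr, which parse_iree_benchmark_metrics expects.
stdout, stderr = execute_cmd_and_get_output(cmd, verbose=True)
metrics = parse_iree_benchmark_metrics(stdout, stderr)
print(metrics.real_time.mean, metrics.real_time.unit)
```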
diff --git a/build_tools/benchmarks/common/benchmark_driver.py b/build_tools/benchmarks/common/benchmark_driver.py
index efa168c..49b7fd6 100644
--- a/build_tools/benchmarks/common/benchmark_driver.py
+++ b/build_tools/benchmarks/common/benchmark_driver.py
@@ -10,245 +10,279 @@
 from typing import List, Optional, Sequence, Set, Tuple
 from common.benchmark_suite import BenchmarkCase, BenchmarkSuite
 from common.benchmark_config import BenchmarkConfig
-from common.benchmark_definition import (BenchmarkInfo, BenchmarkResults,
-                                         BenchmarkMetrics, BenchmarkRun,
-                                         DeviceInfo)
+from common.benchmark_definition import (
+    BenchmarkInfo,
+    BenchmarkResults,
+    BenchmarkMetrics,
+    BenchmarkRun,
+    DeviceInfo,
+)
 
 
 class BenchmarkDriver(object):
-  """Abstract driver runs the whole benchmark flow."""
+    """Abstract driver runs the whole benchmark flow."""
 
-  def __init__(self,
-               device_info: DeviceInfo,
-               benchmark_config: BenchmarkConfig,
-               benchmark_suite: BenchmarkSuite,
-               benchmark_grace_time: float = 0.0,
-               verbose: bool = False):
-    self.device_info = device_info
-    self.config = benchmark_config
-    self.benchmark_suite = benchmark_suite
-    self.benchmark_grace_time = benchmark_grace_time
-    self.verbose = verbose
-    self.finished_benchmarks: List[Tuple[BenchmarkInfo, pathlib.Path]] = []
-    self.finished_captures: List[pathlib.Path] = []
-    self.benchmark_errors = []
-    self._seen_benchmark_names: Set[str] = set()
+    def __init__(
+        self,
+        device_info: DeviceInfo,
+        benchmark_config: BenchmarkConfig,
+        benchmark_suite: BenchmarkSuite,
+        benchmark_grace_time: float = 0.0,
+        verbose: bool = False,
+    ):
+        self.device_info = device_info
+        self.config = benchmark_config
+        self.benchmark_suite = benchmark_suite
+        self.benchmark_grace_time = benchmark_grace_time
+        self.verbose = verbose
+        self.finished_benchmarks: List[Tuple[BenchmarkInfo, pathlib.Path]] = []
+        self.finished_captures: List[pathlib.Path] = []
+        self.benchmark_errors = []
+        self._seen_benchmark_names: Set[str] = set()
 
-  def run_benchmark_case(self, benchmark_case: BenchmarkCase,
-                         benchmark_results_filename: Optional[pathlib.Path],
-                         capture_filename: Optional[pathlib.Path]) -> None:
-    """Runs the benchmark case and serializes the results.
+    def run_benchmark_case(
+        self,
+        benchmark_case: BenchmarkCase,
+        benchmark_results_filename: Optional[pathlib.Path],
+        capture_filename: Optional[pathlib.Path],
+    ) -> None:
+        """Runs the benchmark case and serializes the results.
 
-    Args:
-      benchmark_case: the benchmark_case.
-      benchmark_results_filename: the path to store the serialized
-        BenchmarkMetrics. Benchmarking is required if set.
-      capture_filename: the path to store captured trace. Trace capturing is
-        required if set.
+        Args:
+          benchmark_case: the benchmark_case.
+          benchmark_results_filename: the path to store the serialized
+            BenchmarkMetrics. Benchmarking is required if set.
+          capture_filename: the path to store captured trace. Trace capturing is
+            required if set.
 
-    Raises:
-      Exception during benchmarking.
-    """
-    raise NotImplementedError("Should be overwritten by a subclass.")
+        Raises:
+          Exception during benchmarking.
+        """
+        raise NotImplementedError("Should be overridden by a subclass.")
 
-  def run(self) -> None:
-    """Execute the benchmark flow.
+    def run(self) -> None:
+        """Execute the benchmark flow.
 
-    It performs the following steps:
-      1. Enumerate and filter benchmark cases.
-      2. Call 'run_benchmark_case' for each benchmark case.
-      3. Collect the benchmark results and captures.
-    """
+        It performs the following steps:
+          1. Enumerate and filter benchmark cases.
+          2. Call 'run_benchmark_case' for each benchmark case.
+          3. Collect the benchmark results and captures.
+        """
 
-    self.config.benchmark_results_dir.mkdir(parents=True, exist_ok=True)
-    if self.config.trace_capture_config is not None:
-      self.config.trace_capture_config.capture_tmp_dir.mkdir(parents=True,
-                                                             exist_ok=True)
+        self.config.benchmark_results_dir.mkdir(parents=True, exist_ok=True)
+        if self.config.trace_capture_config is not None:
+            self.config.trace_capture_config.capture_tmp_dir.mkdir(
+                parents=True, exist_ok=True
+            )
 
-    cpu_target_arch = self.device_info.get_cpu_arch()
-    gpu_target_arch = self.device_info.get_gpu_arch()
-    detected_architectures = [
-        arch for arch in [cpu_target_arch, gpu_target_arch] if arch is not None
-    ]
-    if self.config.use_compatible_filter:
-      if cpu_target_arch is None:
-        print("INFO: Detected unsupported CPU architecture in"
-              f' "{self.device_info}", CPU benchmarking is disabled.')
-      if gpu_target_arch is None:
-        print("INFO: Detected unsupported GPU architecture in"
-              f' "{self.device_info}", GPU benchmarking is disabled.')
-      compatible_arch_filter = detected_architectures
-    else:
-      # No compatible filter on the target architectures.
-      compatible_arch_filter = None
+        cpu_target_arch = self.device_info.get_cpu_arch()
+        gpu_target_arch = self.device_info.get_gpu_arch()
+        detected_architectures = [
+            arch for arch in [cpu_target_arch, gpu_target_arch] if arch is not None
+        ]
+        if self.config.use_compatible_filter:
+            if cpu_target_arch is None:
+                print(
+                    "INFO: Detected unsupported CPU architecture in"
+                    f' "{self.device_info}", CPU benchmarking is disabled.'
+                )
+            if gpu_target_arch is None:
+                print(
+                    "INFO: Detected unsupported GPU architecture in"
+                    f' "{self.device_info}", GPU benchmarking is disabled.'
+                )
+            compatible_arch_filter = detected_architectures
+        else:
+            # No compatible filter on the target architectures.
+            compatible_arch_filter = None
 
-    drivers, loaders = self.__get_available_drivers_and_loaders()
+        drivers, loaders = self.__get_available_drivers_and_loaders()
 
-    benchmark_cases = self.benchmark_suite.filter_benchmarks(
-        available_drivers=drivers,
-        available_loaders=loaders,
-        target_architectures=compatible_arch_filter,
-        driver_filter=self.config.driver_filter,
-        mode_filter=self.config.mode_filter,
-        model_name_filter=self.config.model_name_filter)
+        benchmark_cases = self.benchmark_suite.filter_benchmarks(
+            available_drivers=drivers,
+            available_loaders=loaders,
+            target_architectures=compatible_arch_filter,
+            driver_filter=self.config.driver_filter,
+            mode_filter=self.config.mode_filter,
+            model_name_filter=self.config.model_name_filter,
+        )
 
-    for benchmark_case in benchmark_cases:
-      benchmark_info = self.__get_benchmark_info_from_case(
-          benchmark_case=benchmark_case)
-      benchmark_name = str(benchmark_info)
+        for benchmark_case in benchmark_cases:
+            benchmark_info = self.__get_benchmark_info_from_case(
+                benchmark_case=benchmark_case
+            )
+            benchmark_name = str(benchmark_info)
 
-      if benchmark_case.target_arch not in detected_architectures:
-        print(f"WARNING: Benchmark '{benchmark_name}' may be incompatible"
-              f" with the detected architectures '{detected_architectures}'"
-              f" on the device. Pass --compatible-only to skip incompatible"
-              f" benchmarks.")
+            if benchmark_case.target_arch not in detected_architectures:
+                print(
+                    f"WARNING: Benchmark '{benchmark_name}' may be incompatible"
+                    f" with the detected architectures '{detected_architectures}'"
+                    f" on the device. Pass --compatible-only to skip incompatible"
+                    f" benchmarks."
+                )
 
-      # Sanity check for the uniqueness of benchmark names.
-      if benchmark_name in self._seen_benchmark_names:
-        raise ValueError(
-            f"Found duplicate benchmark {benchmark_name} in the suites.")
-      self._seen_benchmark_names.add(benchmark_name)
+            # Sanity check for the uniqueness of benchmark names.
+            if benchmark_name in self._seen_benchmark_names:
+                raise ValueError(
+                    f"Found duplicate benchmark {benchmark_name} in the suites."
+                )
+            self._seen_benchmark_names.add(benchmark_name)
 
-      results_path, capture_path = self.__get_output_paths(benchmark_name)
-      # If we continue from the previous results, check and skip if the result
-      # files exist.
-      if self.config.continue_from_previous:
-        if results_path is not None and results_path.exists():
-          self.finished_benchmarks.append((benchmark_info, results_path))
-          results_path = None
+            results_path, capture_path = self.__get_output_paths(benchmark_name)
+            # If we continue from the previous results, check and skip if the result
+            # files exist.
+            if self.config.continue_from_previous:
+                if results_path is not None and results_path.exists():
+                    self.finished_benchmarks.append((benchmark_info, results_path))
+                    results_path = None
 
-        if capture_path is not None and capture_path.exists():
-          self.finished_captures.append(capture_path)
-          capture_path = None
+                if capture_path is not None and capture_path.exists():
+                    self.finished_captures.append(capture_path)
+                    capture_path = None
 
-      # Skip if no need to benchmark and capture.
-      if results_path is None and capture_path is None:
-        continue
+            # Skip if no need to benchmark and capture.
+            if results_path is None and capture_path is None:
+                continue
 
-      print(f"--> Benchmark started: {benchmark_name} <--")
+            print(f"--> Benchmark started: {benchmark_name} <--")
 
-      try:
-        self.run_benchmark_case(benchmark_case, results_path, capture_path)
-      except Exception as e:
-        # Delete unfinished results if they exist.
-        if results_path is not None:
-          results_path.unlink(missing_ok=True)
-        if capture_path is not None:
-          capture_path.unlink(missing_ok=True)
+            try:
+                self.run_benchmark_case(benchmark_case, results_path, capture_path)
+            except Exception as e:
+                # Delete unfinished results if they exist.
+                if results_path is not None:
+                    results_path.unlink(missing_ok=True)
+                if capture_path is not None:
+                    capture_path.unlink(missing_ok=True)
 
-        if not self.config.keep_going:
-          raise e
+                if not self.config.keep_going:
+                    raise e
 
-        print(f"Processing of benchmark failed with: {e}")
-        self.benchmark_errors.append(e)
-        continue
-      finally:
-        # Some grace time.
-        time.sleep(self.benchmark_grace_time)
+                print(f"Processing of benchmark failed with: {e}")
+                self.benchmark_errors.append(e)
+                continue
+            finally:
+                # Some grace time.
+                time.sleep(self.benchmark_grace_time)
 
-      print("Benchmark completed")
+            print("Benchmark completed")
 
-      if results_path:
-        self.finished_benchmarks.append((benchmark_info, results_path))
-      if capture_path:
-        self.finished_captures.append(capture_path)
+            if results_path:
+                self.finished_benchmarks.append((benchmark_info, results_path))
+            if capture_path:
+                self.finished_captures.append(capture_path)
 
-  def get_benchmark_results(self) -> BenchmarkResults:
-    """Returns the finished benchmark results."""
+    def get_benchmark_results(self) -> BenchmarkResults:
+        """Returns the finished benchmark results."""
 
-    results = BenchmarkResults()
-    results.set_commit(self.config.git_commit_hash)
+        results = BenchmarkResults()
+        results.set_commit(self.config.git_commit_hash)
 
-    finished_benchmarks = sorted(self.finished_benchmarks,
-                                 key=lambda pair: str(pair[0]))
-    for info, path in finished_benchmarks:
-      benchmark_metrics_json_object = json.loads(path.read_text())
-      benchmark_run = BenchmarkRun(info=info,
-                                   metrics=BenchmarkMetrics.from_json_object(
-                                       benchmark_metrics_json_object))
-      results.benchmarks.append(benchmark_run)
+        finished_benchmarks = sorted(
+            self.finished_benchmarks, key=lambda pair: str(pair[0])
+        )
+        for info, path in finished_benchmarks:
+            benchmark_metrics_json_object = json.loads(path.read_text())
+            benchmark_run = BenchmarkRun(
+                info=info,
+                metrics=BenchmarkMetrics.from_json_object(
+                    benchmark_metrics_json_object
+                ),
+            )
+            results.benchmarks.append(benchmark_run)
 
-    return results
+        return results
 
-  def get_benchmark_result_filenames(self) -> Sequence[pathlib.Path]:
-    """Returns the json file paths of finished benchmarks."""
-    return [path for info, path in self.finished_benchmarks]
+    def get_benchmark_result_filenames(self) -> Sequence[pathlib.Path]:
+        """Returns the json file paths of finished benchmarks."""
+        return [path for info, path in self.finished_benchmarks]
 
-  def get_capture_filenames(self) -> Sequence[pathlib.Path]:
-    """Returns the tracy file paths of finished captures."""
-    return self.finished_captures
+    def get_capture_filenames(self) -> Sequence[pathlib.Path]:
+        """Returns the tracy file paths of finished captures."""
+        return self.finished_captures
 
-  def get_benchmark_errors(self):
-    """Returns the exceptions captured during benchmarking."""
-    return self.benchmark_errors
+    def get_benchmark_errors(self):
+        """Returns the exceptions captured during benchmarking."""
+        return self.benchmark_errors
 
-  def __get_output_paths(self, benchmark_name: str):
-    """Get output paths for the results and capture. The path of results/capture
-    is None if the benchmark/capture doesn't need to be run.
-    """
+    def __get_output_paths(self, benchmark_name: str):
+        """Get output paths for the results and capture. The path of results/capture
+        is None if the benchmark/capture doesn't need to be run.
+        """
 
-    benchmark_results_filename = None
-    if self.config.normal_benchmark_tool_dir:
-      benchmark_results_filename = self.config.benchmark_results_dir / f"{benchmark_name}.json"
+        benchmark_results_filename = None
+        if self.config.normal_benchmark_tool_dir:
+            benchmark_results_filename = (
+                self.config.benchmark_results_dir / f"{benchmark_name}.json"
+            )
 
-    capture_filename = None
-    if self.config.trace_capture_config:
-      capture_filename = self.config.trace_capture_config.capture_tmp_dir / f"{benchmark_name}.tracy"
+        capture_filename = None
+        if self.config.trace_capture_config:
+            capture_filename = (
+                self.config.trace_capture_config.capture_tmp_dir
+                / f"{benchmark_name}.tracy"
+            )
 
-    return (benchmark_results_filename, capture_filename)
+        return (benchmark_results_filename, capture_filename)
 
-  def __get_benchmark_info_from_case(
-      self, benchmark_case: BenchmarkCase) -> BenchmarkInfo:
-    run_config = benchmark_case.run_config
-    run_tags = run_config.module_execution_config.tags
-    gen_config = run_config.module_generation_config
-    model_source = str(gen_config.imported_model.model.source_type)
-    compile_tags = gen_config.compile_config.tags
-    return BenchmarkInfo(name=run_config.name,
-                         model_name=benchmark_case.model_name,
-                         model_tags=benchmark_case.model_tags,
-                         model_source=model_source,
-                         bench_mode=run_tags,
-                         compile_tags=compile_tags,
-                         driver_info=benchmark_case.driver_info,
-                         device_info=self.device_info,
-                         run_config_id=run_config.composite_id)
+    def __get_benchmark_info_from_case(
+        self, benchmark_case: BenchmarkCase
+    ) -> BenchmarkInfo:
+        run_config = benchmark_case.run_config
+        run_tags = run_config.module_execution_config.tags
+        gen_config = run_config.module_generation_config
+        model_source = str(gen_config.imported_model.model.source_type)
+        compile_tags = gen_config.compile_config.tags
+        return BenchmarkInfo(
+            name=run_config.name,
+            model_name=benchmark_case.model_name,
+            model_tags=benchmark_case.model_tags,
+            model_source=model_source,
+            bench_mode=run_tags,
+            compile_tags=compile_tags,
+            driver_info=benchmark_case.driver_info,
+            device_info=self.device_info,
+            run_config_id=run_config.composite_id,
+        )
 
-  def __get_available_drivers_and_loaders(
-      self) -> Tuple[Sequence[str], Sequence[str]]:
-    any_tool_dir = (self.config.normal_benchmark_tool_dir
-                    if self.config.normal_benchmark_tool_dir else
-                    self.config.trace_capture_config.traced_benchmark_tool_dir)
-    config_txt_file_path = any_tool_dir / "build_config.txt"
-    config_txt_file_lines = config_txt_file_path.read_text().splitlines()
+    def __get_available_drivers_and_loaders(
+        self,
+    ) -> Tuple[Sequence[str], Sequence[str]]:
+        any_tool_dir = (
+            self.config.normal_benchmark_tool_dir
+            if self.config.normal_benchmark_tool_dir
+            else self.config.trace_capture_config.traced_benchmark_tool_dir
+        )
+        config_txt_file_path = any_tool_dir / "build_config.txt"
+        config_txt_file_lines = config_txt_file_path.read_text().splitlines()
 
-    available_drivers = []
-    available_loaders = []
-    for line in config_txt_file_lines:
-      name, value = line.strip().split("=")
-      if value != "ON":
-        continue
-      if name == "IREE_HAL_DRIVER_CUDA":
-        available_drivers.append("cuda")
-      elif name == "IREE_HAL_DRIVER_LOCAL_SYNC":
-        available_drivers.append("local-sync")
-      elif name == "IREE_HAL_DRIVER_LOCAL_TASK":
-        available_drivers.append("local-task")
-      elif name == "IREE_HAL_DRIVER_VULKAN":
-        available_drivers.append("vulkan")
-      elif name == "IREE_HAL_EXECUTABLE_LOADER_EMBEDDED_ELF":
-        available_loaders.append("embedded-elf")
-      elif name == "IREE_HAL_EXECUTABLE_LOADER_SYSTEM_LIBRARY":
-        available_loaders.append("system-library")
-      elif name == "IREE_HAL_EXECUTABLE_LOADER_VMVX_MODULE":
-        available_loaders.append("vmvx-module")
-      else:
-        continue
+        available_drivers = []
+        available_loaders = []
+        for line in config_txt_file_lines:
+            name, value = line.strip().split("=")
+            if value != "ON":
+                continue
+            if name == "IREE_HAL_DRIVER_CUDA":
+                available_drivers.append("cuda")
+            elif name == "IREE_HAL_DRIVER_LOCAL_SYNC":
+                available_drivers.append("local-sync")
+            elif name == "IREE_HAL_DRIVER_LOCAL_TASK":
+                available_drivers.append("local-task")
+            elif name == "IREE_HAL_DRIVER_VULKAN":
+                available_drivers.append("vulkan")
+            elif name == "IREE_HAL_EXECUTABLE_LOADER_EMBEDDED_ELF":
+                available_loaders.append("embedded-elf")
+            elif name == "IREE_HAL_EXECUTABLE_LOADER_SYSTEM_LIBRARY":
+                available_loaders.append("system-library")
+            elif name == "IREE_HAL_EXECUTABLE_LOADER_VMVX_MODULE":
+                available_loaders.append("vmvx-module")
+            else:
+                continue
 
-    if self.verbose:
-      available_drivers_str = ', '.join(available_drivers)
-      print(f"Available drivers: {available_drivers_str}")
-      available_loaders_str = ', '.join(available_loaders)
-      print(f"Available loaders: {available_loaders_str}")
+        if self.verbose:
+            available_drivers_str = ", ".join(available_drivers)
+            print(f"Available drivers: {available_drivers_str}")
+            available_loaders_str = ", ".join(available_loaders)
+            print(f"Available loaders: {available_loaders_str}")
 
-    return available_drivers, available_loaders
+        return available_drivers, available_loaders
diff --git a/build_tools/benchmarks/common/benchmark_driver_test.py b/build_tools/benchmarks/common/benchmark_driver_test.py
index 106cb55..f2dd076 100644
--- a/build_tools/benchmarks/common/benchmark_driver_test.py
+++ b/build_tools/benchmarks/common/benchmark_driver_test.py
@@ -14,254 +14,301 @@
 from common import benchmark_config
 from common.benchmark_suite import BenchmarkCase, BenchmarkSuite
 from common.benchmark_driver import BenchmarkDriver
-from common.benchmark_definition import (IREE_DRIVERS_INFOS, DeviceInfo,
-                                         PlatformType, BenchmarkLatency,
-                                         BenchmarkMemory, BenchmarkMetrics)
+from common.benchmark_definition import (
+    IREE_DRIVERS_INFOS,
+    DeviceInfo,
+    PlatformType,
+    BenchmarkLatency,
+    BenchmarkMemory,
+    BenchmarkMetrics,
+)
 from e2e_test_framework.definitions import common_definitions, iree_definitions
 
 
 class FakeBenchmarkDriver(BenchmarkDriver):
+    def __init__(
+        self, *args, raise_exception_on_case: Optional[BenchmarkCase] = None, **kwargs
+    ):
+        super().__init__(*args, **kwargs)
+        self.raise_exception_on_case = raise_exception_on_case
+        self.run_benchmark_cases = []
 
-  def __init__(self,
-               *args,
-               raise_exception_on_case: Optional[BenchmarkCase] = None,
-               **kwargs):
-    super().__init__(*args, **kwargs)
-    self.raise_exception_on_case = raise_exception_on_case
-    self.run_benchmark_cases = []
+    def run_benchmark_case(
+        self,
+        benchmark_case: BenchmarkCase,
+        benchmark_results_filename: Optional[pathlib.Path],
+        capture_filename: Optional[pathlib.Path],
+    ) -> None:
+        if self.raise_exception_on_case == benchmark_case:
+            raise Exception("fake exception")
 
-  def run_benchmark_case(self, benchmark_case: BenchmarkCase,
-                         benchmark_results_filename: Optional[pathlib.Path],
-                         capture_filename: Optional[pathlib.Path]) -> None:
-    if self.raise_exception_on_case == benchmark_case:
-      raise Exception("fake exception")
+        self.run_benchmark_cases.append(benchmark_case)
 
-    self.run_benchmark_cases.append(benchmark_case)
-
-    if benchmark_results_filename:
-      fake_benchmark_metrics = BenchmarkMetrics(
-          real_time=BenchmarkLatency(0, 0, 0, "ns"),
-          cpu_time=BenchmarkLatency(0, 0, 0, "ns"),
-          host_memory=BenchmarkMemory(0, 0, 0, 0, "bytes"),
-          device_memory=BenchmarkMemory(0, 0, 0, 0, "bytes"),
-          raw_data={},
-      )
-      benchmark_results_filename.write_text(
-          json.dumps(fake_benchmark_metrics.to_json_object()))
-    if capture_filename:
-      capture_filename.write_text("{}")
+        if benchmark_results_filename:
+            fake_benchmark_metrics = BenchmarkMetrics(
+                real_time=BenchmarkLatency(0, 0, 0, "ns"),
+                cpu_time=BenchmarkLatency(0, 0, 0, "ns"),
+                host_memory=BenchmarkMemory(0, 0, 0, 0, "bytes"),
+                device_memory=BenchmarkMemory(0, 0, 0, 0, "bytes"),
+                raw_data={},
+            )
+            benchmark_results_filename.write_text(
+                json.dumps(fake_benchmark_metrics.to_json_object())
+            )
+        if capture_filename:
+            capture_filename.write_text("{}")
 
 
 class BenchmarkDriverTest(unittest.TestCase):
+    def setUp(self):
+        self._tmp_dir_obj = tempfile.TemporaryDirectory()
+        self._root_dir_obj = tempfile.TemporaryDirectory()
 
-  def setUp(self):
-    self._tmp_dir_obj = tempfile.TemporaryDirectory()
-    self._root_dir_obj = tempfile.TemporaryDirectory()
+        self.tmp_dir = pathlib.Path(self._tmp_dir_obj.name)
+        (self.tmp_dir / "build_config.txt").write_text(
+            "IREE_HAL_DRIVER_LOCAL_SYNC=ON\n"
+            "IREE_HAL_DRIVER_LOCAL_TASK=ON\n"
+            "IREE_HAL_EXECUTABLE_LOADER_EMBEDDED_ELF=ON\n"
+        )
 
-    self.tmp_dir = pathlib.Path(self._tmp_dir_obj.name)
-    (self.tmp_dir / "build_config.txt").write_text(
-        "IREE_HAL_DRIVER_LOCAL_SYNC=ON\n"
-        "IREE_HAL_DRIVER_LOCAL_TASK=ON\n"
-        "IREE_HAL_EXECUTABLE_LOADER_EMBEDDED_ELF=ON\n")
+        self.benchmark_results_dir = (
+            self.tmp_dir / benchmark_config.BENCHMARK_RESULTS_REL_PATH
+        )
+        self.captures_dir = self.tmp_dir / benchmark_config.CAPTURES_REL_PATH
+        self.benchmark_results_dir.mkdir()
+        self.captures_dir.mkdir()
 
-    self.benchmark_results_dir = (self.tmp_dir /
-                                  benchmark_config.BENCHMARK_RESULTS_REL_PATH)
-    self.captures_dir = (self.tmp_dir / benchmark_config.CAPTURES_REL_PATH)
-    self.benchmark_results_dir.mkdir()
-    self.captures_dir.mkdir()
+        self.config = benchmark_config.BenchmarkConfig(
+            root_benchmark_dir=pathlib.Path(self._root_dir_obj.name),
+            benchmark_results_dir=self.benchmark_results_dir,
+            git_commit_hash="abcd",
+            normal_benchmark_tool_dir=self.tmp_dir,
+            trace_capture_config=benchmark_config.TraceCaptureConfig(
+                traced_benchmark_tool_dir=self.tmp_dir,
+                trace_capture_tool=self.tmp_dir / "capture_tool",
+                capture_tarball=self.tmp_dir / "captures.tar",
+                capture_tmp_dir=self.captures_dir,
+            ),
+            use_compatible_filter=True,
+        )
 
-    self.config = benchmark_config.BenchmarkConfig(
-        root_benchmark_dir=pathlib.Path(self._root_dir_obj.name),
-        benchmark_results_dir=self.benchmark_results_dir,
-        git_commit_hash="abcd",
-        normal_benchmark_tool_dir=self.tmp_dir,
-        trace_capture_config=benchmark_config.TraceCaptureConfig(
-            traced_benchmark_tool_dir=self.tmp_dir,
-            trace_capture_tool=self.tmp_dir / "capture_tool",
-            capture_tarball=self.tmp_dir / "captures.tar",
-            capture_tmp_dir=self.captures_dir),
-        use_compatible_filter=True)
+        self.device_info = DeviceInfo(
+            platform_type=PlatformType.LINUX,
+            model="Unknown",
+            cpu_abi="x86_64",
+            cpu_uarch="CascadeLake",
+            cpu_features=[],
+            gpu_name="unknown",
+        )
 
-    self.device_info = DeviceInfo(platform_type=PlatformType.LINUX,
-                                  model="Unknown",
-                                  cpu_abi="x86_64",
-                                  cpu_uarch="CascadeLake",
-                                  cpu_features=[],
-                                  gpu_name="unknown")
+        model_tflite = common_definitions.Model(
+            id="tflite",
+            name="model_tflite",
+            tags=[],
+            source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
+            source_url="",
+            entry_function="predict",
+            input_types=["1xf32"],
+        )
+        device_spec = common_definitions.DeviceSpec.build(
+            id="dev",
+            device_name="test_dev",
+            architecture=common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
+            host_environment=common_definitions.HostEnvironment.LINUX_X86_64,
+            device_parameters=[],
+            tags=[],
+        )
+        compile_target = iree_definitions.CompileTarget(
+            target_backend=iree_definitions.TargetBackend.LLVM_CPU,
+            target_architecture=(
+                common_definitions.DeviceArchitecture.X86_64_CASCADELAKE
+            ),
+            target_abi=iree_definitions.TargetABI.LINUX_GNU,
+        )
+        gen_config = iree_definitions.ModuleGenerationConfig.build(
+            imported_model=iree_definitions.ImportedModel.from_model(model_tflite),
+            compile_config=iree_definitions.CompileConfig.build(
+                id="comp_a", tags=[], compile_targets=[compile_target]
+            ),
+        )
+        exec_config_a = iree_definitions.ModuleExecutionConfig.build(
+            id="exec_a",
+            tags=["sync"],
+            loader=iree_definitions.RuntimeLoader.EMBEDDED_ELF,
+            driver=iree_definitions.RuntimeDriver.LOCAL_SYNC,
+        )
+        run_config_a = iree_definitions.E2EModelRunConfig.build(
+            module_generation_config=gen_config,
+            module_execution_config=exec_config_a,
+            target_device_spec=device_spec,
+            input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
+            tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE,
+        )
+        exec_config_b = iree_definitions.ModuleExecutionConfig.build(
+            id="exec_b",
+            tags=["task"],
+            loader=iree_definitions.RuntimeLoader.EMBEDDED_ELF,
+            driver=iree_definitions.RuntimeDriver.LOCAL_TASK,
+        )
+        run_config_b = iree_definitions.E2EModelRunConfig.build(
+            module_generation_config=gen_config,
+            module_execution_config=exec_config_b,
+            target_device_spec=device_spec,
+            input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
+            tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE,
+        )
+        self.case1 = BenchmarkCase(
+            model_name="model_tflite",
+            model_tags=[],
+            bench_mode=["sync"],
+            target_arch=common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
+            driver_info=IREE_DRIVERS_INFOS["iree-llvm-cpu-sync"],
+            benchmark_case_dir=pathlib.Path("case1"),
+            benchmark_tool_name="tool",
+            run_config=run_config_a,
+        )
+        self.case2 = BenchmarkCase(
+            model_name="model_tflite",
+            model_tags=[],
+            bench_mode=["task"],
+            target_arch=common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
+            driver_info=IREE_DRIVERS_INFOS["iree-llvm-cpu"],
+            benchmark_case_dir=pathlib.Path("case2"),
+            benchmark_tool_name="tool",
+            run_config=run_config_b,
+        )
 
-    model_tflite = common_definitions.Model(
-        id="tflite",
-        name="model_tflite",
-        tags=[],
-        source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
-        source_url="",
-        entry_function="predict",
-        input_types=["1xf32"])
-    device_spec = common_definitions.DeviceSpec.build(
-        id="dev",
-        device_name="test_dev",
-        architecture=common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
-        host_environment=common_definitions.HostEnvironment.LINUX_X86_64,
-        device_parameters=[],
-        tags=[])
-    compile_target = iree_definitions.CompileTarget(
-        target_backend=iree_definitions.TargetBackend.LLVM_CPU,
-        target_architecture=(
-            common_definitions.DeviceArchitecture.X86_64_CASCADELAKE),
-        target_abi=iree_definitions.TargetABI.LINUX_GNU)
-    gen_config = iree_definitions.ModuleGenerationConfig.build(
-        imported_model=iree_definitions.ImportedModel.from_model(model_tflite),
-        compile_config=iree_definitions.CompileConfig.build(
-            id="comp_a", tags=[], compile_targets=[compile_target]))
-    exec_config_a = iree_definitions.ModuleExecutionConfig.build(
-        id="exec_a",
-        tags=["sync"],
-        loader=iree_definitions.RuntimeLoader.EMBEDDED_ELF,
-        driver=iree_definitions.RuntimeDriver.LOCAL_SYNC)
-    run_config_a = iree_definitions.E2EModelRunConfig.build(
-        module_generation_config=gen_config,
-        module_execution_config=exec_config_a,
-        target_device_spec=device_spec,
-        input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
-        tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE)
-    exec_config_b = iree_definitions.ModuleExecutionConfig.build(
-        id="exec_b",
-        tags=["task"],
-        loader=iree_definitions.RuntimeLoader.EMBEDDED_ELF,
-        driver=iree_definitions.RuntimeDriver.LOCAL_TASK)
-    run_config_b = iree_definitions.E2EModelRunConfig.build(
-        module_generation_config=gen_config,
-        module_execution_config=exec_config_b,
-        target_device_spec=device_spec,
-        input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
-        tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE)
-    self.case1 = BenchmarkCase(
-        model_name="model_tflite",
-        model_tags=[],
-        bench_mode=["sync"],
-        target_arch=common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
-        driver_info=IREE_DRIVERS_INFOS["iree-llvm-cpu-sync"],
-        benchmark_case_dir=pathlib.Path("case1"),
-        benchmark_tool_name="tool",
-        run_config=run_config_a)
-    self.case2 = BenchmarkCase(
-        model_name="model_tflite",
-        model_tags=[],
-        bench_mode=["task"],
-        target_arch=common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
-        driver_info=IREE_DRIVERS_INFOS["iree-llvm-cpu"],
-        benchmark_case_dir=pathlib.Path("case2"),
-        benchmark_tool_name="tool",
-        run_config=run_config_b)
+        compile_target_rv64 = iree_definitions.CompileTarget(
+            target_backend=iree_definitions.TargetBackend.LLVM_CPU,
+            target_architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
+            target_abi=iree_definitions.TargetABI.LINUX_GNU,
+        )
+        gen_config_rv64 = iree_definitions.ModuleGenerationConfig.build(
+            imported_model=iree_definitions.ImportedModel.from_model(model_tflite),
+            compile_config=iree_definitions.CompileConfig.build(
+                id="comp_rv64", tags=[], compile_targets=[compile_target_rv64]
+            ),
+        )
+        device_spec_rv64 = common_definitions.DeviceSpec.build(
+            id="rv64_dev",
+            device_name="rv64_dev",
+            architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
+            host_environment=common_definitions.HostEnvironment.LINUX_X86_64,
+            device_parameters=[],
+            tags=[],
+        )
+        run_config_incompatible = iree_definitions.E2EModelRunConfig.build(
+            module_generation_config=gen_config_rv64,
+            module_execution_config=exec_config_b,
+            target_device_spec=device_spec_rv64,
+            input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
+            tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE,
+        )
+        self.incompatible_case = BenchmarkCase(
+            model_name="model_tflite",
+            model_tags=[],
+            bench_mode=["task"],
+            target_arch=common_definitions.DeviceArchitecture.RV64_GENERIC,
+            driver_info=IREE_DRIVERS_INFOS["iree-llvm-cpu"],
+            benchmark_case_dir=pathlib.Path("incompatible_case"),
+            benchmark_tool_name="tool",
+            run_config=run_config_incompatible,
+        )
+        self.benchmark_suite = BenchmarkSuite(
+            [
+                self.case1,
+                self.case2,
+                self.incompatible_case,
+            ]
+        )
 
-    compile_target_rv64 = iree_definitions.CompileTarget(
-        target_backend=iree_definitions.TargetBackend.LLVM_CPU,
-        target_architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
-        target_abi=iree_definitions.TargetABI.LINUX_GNU)
-    gen_config_rv64 = iree_definitions.ModuleGenerationConfig.build(
-        imported_model=iree_definitions.ImportedModel.from_model(model_tflite),
-        compile_config=iree_definitions.CompileConfig.build(
-            id="comp_rv64", tags=[], compile_targets=[compile_target_rv64]))
-    device_spec_rv64 = common_definitions.DeviceSpec.build(
-        id="rv64_dev",
-        device_name="rv64_dev",
-        architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
-        host_environment=common_definitions.HostEnvironment.LINUX_X86_64,
-        device_parameters=[],
-        tags=[])
-    run_config_incompatible = iree_definitions.E2EModelRunConfig.build(
-        module_generation_config=gen_config_rv64,
-        module_execution_config=exec_config_b,
-        target_device_spec=device_spec_rv64,
-        input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
-        tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE)
-    self.incompatible_case = BenchmarkCase(
-        model_name="model_tflite",
-        model_tags=[],
-        bench_mode=["task"],
-        target_arch=common_definitions.DeviceArchitecture.RV64_GENERIC,
-        driver_info=IREE_DRIVERS_INFOS["iree-llvm-cpu"],
-        benchmark_case_dir=pathlib.Path("incompatible_case"),
-        benchmark_tool_name="tool",
-        run_config=run_config_incompatible)
-    self.benchmark_suite = BenchmarkSuite([
-        self.case1,
-        self.case2,
-        self.incompatible_case,
-    ])
+    def tearDown(self) -> None:
+        self._tmp_dir_obj.cleanup()
+        self._root_dir_obj.cleanup()
 
-  def tearDown(self) -> None:
-    self._tmp_dir_obj.cleanup()
-    self._root_dir_obj.cleanup()
+    def test_run(self):
+        driver = FakeBenchmarkDriver(
+            self.device_info, self.config, self.benchmark_suite
+        )
 
-  def test_run(self):
-    driver = FakeBenchmarkDriver(self.device_info, self.config,
-                                 self.benchmark_suite)
+        driver.run()
 
-    driver.run()
+        self.assertEqual(driver.get_benchmark_results().commit, "abcd")
+        self.assertEqual(len(driver.get_benchmark_results().benchmarks), 2)
+        self.assertEqual(
+            driver.get_benchmark_results().benchmarks[0].metrics.raw_data, {}
+        )
+        self.assertEqual(
+            driver.get_benchmark_result_filenames(),
+            [
+                self.benchmark_results_dir / f"{self.case1.run_config}.json",
+                self.benchmark_results_dir / f"{self.case2.run_config}.json",
+            ],
+        )
+        self.assertEqual(
+            driver.get_capture_filenames(),
+            [
+                self.captures_dir / f"{self.case1.run_config}.tracy",
+                self.captures_dir / f"{self.case2.run_config}.tracy",
+            ],
+        )
+        self.assertEqual(driver.get_benchmark_errors(), [])
 
-    self.assertEqual(driver.get_benchmark_results().commit, "abcd")
-    self.assertEqual(len(driver.get_benchmark_results().benchmarks), 2)
-    self.assertEqual(
-        driver.get_benchmark_results().benchmarks[0].metrics.raw_data, {})
-    self.assertEqual(driver.get_benchmark_result_filenames(), [
-        self.benchmark_results_dir / f"{self.case1.run_config}.json",
-        self.benchmark_results_dir / f"{self.case2.run_config}.json"
-    ])
-    self.assertEqual(driver.get_capture_filenames(), [
-        self.captures_dir / f"{self.case1.run_config}.tracy",
-        self.captures_dir / f"{self.case2.run_config}.tracy"
-    ])
-    self.assertEqual(driver.get_benchmark_errors(), [])
+    def test_run_disable_compatible_filter(self):
+        self.config.use_compatible_filter = False
+        driver = FakeBenchmarkDriver(
+            self.device_info, self.config, self.benchmark_suite
+        )
 
-  def test_run_disable_compatible_filter(self):
-    self.config.use_compatible_filter = False
-    driver = FakeBenchmarkDriver(self.device_info, self.config,
-                                 self.benchmark_suite)
+        driver.run()
 
-    driver.run()
+        self.assertEqual(len(driver.get_benchmark_results().benchmarks), 3)
 
-    self.assertEqual(len(driver.get_benchmark_results().benchmarks), 3)
+    def test_run_with_no_capture(self):
+        self.config.trace_capture_config = None
+        driver = FakeBenchmarkDriver(
+            self.device_info, self.config, self.benchmark_suite
+        )
 
-  def test_run_with_no_capture(self):
-    self.config.trace_capture_config = None
-    driver = FakeBenchmarkDriver(self.device_info, self.config,
-                                 self.benchmark_suite)
+        driver.run()
 
-    driver.run()
+        self.assertEqual(len(driver.get_benchmark_result_filenames()), 2)
+        self.assertEqual(driver.get_capture_filenames(), [])
 
-    self.assertEqual(len(driver.get_benchmark_result_filenames()), 2)
-    self.assertEqual(driver.get_capture_filenames(), [])
+    def test_run_with_exception_and_keep_going(self):
+        self.config.keep_going = True
+        driver = FakeBenchmarkDriver(
+            self.device_info,
+            self.config,
+            self.benchmark_suite,
+            raise_exception_on_case=self.case1,
+        )
 
-  def test_run_with_exception_and_keep_going(self):
-    self.config.keep_going = True
-    driver = FakeBenchmarkDriver(self.device_info,
-                                 self.config,
-                                 self.benchmark_suite,
-                                 raise_exception_on_case=self.case1)
+        driver.run()
 
-    driver.run()
+        self.assertEqual(len(driver.get_benchmark_errors()), 1)
+        self.assertEqual(len(driver.get_benchmark_result_filenames()), 1)
 
-    self.assertEqual(len(driver.get_benchmark_errors()), 1)
-    self.assertEqual(len(driver.get_benchmark_result_filenames()), 1)
+    def test_run_with_previous_benchmarks_and_captures(self):
+        benchmark_filename = (
+            self.benchmark_results_dir / f"{self.case1.run_config}.json"
+        )
+        benchmark_filename.touch()
+        capture_filename = self.captures_dir / f"{self.case1.run_config}.tracy"
+        capture_filename.touch()
+        config = dataclasses.replace(self.config, continue_from_previous=True)
+        driver = FakeBenchmarkDriver(
+            device_info=self.device_info,
+            benchmark_config=config,
+            benchmark_suite=self.benchmark_suite,
+        )
 
-  def test_run_with_previous_benchmarks_and_captures(self):
-    benchmark_filename = (self.benchmark_results_dir /
-                          f"{self.case1.run_config}.json")
-    benchmark_filename.touch()
-    capture_filename = self.captures_dir / f"{self.case1.run_config}.tracy"
-    capture_filename.touch()
-    config = dataclasses.replace(self.config, continue_from_previous=True)
-    driver = FakeBenchmarkDriver(device_info=self.device_info,
-                                 benchmark_config=config,
-                                 benchmark_suite=self.benchmark_suite)
+        driver.run()
 
-    driver.run()
-
-    self.assertEqual(len(driver.run_benchmark_cases), 1)
-    self.assertEqual(len(driver.get_benchmark_result_filenames()), 2)
-    self.assertEqual(len(driver.get_capture_filenames()), 2)
+        self.assertEqual(len(driver.run_benchmark_cases), 1)
+        self.assertEqual(len(driver.get_benchmark_result_filenames()), 2)
+        self.assertEqual(len(driver.get_capture_filenames()), 2)
 
 
 if __name__ == "__main__":
-  unittest.main()
+    unittest.main()
diff --git a/build_tools/benchmarks/common/benchmark_presentation.py b/build_tools/benchmarks/common/benchmark_presentation.py
index 9cab2a7..6eafb2b 100644
--- a/build_tools/benchmarks/common/benchmark_presentation.py
+++ b/build_tools/benchmarks/common/benchmark_presentation.py
@@ -6,8 +6,18 @@
 
 from abc import ABC, abstractmethod
 from dataclasses import dataclass
-from typing import (Any, Callable, Dict, Generic, List, Optional, Sequence,
-                    Tuple, TypeVar, Union)
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    Generic,
+    List,
+    Optional,
+    Sequence,
+    Tuple,
+    TypeVar,
+    Union,
+)
 import pathlib
 import dataclasses
 import json
@@ -16,11 +26,14 @@
 import math
 
 from common import benchmark_definition, benchmark_thresholds
-from common.benchmark_thresholds import (BENCHMARK_THRESHOLDS,
-                                         COMPILATION_TIME_THRESHOLDS,
-                                         TOTAL_ARTIFACT_SIZE_THRESHOLDS,
-                                         TOTAL_DISPATCH_SIZE_THRESHOLDS,
-                                         BenchmarkThreshold, ThresholdUnit)
+from common.benchmark_thresholds import (
+    BENCHMARK_THRESHOLDS,
+    COMPILATION_TIME_THRESHOLDS,
+    TOTAL_ARTIFACT_SIZE_THRESHOLDS,
+    TOTAL_DISPATCH_SIZE_THRESHOLDS,
+    BenchmarkThreshold,
+    ThresholdUnit,
+)
 
 GetMetricFunc = Callable[[Any], Tuple[int, Optional[int]]]
 
@@ -36,7 +49,9 @@
 COMPILATION_TIME_METRIC_ID = "e54cd682-c079-4c42-b4ad-d92c4bedea13"
 COMPILATION_TIME_SERIES_SUFFIX = "compilation:module:compilation-time"
 TOTAL_DISPATCH_SIZE_METRIC_ID = "9e15f7e6-383c-47ec-bd38-ecba55a5f10a"
-TOTAL_DISPATCH_SIZE_SERIES_SUFFIX = "compilation:module:component-size:total-dispatch-size"
+TOTAL_DISPATCH_SIZE_SERIES_SUFFIX = (
+    "compilation:module:component-size:total-dispatch-size"
+)
 TOTAL_ARTIFACT_SIZE_METRIC_ID = "2c8a9198-c01c-45b9-a7da-69c82cf749f7"
 TOTAL_ARTIFACT_SIZE_SERIES_SUFFIX = "compilation:module:total-artifact-size"
 STREAM_IR_DISPATCH_COUNT_METRIC_ID = "7b72cd9e-43ed-4078-b6d3-20b810f9e4ad"
@@ -45,290 +60,312 @@
 
 @dataclass
 class AggregateBenchmarkLatency:
-  """An object for describing aggregate latency numbers for a benchmark."""
-  name: str
-  benchmark_info: benchmark_definition.BenchmarkInfo
-  mean_time: int
-  median_time: int
-  stddev_time: int
-  # The average latency time for the base commit to compare against.
-  base_mean_time: Optional[int] = None
+    """An object for describing aggregate latency numbers for a benchmark."""
 
-  def __str__(self) -> str:
-    return self.name
+    name: str
+    benchmark_info: benchmark_definition.BenchmarkInfo
+    mean_time: int
+    median_time: int
+    stddev_time: int
+    # The average latency time for the base commit to compare against.
+    base_mean_time: Optional[int] = None
+
+    def __str__(self) -> str:
+        return self.name
 
 
 @dataclass(frozen=True)
 class CompilationMetrics:
-  """An object for describing the summary of statistics and the reference."""
-  name: str
-  compilation_info: benchmark_definition.CompilationInfo
-  compilation_time_ms: int
-  total_dispatch_component_bytes: int
-  total_artifact_bytes: int
-  stream_ir_dispatch_count: int
-  base_compilation_time_ms: Optional[int] = None
-  base_total_artifact_bytes: Optional[int] = None
-  base_total_dispatch_component_bytes: Optional[int] = None
-  base_stream_ir_dispatch_count: Optional[int] = None
+    """An object for describing the summary of statistics and the reference."""
 
-  def __str__(self) -> str:
-    return self.name
+    name: str
+    compilation_info: benchmark_definition.CompilationInfo
+    compilation_time_ms: int
+    total_dispatch_component_bytes: int
+    total_artifact_bytes: int
+    stream_ir_dispatch_count: int
+    base_compilation_time_ms: Optional[int] = None
+    base_total_artifact_bytes: Optional[int] = None
+    base_total_dispatch_component_bytes: Optional[int] = None
+    base_stream_ir_dispatch_count: Optional[int] = None
+
+    def __str__(self) -> str:
+        return self.name
 
 
 T = TypeVar("T")
 
 
 class MetricsToTableMapper(ABC, Generic[T]):
-  """Abstract class to help map benchmark metrics to table.
+    """Abstract class to help map benchmark metrics to table.
 
     It contains a set of methods to help table generator get the required
     information for a metric. For example, extract the current and base metric
     value, the metric thresholds, the table header of the metrics, ...
-  """
+    """
 
-  @abstractmethod
-  def update_base_value(self, obj: T, base_value: Any) -> T:
-    """Sets the base value and returns the updated metric object."""
-    raise NotImplementedError()
+    @abstractmethod
+    def update_base_value(self, obj: T, base_value: Any) -> T:
+        """Sets the base value and returns the updated metric object."""
+        raise NotImplementedError()
 
-  @abstractmethod
-  def get_current_and_base_value(self, obj: T) -> Tuple[int, Optional[int]]:
-    """Returns the current and base (can be None) value."""
-    raise NotImplementedError()
+    @abstractmethod
+    def get_current_and_base_value(self, obj: T) -> Tuple[int, Optional[int]]:
+        """Returns the current and base (can be None) value."""
+        raise NotImplementedError()
 
-  def get_series_id(self, benchmark_id: str) -> str:
-    """Returns the dashboard series id."""
-    return f"{benchmark_id}-{self.get_metric_id()}"
+    def get_series_id(self, benchmark_id: str) -> str:
+        """Returns the dashboard series id."""
+        return f"{benchmark_id}-{self.get_metric_id()}"
 
-  @abstractmethod
-  def get_metric_id(self) -> str:
-    """Returns the dashboard series id."""
-    raise NotImplementedError()
+    @abstractmethod
+    def get_metric_id(self) -> str:
+        """Returns the dashboard series id."""
+        raise NotImplementedError()
 
-  @abstractmethod
-  def get_series_name(self, name: str) -> str:
-    """Returns the dashboard series name."""
-    raise NotImplementedError()
+    @abstractmethod
+    def get_series_name(self, name: str) -> str:
+        """Returns the dashboard series name."""
+        raise NotImplementedError()
 
-  @abstractmethod
-  def get_unit(self) -> str:
-    """Returns the unit of the metric value."""
-    raise NotImplementedError()
+    @abstractmethod
+    def get_unit(self) -> str:
+        """Returns the unit of the metric value."""
+        raise NotImplementedError()
 
-  @abstractmethod
-  def get_table_header(self) -> str:
-    """Returns the header of the table."""
-    raise NotImplementedError()
+    @abstractmethod
+    def get_table_header(self) -> str:
+        """Returns the header of the table."""
+        raise NotImplementedError()
 
-  @staticmethod
-  @abstractmethod
-  def get_metric_thresholds() -> Sequence[BenchmarkThreshold]:
-    raise NotImplementedError()
+    @staticmethod
+    @abstractmethod
+    def get_metric_thresholds() -> Sequence[BenchmarkThreshold]:
+        raise NotImplementedError()
 
-  @staticmethod
-  @abstractmethod
-  def get_table_title() -> str:
-    raise NotImplementedError()
+    @staticmethod
+    @abstractmethod
+    def get_table_title() -> str:
+        raise NotImplementedError()
 
 
 class CompilationTimeToTable(MetricsToTableMapper[CompilationMetrics]):
-  """Helper to map CompilationMetrics to compilation time column."""
+    """Helper to map CompilationMetrics to compilation time column."""
 
-  def update_base_value(self, compile_metrics: CompilationMetrics,
-                        base_value: Any) -> CompilationMetrics:
-    return dataclasses.replace(compile_metrics,
-                               base_compilation_time_ms=base_value)
+    def update_base_value(
+        self, compile_metrics: CompilationMetrics, base_value: Any
+    ) -> CompilationMetrics:
+        return dataclasses.replace(compile_metrics, base_compilation_time_ms=base_value)
 
-  def get_current_and_base_value(
-      self, compile_metrics: CompilationMetrics) -> Tuple[int, Optional[int]]:
-    return (compile_metrics.compilation_time_ms,
-            compile_metrics.base_compilation_time_ms)
+    def get_current_and_base_value(
+        self, compile_metrics: CompilationMetrics
+    ) -> Tuple[int, Optional[int]]:
+        return (
+            compile_metrics.compilation_time_ms,
+            compile_metrics.base_compilation_time_ms,
+        )
 
-  def get_metric_id(self) -> str:
-    return COMPILATION_TIME_METRIC_ID
+    def get_metric_id(self) -> str:
+        return COMPILATION_TIME_METRIC_ID
 
-  def get_series_name(self, name: str) -> str:
-    return f"{name} [{COMPILATION_TIME_SERIES_SUFFIX}]"
+    def get_series_name(self, name: str) -> str:
+        return f"{name} [{COMPILATION_TIME_SERIES_SUFFIX}]"
 
-  def get_unit(self) -> str:
-    return "ms"
+    def get_unit(self) -> str:
+        return "ms"
 
-  def get_table_header(self) -> str:
-    return f"Compilation Time ({self.get_unit()})"
+    def get_table_header(self) -> str:
+        return f"Compilation Time ({self.get_unit()})"
 
-  @staticmethod
-  def get_metric_thresholds() -> Sequence[BenchmarkThreshold]:
-    return COMPILATION_TIME_THRESHOLDS
+    @staticmethod
+    def get_metric_thresholds() -> Sequence[BenchmarkThreshold]:
+        return COMPILATION_TIME_THRESHOLDS
 
-  @staticmethod
-  def get_table_title() -> str:
-    return "Compilation Times"
+    @staticmethod
+    def get_table_title() -> str:
+        return "Compilation Times"
 
 
 class TotalDispatchSizeToTable(MetricsToTableMapper[CompilationMetrics]):
-  """Helper to map CompilationMetrics to total dispatch size column."""
+    """Helper to map CompilationMetrics to total dispatch size column."""
 
-  def update_base_value(self, compile_metrics: CompilationMetrics,
-                        base_value: Any) -> CompilationMetrics:
-    return dataclasses.replace(compile_metrics,
-                               base_total_dispatch_component_bytes=base_value)
+    def update_base_value(
+        self, compile_metrics: CompilationMetrics, base_value: Any
+    ) -> CompilationMetrics:
+        return dataclasses.replace(
+            compile_metrics, base_total_dispatch_component_bytes=base_value
+        )
 
-  def get_current_and_base_value(
-      self, compile_metrics: CompilationMetrics) -> Tuple[int, Optional[int]]:
-    return (compile_metrics.total_dispatch_component_bytes,
-            compile_metrics.base_total_dispatch_component_bytes)
+    def get_current_and_base_value(
+        self, compile_metrics: CompilationMetrics
+    ) -> Tuple[int, Optional[int]]:
+        return (
+            compile_metrics.total_dispatch_component_bytes,
+            compile_metrics.base_total_dispatch_component_bytes,
+        )
 
-  def get_metric_id(self) -> str:
-    return TOTAL_DISPATCH_SIZE_METRIC_ID
+    def get_metric_id(self) -> str:
+        return TOTAL_DISPATCH_SIZE_METRIC_ID
 
-  def get_series_name(self, name: str) -> str:
-    return f"{name} [{TOTAL_DISPATCH_SIZE_SERIES_SUFFIX}]"
+    def get_series_name(self, name: str) -> str:
+        return f"{name} [{TOTAL_DISPATCH_SIZE_SERIES_SUFFIX}]"
 
-  def get_unit(self) -> str:
-    return "bytes"
+    def get_unit(self) -> str:
+        return "bytes"
 
-  def get_table_header(self) -> str:
-    return f"Total Dispatch Size ({self.get_unit()})"
+    def get_table_header(self) -> str:
+        return f"Total Dispatch Size ({self.get_unit()})"
 
-  @staticmethod
-  def get_metric_thresholds() -> Sequence[BenchmarkThreshold]:
-    return TOTAL_DISPATCH_SIZE_THRESHOLDS
+    @staticmethod
+    def get_metric_thresholds() -> Sequence[BenchmarkThreshold]:
+        return TOTAL_DISPATCH_SIZE_THRESHOLDS
 
-  @staticmethod
-  def get_table_title() -> str:
-    return "Total Dispatch Sizes"
+    @staticmethod
+    def get_table_title() -> str:
+        return "Total Dispatch Sizes"
 
 
 class TotalArtifactSizeToTable(MetricsToTableMapper[CompilationMetrics]):
-  """Helper to map CompilationMetrics to total artifact size column."""
+    """Helper to map CompilationMetrics to total artifact size column."""
 
-  def update_base_value(self, compile_metrics: CompilationMetrics,
-                        base_value: Any) -> CompilationMetrics:
-    return dataclasses.replace(compile_metrics,
-                               base_total_artifact_bytes=base_value)
+    def update_base_value(
+        self, compile_metrics: CompilationMetrics, base_value: Any
+    ) -> CompilationMetrics:
+        return dataclasses.replace(
+            compile_metrics, base_total_artifact_bytes=base_value
+        )
 
-  def get_current_and_base_value(
-      self, compile_metrics: CompilationMetrics) -> Tuple[int, Optional[int]]:
-    return (compile_metrics.total_artifact_bytes,
-            compile_metrics.base_total_artifact_bytes)
+    def get_current_and_base_value(
+        self, compile_metrics: CompilationMetrics
+    ) -> Tuple[int, Optional[int]]:
+        return (
+            compile_metrics.total_artifact_bytes,
+            compile_metrics.base_total_artifact_bytes,
+        )
 
-  def get_metric_id(self) -> str:
-    return TOTAL_ARTIFACT_SIZE_METRIC_ID
+    def get_metric_id(self) -> str:
+        return TOTAL_ARTIFACT_SIZE_METRIC_ID
 
-  def get_series_name(self, name: str) -> str:
-    return f"{name} [{TOTAL_ARTIFACT_SIZE_SERIES_SUFFIX}]"
+    def get_series_name(self, name: str) -> str:
+        return f"{name} [{TOTAL_ARTIFACT_SIZE_SERIES_SUFFIX}]"
 
-  def get_unit(self) -> str:
-    return "bytes"
+    def get_unit(self) -> str:
+        return "bytes"
 
-  def get_table_header(self) -> str:
-    return f"Total Artifact Size ({self.get_unit()})"
+    def get_table_header(self) -> str:
+        return f"Total Artifact Size ({self.get_unit()})"
 
-  @staticmethod
-  def get_metric_thresholds() -> Sequence[BenchmarkThreshold]:
-    return TOTAL_ARTIFACT_SIZE_THRESHOLDS
+    @staticmethod
+    def get_metric_thresholds() -> Sequence[BenchmarkThreshold]:
+        return TOTAL_ARTIFACT_SIZE_THRESHOLDS
 
-  @staticmethod
-  def get_table_title() -> str:
-    return "Total Artifact Sizes"
+    @staticmethod
+    def get_table_title() -> str:
+        return "Total Artifact Sizes"
 
 
 class StreamIRDispatchCountToTable(MetricsToTableMapper[CompilationMetrics]):
-  """Helper to map CompilationMetrics to Stream IR Dispatch Count column."""
+    """Helper to map CompilationMetrics to Stream IR Dispatch Count column."""
 
-  def update_base_value(self, compile_metrics: CompilationMetrics,
-                        base_value: Any) -> CompilationMetrics:
-    return dataclasses.replace(compile_metrics,
-                               base_stream_ir_dispatch_count=base_value)
+    def update_base_value(
+        self, compile_metrics: CompilationMetrics, base_value: Any
+    ) -> CompilationMetrics:
+        return dataclasses.replace(
+            compile_metrics, base_stream_ir_dispatch_count=base_value
+        )
 
-  def get_current_and_base_value(
-      self, compile_metrics: CompilationMetrics) -> Tuple[int, Optional[int]]:
-    return (compile_metrics.stream_ir_dispatch_count,
-            compile_metrics.base_stream_ir_dispatch_count)
+    def get_current_and_base_value(
+        self, compile_metrics: CompilationMetrics
+    ) -> Tuple[int, Optional[int]]:
+        return (
+            compile_metrics.stream_ir_dispatch_count,
+            compile_metrics.base_stream_ir_dispatch_count,
+        )
 
-  def get_metric_id(self) -> str:
-    return STREAM_IR_DISPATCH_COUNT_METRIC_ID
+    def get_metric_id(self) -> str:
+        return STREAM_IR_DISPATCH_COUNT_METRIC_ID
 
-  def get_series_name(self, name: str) -> str:
-    return f"{name} [{STREAM_IR_DISPATCH_COUNT_SERIES_SUFFIX}]"
+    def get_series_name(self, name: str) -> str:
+        return f"{name} [{STREAM_IR_DISPATCH_COUNT_SERIES_SUFFIX}]"
 
-  def get_unit(self) -> str:
-    return "number"
+    def get_unit(self) -> str:
+        return "number"
 
-  def get_table_header(self) -> str:
-    return f"Stream IR Dispatch Count (# of cmd.dispatch ops)"
+    def get_table_header(self) -> str:
+        return f"Stream IR Dispatch Count (# of cmd.dispatch ops)"
 
-  @staticmethod
-  def get_metric_thresholds() -> Sequence[BenchmarkThreshold]:
-    return benchmark_thresholds.STREAM_IR_DISPATCH_COUNT_THRESHOLDS
+    @staticmethod
+    def get_metric_thresholds() -> Sequence[BenchmarkThreshold]:
+        return benchmark_thresholds.STREAM_IR_DISPATCH_COUNT_THRESHOLDS
 
-  @staticmethod
-  def get_table_title() -> str:
-    return "Stream IR Dispatch Count (# of cmd.dispatch ops)"
+    @staticmethod
+    def get_table_title() -> str:
+        return "Stream IR Dispatch Count (# of cmd.dispatch ops)"
 
 
-COMPILATION_METRICS_TO_TABLE_MAPPERS: List[
-    MetricsToTableMapper[CompilationMetrics]] = [
-        CompilationTimeToTable(),
-        TotalDispatchSizeToTable(),
-        TotalArtifactSizeToTable(),
-        StreamIRDispatchCountToTable(),
-    ]
+COMPILATION_METRICS_TO_TABLE_MAPPERS: List[MetricsToTableMapper[CompilationMetrics]] = [
+    CompilationTimeToTable(),
+    TotalDispatchSizeToTable(),
+    TotalArtifactSizeToTable(),
+    StreamIRDispatchCountToTable(),
+]
 
 
 def aggregate_all_benchmarks(
-    benchmark_files: Sequence[pathlib.Path],
-    expected_pr_commit: Optional[str] = None
+    benchmark_files: Sequence[pathlib.Path], expected_pr_commit: Optional[str] = None
 ) -> Dict[str, AggregateBenchmarkLatency]:
-  """Aggregates all benchmarks in the given files.
+    """Aggregates all benchmarks in the given files.
 
-  Args:
-  - benchmark_files: A list of JSON files, each can be decoded as a
-    BenchmarkResults.
-  - expected_pr_commit: An optional Git commit SHA to match against.
+    Args:
+    - benchmark_files: A list of JSON files, each can be decoded as a
+      BenchmarkResults.
+    - expected_pr_commit: An optional Git commit SHA to match against.
 
-  Returns:
-  - A dict of benchmark names to AggregateBenchmarkLatency numbers.
-  """
+    Returns:
+    - A dict of benchmark names to AggregateBenchmarkLatency numbers.
+    """
 
-  aggregate_results = {}
-  benchmark_names = set()
-  for benchmark_file in benchmark_files:
-    file_results = benchmark_definition.BenchmarkResults.from_json_str(
-        benchmark_file.read_text())
+    aggregate_results = {}
+    benchmark_names = set()
+    for benchmark_file in benchmark_files:
+        file_results = benchmark_definition.BenchmarkResults.from_json_str(
+            benchmark_file.read_text()
+        )
 
-    if ((expected_pr_commit is not None) and
-        (file_results.commit != expected_pr_commit)):
-      raise ValueError("Inconsistent pull request commit")
+        if (expected_pr_commit is not None) and (
+            file_results.commit != expected_pr_commit
+        ):
+            raise ValueError("Inconsistent pull request commit")
 
-    for benchmark_index in range(len(file_results.benchmarks)):
-      benchmark_run = file_results.benchmarks[benchmark_index]
+        for benchmark_index in range(len(file_results.benchmarks)):
+            benchmark_run = file_results.benchmarks[benchmark_index]
 
-      series_name = str(benchmark_run.info)
-      # Make sure each benchmark has a unique name.
-      if series_name in benchmark_names:
-        raise ValueError(f"Duplicated benchmark name: {series_name}")
-      benchmark_names.add(series_name)
+            series_name = str(benchmark_run.info)
+            # Make sure each benchmark has a unique name.
+            if series_name in benchmark_names:
+                raise ValueError(f"Duplicated benchmark name: {series_name}")
+            benchmark_names.add(series_name)
 
-      series_id = benchmark_run.info.run_config_id
-      if series_id in aggregate_results:
-        raise ValueError(f"Duplicated benchmark id: {series_id}")
+            series_id = benchmark_run.info.run_config_id
+            if series_id in aggregate_results:
+                raise ValueError(f"Duplicated benchmark id: {series_id}")
 
-      aggregate_results[series_id] = AggregateBenchmarkLatency(
-          name=series_name,
-          benchmark_info=benchmark_run.info,
-          mean_time=benchmark_run.metrics.real_time.mean,
-          median_time=benchmark_run.metrics.real_time.median,
-          stddev_time=benchmark_run.metrics.real_time.stddev)
+            aggregate_results[series_id] = AggregateBenchmarkLatency(
+                name=series_name,
+                benchmark_info=benchmark_run.info,
+                mean_time=benchmark_run.metrics.real_time.mean,
+                median_time=benchmark_run.metrics.real_time.median,
+                stddev_time=benchmark_run.metrics.real_time.stddev,
+            )
 
-  return aggregate_results
+    return aggregate_results
 
 
 def collect_all_compilation_metrics(
     compile_stats_files: Sequence[pathlib.Path],
-    expected_pr_commit: Optional[str] = None) -> Dict[str, CompilationMetrics]:
-  """Collects all compilation statistics in the given files.
+    expected_pr_commit: Optional[str] = None,
+) -> Dict[str, CompilationMetrics]:
+    """Collects all compilation statistics in the given files.
 
     Args:
       compile_stats_files: A list of JSON files, each can be decoded as a
@@ -337,80 +374,81 @@
 
     Returns:
       A dict of benchmark names to CompilationMetrics.
-  """
-  compile_metrics = {}
-  target_names = set()
-  for compile_stats_file in compile_stats_files:
-    with compile_stats_file.open("r") as f:
-      file_results = benchmark_definition.CompilationResults.from_json_object(
-          json.load(f))
+    """
+    compile_metrics = {}
+    target_names = set()
+    for compile_stats_file in compile_stats_files:
+        with compile_stats_file.open("r") as f:
+            file_results = benchmark_definition.CompilationResults.from_json_object(
+                json.load(f)
+            )
 
-    if ((expected_pr_commit is not None) and
-        (file_results.commit != expected_pr_commit)):
-      raise ValueError("Inconsistent pull request commit")
+        if (expected_pr_commit is not None) and (
+            file_results.commit != expected_pr_commit
+        ):
+            raise ValueError("Inconsistent pull request commit")
 
-    for compile_stats in file_results.compilation_statistics:
-      component_sizes = compile_stats.module_component_sizes
-      stream_dispatch_count = compile_stats.ir_stats.stream_dispatch_count
+        for compile_stats in file_results.compilation_statistics:
+            component_sizes = compile_stats.module_component_sizes
+            stream_dispatch_count = compile_stats.ir_stats.stream_dispatch_count
 
-      target_name = str(compile_stats.compilation_info)
-      if target_name in target_names:
-        raise ValueError(f"Duplicated target name: {target_name}")
-      target_names.add(target_name)
+            target_name = str(compile_stats.compilation_info)
+            if target_name in target_names:
+                raise ValueError(f"Duplicated target name: {target_name}")
+            target_names.add(target_name)
 
-      target_id = compile_stats.compilation_info.gen_config_id
-      if target_id in compile_metrics:
-        raise ValueError(f"Duplicated target id: {target_id}")
+            target_id = compile_stats.compilation_info.gen_config_id
+            if target_id in compile_metrics:
+                raise ValueError(f"Duplicated target id: {target_id}")
 
-      compile_metrics[target_id] = CompilationMetrics(
-          name=target_name,
-          compilation_info=compile_stats.compilation_info,
-          compilation_time_ms=compile_stats.compilation_time_ms,
-          total_artifact_bytes=component_sizes.file_bytes,
-          total_dispatch_component_bytes=component_sizes.
-          total_dispatch_component_bytes,
-          stream_ir_dispatch_count=stream_dispatch_count)
+            compile_metrics[target_id] = CompilationMetrics(
+                name=target_name,
+                compilation_info=compile_stats.compilation_info,
+                compilation_time_ms=compile_stats.compilation_time_ms,
+                total_artifact_bytes=component_sizes.file_bytes,
+                total_dispatch_component_bytes=component_sizes.total_dispatch_component_bytes,
+                stream_ir_dispatch_count=stream_dispatch_count,
+            )
 
-  return compile_metrics
+    return compile_metrics
 
 
 def _make_series_link(name: str, series_id: str) -> str:
-  """Add link to the given benchmark name.
+    """Add link to the given benchmark name.
 
     Args:
       name: the text to show on the link.
       series_id: the dashboard series id.
-  """
-  url = PERFBOARD_SERIES_PREFIX + urllib.parse.quote(series_id, safe="()[]@,")
-  return md.link(name, url)
+    """
+    url = PERFBOARD_SERIES_PREFIX + urllib.parse.quote(series_id, safe="()[]@,")
+    return md.link(name, url)
 
 
-def _add_header_and_get_markdown_table(headers: Sequence[str],
-                                       rows: Sequence[Tuple],
-                                       size_cut: Optional[int] = None) -> str:
-  """Generates a markdown table with headers.
+def _add_header_and_get_markdown_table(
+    headers: Sequence[str], rows: Sequence[Tuple], size_cut: Optional[int] = None
+) -> str:
+    """Generates a markdown table with headers.
 
-  Args:
-    headers: list of table headers.
-    rows: list of rows. Each row is a tuple with the same length as headers.
-    size_cut: If not None, only show the top N results for each table.
-  """
+    Args:
+      headers: list of table headers.
+      rows: list of rows. Each row is a tuple with the same length as headers.
+      size_cut: If not None, only show the top N results for each table.
+    """
 
-  total_size = len(rows)
-  if size_cut is not None:
-    rows = rows[0:size_cut]
+    total_size = len(rows)
+    if size_cut is not None:
+        rows = rows[0:size_cut]
 
-  columns = [[header] for header in headers]
-  for row in rows:
-    for column, item in zip(columns, row):
-      column.append(item)
+    columns = [[header] for header in headers]
+    for row in rows:
+        for column, item in zip(columns, row):
+            column.append(item)
 
-  table_str = md.table(columns)
-  if size_cut is not None and size_cut < total_size:
-    table_str += "\n\n"
-    table_str += md.italics(
-        f"[Top {size_cut} out of {total_size} results showed]")
-  return table_str
+    table_str = md.table(columns)
+    if size_cut is not None and size_cut < total_size:
+        table_str += "\n\n"
+        table_str += md.italics(f"[Top {size_cut} out of {total_size} results showed]")
+    return table_str
 
 
 T = TypeVar("T")
@@ -422,7 +460,7 @@
     thresholds: Sequence[BenchmarkThreshold],
     metric_unit: str,
 ) -> Tuple[Dict[str, T], Dict[str, T], Dict[str, T], Dict[str, T]]:
-  """Categorize the metrics object into regressed, improved, similar, and the
+    """Categorize the metrics object into regressed, improved, similar, and the
     raw group (the group with no base to compare to).
 
     Args:
@@ -431,98 +469,106 @@
       thresholds: list of threshold settings to match for categorizing.
     Returns:
       A tuple of (regressed, improved, similar, raw) groups.
-  """
+    """
 
-  regressed_map = {}
-  improved_map = {}
-  similar_map = {}
-  raw_map = {}
-  for series_id, metrics_obj in metrics_map.items():
-    current, base = metric_func(metrics_obj)
-    if base is None:
-      raw_map[series_id] = metrics_obj
-      continue
+    regressed_map = {}
+    improved_map = {}
+    similar_map = {}
+    raw_map = {}
+    for series_id, metrics_obj in metrics_map.items():
+        current, base = metric_func(metrics_obj)
+        if base is None:
+            raw_map[series_id] = metrics_obj
+            continue
 
-    series_name = str(metrics_obj)
-    similar_threshold = None
-    for threshold in thresholds:
-      if threshold.regex.match(series_name):
-        similar_threshold = threshold
-        break
-    if similar_threshold is None:
-      raise ValueError(f"No matched threshold setting for: {series_name}")
+        series_name = str(metrics_obj)
+        similar_threshold = None
+        for threshold in thresholds:
+            if threshold.regex.match(series_name):
+                similar_threshold = threshold
+                break
+        if similar_threshold is None:
+            raise ValueError(f"No matched threshold setting for: {series_name}")
 
-    if similar_threshold.unit == ThresholdUnit.PERCENTAGE:
-      ratio = abs(current - base) / base * 100
-    elif similar_threshold.unit.value == metric_unit:
-      ratio = abs(current - base)
-    else:
-      raise ValueError(
-          f"Mismatch between metric unit '{metric_unit}' and threshold unit '{similar_threshold.unit.value}'"
-      )
+        if similar_threshold.unit == ThresholdUnit.PERCENTAGE:
+            ratio = abs(current - base) / base * 100
+        elif similar_threshold.unit.value == metric_unit:
+            ratio = abs(current - base)
+        else:
+            raise ValueError(
+                f"Mismatch between metric unit '{metric_unit}' and threshold unit '{similar_threshold.unit.value}'"
+            )
 
-    if ratio <= similar_threshold.threshold:
-      similar_map[series_id] = metrics_obj
-    elif current > base:
-      regressed_map[series_id] = metrics_obj
-    else:
-      improved_map[series_id] = metrics_obj
+        if ratio <= similar_threshold.threshold:
+            similar_map[series_id] = metrics_obj
+        elif current > base:
+            regressed_map[series_id] = metrics_obj
+        else:
+            improved_map[series_id] = metrics_obj
 
-  return (regressed_map, improved_map, similar_map, raw_map)
+    return (regressed_map, improved_map, similar_map, raw_map)
 
 
 def _get_fixed_point_str(value: Union[int, float], digits=3) -> str:
-  if isinstance(value, int) or value.is_integer():
-    return str(math.floor(value))
-  return f"{{:.{digits}f}}".format(value)
+    if isinstance(value, int) or value.is_integer():
+        return str(math.floor(value))
+    return f"{{:.{digits}f}}".format(value)
 
 
 def _get_compare_text(current: float, base: Optional[int]) -> str:
-  """Generates the text of comparison between current and base value. Returns
+    """Generates the text of comparison between current and base value. Returns
     the current value if the base value is None.
-  """
-  # If base is None, don't need to do compare.
-  if base is None:
-    return f"{_get_fixed_point_str(current)}"
+    """
+    # If base is None, don't need to do compare.
+    if base is None:
+        return f"{_get_fixed_point_str(current)}"
 
-  ratio = abs(current - base) / base
-  direction = "↑" if current > base else ("↓" if current < base else "")
-  return f"{_get_fixed_point_str(current)} (vs. {_get_fixed_point_str(base)}, {ratio:.2%}{direction})"
+    ratio = abs(current - base) / base
+    direction = "↑" if current > base else ("↓" if current < base else "")
+    return f"{_get_fixed_point_str(current)} (vs. {_get_fixed_point_str(base)}, {ratio:.2%}{direction})"
 
 
-def _sort_benchmarks_and_get_table(benchmarks: Dict[str,
-                                                    AggregateBenchmarkLatency],
-                                   size_cut: Optional[int] = None) -> str:
-  """Sorts all benchmarks according to the improvement/regression ratio and
+def _sort_benchmarks_and_get_table(
+    benchmarks: Dict[str, AggregateBenchmarkLatency], size_cut: Optional[int] = None
+) -> str:
+    """Sorts all benchmarks according to the improvement/regression ratio and
     returns a markdown table for it.
 
     Args:
       benchmarks_map: map of (series_id, benchmark object).
       size_cut: If not None, only show the top N results for each table.
-  """
-  sorted_rows = []
-  for series_id, benchmark in benchmarks.items():
-    current = benchmark.mean_time / 1e6
-    base = benchmark.base_mean_time / 1e6
-    ratio = abs(current - base) / base
-    str_mean = _get_compare_text(current, base)
-    clickable_name = _make_series_link(benchmark.name, series_id)
-    sorted_rows.append(
-        (ratio, (clickable_name, str_mean,
-                 f"{_get_fixed_point_str(benchmark.median_time / 1e6)}",
-                 f"{_get_fixed_point_str(benchmark.stddev_time / 1e6)}")))
-  sorted_rows.sort(key=lambda row: row[0], reverse=True)
+    """
+    sorted_rows = []
+    for series_id, benchmark in benchmarks.items():
+        current = benchmark.mean_time / 1e6
+        base = benchmark.base_mean_time / 1e6
+        ratio = abs(current - base) / base
+        str_mean = _get_compare_text(current, base)
+        clickable_name = _make_series_link(benchmark.name, series_id)
+        sorted_rows.append(
+            (
+                ratio,
+                (
+                    clickable_name,
+                    str_mean,
+                    f"{_get_fixed_point_str(benchmark.median_time / 1e6)}",
+                    f"{_get_fixed_point_str(benchmark.stddev_time / 1e6)}",
+                ),
+            )
+        )
+    sorted_rows.sort(key=lambda row: row[0], reverse=True)
 
-  return _add_header_and_get_markdown_table(
-      headers=BENCHMARK_RESULTS_HEADERS,
-      rows=[row[1] for row in sorted_rows],
-      size_cut=size_cut)
+    return _add_header_and_get_markdown_table(
+        headers=BENCHMARK_RESULTS_HEADERS,
+        rows=[row[1] for row in sorted_rows],
+        size_cut=size_cut,
+    )
 
 
-def categorize_benchmarks_into_tables(benchmarks: Dict[
-    str, AggregateBenchmarkLatency],
-                                      size_cut: Optional[int] = None) -> str:
-  """Splits benchmarks into regressed/improved/similar/raw categories and
+def categorize_benchmarks_into_tables(
+    benchmarks: Dict[str, AggregateBenchmarkLatency], size_cut: Optional[int] = None
+) -> str:
+    """Splits benchmarks into regressed/improved/similar/raw categories and
     returns their markdown tables.
 
     If size_cut is None, the table includes regressed/improved/similar/raw
@@ -531,41 +577,51 @@
     Args:
       benchmarks: A dictionary of benchmark names to its aggregate info.
       size_cut: If not None, only show the top N results for each table.
-  """
-  regressed, improved, similar, raw = _categorize_on_single_metric(
-      benchmarks, lambda results: (results.mean_time, results.base_mean_time),
-      BENCHMARK_THRESHOLDS, "ns")
+    """
+    regressed, improved, similar, raw = _categorize_on_single_metric(
+        benchmarks,
+        lambda results: (results.mean_time, results.base_mean_time),
+        BENCHMARK_THRESHOLDS,
+        "ns",
+    )
 
-  tables = []
-  if regressed:
-    tables.append(md.header("Regressed Latencies 🚩", 3))
-    tables.append(_sort_benchmarks_and_get_table(regressed, size_cut))
-  if improved:
-    tables.append(md.header("Improved Latencies 🎉", 3))
-    tables.append(_sort_benchmarks_and_get_table(improved, size_cut))
-  # If we want to abbreviate, similar results won't be interesting.
-  if similar and size_cut is None:
-    tables.append(md.header("Similar Latencies", 3))
-    tables.append(_sort_benchmarks_and_get_table(similar, size_cut))
-  if raw:
-    tables.append(md.header("Raw Latencies", 3))
-    raw_list = [(_make_series_link(name=v.name, series_id=k),
-                 f"{_get_fixed_point_str(v.mean_time / 1e6)}",
-                 f"{_get_fixed_point_str(v.median_time / 1e6)}",
-                 f"{_get_fixed_point_str(v.stddev_time / 1e6)}")
-                for k, v in raw.items()]
-    tables.append(
-        _add_header_and_get_markdown_table(BENCHMARK_RESULTS_HEADERS,
-                                           raw_list,
-                                           size_cut=size_cut))
-  return "\n\n".join(tables)
+    tables = []
+    if regressed:
+        tables.append(md.header("Regressed Latencies 🚩", 3))
+        tables.append(_sort_benchmarks_and_get_table(regressed, size_cut))
+    if improved:
+        tables.append(md.header("Improved Latencies 🎉", 3))
+        tables.append(_sort_benchmarks_and_get_table(improved, size_cut))
+    # If we want to abbreviate, similar results won't be interesting.
+    if similar and size_cut is None:
+        tables.append(md.header("Similar Latencies", 3))
+        tables.append(_sort_benchmarks_and_get_table(similar, size_cut))
+    if raw:
+        tables.append(md.header("Raw Latencies", 3))
+        raw_list = [
+            (
+                _make_series_link(name=v.name, series_id=k),
+                f"{_get_fixed_point_str(v.mean_time / 1e6)}",
+                f"{_get_fixed_point_str(v.median_time / 1e6)}",
+                f"{_get_fixed_point_str(v.stddev_time / 1e6)}",
+            )
+            for k, v in raw.items()
+        ]
+        tables.append(
+            _add_header_and_get_markdown_table(
+                BENCHMARK_RESULTS_HEADERS, raw_list, size_cut=size_cut
+            )
+        )
+    return "\n\n".join(tables)
 
 
-def _sort_metrics_objects_and_get_table(metrics_objs: Dict[str, T],
-                                        mapper: MetricsToTableMapper[T],
-                                        headers: Sequence[str],
-                                        size_cut: Optional[int] = None) -> str:
-  """Sorts all metrics objects according to the improvement/regression ratio and
+def _sort_metrics_objects_and_get_table(
+    metrics_objs: Dict[str, T],
+    mapper: MetricsToTableMapper[T],
+    headers: Sequence[str],
+    size_cut: Optional[int] = None,
+) -> str:
+    """Sorts all metrics objects according to the improvement/regression ratio and
     returns a markdown table for it.
 
     Args:
@@ -574,27 +630,35 @@
       mapper: MetricsToTableMapper for metrics_objs.
       headers: list of table headers.
       size_cut: If not None, only show the top N results for each table.
-  """
-  sorted_rows = []
-  for target_id, metrics_obj in metrics_objs.items():
-    current, base = mapper.get_current_and_base_value(metrics_obj)
-    if base is None:
-      raise AssertionError("Base can't be None for sorting.")
-    ratio = abs(current - base) / base
-    sorted_rows.append((ratio, (
-        _make_series_link(str(metrics_obj), mapper.get_series_id(target_id)),
-        _get_compare_text(current, base),
-    )))
-  sorted_rows.sort(key=lambda row: row[0], reverse=True)
+    """
+    sorted_rows = []
+    for target_id, metrics_obj in metrics_objs.items():
+        current, base = mapper.get_current_and_base_value(metrics_obj)
+        if base is None:
+            raise AssertionError("Base can't be None for sorting.")
+        ratio = abs(current - base) / base
+        sorted_rows.append(
+            (
+                ratio,
+                (
+                    _make_series_link(
+                        str(metrics_obj), mapper.get_series_id(target_id)
+                    ),
+                    _get_compare_text(current, base),
+                ),
+            )
+        )
+    sorted_rows.sort(key=lambda row: row[0], reverse=True)
 
-  return _add_header_and_get_markdown_table(
-      headers=headers, rows=[row[1] for row in sorted_rows], size_cut=size_cut)
+    return _add_header_and_get_markdown_table(
+        headers=headers, rows=[row[1] for row in sorted_rows], size_cut=size_cut
+    )
 
 
 def categorize_compilation_metrics_into_tables(
-    compile_metrics_map: Dict[str, CompilationMetrics],
-    size_cut: Optional[int] = None) -> str:
-  """Splits compilation metrics into regressed/improved/all categories
+    compile_metrics_map: Dict[str, CompilationMetrics], size_cut: Optional[int] = None
+) -> str:
+    """Splits compilation metrics into regressed/improved/all categories
     and returns their markdown tables.
 
     If size_cut is None, the table includes regressed/improved/all categories;
@@ -604,51 +668,61 @@
       compile_metrics_map: A dictionary of benchmark names to its compilation
         metrics.
       size_cut: If not None, only show the top N results for each table.
-  """
+    """
 
-  tables = []
-  for mapper in COMPILATION_METRICS_TO_TABLE_MAPPERS:
-    regressed, improved, _, _ = _categorize_on_single_metric(
-        compile_metrics_map, mapper.get_current_and_base_value,
-        mapper.get_metric_thresholds(), mapper.get_unit())
+    tables = []
+    for mapper in COMPILATION_METRICS_TO_TABLE_MAPPERS:
+        regressed, improved, _, _ = _categorize_on_single_metric(
+            compile_metrics_map,
+            mapper.get_current_and_base_value,
+            mapper.get_metric_thresholds(),
+            mapper.get_unit(),
+        )
 
-    table_title = mapper.get_table_title()
-    table_header = mapper.get_table_header()
-    if regressed:
-      tables.append(md.header(f"Regressed {table_title} 🚩", 3))
-      tables.append(
-          _sort_metrics_objects_and_get_table(
-              metrics_objs=regressed,
-              mapper=mapper,
-              headers=["Benchmark Name", table_header],
-              size_cut=size_cut))
-    if improved:
-      tables.append(md.header(f"Improved {table_title} 🎉", 3))
-      tables.append(
-          _sort_metrics_objects_and_get_table(
-              metrics_objs=improved,
-              mapper=mapper,
-              headers=["Benchmark Name", table_header],
-              size_cut=size_cut))
+        table_title = mapper.get_table_title()
+        table_header = mapper.get_table_header()
+        if regressed:
+            tables.append(md.header(f"Regressed {table_title} 🚩", 3))
+            tables.append(
+                _sort_metrics_objects_and_get_table(
+                    metrics_objs=regressed,
+                    mapper=mapper,
+                    headers=["Benchmark Name", table_header],
+                    size_cut=size_cut,
+                )
+            )
+        if improved:
+            tables.append(md.header(f"Improved {table_title} 🎉", 3))
+            tables.append(
+                _sort_metrics_objects_and_get_table(
+                    metrics_objs=improved,
+                    mapper=mapper,
+                    headers=["Benchmark Name", table_header],
+                    size_cut=size_cut,
+                )
+            )
 
-  # If we want to abbreviate, similar results won't be interesting.
-  if size_cut is None and compile_metrics_map:
-    tables.append(md.header("All Compilation Metrics", 3))
-    headers = ["Benchmark Name"] + [
-        mapper.get_table_header()
-        for mapper in COMPILATION_METRICS_TO_TABLE_MAPPERS
-    ]
-    rows = []
-    for target_id, metrics in compile_metrics_map.items():
-      row = [metrics.name]
-      for mapper in COMPILATION_METRICS_TO_TABLE_MAPPERS:
-        current, base = mapper.get_current_and_base_value(metrics)
-        row.append(
-            _make_series_link(_get_compare_text(current, base),
-                              mapper.get_series_id(target_id)))
-      rows.append(tuple(row))
+    # If we want to abbreviate, similar results won't be interesting.
+    if size_cut is None and compile_metrics_map:
+        tables.append(md.header("All Compilation Metrics", 3))
+        headers = ["Benchmark Name"] + [
+            mapper.get_table_header() for mapper in COMPILATION_METRICS_TO_TABLE_MAPPERS
+        ]
+        rows = []
+        for target_id, metrics in compile_metrics_map.items():
+            row = [metrics.name]
+            for mapper in COMPILATION_METRICS_TO_TABLE_MAPPERS:
+                current, base = mapper.get_current_and_base_value(metrics)
+                row.append(
+                    _make_series_link(
+                        _get_compare_text(current, base),
+                        mapper.get_series_id(target_id),
+                    )
+                )
+            rows.append(tuple(row))
 
-    tables.append(
-        _add_header_and_get_markdown_table(headers, rows, size_cut=size_cut))
+        tables.append(
+            _add_header_and_get_markdown_table(headers, rows, size_cut=size_cut)
+        )
 
-  return "\n\n".join(tables)
+    return "\n\n".join(tables)
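For orientation while reading the reformatted comparison code above: the per-metric decision inside `_categorize_on_single_metric` reduces to a small rule. The sketch below restates it with hypothetical names (`categorize`, `threshold_pct`) and models percentage thresholds only, leaving out the real `BenchmarkThreshold`/`ThresholdUnit` handling:

```python
# Minimal sketch of the regressed/improved/similar decision for one metric pair.
# `categorize` and `threshold_pct` are hypothetical names; only percentage
# thresholds are modeled here.
def categorize(current: float, base: float, threshold_pct: float) -> str:
    ratio = abs(current - base) / base * 100  # change relative to base, in percent
    if ratio <= threshold_pct:
        return "similar"
    return "regressed" if current > base else "improved"


assert categorize(105.0, 100.0, 10.0) == "similar"  # within the 10% band
assert categorize(130.0, 100.0, 10.0) == "regressed"  # larger value is worse
assert categorize(70.0, 100.0, 10.0) == "improved"
```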
diff --git a/build_tools/benchmarks/common/benchmark_suite.py b/build_tools/benchmarks/common/benchmark_suite.py
index d673909..da094d7 100644
--- a/build_tools/benchmarks/common/benchmark_suite.py
+++ b/build_tools/benchmarks/common/benchmark_suite.py
@@ -25,7 +25,7 @@
 
 @dataclass
 class BenchmarkCase:
-  """Represents a benchmark case.
+    """Represents a benchmark case.
 
     model_name: the source model, e.g., 'MobileSSD'.
     model_tags: the source model tags, e.g., ['f32'].
@@ -35,148 +35,182 @@
     benchmark_tool_name: the benchmark tool, e.g., 'iree-benchmark-module'.
     benchmark_case_dir: the path to benchmark case directory.
     run_config: the run config from e2e test framework.
-  """
+    """
 
-  model_name: str
-  model_tags: Sequence[str]
-  bench_mode: Sequence[str]
-  target_arch: common_definitions.DeviceArchitecture
-  driver_info: DriverInfo
-  benchmark_tool_name: str
-  benchmark_case_dir: pathlib.Path
-  run_config: iree_definitions.E2EModelRunConfig
+    model_name: str
+    model_tags: Sequence[str]
+    bench_mode: Sequence[str]
+    target_arch: common_definitions.DeviceArchitecture
+    driver_info: DriverInfo
+    benchmark_tool_name: str
+    benchmark_case_dir: pathlib.Path
+    run_config: iree_definitions.E2EModelRunConfig
 
 
 # A map from execution config to driver info. This is temporary during migration
 # before we can drop the DriverInfo.
-EXECUTION_CONFIG_TO_DRIVER_INFO_KEY_MAP: Dict[Tuple[
-    iree_definitions.RuntimeDriver, iree_definitions.RuntimeLoader], str] = {
-        (iree_definitions.RuntimeDriver.LOCAL_TASK, iree_definitions.RuntimeLoader.EMBEDDED_ELF):
-            "iree-llvm-cpu",
-        (iree_definitions.RuntimeDriver.LOCAL_SYNC, iree_definitions.RuntimeLoader.EMBEDDED_ELF):
-            "iree-llvm-cpu-sync",
-        (iree_definitions.RuntimeDriver.LOCAL_TASK, iree_definitions.RuntimeLoader.VMVX_MODULE):
-            "iree-vmvx",
-        (iree_definitions.RuntimeDriver.LOCAL_SYNC, iree_definitions.RuntimeLoader.VMVX_MODULE):
-            "iree-vmvx-sync",
-        (iree_definitions.RuntimeDriver.VULKAN, iree_definitions.RuntimeLoader.NONE):
-            "iree-vulkan",
-        (iree_definitions.RuntimeDriver.CUDA, iree_definitions.RuntimeLoader.NONE):
-            "iree-cuda",
-    }
+EXECUTION_CONFIG_TO_DRIVER_INFO_KEY_MAP: Dict[
+    Tuple[iree_definitions.RuntimeDriver, iree_definitions.RuntimeLoader], str
+] = {
+    (
+        iree_definitions.RuntimeDriver.LOCAL_TASK,
+        iree_definitions.RuntimeLoader.EMBEDDED_ELF,
+    ): "iree-llvm-cpu",
+    (
+        iree_definitions.RuntimeDriver.LOCAL_SYNC,
+        iree_definitions.RuntimeLoader.EMBEDDED_ELF,
+    ): "iree-llvm-cpu-sync",
+    (
+        iree_definitions.RuntimeDriver.LOCAL_TASK,
+        iree_definitions.RuntimeLoader.VMVX_MODULE,
+    ): "iree-vmvx",
+    (
+        iree_definitions.RuntimeDriver.LOCAL_SYNC,
+        iree_definitions.RuntimeLoader.VMVX_MODULE,
+    ): "iree-vmvx-sync",
+    (
+        iree_definitions.RuntimeDriver.VULKAN,
+        iree_definitions.RuntimeLoader.NONE,
+    ): "iree-vulkan",
+    (
+        iree_definitions.RuntimeDriver.CUDA,
+        iree_definitions.RuntimeLoader.NONE,
+    ): "iree-cuda",
+}
 
 
 class BenchmarkSuite(object):
-  """Represents the benchmarks in benchmark suite directory."""
+    """Represents the benchmarks in benchmark suite directory."""
 
-  def __init__(self, benchmark_cases: Sequence[BenchmarkCase]):
-    """Construct a benchmark suite.
+    def __init__(self, benchmark_cases: Sequence[BenchmarkCase]):
+        """Construct a benchmark suite.
 
-    Args:
-      benchmark_cases: list of benchmark cases.
-    """
-    self.benchmark_cases = list(benchmark_cases)
+        Args:
+          benchmark_cases: list of benchmark cases.
+        """
+        self.benchmark_cases = list(benchmark_cases)
 
-  def filter_benchmarks(
-      self,
-      available_drivers: Optional[Sequence[str]] = None,
-      available_loaders: Optional[Sequence[str]] = None,
-      target_architectures: Optional[Sequence[
-          common_definitions.DeviceArchitecture]] = None,
-      driver_filter: Optional[str] = None,
-      mode_filter: Optional[str] = None,
-      model_name_filter: Optional[str] = None) -> Sequence[BenchmarkCase]:
-    """Filters benchmarks.
-      Args:
-        available_drivers: list of drivers supported by the tools. None means to
-          match any driver.
-        available_loaders: list of executable loaders supported by the tools.
-          None means to match any loader.
-        target_architectures: list of target architectures to be included. None
-          means no filter.
-        driver_filter: driver filter regex.
-        mode_filter: benchmark mode regex.
-        model_name_filter: model name regex.
-      Returns:
-        A list of matched benchmark cases.
-    """
+    def filter_benchmarks(
+        self,
+        available_drivers: Optional[Sequence[str]] = None,
+        available_loaders: Optional[Sequence[str]] = None,
+        target_architectures: Optional[
+            Sequence[common_definitions.DeviceArchitecture]
+        ] = None,
+        driver_filter: Optional[str] = None,
+        mode_filter: Optional[str] = None,
+        model_name_filter: Optional[str] = None,
+    ) -> Sequence[BenchmarkCase]:
+        """Filters benchmarks.
+        Args:
+          available_drivers: list of drivers supported by the tools. None means to
+            match any driver.
+          available_loaders: list of executable loaders supported by the tools.
+            None means to match any loader.
+          target_architectures: list of target architectures to be included. None
+            means no filter.
+          driver_filter: driver filter regex.
+          mode_filter: benchmark mode regex.
+          model_name_filter: model name regex.
+        Returns:
+          A list of matched benchmark cases.
+        """
 
-    chosen_cases = []
-    for benchmark_case in self.benchmark_cases:
-      driver_info = benchmark_case.driver_info
+        chosen_cases = []
+        for benchmark_case in self.benchmark_cases:
+            driver_info = benchmark_case.driver_info
 
-      driver_name = driver_info.driver_name
-      matched_available_driver = (available_drivers is None or
-                                  driver_name in available_drivers)
-      matched_driver_filter = driver_filter is None or re.match(
-          driver_filter, driver_name) is not None
-      matched_driver = matched_available_driver and matched_driver_filter
+            driver_name = driver_info.driver_name
+            matched_available_driver = (
+                available_drivers is None or driver_name in available_drivers
+            )
+            matched_driver_filter = (
+                driver_filter is None
+                or re.match(driver_filter, driver_name) is not None
+            )
+            matched_driver = matched_available_driver and matched_driver_filter
 
-      matched_loader = not driver_info.loader_name or available_loaders is None or (
-          driver_info.loader_name in available_loaders)
+            matched_loader = (
+                not driver_info.loader_name
+                or available_loaders is None
+                or (driver_info.loader_name in available_loaders)
+            )
 
-      if target_architectures is None:
-        matched_arch = True
-      else:
-        matched_arch = benchmark_case.target_arch in target_architectures
+            if target_architectures is None:
+                matched_arch = True
+            else:
+                matched_arch = benchmark_case.target_arch in target_architectures
 
-      bench_mode = ','.join(benchmark_case.bench_mode)
-      matched_mode = (mode_filter is None or
-                      re.match(mode_filter, bench_mode) is not None)
+            bench_mode = ",".join(benchmark_case.bench_mode)
+            matched_mode = (
+                mode_filter is None or re.match(mode_filter, bench_mode) is not None
+            )
 
-      model_name_with_tags = benchmark_case.model_name
-      if len(benchmark_case.model_tags) > 0:
-        model_name_with_tags += f"-{','.join(benchmark_case.model_tags)}"
-      matched_model_name = (model_name_filter is None or re.match(
-          model_name_filter, model_name_with_tags) is not None)
+            model_name_with_tags = benchmark_case.model_name
+            if len(benchmark_case.model_tags) > 0:
+                model_name_with_tags += f"-{','.join(benchmark_case.model_tags)}"
+            matched_model_name = (
+                model_name_filter is None
+                or re.match(model_name_filter, model_name_with_tags) is not None
+            )
 
-      if (matched_driver and matched_loader and matched_arch and
-          matched_model_name and matched_mode):
-        chosen_cases.append(benchmark_case)
+            if (
+                matched_driver
+                and matched_loader
+                and matched_arch
+                and matched_model_name
+                and matched_mode
+            ):
+                chosen_cases.append(benchmark_case)
 
-    return chosen_cases
+        return chosen_cases
 
-  @staticmethod
-  def load_from_run_configs(
-      run_configs: Sequence[iree_definitions.E2EModelRunConfig],
-      root_benchmark_dir: pathlib.Path):
-    """Loads the benchmarks from the run configs.
+    @staticmethod
+    def load_from_run_configs(
+        run_configs: Sequence[iree_definitions.E2EModelRunConfig],
+        root_benchmark_dir: pathlib.Path,
+    ):
+        """Loads the benchmarks from the run configs.
 
-    Args:
-      run_configs: list of benchmark run configs.
-    Returns:
-      A benchmark suite.
-    """
+        Args:
+          run_configs: list of benchmark run configs.
+        Returns:
+          A benchmark suite.
+        """
 
-    benchmark_cases = []
-    for run_config in run_configs:
-      module_gen_config = run_config.module_generation_config
-      module_exec_config = run_config.module_execution_config
-      target_device_spec = run_config.target_device_spec
+        benchmark_cases = []
+        for run_config in run_configs:
+            module_gen_config = run_config.module_generation_config
+            module_exec_config = run_config.module_execution_config
+            target_device_spec = run_config.target_device_spec
 
-      driver_info_key = EXECUTION_CONFIG_TO_DRIVER_INFO_KEY_MAP.get(
-          (module_exec_config.driver, module_exec_config.loader))
-      if driver_info_key is None:
-        raise ValueError(
-            f"Can't map execution config to driver info: {module_exec_config}.")
-      driver_info = IREE_DRIVERS_INFOS[driver_info_key]
+            driver_info_key = EXECUTION_CONFIG_TO_DRIVER_INFO_KEY_MAP.get(
+                (module_exec_config.driver, module_exec_config.loader)
+            )
+            if driver_info_key is None:
+                raise ValueError(
+                    f"Can't map execution config to driver info: {module_exec_config}."
+                )
+            driver_info = IREE_DRIVERS_INFOS[driver_info_key]
 
-      target_arch = target_device_spec.architecture
-      model = module_gen_config.imported_model.model
+            target_arch = target_device_spec.architecture
+            model = module_gen_config.imported_model.model
 
-      module_dir_path = iree_artifacts.get_module_dir_path(
-          module_generation_config=module_gen_config,
-          root_path=root_benchmark_dir)
-      module_dir_path = pathlib.Path(module_dir_path)
+            module_dir_path = iree_artifacts.get_module_dir_path(
+                module_generation_config=module_gen_config, root_path=root_benchmark_dir
+            )
+            module_dir_path = pathlib.Path(module_dir_path)
 
-      benchmark_case = BenchmarkCase(model_name=model.name,
-                                     model_tags=model.tags,
-                                     bench_mode=module_exec_config.tags,
-                                     target_arch=target_arch,
-                                     driver_info=driver_info,
-                                     benchmark_tool_name=run_config.tool.value,
-                                     benchmark_case_dir=module_dir_path,
-                                     run_config=run_config)
-      benchmark_cases.append(benchmark_case)
+            benchmark_case = BenchmarkCase(
+                model_name=model.name,
+                model_tags=model.tags,
+                bench_mode=module_exec_config.tags,
+                target_arch=target_arch,
+                driver_info=driver_info,
+                benchmark_tool_name=run_config.tool.value,
+                benchmark_case_dir=module_dir_path,
+                run_config=run_config,
+            )
+            benchmark_cases.append(benchmark_case)
 
-    return BenchmarkSuite(benchmark_cases=benchmark_cases)
+        return BenchmarkSuite(benchmark_cases=benchmark_cases)
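The exploded, one-entry-per-line shape of `EXECUTION_CONFIG_TO_DRIVER_INFO_KEY_MAP` and the long call sites above is standard Black output for lines beyond its default 88-column limit: when the elements do not fit on a single continuation line, each one is placed on its own line and gains a trailing comma. A hypothetical, self-contained snippet (the names below are made up, not taken from this diff) shows the same behavior:

```python
def combine_benchmark_results(*samples):  # hypothetical helper, only for illustration
    return sum(samples)


latency_samples_from_device = 1.0
latency_samples_from_host = 2.0
warmup_iteration_count = 3
measurement_iteration_count = 10

# Too long for Black's default 88-column limit, even wrapped onto one continuation line:
aggregate = combine_benchmark_results(latency_samples_from_device, latency_samples_from_host, warmup_iteration_count, measurement_iteration_count)

# What Black produces instead: one element per line, four-space indent, trailing comma.
aggregate = combine_benchmark_results(
    latency_samples_from_device,
    latency_samples_from_host,
    warmup_iteration_count,
    measurement_iteration_count,
)
```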
diff --git a/build_tools/benchmarks/common/benchmark_suite_test.py b/build_tools/benchmarks/common/benchmark_suite_test.py
index 7a8d69f..208c35e 100644
--- a/build_tools/benchmarks/common/benchmark_suite_test.py
+++ b/build_tools/benchmarks/common/benchmark_suite_test.py
@@ -14,208 +14,244 @@
 
 
 class BenchmarkSuiteTest(unittest.TestCase):
+    def test_filter_benchmarks(self):
+        model = common_definitions.Model(
+            id="model",
+            name="model",
+            tags=[],
+            source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
+            source_url="",
+            entry_function="predict",
+            input_types=["1xf32"],
+        )
+        exec_config = iree_definitions.ModuleExecutionConfig.build(
+            id="exec",
+            tags=[],
+            loader=iree_definitions.RuntimeLoader.EMBEDDED_ELF,
+            driver=iree_definitions.RuntimeDriver.LOCAL_SYNC,
+        )
+        device_spec = common_definitions.DeviceSpec.build(
+            id="dev",
+            device_name="dev",
+            architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
+            host_environment=common_definitions.HostEnvironment.LINUX_X86_64,
+            device_parameters=[],
+            tags=[],
+        )
+        compile_target = iree_definitions.CompileTarget(
+            target_backend=iree_definitions.TargetBackend.LLVM_CPU,
+            target_architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
+            target_abi=iree_definitions.TargetABI.LINUX_GNU,
+        )
+        dummy_run_config = iree_definitions.E2EModelRunConfig.build(
+            module_generation_config=iree_definitions.ModuleGenerationConfig.build(
+                imported_model=iree_definitions.ImportedModel.from_model(model),
+                compile_config=iree_definitions.CompileConfig.build(
+                    id="1", tags=[], compile_targets=[compile_target]
+                ),
+            ),
+            module_execution_config=exec_config,
+            target_device_spec=device_spec,
+            input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
+            tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE,
+        )
 
-  def test_filter_benchmarks(self):
-    model = common_definitions.Model(
-        id="model",
-        name="model",
-        tags=[],
-        source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
-        source_url="",
-        entry_function="predict",
-        input_types=["1xf32"])
-    exec_config = iree_definitions.ModuleExecutionConfig.build(
-        id="exec",
-        tags=[],
-        loader=iree_definitions.RuntimeLoader.EMBEDDED_ELF,
-        driver=iree_definitions.RuntimeDriver.LOCAL_SYNC)
-    device_spec = common_definitions.DeviceSpec.build(
-        id="dev",
-        device_name="dev",
-        architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
-        host_environment=common_definitions.HostEnvironment.LINUX_X86_64,
-        device_parameters=[],
-        tags=[])
-    compile_target = iree_definitions.CompileTarget(
-        target_backend=iree_definitions.TargetBackend.LLVM_CPU,
-        target_architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
-        target_abi=iree_definitions.TargetABI.LINUX_GNU)
-    dummy_run_config = iree_definitions.E2EModelRunConfig.build(
-        module_generation_config=iree_definitions.ModuleGenerationConfig.build(
-            imported_model=iree_definitions.ImportedModel.from_model(model),
-            compile_config=iree_definitions.CompileConfig.build(
-                id="1", tags=[], compile_targets=[compile_target])),
-        module_execution_config=exec_config,
-        target_device_spec=device_spec,
-        input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
-        tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE)
+        case1 = BenchmarkCase(
+            model_name="deepnet",
+            model_tags=[],
+            bench_mode=["1-thread", "full-inference"],
+            target_arch=common_definitions.DeviceArchitecture.ARMV8_2_A_GENERIC,
+            driver_info=IREE_DRIVERS_INFOS["iree-llvm-cpu"],
+            benchmark_case_dir=pathlib.Path("case1"),
+            benchmark_tool_name="tool",
+            run_config=dummy_run_config,
+        )
+        case2 = BenchmarkCase(
+            model_name="deepnetv2",
+            model_tags=["f32"],
+            bench_mode=["full-inference"],
+            target_arch=common_definitions.DeviceArchitecture.ARM_VALHALL,
+            driver_info=IREE_DRIVERS_INFOS["iree-vulkan"],
+            benchmark_case_dir=pathlib.Path("case2"),
+            benchmark_tool_name="tool",
+            run_config=dummy_run_config,
+        )
+        case3 = BenchmarkCase(
+            model_name="deepnetv3",
+            model_tags=["f32"],
+            bench_mode=["full-inference"],
+            target_arch=common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
+            driver_info=IREE_DRIVERS_INFOS["iree-llvm-cpu-sync"],
+            benchmark_case_dir=pathlib.Path("case3"),
+            benchmark_tool_name="tool",
+            run_config=dummy_run_config,
+        )
+        suite = BenchmarkSuite([case1, case2, case3])
 
-    case1 = BenchmarkCase(
-        model_name="deepnet",
-        model_tags=[],
-        bench_mode=["1-thread", "full-inference"],
-        target_arch=common_definitions.DeviceArchitecture.ARMV8_2_A_GENERIC,
-        driver_info=IREE_DRIVERS_INFOS["iree-llvm-cpu"],
-        benchmark_case_dir=pathlib.Path("case1"),
-        benchmark_tool_name="tool",
-        run_config=dummy_run_config)
-    case2 = BenchmarkCase(
-        model_name="deepnetv2",
-        model_tags=["f32"],
-        bench_mode=["full-inference"],
-        target_arch=common_definitions.DeviceArchitecture.ARM_VALHALL,
-        driver_info=IREE_DRIVERS_INFOS["iree-vulkan"],
-        benchmark_case_dir=pathlib.Path("case2"),
-        benchmark_tool_name="tool",
-        run_config=dummy_run_config)
-    case3 = BenchmarkCase(
-        model_name="deepnetv3",
-        model_tags=["f32"],
-        bench_mode=["full-inference"],
-        target_arch=common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
-        driver_info=IREE_DRIVERS_INFOS["iree-llvm-cpu-sync"],
-        benchmark_case_dir=pathlib.Path("case3"),
-        benchmark_tool_name="tool",
-        run_config=dummy_run_config)
-    suite = BenchmarkSuite([case1, case2, case3])
-
-    cpu_and_gpu_benchmarks = suite.filter_benchmarks(
-        available_drivers=["local-task", "vulkan"],
-        available_loaders=["embedded-elf"],
-        target_architectures=[
-            common_definitions.DeviceArchitecture.ARMV8_2_A_GENERIC,
-            common_definitions.DeviceArchitecture.ARM_VALHALL,
-        ],
-        driver_filter=None,
-        mode_filter=".*full-inference.*",
-        model_name_filter="deepnet.*")
-    gpu_benchmarks = suite.filter_benchmarks(
-        available_drivers=["local-task", "vulkan"],
-        available_loaders=["embedded-elf"],
-        target_architectures=[
-            common_definitions.DeviceArchitecture.ARM_VALHALL,
-        ],
-        driver_filter="vulkan",
-        mode_filter=".*full-inference.*",
-        model_name_filter="deepnet.*")
-    all_benchmarks = suite.filter_benchmarks(available_drivers=None,
-                                             target_architectures=None,
-                                             driver_filter=None,
-                                             mode_filter=None,
-                                             model_name_filter=None)
-
-    self.assertEqual(cpu_and_gpu_benchmarks, [case1, case2])
-    self.assertEqual(gpu_benchmarks, [case2])
-    self.assertEqual(all_benchmarks, [case1, case2, case3])
-
-  def test_load_from_run_configs(self):
-    model_tflite = common_definitions.Model(
-        id="tflite",
-        name="model_tflite",
-        tags=[],
-        source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
-        source_url="",
-        entry_function="predict",
-        input_types=["1xf32"])
-    model_tf = common_definitions.Model(
-        id="tf",
-        name="model_tf",
-        tags=["fp32"],
-        source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
-        source_url="",
-        entry_function="predict",
-        input_types=["1xf32"])
-    exec_config_a = iree_definitions.ModuleExecutionConfig.build(
-        id="exec_a",
-        tags=["defaults"],
-        loader=iree_definitions.RuntimeLoader.EMBEDDED_ELF,
-        driver=iree_definitions.RuntimeDriver.LOCAL_SYNC)
-    exec_config_b = iree_definitions.ModuleExecutionConfig.build(
-        id="exec_b",
-        tags=["experimental"],
-        loader=iree_definitions.RuntimeLoader.EMBEDDED_ELF,
-        driver=iree_definitions.RuntimeDriver.LOCAL_TASK)
-    device_spec_a = common_definitions.DeviceSpec.build(
-        id="dev_a",
-        device_name="a",
-        architecture=common_definitions.DeviceArchitecture.RV32_GENERIC,
-        host_environment=common_definitions.HostEnvironment.LINUX_X86_64,
-        device_parameters=[],
-        tags=[])
-    device_spec_b = common_definitions.DeviceSpec.build(
-        id="dev_b",
-        device_name="b",
-        architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
-        host_environment=common_definitions.HostEnvironment.LINUX_X86_64,
-        device_parameters=[],
-        tags=[])
-    compile_target = iree_definitions.CompileTarget(
-        target_backend=iree_definitions.TargetBackend.LLVM_CPU,
-        target_architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
-        target_abi=iree_definitions.TargetABI.LINUX_GNU)
-    run_config_a = iree_definitions.E2EModelRunConfig.build(
-        module_generation_config=iree_definitions.ModuleGenerationConfig.build(
-            imported_model=iree_definitions.ImportedModel.from_model(
-                model_tflite),
-            compile_config=iree_definitions.CompileConfig.build(
-                id="1", tags=[], compile_targets=[compile_target])),
-        module_execution_config=exec_config_a,
-        target_device_spec=device_spec_a,
-        input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
-        tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE)
-    run_config_b = iree_definitions.E2EModelRunConfig.build(
-        module_generation_config=iree_definitions.ModuleGenerationConfig.build(
-            imported_model=iree_definitions.ImportedModel.from_model(
-                model_tflite),
-            compile_config=iree_definitions.CompileConfig.build(
-                id="2", tags=[], compile_targets=[compile_target])),
-        module_execution_config=exec_config_b,
-        target_device_spec=device_spec_b,
-        input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
-        tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE)
-    run_config_c = iree_definitions.E2EModelRunConfig.build(
-        module_generation_config=iree_definitions.ModuleGenerationConfig.build(
-            imported_model=iree_definitions.ImportedModel.from_model(model_tf),
-            compile_config=iree_definitions.CompileConfig.build(
-                id="3", tags=[], compile_targets=[compile_target])),
-        module_execution_config=exec_config_a,
-        target_device_spec=device_spec_a,
-        input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
-        tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE)
-    run_configs = [run_config_a, run_config_b, run_config_c]
-    root_dir = pathlib.Path("root")
-
-    suite = BenchmarkSuite.load_from_run_configs(run_configs=run_configs,
-                                                 root_benchmark_dir=root_dir)
-
-    loaded_run_configs = [case.run_config for case in suite.filter_benchmarks()]
-    self.assertEqual(loaded_run_configs, [
-        run_config_a,
-        run_config_b,
-        run_config_c,
-    ])
-    run_config_c_case_dir = pathlib.Path(
-        iree_artifacts.get_module_dir_path(
-            run_config_c.module_generation_config, root_dir))
-    self.assertEqual(
-        suite.filter_benchmarks(
+        cpu_and_gpu_benchmarks = suite.filter_benchmarks(
+            available_drivers=["local-task", "vulkan"],
+            available_loaders=["embedded-elf"],
             target_architectures=[
-                common_definitions.DeviceArchitecture.RV32_GENERIC
+                common_definitions.DeviceArchitecture.ARMV8_2_A_GENERIC,
+                common_definitions.DeviceArchitecture.ARM_VALHALL,
             ],
-            model_name_filter="model_tf.*fp32",
-            mode_filter="defaults",
-        ), [
-            BenchmarkCase(
-                model_name=model_tf.name,
-                model_tags=model_tf.tags,
-                bench_mode=exec_config_a.tags,
-                target_arch=common_definitions.DeviceArchitecture.RV32_GENERIC,
-                driver_info=IREE_DRIVERS_INFOS["iree-llvm-cpu-sync"],
-                benchmark_tool_name="iree-benchmark-module",
-                benchmark_case_dir=run_config_c_case_dir,
-                run_config=run_config_c)
-        ])
+            driver_filter=None,
+            mode_filter=".*full-inference.*",
+            model_name_filter="deepnet.*",
+        )
+        gpu_benchmarks = suite.filter_benchmarks(
+            available_drivers=["local-task", "vulkan"],
+            available_loaders=["embedded-elf"],
+            target_architectures=[
+                common_definitions.DeviceArchitecture.ARM_VALHALL,
+            ],
+            driver_filter="vulkan",
+            mode_filter=".*full-inference.*",
+            model_name_filter="deepnet.*",
+        )
+        all_benchmarks = suite.filter_benchmarks(
+            available_drivers=None,
+            target_architectures=None,
+            driver_filter=None,
+            mode_filter=None,
+            model_name_filter=None,
+        )
+
+        self.assertEqual(cpu_and_gpu_benchmarks, [case1, case2])
+        self.assertEqual(gpu_benchmarks, [case2])
+        self.assertEqual(all_benchmarks, [case1, case2, case3])
+
+    def test_load_from_run_configs(self):
+        model_tflite = common_definitions.Model(
+            id="tflite",
+            name="model_tflite",
+            tags=[],
+            source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
+            source_url="",
+            entry_function="predict",
+            input_types=["1xf32"],
+        )
+        model_tf = common_definitions.Model(
+            id="tf",
+            name="model_tf",
+            tags=["fp32"],
+            source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
+            source_url="",
+            entry_function="predict",
+            input_types=["1xf32"],
+        )
+        exec_config_a = iree_definitions.ModuleExecutionConfig.build(
+            id="exec_a",
+            tags=["defaults"],
+            loader=iree_definitions.RuntimeLoader.EMBEDDED_ELF,
+            driver=iree_definitions.RuntimeDriver.LOCAL_SYNC,
+        )
+        exec_config_b = iree_definitions.ModuleExecutionConfig.build(
+            id="exec_b",
+            tags=["experimental"],
+            loader=iree_definitions.RuntimeLoader.EMBEDDED_ELF,
+            driver=iree_definitions.RuntimeDriver.LOCAL_TASK,
+        )
+        device_spec_a = common_definitions.DeviceSpec.build(
+            id="dev_a",
+            device_name="a",
+            architecture=common_definitions.DeviceArchitecture.RV32_GENERIC,
+            host_environment=common_definitions.HostEnvironment.LINUX_X86_64,
+            device_parameters=[],
+            tags=[],
+        )
+        device_spec_b = common_definitions.DeviceSpec.build(
+            id="dev_b",
+            device_name="b",
+            architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
+            host_environment=common_definitions.HostEnvironment.LINUX_X86_64,
+            device_parameters=[],
+            tags=[],
+        )
+        compile_target = iree_definitions.CompileTarget(
+            target_backend=iree_definitions.TargetBackend.LLVM_CPU,
+            target_architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
+            target_abi=iree_definitions.TargetABI.LINUX_GNU,
+        )
+        run_config_a = iree_definitions.E2EModelRunConfig.build(
+            module_generation_config=iree_definitions.ModuleGenerationConfig.build(
+                imported_model=iree_definitions.ImportedModel.from_model(model_tflite),
+                compile_config=iree_definitions.CompileConfig.build(
+                    id="1", tags=[], compile_targets=[compile_target]
+                ),
+            ),
+            module_execution_config=exec_config_a,
+            target_device_spec=device_spec_a,
+            input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
+            tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE,
+        )
+        run_config_b = iree_definitions.E2EModelRunConfig.build(
+            module_generation_config=iree_definitions.ModuleGenerationConfig.build(
+                imported_model=iree_definitions.ImportedModel.from_model(model_tflite),
+                compile_config=iree_definitions.CompileConfig.build(
+                    id="2", tags=[], compile_targets=[compile_target]
+                ),
+            ),
+            module_execution_config=exec_config_b,
+            target_device_spec=device_spec_b,
+            input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
+            tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE,
+        )
+        run_config_c = iree_definitions.E2EModelRunConfig.build(
+            module_generation_config=iree_definitions.ModuleGenerationConfig.build(
+                imported_model=iree_definitions.ImportedModel.from_model(model_tf),
+                compile_config=iree_definitions.CompileConfig.build(
+                    id="3", tags=[], compile_targets=[compile_target]
+                ),
+            ),
+            module_execution_config=exec_config_a,
+            target_device_spec=device_spec_a,
+            input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
+            tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE,
+        )
+        run_configs = [run_config_a, run_config_b, run_config_c]
+        root_dir = pathlib.Path("root")
+
+        suite = BenchmarkSuite.load_from_run_configs(
+            run_configs=run_configs, root_benchmark_dir=root_dir
+        )
+
+        loaded_run_configs = [case.run_config for case in suite.filter_benchmarks()]
+        self.assertEqual(
+            loaded_run_configs,
+            [
+                run_config_a,
+                run_config_b,
+                run_config_c,
+            ],
+        )
+        run_config_c_case_dir = pathlib.Path(
+            iree_artifacts.get_module_dir_path(
+                run_config_c.module_generation_config, root_dir
+            )
+        )
+        self.assertEqual(
+            suite.filter_benchmarks(
+                target_architectures=[
+                    common_definitions.DeviceArchitecture.RV32_GENERIC
+                ],
+                model_name_filter="model_tf.*fp32",
+                mode_filter="defaults",
+            ),
+            [
+                BenchmarkCase(
+                    model_name=model_tf.name,
+                    model_tags=model_tf.tags,
+                    bench_mode=exec_config_a.tags,
+                    target_arch=common_definitions.DeviceArchitecture.RV32_GENERIC,
+                    driver_info=IREE_DRIVERS_INFOS["iree-llvm-cpu-sync"],
+                    benchmark_tool_name="iree-benchmark-module",
+                    benchmark_case_dir=run_config_c_case_dir,
+                    run_config=run_config_c,
+                )
+            ],
+        )
 
 
 if __name__ == "__main__":
-  unittest.main()
+    unittest.main()
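
Not part of the diff, just an illustration of the mechanical change seen in the hunk above: with its default 88-column limit and 4-space indents, Black explodes any call that no longer fits on one line into one argument per line with a trailing comma and a dedented closing parenthesis, replacing yapf's continuation-aligned style. A minimal sketch with made-up names:

```python
def build_config(id, tags, loader, driver):
    # Stand-in for the ModuleExecutionConfig.build(...) calls above.
    return {"id": id, "tags": tags, "loader": loader, "driver": driver}


# yapf-era layout: arguments packed and aligned under the opening parenthesis.
config = build_config("exec_a", ["defaults"],
                      "embedded-elf", "local-sync")

# Black layout: once the call has to wrap, one argument per line with a
# trailing comma, and the closing parenthesis dedented to the call's indent.
config = build_config(
    id="exec_a",
    tags=["defaults"],
    loader="embedded-elf",
    driver="local-sync",
)
```
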
diff --git a/build_tools/benchmarks/common/benchmark_thresholds.py b/build_tools/benchmarks/common/benchmark_thresholds.py
index f10afbd..5d12cb9 100644
--- a/build_tools/benchmarks/common/benchmark_thresholds.py
+++ b/build_tools/benchmarks/common/benchmark_thresholds.py
@@ -12,28 +12,29 @@
 
 
 class ThresholdUnit(Enum):
-  PERCENTAGE = "%"  # Percentage
-  VALUE_NS = "ns"  # Absolute value in nanoseconds
+    PERCENTAGE = "%"  # Percentage
+    VALUE_NS = "ns"  # Absolute value in nanoseconds
 
 
 @dataclass
 class BenchmarkThreshold:
-  """Similarity threshold for benchmarks matching a regular expression."""
-  # A regular expression to match against the benchmark identifier.
-  regex: re.Pattern
-  # A threshold for computing the benchmark value average. Benchmark sample
-  # values from consecutive runs and within the given range will be considered
-  # as similar (with some noise). They will be used to compute the moving
-  # average. The number will be interpreted according to the given unit.
-  # What value to set depends on the noise range of the particular benchmark.
-  threshold: int
-  unit: ThresholdUnit
+    """Similarity threshold for benchmarks matching a regular expression."""
 
-  def get_threshold_str(self):
-    """Returns a string representation of the threshold."""
-    if self.unit == ThresholdUnit.PERCENTAGE:
-      return f"{self.threshold}%"
-    return self.threshold
+    # A regular expression to match against the benchmark identifier.
+    regex: re.Pattern
+    # A threshold for computing the benchmark value average. Benchmark sample
+    # values from consecutive runs and within the given range will be considered
+    # as similar (with some noise). They will be used to compute the moving
+    # average. The number will be interpreted according to the given unit.
+    # What value to set depends on the noise range of the particular benchmark.
+    threshold: int
+    unit: ThresholdUnit
+
+    def get_threshold_str(self):
+        """Returns a string representation of the threshold."""
+        if self.unit == ThresholdUnit.PERCENTAGE:
+            return f"{self.threshold}%"
+        return self.threshold
 
 
 # A list of benchmarks and their similarity thresholds.
@@ -41,63 +42,95 @@
 # match is used.
 BENCHMARK_THRESHOLDS = [
     # Fluctuating benchmarks on ARM64 CPUs.
-    BenchmarkThreshold(re.compile(r"^DeepLabV3.*big-core.*LLVM-CPU.* @ Pixel"),
-                       20, ThresholdUnit.PERCENTAGE),
     BenchmarkThreshold(
-        re.compile(r"^MobileBertSquad.*big-core.*LLVM-CPU-Sync @ Pixel-4"), 20,
-        ThresholdUnit.PERCENTAGE),
-    BenchmarkThreshold(re.compile(r"^MobileNetV2.*LLVM-CPU.* @ Pixel"), 15,
-                       ThresholdUnit.PERCENTAGE),
-    BenchmarkThreshold(re.compile(r"^MobileNetV3Small.*LLVM-CPU.* @ Pixel"), 25,
-                       ThresholdUnit.PERCENTAGE),
+        re.compile(r"^DeepLabV3.*big-core.*LLVM-CPU.* @ Pixel"),
+        20,
+        ThresholdUnit.PERCENTAGE,
+    ),
     BenchmarkThreshold(
-        re.compile(r"^MobileSSD.*little-core.*LLVM-CPU.* @ Pixel-6"), 20,
-        ThresholdUnit.PERCENTAGE),
-    BenchmarkThreshold(re.compile(r"^PoseNet.*big-core.*LLVM-CPU.* @ Pixel"),
-                       15, ThresholdUnit.PERCENTAGE),
-
+        re.compile(r"^MobileBertSquad.*big-core.*LLVM-CPU-Sync @ Pixel-4"),
+        20,
+        ThresholdUnit.PERCENTAGE,
+    ),
+    BenchmarkThreshold(
+        re.compile(r"^MobileNetV2.*LLVM-CPU.* @ Pixel"), 15, ThresholdUnit.PERCENTAGE
+    ),
+    BenchmarkThreshold(
+        re.compile(r"^MobileNetV3Small.*LLVM-CPU.* @ Pixel"),
+        25,
+        ThresholdUnit.PERCENTAGE,
+    ),
+    BenchmarkThreshold(
+        re.compile(r"^MobileSSD.*little-core.*LLVM-CPU.* @ Pixel-6"),
+        20,
+        ThresholdUnit.PERCENTAGE,
+    ),
+    BenchmarkThreshold(
+        re.compile(r"^PoseNet.*big-core.*LLVM-CPU.* @ Pixel"),
+        15,
+        ThresholdUnit.PERCENTAGE,
+    ),
     # Benchmarks that complete <= 10ms on X86_64 CPUs; using percentage is not
     # suitable anymore.
-    BenchmarkThreshold(re.compile(r"^DeepLabV3_fp32.*x86_64"), 1 * 10**6,
-                       ThresholdUnit.VALUE_NS),
-    BenchmarkThreshold(re.compile(r"^EfficientNet_int8.*x86_64"), 1 * 10**6,
-                       ThresholdUnit.VALUE_NS),
-    BenchmarkThreshold(re.compile(r"^MobileNetV1_fp32.*x86_64"), 1 * 10**6,
-                       ThresholdUnit.VALUE_NS),
-    BenchmarkThreshold(re.compile(r"^MobileNetV2_fp32.*x86_64"), 2 * 10**6,
-                       ThresholdUnit.VALUE_NS),
-    BenchmarkThreshold(re.compile(r"^MobileNetV3Small_fp32.*x86_64"), 1 * 10**6,
-                       ThresholdUnit.VALUE_NS),
-    BenchmarkThreshold(re.compile(r"^PersonDetect_int8.*x86_64"), 5 * 10**5,
-                       ThresholdUnit.VALUE_NS),
-    BenchmarkThreshold(re.compile(r"^PoseNet_fp32.*x86_64"), 1 * 10**6,
-                       ThresholdUnit.VALUE_NS),
-
+    BenchmarkThreshold(
+        re.compile(r"^DeepLabV3_fp32.*x86_64"), 1 * 10**6, ThresholdUnit.VALUE_NS
+    ),
+    BenchmarkThreshold(
+        re.compile(r"^EfficientNet_int8.*x86_64"), 1 * 10**6, ThresholdUnit.VALUE_NS
+    ),
+    BenchmarkThreshold(
+        re.compile(r"^MobileNetV1_fp32.*x86_64"), 1 * 10**6, ThresholdUnit.VALUE_NS
+    ),
+    BenchmarkThreshold(
+        re.compile(r"^MobileNetV2_fp32.*x86_64"), 2 * 10**6, ThresholdUnit.VALUE_NS
+    ),
+    BenchmarkThreshold(
+        re.compile(r"^MobileNetV3Small_fp32.*x86_64"),
+        1 * 10**6,
+        ThresholdUnit.VALUE_NS,
+    ),
+    BenchmarkThreshold(
+        re.compile(r"^PersonDetect_int8.*x86_64"), 5 * 10**5, ThresholdUnit.VALUE_NS
+    ),
+    BenchmarkThreshold(
+        re.compile(r"^PoseNet_fp32.*x86_64"), 1 * 10**6, ThresholdUnit.VALUE_NS
+    ),
     # Fluctuating benchmarks on mobile GPUs.
     BenchmarkThreshold(
-        re.compile(r"^MobileBertSquad.*int8.*full-inference.*GPU-Mali"), 10,
-        ThresholdUnit.PERCENTAGE),
+        re.compile(r"^MobileBertSquad.*int8.*full-inference.*GPU-Mali"),
+        10,
+        ThresholdUnit.PERCENTAGE,
+    ),
     BenchmarkThreshold(
-        re.compile(r"^MobileBertSquad.*fp16.*full-inference.*GPU-Mali"), 10,
-        ThresholdUnit.PERCENTAGE),
+        re.compile(r"^MobileBertSquad.*fp16.*full-inference.*GPU-Mali"),
+        10,
+        ThresholdUnit.PERCENTAGE,
+    ),
     BenchmarkThreshold(
-        re.compile(r"^MobileNetV3Small.*full-inference.*GPU-Mali"), 2 * 10**6,
-        ThresholdUnit.VALUE_NS),
-
+        re.compile(r"^MobileNetV3Small.*full-inference.*GPU-Mali"),
+        2 * 10**6,
+        ThresholdUnit.VALUE_NS,
+    ),
     # Benchmarks that complete <= 10ms on GPUs; using percentage is not
     # suitable anymore.
-    BenchmarkThreshold(re.compile(r"^DeepLabV3.*GPU-Mali"), 1 * 10**6,
-                       ThresholdUnit.VALUE_NS),
-    BenchmarkThreshold(re.compile(r"^PersonDetect.*int8.*GPU-Mali"), 2 * 10**5,
-                       ThresholdUnit.VALUE_NS),
-    BenchmarkThreshold(re.compile(r"^EfficientNet.*int8.*GPU-Mali"), 15 * 10**5,
-                       ThresholdUnit.VALUE_NS),
-    BenchmarkThreshold(re.compile(r"^MobileNet.*GPU"), 1 * 10**6,
-                       ThresholdUnit.VALUE_NS),
-
+    BenchmarkThreshold(
+        re.compile(r"^DeepLabV3.*GPU-Mali"), 1 * 10**6, ThresholdUnit.VALUE_NS
+    ),
+    BenchmarkThreshold(
+        re.compile(r"^PersonDetect.*int8.*GPU-Mali"),
+        2 * 10**5,
+        ThresholdUnit.VALUE_NS,
+    ),
+    BenchmarkThreshold(
+        re.compile(r"^EfficientNet.*int8.*GPU-Mali"),
+        15 * 10**5,
+        ThresholdUnit.VALUE_NS,
+    ),
+    BenchmarkThreshold(
+        re.compile(r"^MobileNet.*GPU"), 1 * 10**6, ThresholdUnit.VALUE_NS
+    ),
     # Default threshold for all ARM64/X86_64 benchmarks: 10%.
-    BenchmarkThreshold(re.compile(r".*CPU-ARM.*"), 10,
-                       ThresholdUnit.PERCENTAGE),
+    BenchmarkThreshold(re.compile(r".*CPU-ARM.*"), 10, ThresholdUnit.PERCENTAGE),
     BenchmarkThreshold(re.compile(r".*x86_64.*"), 10, ThresholdUnit.PERCENTAGE),
     # Default threshold for all benchmarks: 5%.
     BenchmarkThreshold(re.compile(r".*"), 5, ThresholdUnit.PERCENTAGE),
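
Not part of the diff: `BENCHMARK_THRESHOLDS` is ordered from the most specific patterns down to the catch-all `.*` defaults, and the surrounding comment indicates the first match is used. A hedged sketch of such a first-match lookup (the real lookup code is outside this diff; `_Threshold` is only a local stand-in for `BenchmarkThreshold`):

```python
import re
from dataclasses import dataclass


@dataclass
class _Threshold:
    # Minimal stand-in mirroring BenchmarkThreshold's fields.
    regex: re.Pattern
    threshold: int
    unit: str


def find_threshold(benchmark_id, thresholds):
    """Returns the first entry whose regex matches the benchmark identifier."""
    for t in thresholds:
        if t.regex.search(benchmark_id):
            return t
    return None


thresholds = [
    _Threshold(re.compile(r"^MobileNetV2.*LLVM-CPU.* @ Pixel"), 15, "%"),
    _Threshold(re.compile(r".*"), 5, "%"),  # catch-all default
]
match = find_threshold("MobileNetV2 [fp32] LLVM-CPU @ Pixel-6", thresholds)
print(match.threshold)  # 15, not the 5% default
```
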
diff --git a/build_tools/benchmarks/common/common_arguments.py b/build_tools/benchmarks/common/common_arguments.py
index 258265f..fb38be9 100644
--- a/build_tools/benchmarks/common/common_arguments.py
+++ b/build_tools/benchmarks/common/common_arguments.py
@@ -13,160 +13,189 @@
 
 
 def _check_dir_path(path):
-  path = pathlib.Path(path)
-  if path.is_dir():
-    return path
-  else:
-    raise argparse.ArgumentTypeError(path)
+    path = pathlib.Path(path)
+    if path.is_dir():
+        return path
+    else:
+        raise argparse.ArgumentTypeError(path)
 
 
 def _check_file_path(path):
-  path = pathlib.Path(path)
-  if path.is_file():
-    return path
-  else:
-    raise argparse.ArgumentTypeError(f"'{path}' is not found")
+    path = pathlib.Path(path)
+    if path.is_file():
+        return path
+    else:
+        raise argparse.ArgumentTypeError(f"'{path}' is not found")
 
 
 def _check_exe_path(path):
-  path = pathlib.Path(path)
-  if os.access(path, os.X_OK):
-    return path
-  else:
-    raise argparse.ArgumentTypeError(f"'{path}' is not an executable")
+    path = pathlib.Path(path)
+    if os.access(path, os.X_OK):
+        return path
+    else:
+        raise argparse.ArgumentTypeError(f"'{path}' is not an executable")
 
 
 class Parser(argparse.ArgumentParser):
-  """Argument parser that includes common arguments and does validation."""
+    """Argument parser that includes common arguments and does validation."""
 
-  def __init__(self, *args, **kwargs):
-    super().__init__(*args, **kwargs)
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
 
-    self.add_argument("--e2e_test_artifacts_dir",
-                      metavar="<e2e-test-artifacts-dir>",
-                      type=_check_dir_path,
-                      required=True,
-                      help="Path to the IREE e2e test artifacts directory.")
+        self.add_argument(
+            "--e2e_test_artifacts_dir",
+            metavar="<e2e-test-artifacts-dir>",
+            type=_check_dir_path,
+            required=True,
+            help="Path to the IREE e2e test artifacts directory.",
+        )
 
-    self.add_argument(
-        "--normal_benchmark_tool_dir",
-        "--normal-benchmark-tool-dir",
-        type=_check_dir_path,
-        default=None,
-        help="Path to the normal (non-tracing) iree tool directory")
-    self.add_argument("--traced_benchmark_tool_dir",
-                      "--traced-benchmark-tool-dir",
-                      type=_check_dir_path,
-                      default=None,
-                      help="Path to the tracing-enabled iree tool directory")
-    self.add_argument("--trace_capture_tool",
-                      "--trace-capture-tool",
-                      type=_check_exe_path,
-                      default=None,
-                      help="Path to the tool for collecting captured traces")
-    self.add_argument(
-        "--driver-filter-regex",
-        "--driver_filter_regex",
-        type=str,
-        default=None,
-        help="Only run benchmarks matching the given driver regex")
-    self.add_argument(
-        "--model-name-regex",
-        "--model_name_regex",
-        type=str,
-        default=None,
-        help="Only run benchmarks matching the given model name regex")
-    self.add_argument(
-        "--mode-regex",
-        "--mode_regex",
-        type=str,
-        default=None,
-        help="Only run benchmarks matching the given benchmarking mode regex")
-    self.add_argument("--output",
-                      "-o",
-                      default=None,
-                      type=pathlib.Path,
-                      help="Path to the output file")
-    self.add_argument("--capture_tarball",
-                      "--capture-tarball",
-                      default=None,
-                      type=pathlib.Path,
-                      help="Path to the tarball for captures")
-    self.add_argument("--no-clean",
-                      action="store_true",
-                      help="Do not clean up the temporary directory used for "
-                      "benchmarking on the Android device")
-    self.add_argument("--verbose",
-                      action="store_true",
-                      help="Print internal information during execution")
-    self.add_argument(
-        "--pin-cpu-freq",
-        "--pin_cpu_freq",
-        action="store_true",
-        help="Pin CPU frequency for all cores to the maximum. Requires root")
-    self.add_argument("--pin-gpu-freq",
-                      "--pin_gpu_freq",
-                      action="store_true",
-                      help="Pin GPU frequency to the maximum. Requires root")
-    self.add_argument(
-        "--keep_going",
-        "--keep-going",
-        action="store_true",
-        help="Continue running after a failed benchmark. The overall exit status"
-        " will still indicate failure and all errors will be reported at the end."
-    )
-    self.add_argument(
-        "--tmp_dir",
-        "--tmp-dir",
-        "--tmpdir",
-        default=pathlib.Path("/tmp/iree-benchmarks"),
-        type=_check_dir_path,
-        help="Base directory in which to store temporary files. A subdirectory"
-        " with a name matching the git commit hash will be created.")
-    self.add_argument(
-        "--continue_from_previous",
-        "--continue-from-previous",
-        action="store_true",
-        help="Previous benchmark and capture results will be used and not "
-        "rerun if they are found in the benchmark results directory.")
-    self.add_argument(
-        "--benchmark_min_time",
-        "--benchmark-min-time",
-        default=0,
-        type=float,
-        help="If specified, this will be passed as --benchmark_min_time to the"
-        "iree-benchmark-module (minimum number of seconds to repeat running "
-        "for). In that case, no --benchmark_repetitions flag will be passed."
-        " If not specified, a --benchmark_repetitions will be passed "
-        "instead.")
-    self.add_argument(
-        "--compatible_only",
-        "--compatible-only",
-        action="store_true",
-        help="Only run compatible benchmarks based on the detected device "
-        "information")
-    self.add_argument("--execution_benchmark_config",
-                      type=_check_file_path,
-                      required=True,
-                      help="JSON config for the execution benchmarks")
-    self.add_argument("--target_device_name",
-                      type=str,
-                      required=True,
-                      help="Target device in benchmark config to run")
+        self.add_argument(
+            "--normal_benchmark_tool_dir",
+            "--normal-benchmark-tool-dir",
+            type=_check_dir_path,
+            default=None,
+            help="Path to the normal (non-tracing) iree tool directory",
+        )
+        self.add_argument(
+            "--traced_benchmark_tool_dir",
+            "--traced-benchmark-tool-dir",
+            type=_check_dir_path,
+            default=None,
+            help="Path to the tracing-enabled iree tool directory",
+        )
+        self.add_argument(
+            "--trace_capture_tool",
+            "--trace-capture-tool",
+            type=_check_exe_path,
+            default=None,
+            help="Path to the tool for collecting captured traces",
+        )
+        self.add_argument(
+            "--driver-filter-regex",
+            "--driver_filter_regex",
+            type=str,
+            default=None,
+            help="Only run benchmarks matching the given driver regex",
+        )
+        self.add_argument(
+            "--model-name-regex",
+            "--model_name_regex",
+            type=str,
+            default=None,
+            help="Only run benchmarks matching the given model name regex",
+        )
+        self.add_argument(
+            "--mode-regex",
+            "--mode_regex",
+            type=str,
+            default=None,
+            help="Only run benchmarks matching the given benchmarking mode regex",
+        )
+        self.add_argument(
+            "--output",
+            "-o",
+            default=None,
+            type=pathlib.Path,
+            help="Path to the output file",
+        )
+        self.add_argument(
+            "--capture_tarball",
+            "--capture-tarball",
+            default=None,
+            type=pathlib.Path,
+            help="Path to the tarball for captures",
+        )
+        self.add_argument(
+            "--no-clean",
+            action="store_true",
+            help="Do not clean up the temporary directory used for "
+            "benchmarking on the Android device",
+        )
+        self.add_argument(
+            "--verbose",
+            action="store_true",
+            help="Print internal information during execution",
+        )
+        self.add_argument(
+            "--pin-cpu-freq",
+            "--pin_cpu_freq",
+            action="store_true",
+            help="Pin CPU frequency for all cores to the maximum. Requires root",
+        )
+        self.add_argument(
+            "--pin-gpu-freq",
+            "--pin_gpu_freq",
+            action="store_true",
+            help="Pin GPU frequency to the maximum. Requires root",
+        )
+        self.add_argument(
+            "--keep_going",
+            "--keep-going",
+            action="store_true",
+            help="Continue running after a failed benchmark. The overall exit status"
+            " will still indicate failure and all errors will be reported at the end.",
+        )
+        self.add_argument(
+            "--tmp_dir",
+            "--tmp-dir",
+            "--tmpdir",
+            default=pathlib.Path("/tmp/iree-benchmarks"),
+            type=_check_dir_path,
+            help="Base directory in which to store temporary files. A subdirectory"
+            " with a name matching the git commit hash will be created.",
+        )
+        self.add_argument(
+            "--continue_from_previous",
+            "--continue-from-previous",
+            action="store_true",
+            help="Previous benchmark and capture results will be used and not "
+            "rerun if they are found in the benchmark results directory.",
+        )
+        self.add_argument(
+            "--benchmark_min_time",
+            "--benchmark-min-time",
+            default=0,
+            type=float,
+            help="If specified, this will be passed as --benchmark_min_time to the"
+            "iree-benchmark-module (minimum number of seconds to repeat running "
+            "for). In that case, no --benchmark_repetitions flag will be passed."
+            " If not specified, a --benchmark_repetitions will be passed "
+            "instead.",
+        )
+        self.add_argument(
+            "--compatible_only",
+            "--compatible-only",
+            action="store_true",
+            help="Only run compatible benchmarks based on the detected device "
+            "information",
+        )
+        self.add_argument(
+            "--execution_benchmark_config",
+            type=_check_file_path,
+            required=True,
+            help="JSON config for the execution benchmarks",
+        )
+        self.add_argument(
+            "--target_device_name",
+            type=str,
+            required=True,
+            help="Target device in benchmark config to run",
+        )
 
 
 def expand_and_check_file_paths(paths: Sequence[str]) -> List[pathlib.Path]:
-  """Expands the wildcards in the paths and check if they are files.
+    """Expands the wildcards in the paths and check if they are files.
     Returns:
       List of expanded paths.
-  """
+    """
 
-  expanded_paths = []
-  for path in paths:
-    expanded_paths += [pathlib.Path(path) for path in glob.glob(path)]
+    expanded_paths = []
+    for path in paths:
+        expanded_paths += [pathlib.Path(path) for path in glob.glob(path)]
 
-  for path in expanded_paths:
-    if not path.is_file():
-      raise ValueError(f"{path} is not a file.")
+    for path in expanded_paths:
+        if not path.is_file():
+            raise ValueError(f"{path} is not a file.")
 
-  return expanded_paths
+    return expanded_paths
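
Not part of the diff, but worth noting for hunks like this one: Black does not join or split string literals, so the multi-part `help=` texts keep their implicit concatenation and only the call layout around them changes. A small runnable illustration using one of the flags above:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--keep_going",
    "--keep-going",
    action="store_true",
    # Adjacent string literals are concatenated by Python itself; Black keeps
    # them as separate literals and only re-indents them.
    help="Continue running after a failed benchmark. The overall exit status"
    " will still indicate failure and all errors will be reported at the end.",
)
print(parser.parse_args(["--keep-going"]).keep_going)  # True
```
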
diff --git a/build_tools/benchmarks/common/common_arguments_test.py b/build_tools/benchmarks/common/common_arguments_test.py
index 1469261..714a8b9 100644
--- a/build_tools/benchmarks/common/common_arguments_test.py
+++ b/build_tools/benchmarks/common/common_arguments_test.py
@@ -13,65 +13,72 @@
 
 
 class CommonArgumentsTest(unittest.TestCase):
+    def setUp(self):
+        self._build_dir_manager = tempfile.TemporaryDirectory()
+        self.build_dir = pathlib.Path(self._build_dir_manager.name).resolve()
+        self.e2e_test_artifacts_dir = self.build_dir / "e2e_test_artifacts"
+        self.e2e_test_artifacts_dir.mkdir()
+        self.normal_tool_dir = self.build_dir / "normal_tool"
+        self.normal_tool_dir.mkdir()
+        self.traced_tool_dir = self.build_dir / "traced_tool"
+        self.traced_tool_dir.mkdir()
+        self.trace_capture_tool = self.build_dir / "tracy_capture"
+        # Create capture tool with executable file mode.
+        self.trace_capture_tool.touch(mode=0o755)
+        self.execution_config = self.build_dir / "execution_config.json"
+        self.execution_config.touch()
 
-  def setUp(self):
-    self._build_dir_manager = tempfile.TemporaryDirectory()
-    self.build_dir = pathlib.Path(self._build_dir_manager.name).resolve()
-    self.e2e_test_artifacts_dir = self.build_dir / "e2e_test_artifacts"
-    self.e2e_test_artifacts_dir.mkdir()
-    self.normal_tool_dir = self.build_dir / "normal_tool"
-    self.normal_tool_dir.mkdir()
-    self.traced_tool_dir = self.build_dir / "traced_tool"
-    self.traced_tool_dir.mkdir()
-    self.trace_capture_tool = self.build_dir / "tracy_capture"
-    # Create capture tool with executable file mode.
-    self.trace_capture_tool.touch(mode=0o755)
-    self.execution_config = self.build_dir / "execution_config.json"
-    self.execution_config.touch()
+    def tearDown(self):
+        self._build_dir_manager.cleanup()
 
-  def tearDown(self):
-    self._build_dir_manager.cleanup()
+    def test_parser(self):
+        common.common_arguments.Parser().parse_args(
+            [
+                f"--normal_benchmark_tool_dir={self.normal_tool_dir}",
+                f"--traced_benchmark_tool_dir={self.traced_tool_dir}",
+                f"--trace_capture_tool={self.trace_capture_tool}",
+                f"--e2e_test_artifacts_dir={self.e2e_test_artifacts_dir}",
+                f"--execution_benchmark_config={self.execution_config}",
+                "--target_device=test",
+            ]
+        )
 
-  def test_parser(self):
-    common.common_arguments.Parser().parse_args([
-        f"--normal_benchmark_tool_dir={self.normal_tool_dir}",
-        f"--traced_benchmark_tool_dir={self.traced_tool_dir}",
-        f"--trace_capture_tool={self.trace_capture_tool}",
-        f"--e2e_test_artifacts_dir={self.e2e_test_artifacts_dir}",
-        f"--execution_benchmark_config={self.execution_config}",
-        "--target_device=test",
-    ])
+    def test_parser_check_normal_benchmark_tool(self):
+        arg_parser = common.common_arguments.Parser()
+        with self.assertRaises(SystemExit):
+            arg_parser.parse_args(
+                [
+                    "--normal_benchmark_tool_dir=nonexistent",
+                    f"--e2e_test_artifacts_dir={self.e2e_test_artifacts_dir}",
+                    f"--execution_benchmark_config={self.execution_config}",
+                    "--target_device=test",
+                ]
+            )
 
-  def test_parser_check_normal_benchmark_tool(self):
-    arg_parser = common.common_arguments.Parser()
-    with self.assertRaises(SystemExit):
-      arg_parser.parse_args([
-          "--normal_benchmark_tool_dir=nonexistent",
-          f"--e2e_test_artifacts_dir={self.e2e_test_artifacts_dir}",
-          f"--execution_benchmark_config={self.execution_config}",
-          "--target_device=test",
-      ])
+    def test_parser_check_traced_benchmark_tool(self):
+        arg_parser = common.common_arguments.Parser()
+        with self.assertRaises(SystemExit):
+            arg_parser.parse_args(
+                [
+                    "--traced_benchmark_tool_dir=nonexistent",
+                    f"--e2e_test_artifacts_dir={self.e2e_test_artifacts_dir}",
+                    f"--execution_benchmark_config={self.execution_config}",
+                    "--target_device=test",
+                ]
+            )
 
-  def test_parser_check_traced_benchmark_tool(self):
-    arg_parser = common.common_arguments.Parser()
-    with self.assertRaises(SystemExit):
-      arg_parser.parse_args([
-          "--traced_benchmark_tool_dir=nonexistent",
-          f"--e2e_test_artifacts_dir={self.e2e_test_artifacts_dir}",
-          f"--execution_benchmark_config={self.execution_config}",
-          "--target_device=test",
-      ])
-
-  def test_parser_check_trace_capture_tool(self):
-    arg_parser = common.common_arguments.Parser()
-    with self.assertRaises(SystemExit):
-      arg_parser.parse_args([
-          "--trace_capture_tool=nonexistent",
-          f"--e2e_test_artifacts_dir={self.e2e_test_artifacts_dir}",
-          f"--execution_benchmark_config={self.execution_config}",
-          "--target_device=test",
-      ])
+    def test_parser_check_trace_capture_tool(self):
+        arg_parser = common.common_arguments.Parser()
+        with self.assertRaises(SystemExit):
+            arg_parser.parse_args(
+                [
+                    "--trace_capture_tool=nonexistent",
+                    f"--e2e_test_artifacts_dir={self.e2e_test_artifacts_dir}",
+                    f"--execution_benchmark_config={self.execution_config}",
+                    "--target_device=test",
+                ]
+            )
 
 
 if __name__ == "__main__":
-  unittest.main()
+    unittest.main()
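
A side note on these tests (not part of the diff): they pass `--target_device=test` while the parser only defines `--target_device_name`. That works because argparse accepts unambiguous prefixes of long options by default (`allow_abbrev=True`), as this standalone sketch shows:

```python
import argparse

parser = argparse.ArgumentParser()  # allow_abbrev defaults to True
parser.add_argument("--target_device_name", type=str, required=True)

# "--target_device" is an unambiguous prefix of "--target_device_name",
# so argparse resolves it to the full option name.
args = parser.parse_args(["--target_device=test"])
print(args.target_device_name)  # test
```
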
diff --git a/build_tools/benchmarks/common/linux_device_utils.py b/build_tools/benchmarks/common/linux_device_utils.py
index 9285782..e72b576 100644
--- a/build_tools/benchmarks/common/linux_device_utils.py
+++ b/build_tools/benchmarks/common/linux_device_utils.py
@@ -9,58 +9,65 @@
 import re
 from typing import Optional, Sequence
 
-from .benchmark_definition import (execute_cmd_and_get_stdout, DeviceInfo,
-                                   PlatformType)
+from .benchmark_definition import execute_cmd_and_get_stdout, DeviceInfo, PlatformType
 
 
 def _get_lscpu_field(lscpu_output: str, field_name: str) -> str:
-  (value,) = re.findall(f"^{field_name}:\s*(.+)", lscpu_output, re.MULTILINE)
-  return value
+    (value,) = re.findall(f"^{field_name}:\s*(.+)", lscpu_output, re.MULTILINE)
+    return value
 
 
 def get_linux_cpu_arch(lscpu_output: str) -> str:
-  """Returns CPU Architecture, e.g., 'x86_64'."""
-  return _get_lscpu_field(lscpu_output, "Architecture")
+    """Returns CPU Architecture, e.g., 'x86_64'."""
+    return _get_lscpu_field(lscpu_output, "Architecture")
 
 
 def get_linux_cpu_features(lscpu_output: str) -> Sequence[str]:
-  """Returns CPU feature lists, e.g., ['mmx', 'fxsr', 'sse', 'sse2']."""
-  return _get_lscpu_field(lscpu_output, "Flags").split(" ")
+    """Returns CPU feature lists, e.g., ['mmx', 'fxsr', 'sse', 'sse2']."""
+    return _get_lscpu_field(lscpu_output, "Flags").split(" ")
 
 
 def canonicalize_gpu_name(gpu_name: str) -> str:
-  # Replace all consecutive non-word characters with a single hyphen.
-  return re.sub(r"\W+", "-", gpu_name)
+    # Replace all consecutive non-word characters with a single hyphen.
+    return re.sub(r"\W+", "-", gpu_name)
 
 
-def get_linux_device_info(device_model: str = "Unknown",
-                          cpu_uarch: Optional[str] = None,
-                          gpu_id: str = "0",
-                          verbose: bool = False) -> DeviceInfo:
-  """Returns device info for the Linux device.
+def get_linux_device_info(
+    device_model: str = "Unknown",
+    cpu_uarch: Optional[str] = None,
+    gpu_id: str = "0",
+    verbose: bool = False,
+) -> DeviceInfo:
+    """Returns device info for the Linux device.
 
     Args:
     - device_model: the device model name, e.g., 'ThinkStation P520'
     - cpu_uarch: the CPU microarchitecture, e.g., 'CascadeLake'
     - gpu_id: the target GPU ID, e.g., '0' or 'GPU-<UUID>'
-  """
-  lscpu_output = execute_cmd_and_get_stdout(["lscpu"], verbose)
+    """
+    lscpu_output = execute_cmd_and_get_stdout(["lscpu"], verbose)
 
-  try:
-    gpu_name = execute_cmd_and_get_stdout([
-        "nvidia-smi", "--query-gpu=name", "--format=csv,noheader",
-        f"--id={gpu_id}"
-    ], verbose)
-  except FileNotFoundError:
-    # Set GPU name to Unknown if the tool "nvidia-smi" doesn't exist.
-    gpu_name = "Unknown"
+    try:
+        gpu_name = execute_cmd_and_get_stdout(
+            [
+                "nvidia-smi",
+                "--query-gpu=name",
+                "--format=csv,noheader",
+                f"--id={gpu_id}",
+            ],
+            verbose,
+        )
+    except FileNotFoundError:
+        # Set GPU name to Unknown if the tool "nvidia-smi" doesn't exist.
+        gpu_name = "Unknown"
 
-  return DeviceInfo(
-      PlatformType.LINUX,
-      # Includes CPU model as it is the key factor of the device performance.
-      model=device_model,
-      # Currently we only have x86, so CPU ABI = CPU arch.
-      cpu_abi=get_linux_cpu_arch(lscpu_output),
-      cpu_uarch=cpu_uarch,
-      cpu_features=get_linux_cpu_features(lscpu_output),
-      gpu_name=canonicalize_gpu_name(gpu_name))
+    return DeviceInfo(
+        PlatformType.LINUX,
+        # Includes CPU model as it is the key factor of the device performance.
+        model=device_model,
+        # Currently we only have x86, so CPU ABI = CPU arch.
+        cpu_abi=get_linux_cpu_arch(lscpu_output),
+        cpu_uarch=cpu_uarch,
+        cpu_features=get_linux_cpu_features(lscpu_output),
+        gpu_name=canonicalize_gpu_name(gpu_name),
+    )
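
Not part of the diff: besides re-indentation, this hunk shows Black collapsing a parenthesized, line-wrapped `from ... import` back onto a single line because the whole statement now fits within the 88-column limit (the next file shows the opposite transformation). An illustrative sketch using a stdlib module:

```python
# Fits in 88 columns, so Black keeps it (or collapses it) to a single line:
from os.path import basename, dirname, join

print(join(dirname("/tmp/a/b.txt"), basename("/tmp/a/b.txt")))  # /tmp/a/b.txt
```
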
diff --git a/build_tools/benchmarks/common/linux_device_utils_test.py b/build_tools/benchmarks/common/linux_device_utils_test.py
index 80ec58d..60e76c2 100644
--- a/build_tools/benchmarks/common/linux_device_utils_test.py
+++ b/build_tools/benchmarks/common/linux_device_utils_test.py
@@ -10,26 +10,33 @@
 from unittest import mock
 
 from common.benchmark_definition import DeviceInfo, PlatformType
-from common.linux_device_utils import canonicalize_gpu_name, get_linux_cpu_arch, get_linux_cpu_features
+from common.linux_device_utils import (
+    canonicalize_gpu_name,
+    get_linux_cpu_arch,
+    get_linux_cpu_features,
+)
 
-LSCPU_OUTPUT = ("Architecture:                    x86_64\n"
-                "Vendor ID:                       AuthenticAMD\n"
-                "Flags:                           fpu vme de pse tsc\n")
+LSCPU_OUTPUT = (
+    "Architecture:                    x86_64\n"
+    "Vendor ID:                       AuthenticAMD\n"
+    "Flags:                           fpu vme de pse tsc\n"
+)
 
 
 class LinuxDeviceUtilsTest(unittest.TestCase):
+    def test_get_linux_cpu_arch(self):
+        self.assertEqual(get_linux_cpu_arch(LSCPU_OUTPUT), "x86_64")
 
-  def test_get_linux_cpu_arch(self):
-    self.assertEqual(get_linux_cpu_arch(LSCPU_OUTPUT), "x86_64")
+    def test_get_linux_cpu_features(self):
+        self.assertEqual(
+            get_linux_cpu_features(LSCPU_OUTPUT), ["fpu", "vme", "de", "pse", "tsc"]
+        )
 
-  def test_get_linux_cpu_features(self):
-    self.assertEqual(get_linux_cpu_features(LSCPU_OUTPUT),
-                     ["fpu", "vme", "de", "pse", "tsc"])
-
-  def test_canonicalize_gpu_name(self):
-    self.assertEqual(canonicalize_gpu_name("Tesla  V100-SXM2-16GB"),
-                     "Tesla-V100-SXM2-16GB")
+    def test_canonicalize_gpu_name(self):
+        self.assertEqual(
+            canonicalize_gpu_name("Tesla  V100-SXM2-16GB"), "Tesla-V100-SXM2-16GB"
+        )
 
 
 if __name__ == "__main__":
-  unittest.main()
+    unittest.main()
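
Not part of the diff: here the import goes the other way; `from common.linux_device_utils import ...` no longer fits on one line, so Black wraps it in parentheses with one name per line and a trailing comma. That trailing comma is Black's "magic trailing comma": as long as it remains, the collection stays exploded one element per line even if it would later fit. A small illustration with values taken from the test above:

```python
# Without a trailing comma, a short list collapses onto one line:
flags = ["fpu", "vme"]

# The trailing comma below is a "magic trailing comma": it keeps the list
# exploded one element per line even though it would fit on a single line.
more_flags = [
    "de",
    "pse",
    "tsc",
]
print(flags + more_flags)
```
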
diff --git a/build_tools/benchmarks/comparisons/common/benchmark_command.py b/build_tools/benchmarks/comparisons/common/benchmark_command.py
index bf3b78f..a49cc69 100644
--- a/build_tools/benchmarks/comparisons/common/benchmark_command.py
+++ b/build_tools/benchmarks/comparisons/common/benchmark_command.py
@@ -11,125 +11,129 @@
 
 
 class BenchmarkCommand(abc.ABC):
-  """Abstracts a benchmark command."""
+    """Abstracts a benchmark command."""
 
-  def __init__(self,
-               benchmark_binary: str,
-               model_name: str,
-               num_threads: int,
-               num_runs: int,
-               driver: Optional[str] = None,
-               taskset: Optional[str] = None):
-    self.benchmark_binary = benchmark_binary
-    self.model_name = model_name
-    self.taskset = taskset
-    self.num_threads = num_threads
-    self.num_runs = num_runs
-    self.driver = driver
-    self.args = []
+    def __init__(
+        self,
+        benchmark_binary: str,
+        model_name: str,
+        num_threads: int,
+        num_runs: int,
+        driver: Optional[str] = None,
+        taskset: Optional[str] = None,
+    ):
+        self.benchmark_binary = benchmark_binary
+        self.model_name = model_name
+        self.taskset = taskset
+        self.num_threads = num_threads
+        self.num_runs = num_runs
+        self.driver = driver
+        self.args = []
 
-  @property
-  @abc.abstractmethod
-  def runtime(self):
-    pass
+    @property
+    @abc.abstractmethod
+    def runtime(self):
+        pass
 
-  @abc.abstractmethod
-  def parse_latency_from_output(self, output: str) -> float:
-    pass
+    @abc.abstractmethod
+    def parse_latency_from_output(self, output: str) -> float:
+        pass
 
-  def generate_benchmark_command(self) -> list[str]:
-    """Returns a list of strings that correspond to the command to be run."""
-    command = []
-    if self.taskset:
-      command.append("taskset")
-      command.append(str(self.taskset))
-    command.append(self.benchmark_binary)
-    command.extend(self.args)
-    return command
+    def generate_benchmark_command(self) -> list[str]:
+        """Returns a list of strings that correspond to the command to be run."""
+        command = []
+        if self.taskset:
+            command.append("taskset")
+            command.append(str(self.taskset))
+        command.append(self.benchmark_binary)
+        command.extend(self.args)
+        return command
 
 
 class TFLiteBenchmarkCommand(BenchmarkCommand):
-  """Represents a TFLite benchmark command."""
+    """Represents a TFLite benchmark command."""
 
-  def __init__(self,
-               benchmark_binary: str,
-               model_name: str,
-               model_path: str,
-               num_threads: int,
-               num_runs: int,
-               taskset: Optional[str] = None):
-    super().__init__(benchmark_binary,
-                     model_name,
-                     num_threads,
-                     num_runs,
-                     taskset=taskset)
-    self.args.append("--graph=" + model_path)
-    self._latency_large_regex = re.compile(
-        r".*?Inference \(avg\): (\d+.?\d*e\+?\d*).*")
-    self._latency_regex = re.compile(r".*?Inference \(avg\): (\d+).*")
+    def __init__(
+        self,
+        benchmark_binary: str,
+        model_name: str,
+        model_path: str,
+        num_threads: int,
+        num_runs: int,
+        taskset: Optional[str] = None,
+    ):
+        super().__init__(
+            benchmark_binary, model_name, num_threads, num_runs, taskset=taskset
+        )
+        self.args.append("--graph=" + model_path)
+        self._latency_large_regex = re.compile(
+            r".*?Inference \(avg\): (\d+.?\d*e\+?\d*).*"
+        )
+        self._latency_regex = re.compile(r".*?Inference \(avg\): (\d+).*")
 
-  @property
-  def runtime(self):
-    return "tflite"
+    @property
+    def runtime(self):
+        return "tflite"
 
-  def parse_latency_from_output(self, output: str) -> float:
-    # First match whether a large number has been recorded e.g. 1.18859e+06.
-    matches = self._latency_large_regex.search(output)
-    if not matches:
-      # Otherwise, regular number e.g. 71495.6.
-      matches = self._latency_regex.search(output)
+    def parse_latency_from_output(self, output: str) -> float:
+        # First match whether a large number has been recorded e.g. 1.18859e+06.
+        matches = self._latency_large_regex.search(output)
+        if not matches:
+            # Otherwise, regular number e.g. 71495.6.
+            matches = self._latency_regex.search(output)
 
-    latency_ms = 0
-    if matches:
-      latency_ms = float(matches.group(1)) / 1000
-    else:
-      print("Warning! Could not parse latency. Defaulting to 0ms.")
-    return latency_ms
+        latency_ms = 0
+        if matches:
+            latency_ms = float(matches.group(1)) / 1000
+        else:
+            print("Warning! Could not parse latency. Defaulting to 0ms.")
+        return latency_ms
 
-  def generate_benchmark_command(self) -> list[str]:
-    command = super().generate_benchmark_command()
-    if self.driver == "gpu":
-      command.append("--use_gpu=true")
-    command.append("--num_threads=" + str(self.num_threads))
-    command.append("--num_runs=" + str(self.num_runs))
-    return command
+    def generate_benchmark_command(self) -> list[str]:
+        command = super().generate_benchmark_command()
+        if self.driver == "gpu":
+            command.append("--use_gpu=true")
+        command.append("--num_threads=" + str(self.num_threads))
+        command.append("--num_runs=" + str(self.num_runs))
+        return command
 
 
 class IreeBenchmarkCommand(BenchmarkCommand):
-  """Represents an IREE benchmark command."""
+    """Represents an IREE benchmark command."""
 
-  def __init__(self,
-               benchmark_binary: str,
-               model_name: str,
-               model_path: str,
-               num_threads: int,
-               num_runs: int,
-               taskset: Optional[str] = None):
-    super().__init__(benchmark_binary,
-                     model_name,
-                     num_threads,
-                     num_runs,
-                     taskset=taskset)
-    self.args.append("--module=" + model_path)
-    self._latency_regex = re.compile(
-        r".*?BM_main/process_time/real_time_mean\s+(.*?) ms.*")
+    def __init__(
+        self,
+        benchmark_binary: str,
+        model_name: str,
+        model_path: str,
+        num_threads: int,
+        num_runs: int,
+        taskset: Optional[str] = None,
+    ):
+        super().__init__(
+            benchmark_binary, model_name, num_threads, num_runs, taskset=taskset
+        )
+        self.args.append("--module=" + model_path)
+        self._latency_regex = re.compile(
+            r".*?BM_main/process_time/real_time_mean\s+(.*?) ms.*"
+        )
 
-  @property
-  def runtime(self):
-    return "iree"
+    @property
+    def runtime(self):
+        return "iree"
 
-  def parse_latency_from_output(self, output: str) -> float:
-    matches = self._latency_regex.search(output)
-    latency_ms = 0
-    if matches:
-      latency_ms = float(matches.group(1))
-    else:
-      print("Warning! Could not parse latency. Defaulting to 0ms.")
-    return latency_ms
+    def parse_latency_from_output(self, output: str) -> float:
+        matches = self._latency_regex.search(output)
+        latency_ms = 0
+        if matches:
+            latency_ms = float(matches.group(1))
+        else:
+            print("Warning! Could not parse latency. Defaulting to 0ms.")
+        return latency_ms
 
-  def generate_benchmark_command(self) -> list[str]:
-    command = super().generate_benchmark_command()
-    command.append("--device=" + self.driver)
-    command.append("--task_topology_max_group_count=" + str(self.num_threads))
-    command.append("--benchmark_repetitions=" + str(self.num_runs))
-    return command
+    def generate_benchmark_command(self) -> list[str]:
+        command = super().generate_benchmark_command()
+        command.append("--device=" + self.driver)
+        command.append("--task_topology_max_group_count=" + str(self.num_threads))
+        command.append("--benchmark_repetitions=" + str(self.num_runs))
+        return command
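
Not part of the diff, just for reference on the latency parsing above: `TFLiteBenchmarkCommand.parse_latency_from_output` first tries the large-number pattern (e.g. 1.18859e+06) and then the plain pattern (e.g. 71495.6), dividing the reported value by 1000 to express it in milliseconds. A standalone sketch using the same regexes:

```python
import re

# Patterns copied from TFLiteBenchmarkCommand above.
latency_large_regex = re.compile(r".*?Inference \(avg\): (\d+.?\d*e\+?\d*).*")
latency_regex = re.compile(r".*?Inference \(avg\): (\d+).*")


def parse_latency_ms(output: str) -> float:
    matches = latency_large_regex.search(output) or latency_regex.search(output)
    return float(matches.group(1)) / 1000 if matches else 0.0


print(parse_latency_ms("Inference (avg): 1.18859e+06"))  # 1188.59
print(parse_latency_ms("Inference (avg): 71495.6"))  # 71.495 (fallback captures digits only)
```
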
diff --git a/build_tools/benchmarks/comparisons/common/benchmark_command_factory.py b/build_tools/benchmarks/comparisons/common/benchmark_command_factory.py
index f417d47..d76ab5d 100644
--- a/build_tools/benchmarks/comparisons/common/benchmark_command_factory.py
+++ b/build_tools/benchmarks/comparisons/common/benchmark_command_factory.py
@@ -10,15 +10,16 @@
 
 
 class BenchmarkCommandFactory(abc.ABC):
-  """ An abstract factory that generates commands depending on config.
-  Args:
-    device: Currently 'desktop' or 'mobile' are supported.
-    driver: Currently 'cpu' or 'gpu' are supported.
-  Returns:
-    An array containing `BenchmarkCommand` objects.
-  """
+    """An abstract factory that generates commands depending on config.
+    Args:
+      device: Currently 'desktop' or 'mobile' are supported.
+      driver: Currently 'cpu' or 'gpu' are supported.
+    Returns:
+      An array containing `BenchmarkCommand` objects.
+    """
 
-  @abc.abstractmethod
-  def generate_benchmark_commands(self, device: str,
-                                  driver: str) -> list[BenchmarkCommand]:
-    pass
+    @abc.abstractmethod
+    def generate_benchmark_commands(
+        self, device: str, driver: str
+    ) -> list[BenchmarkCommand]:
+        pass
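
Not part of the diff: this hunk also shows Black's docstring normalization, where the stray space after the opening quotes (`""" An abstract factory ...`) is stripped and the body re-indented while the wording stays untouched. For example:

```python
class Before:
    """ An abstract factory that generates commands depending on config. """


class After:
    """An abstract factory that generates commands depending on config."""


# Black rewrites Before's docstring into After's form: the spaces just inside
# the triple quotes are removed and the indentation normalized; the text
# itself is unchanged.
```
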
diff --git a/build_tools/benchmarks/comparisons/common/benchmark_runner.py b/build_tools/benchmarks/comparisons/common/benchmark_runner.py
index a41f326..3bc5db3 100644
--- a/build_tools/benchmarks/comparisons/common/benchmark_runner.py
+++ b/build_tools/benchmarks/comparisons/common/benchmark_runner.py
@@ -17,48 +17,51 @@
 
 
 def run_command(benchmark_command: BenchmarkCommand) -> list[float]:
-  """Runs `benchmark_command` and polls for memory consumption statistics.
-  Args:
-    benchmark_command: A `BenchmarkCommand` object containing information on how to run the benchmark and parse the output.
-  Returns:
-    An array containing values for [`latency`, `vmhwm`, `vmrss`, `rssfile`]
-  """
-  command = benchmark_command.generate_benchmark_command()
-  print("\n\nRunning command:\n" + " ".join(command))
-  benchmark_process = subprocess.Popen(command,
-                                       stdout=subprocess.PIPE,
-                                       stderr=subprocess.STDOUT)
+    """Runs `benchmark_command` and polls for memory consumption statistics.
+    Args:
+      benchmark_command: A `BenchmarkCommand` object containing information on how to run the benchmark and parse the output.
+    Returns:
+      An array containing values for [`latency`, `vmhwm`, `vmrss`, `rssfile`]
+    """
+    command = benchmark_command.generate_benchmark_command()
+    print("\n\nRunning command:\n" + " ".join(command))
+    benchmark_process = subprocess.Popen(
+        command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
+    )
 
-  # Keep a record of the highest VmHWM corresponding VmRSS and RssFile values.
-  vmhwm = 0
-  vmrss = 0
-  rssfile = 0
-  while benchmark_process.poll() is None:
-    pid_status = subprocess.run(
-        ["cat", "/proc/" + str(benchmark_process.pid) + "/status"],
-        capture_output=True)
-    output = pid_status.stdout.decode()
-    vmhwm_matches = _VMHWM_REGEX.search(output)
-    vmrss_matches = _VMRSS_REGEX.search(output)
-    rssfile_matches = _RSSFILE_REGEX.search(output)
+    # Keep a record of the highest VmHWM corresponding VmRSS and RssFile values.
+    vmhwm = 0
+    vmrss = 0
+    rssfile = 0
+    while benchmark_process.poll() is None:
+        pid_status = subprocess.run(
+            ["cat", "/proc/" + str(benchmark_process.pid) + "/status"],
+            capture_output=True,
+        )
+        output = pid_status.stdout.decode()
+        vmhwm_matches = _VMHWM_REGEX.search(output)
+        vmrss_matches = _VMRSS_REGEX.search(output)
+        rssfile_matches = _RSSFILE_REGEX.search(output)
 
-    if vmhwm_matches and vmrss_matches and rssfile_matches:
-      curr_vmhwm = float(vmhwm_matches.group(1))
-      if curr_vmhwm > vmhwm:
-        vmhwm = curr_vmhwm
-        vmrss = float(vmrss_matches.group(1))
-        rssfile = float(rssfile_matches.group(1))
+        if vmhwm_matches and vmrss_matches and rssfile_matches:
+            curr_vmhwm = float(vmhwm_matches.group(1))
+            if curr_vmhwm > vmhwm:
+                vmhwm = curr_vmhwm
+                vmrss = float(vmrss_matches.group(1))
+                rssfile = float(rssfile_matches.group(1))
 
-    time.sleep(0.5)
+        time.sleep(0.5)
 
-  stdout_data, _ = benchmark_process.communicate()
+    stdout_data, _ = benchmark_process.communicate()
 
-  if benchmark_process.returncode != 0:
-    print(f"Warning! Benchmark command failed with return code:"
-          f" {benchmark_process.returncode}")
-    return [0, 0, 0, 0]
-  else:
-    print(stdout_data.decode())
+    if benchmark_process.returncode != 0:
+        print(
+            f"Warning! Benchmark command failed with return code:"
+            f" {benchmark_process.returncode}"
+        )
+        return [0, 0, 0, 0]
+    else:
+        print(stdout_data.decode())
 
-  latency_ms = benchmark_command.parse_latency_from_output(stdout_data.decode())
-  return [latency_ms, vmhwm, vmrss, rssfile]
+    latency_ms = benchmark_command.parse_latency_from_output(stdout_data.decode())
+    return [latency_ms, vmhwm, vmrss, rssfile]
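
For context on the loop above (not part of the diff): `run_command` polls `/proc/<pid>/status` every 0.5 s and records the peak VmHWM together with the VmRSS and RssFile values observed at that peak. The `_VMHWM_REGEX`-style patterns are defined outside this hunk, so the ones below are only hypothetical stand-ins sketching how such a status snapshot could be parsed:

```python
import re

# Hypothetical stand-ins for _VMHWM_REGEX / _VMRSS_REGEX / _RSSFILE_REGEX,
# which are defined elsewhere in benchmark_runner.py.
VMHWM_REGEX = re.compile(r"VmHWM:\s*(\d+)")
VMRSS_REGEX = re.compile(r"VmRSS:\s*(\d+)")
RSSFILE_REGEX = re.compile(r"RssFile:\s*(\d+)")

status = "VmHWM:\t  123456 kB\nVmRSS:\t  120000 kB\nRssFile:\t    8000 kB\n"
print(float(VMHWM_REGEX.search(status).group(1)))  # 123456.0
print(float(VMRSS_REGEX.search(status).group(1)))  # 120000.0
print(float(RSSFILE_REGEX.search(status).group(1)))  # 8000.0
```
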
diff --git a/build_tools/benchmarks/comparisons/common/utils.py b/build_tools/benchmarks/comparisons/common/utils.py
index 84b1f6f..43f92fe 100644
--- a/build_tools/benchmarks/comparisons/common/utils.py
+++ b/build_tools/benchmarks/comparisons/common/utils.py
@@ -6,8 +6,8 @@
 
 
 def write_benchmark_result(result: list[str], save_path: str):
-  """Writes an array to file as a comma-separated line."""
-  results_array = [str(i) for i in result]
-  print("Writing " + str(results_array))
-  with open(save_path, "a") as f:
-    f.write(",".join(results_array) + "\n")
+    """Writes an array to file as a comma-separated line."""
+    results_array = [str(i) for i in result]
+    print("Writing " + str(results_array))
+    with open(save_path, "a") as f:
+        f.write(",".join(results_array) + "\n")
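
A brief usage sketch for `write_benchmark_result` (not part of the diff; it assumes `build_tools/benchmarks/comparisons` is on `sys.path` so `common.utils` imports as laid out above). Each call appends one comma-separated line, matching the `[latency, vmhwm, vmrss, rssfile]` rows produced by `run_command`:

```python
# Hypothetical usage; the output path and the values are illustrative only.
from common.utils import write_benchmark_result

write_benchmark_result([71.495, 123456, 120000, 8000], "/tmp/results.csv")
# /tmp/results.csv now ends with the line: 71.495,123456,120000,8000
```
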
diff --git a/build_tools/benchmarks/comparisons/mobilebert_fp32_commands.py b/build_tools/benchmarks/comparisons/mobilebert_fp32_commands.py
index f8682a2..b605e1a 100644
--- a/build_tools/benchmarks/comparisons/mobilebert_fp32_commands.py
+++ b/build_tools/benchmarks/comparisons/mobilebert_fp32_commands.py
@@ -15,190 +15,232 @@
 
 
 class TfliteMobilebertFP32(TFLiteBenchmarkCommand):
-  """ Specializes the benchmark command to use TFLite. """
+    """Specializes the benchmark command to use TFLite."""
 
-  def __init__(self,
-               benchmark_binary: str,
-               model_name: str,
-               model_path: str,
-               test_data_dir: str,
-               driver: str = "cpu",
-               num_threads: int = _DEFAULT_NUM_THREADS,
-               num_runs: int = _DEFAULT_NUM_BENCHMARK_RUNS,
-               taskset: Optional[str] = None):
-    super().__init__(benchmark_binary,
-                     model_name,
-                     model_path,
-                     num_threads,
-                     num_runs,
-                     taskset=taskset)
-    self.driver = driver
-    self.args.append("--input_layer=input_ids,input_mask,segment_ids")
-    self.args.append("--input_layer_value_files=input_ids:" + test_data_dir +
-                     "/input_word_id.bin,input_mask:" + test_data_dir +
-                     "/input_mask.bin,segment_ids:" + test_data_dir +
-                     "/input_type_id.bin")
-    self.args.append("--input_layer_shape=1,384:1,384:1,384")
+    def __init__(
+        self,
+        benchmark_binary: str,
+        model_name: str,
+        model_path: str,
+        test_data_dir: str,
+        driver: str = "cpu",
+        num_threads: int = _DEFAULT_NUM_THREADS,
+        num_runs: int = _DEFAULT_NUM_BENCHMARK_RUNS,
+        taskset: Optional[str] = None,
+    ):
+        super().__init__(
+            benchmark_binary,
+            model_name,
+            model_path,
+            num_threads,
+            num_runs,
+            taskset=taskset,
+        )
+        self.driver = driver
+        self.args.append("--input_layer=input_ids,input_mask,segment_ids")
+        self.args.append(
+            "--input_layer_value_files=input_ids:"
+            + test_data_dir
+            + "/input_word_id.bin,input_mask:"
+            + test_data_dir
+            + "/input_mask.bin,segment_ids:"
+            + test_data_dir
+            + "/input_type_id.bin"
+        )
+        self.args.append("--input_layer_shape=1,384:1,384:1,384")
 
 
 class IreeMobilebertFP32(IreeBenchmarkCommand):
-  """ Specializes the benchmark command to use IREE. """
+    """Specializes the benchmark command to use IREE."""
 
-  def __init__(self,
-               benchmark_binary: str,
-               model_name: str,
-               model_path: str,
-               driver: str = "local-task",
-               num_threads: int = _DEFAULT_NUM_THREADS,
-               num_runs: int = _DEFAULT_NUM_BENCHMARK_RUNS,
-               taskset: Optional[str] = None):
-    super().__init__(benchmark_binary,
-                     model_name,
-                     model_path,
-                     num_threads,
-                     num_runs,
-                     taskset=taskset)
-    self.driver = driver
-    self.args.append("--function=main")
-    self.args.append(
-        '--input="1x384xi32=101 2129 2116 19576 2015 2106 3854 4679 2486 1029 102 1996 14169 2165 2019 2220 2599 1999 3565 4605 2753 1998 2196 11145 1012 8446 2001 3132 2011 7573 1005 1055 3639 1010 2029 14159 2032 2698 2335 1998 3140 2032 2046 2093 20991 2015 1010 2164 1037 19576 2029 2027 6757 2005 1037 7921 1012 7573 15674 3854 4679 2001 2315 3565 4605 12041 1010 3405 2274 3948 10455 1010 1016 13714 14918 1010 1998 2048 3140 19576 2015 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0"'
-    )
-    self.args.append(
-        '--input="1x384xi32=0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0"'
-    )
-    self.args.append(
-        '--input="1x384xi32=1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0"'
-    )
+    def __init__(
+        self,
+        benchmark_binary: str,
+        model_name: str,
+        model_path: str,
+        driver: str = "local-task",
+        num_threads: int = _DEFAULT_NUM_THREADS,
+        num_runs: int = _DEFAULT_NUM_BENCHMARK_RUNS,
+        taskset: Optional[str] = None,
+    ):
+        super().__init__(
+            benchmark_binary,
+            model_name,
+            model_path,
+            num_threads,
+            num_runs,
+            taskset=taskset,
+        )
+        self.driver = driver
+        self.args.append("--function=main")
+        self.args.append(
+            '--input="1x384xi32=101 2129 2116 19576 2015 2106 3854 4679 2486 1029 102 1996 14169 2165 2019 2220 2599 1999 3565 4605 2753 1998 2196 11145 1012 8446 2001 3132 2011 7573 1005 1055 3639 1010 2029 14159 2032 2698 2335 1998 3140 2032 2046 2093 20991 2015 1010 2164 1037 19576 2029 2027 6757 2005 1037 7921 1012 7573 15674 3854 4679 2001 2315 3565 4605 12041 1010 3405 2274 3948 10455 1010 1016 13714 14918 1010 1998 2048 3140 19576 2015 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0"'
+        )
+        self.args.append(
+            '--input="1x384xi32=0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0"'
+        )
+        self.args.append(
+            '--input="1x384xi32=1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0"'
+        )
 
 
 class MobilebertFP32CommandFactory(BenchmarkCommandFactory):
-  """ Generates `BenchmarkCommand` objects specific to running MobileBert."""
+    """Generates `BenchmarkCommand` objects specific to running MobileBert."""
 
-  def __init__(self, base_dir: str, model_name: str):
-    self._model_name = model_name
-    self._base_dir = base_dir
-    self._iree_benchmark_binary_path = os.path.join(base_dir,
-                                                    "iree-benchmark-module")
-    self._tflite_benchmark_binary_path = os.path.join(base_dir,
-                                                      "benchmark_model")
-    self._tflite_model_path = os.path.join(self._base_dir, "models", "tflite",
-                                           self._model_name + ".tflite")
-    self._tflite_test_data_dir = os.path.join(self._base_dir, "test_data",
-                                              "squad")
+    def __init__(self, base_dir: str, model_name: str):
+        self._model_name = model_name
+        self._base_dir = base_dir
+        self._iree_benchmark_binary_path = os.path.join(
+            base_dir, "iree-benchmark-module"
+        )
+        self._tflite_benchmark_binary_path = os.path.join(base_dir, "benchmark_model")
+        self._tflite_model_path = os.path.join(
+            self._base_dir, "models", "tflite", self._model_name + ".tflite"
+        )
+        self._tflite_test_data_dir = os.path.join(self._base_dir, "test_data", "squad")
 
-  def generate_benchmark_commands(self, device: str,
-                                  driver: str) -> list[BenchmarkCommand]:
-    if device == "desktop" and driver == "cpu":
-      return self._generate_cpu(device)
-    elif device == "desktop" and driver == "gpu":
-      return self._generate_gpu("cuda")
-    elif device == "mobile" and driver == "cpu":
-      return self._generate_cpu(device)
-    elif device == "mobile" and driver == "gpu":
-      return self._generate_gpu("vulkan")
-    else:
-      print("Warning! Not a valid configuration.")
-      return []
+    def generate_benchmark_commands(
+        self, device: str, driver: str
+    ) -> list[BenchmarkCommand]:
+        if device == "desktop" and driver == "cpu":
+            return self._generate_cpu(device)
+        elif device == "desktop" and driver == "gpu":
+            return self._generate_gpu("cuda")
+        elif device == "mobile" and driver == "cpu":
+            return self._generate_cpu(device)
+        elif device == "mobile" and driver == "gpu":
+            return self._generate_gpu("vulkan")
+        else:
+            print("Warning! Not a valid configuration.")
+            return []
 
-  def _generate_cpu(self, device: str):
-    # Generate TFLite benchmarks.
-    tflite_mobilebert = TfliteMobilebertFP32(self._tflite_benchmark_binary_path,
-                                             self._model_name,
-                                             self._tflite_model_path,
-                                             self._tflite_test_data_dir,
-                                             driver="cpu")
+    def _generate_cpu(self, device: str):
+        # Generate TFLite benchmarks.
+        tflite_mobilebert = TfliteMobilebertFP32(
+            self._tflite_benchmark_binary_path,
+            self._model_name,
+            self._tflite_model_path,
+            self._tflite_test_data_dir,
+            driver="cpu",
+        )
 
-    tflite_mobilebert_noxnn = TfliteMobilebertFP32(
-        self._tflite_benchmark_binary_path,
-        self._model_name + "_noxnn",
-        self._tflite_model_path,
-        self._tflite_test_data_dir,
-        driver="cpu")
-    tflite_mobilebert_noxnn.args.append("--use_xnnpack=false")
+        tflite_mobilebert_noxnn = TfliteMobilebertFP32(
+            self._tflite_benchmark_binary_path,
+            self._model_name + "_noxnn",
+            self._tflite_model_path,
+            self._tflite_test_data_dir,
+            driver="cpu",
+        )
+        tflite_mobilebert_noxnn.args.append("--use_xnnpack=false")
 
-    # Generate IREE benchmarks.
-    driver = "local-task"
-    backend = "llvm-cpu"
-    iree_model_path = os.path.join(self._base_dir, "models", "iree", backend,
-                                   self._model_name + ".vmfb")
-    iree_mobilebert = IreeMobilebertFP32(self._iree_benchmark_binary_path,
-                                         self._model_name,
-                                         iree_model_path,
-                                         driver=driver)
-    commands = [tflite_mobilebert, tflite_mobilebert_noxnn, iree_mobilebert]
+        # Generate IREE benchmarks.
+        driver = "local-task"
+        backend = "llvm-cpu"
+        iree_model_path = os.path.join(
+            self._base_dir, "models", "iree", backend, self._model_name + ".vmfb"
+        )
+        iree_mobilebert = IreeMobilebertFP32(
+            self._iree_benchmark_binary_path,
+            self._model_name,
+            iree_model_path,
+            driver=driver,
+        )
+        commands = [tflite_mobilebert, tflite_mobilebert_noxnn, iree_mobilebert]
 
-    # Test mmt4d only on mobile.
-    if device == "mobile":
-      model_mmt4d_name = self._model_name + "_mmt4d"
-      iree_mmt4d_model_path = os.path.join(self._base_dir, "models", "iree",
-                                           backend, model_mmt4d_name + ".vmfb")
-      iree_mmt4d_mobilebert = IreeMobilebertFP32(
-          self._iree_benchmark_binary_path,
-          model_mmt4d_name,
-          iree_mmt4d_model_path,
-          driver=driver)
-      commands.append(iree_mmt4d_mobilebert)
+        # Test mmt4d only on mobile.
+        if device == "mobile":
+            model_mmt4d_name = self._model_name + "_mmt4d"
+            iree_mmt4d_model_path = os.path.join(
+                self._base_dir, "models", "iree", backend, model_mmt4d_name + ".vmfb"
+            )
+            iree_mmt4d_mobilebert = IreeMobilebertFP32(
+                self._iree_benchmark_binary_path,
+                model_mmt4d_name,
+                iree_mmt4d_model_path,
+                driver=driver,
+            )
+            commands.append(iree_mmt4d_mobilebert)
 
-      model_im2col_mmt4d_name = self._model_name + "_im2col_mmt4d"
-      iree_im2col_mmt4d_model_path = os.path.join(
-          self._base_dir, "models", "iree", backend,
-          model_im2col_mmt4d_name + ".vmfb")
-      iree_im2col_mmt4d_mobilebert = IreeMobilebertFP32(
-          self._iree_benchmark_binary_path,
-          model_im2col_mmt4d_name,
-          iree_im2col_mmt4d_model_path,
-          driver=driver)
-      commands.append(iree_im2col_mmt4d_mobilebert)
+            model_im2col_mmt4d_name = self._model_name + "_im2col_mmt4d"
+            iree_im2col_mmt4d_model_path = os.path.join(
+                self._base_dir,
+                "models",
+                "iree",
+                backend,
+                model_im2col_mmt4d_name + ".vmfb",
+            )
+            iree_im2col_mmt4d_mobilebert = IreeMobilebertFP32(
+                self._iree_benchmark_binary_path,
+                model_im2col_mmt4d_name,
+                iree_im2col_mmt4d_model_path,
+                driver=driver,
+            )
+            commands.append(iree_im2col_mmt4d_mobilebert)
 
-    return commands
+        return commands
 
-  def _generate_gpu(self, driver: str):
-    tflite_mobilebert = TfliteMobilebertFP32(self._tflite_benchmark_binary_path,
-                                             self._model_name,
-                                             self._tflite_model_path,
-                                             self._tflite_test_data_dir,
-                                             driver="gpu")
-    tflite_mobilebert.args.append("--gpu_precision_loss_allowed=false")
+    def _generate_gpu(self, driver: str):
+        tflite_mobilebert = TfliteMobilebertFP32(
+            self._tflite_benchmark_binary_path,
+            self._model_name,
+            self._tflite_model_path,
+            self._tflite_test_data_dir,
+            driver="gpu",
+        )
+        tflite_mobilebert.args.append("--gpu_precision_loss_allowed=false")
 
-    tflite_mobilebert_noxnn = TfliteMobilebertFP32(
-        self._tflite_benchmark_binary_path,
-        self._model_name + "_noxnn",
-        self._tflite_model_path,
-        self._tflite_test_data_dir,
-        driver="gpu")
-    tflite_mobilebert_noxnn.args.append("--gpu_precision_loss_allowed=false")
-    tflite_mobilebert_noxnn.args.append("--use_xnnpack=false")
+        tflite_mobilebert_noxnn = TfliteMobilebertFP32(
+            self._tflite_benchmark_binary_path,
+            self._model_name + "_noxnn",
+            self._tflite_model_path,
+            self._tflite_test_data_dir,
+            driver="gpu",
+        )
+        tflite_mobilebert_noxnn.args.append("--gpu_precision_loss_allowed=false")
+        tflite_mobilebert_noxnn.args.append("--use_xnnpack=false")
 
-    tflite_mobilebert_fp16 = TfliteMobilebertFP32(
-        self._tflite_benchmark_binary_path,
-        self._model_name + "_fp16",
-        self._tflite_model_path,
-        self._tflite_test_data_dir,
-        driver="gpu")
-    tflite_mobilebert_fp16.args.append("--gpu_precision_loss_allowed=true")
+        tflite_mobilebert_fp16 = TfliteMobilebertFP32(
+            self._tflite_benchmark_binary_path,
+            self._model_name + "_fp16",
+            self._tflite_model_path,
+            self._tflite_test_data_dir,
+            driver="gpu",
+        )
+        tflite_mobilebert_fp16.args.append("--gpu_precision_loss_allowed=true")
 
-    iree_model_path = os.path.join(self._base_dir, "models", "iree", driver,
-                                   self._model_name + ".vmfb")
-    iree_mobilebert = IreeMobilebertFP32(self._iree_benchmark_binary_path,
-                                         self._model_name,
-                                         iree_model_path,
-                                         driver=driver)
-    iree_fp16_model_path = os.path.join(self._base_dir, "models", "iree",
-                                        driver, self._model_name + "_fp16.vmfb")
-    iree_mobilebert_fp16 = IreeMobilebertFP32(self._iree_benchmark_binary_path,
-                                              self._model_name + "_fp16",
-                                              iree_fp16_model_path,
-                                              driver=driver)
-    iree_padfuse_model_path = os.path.join(self._base_dir, "models", "iree",
-                                           driver,
-                                           self._model_name + "_padfuse.vmfb")
-    iree_mobilebert_padfuse = IreeMobilebertFP32(
-        self._iree_benchmark_binary_path,
-        self._model_name + "_padfuse",
-        iree_padfuse_model_path,
-        driver=driver)
+        iree_model_path = os.path.join(
+            self._base_dir, "models", "iree", driver, self._model_name + ".vmfb"
+        )
+        iree_mobilebert = IreeMobilebertFP32(
+            self._iree_benchmark_binary_path,
+            self._model_name,
+            iree_model_path,
+            driver=driver,
+        )
+        iree_fp16_model_path = os.path.join(
+            self._base_dir, "models", "iree", driver, self._model_name + "_fp16.vmfb"
+        )
+        iree_mobilebert_fp16 = IreeMobilebertFP32(
+            self._iree_benchmark_binary_path,
+            self._model_name + "_fp16",
+            iree_fp16_model_path,
+            driver=driver,
+        )
+        iree_padfuse_model_path = os.path.join(
+            self._base_dir, "models", "iree", driver, self._model_name + "_padfuse.vmfb"
+        )
+        iree_mobilebert_padfuse = IreeMobilebertFP32(
+            self._iree_benchmark_binary_path,
+            self._model_name + "_padfuse",
+            iree_padfuse_model_path,
+            driver=driver,
+        )
 
-    return [
-        tflite_mobilebert, tflite_mobilebert_noxnn, tflite_mobilebert_fp16,
-        iree_mobilebert, iree_mobilebert_fp16, iree_mobilebert_padfuse
-    ]
+        return [
+            tflite_mobilebert,
+            tflite_mobilebert_noxnn,
+            tflite_mobilebert_fp16,
+            iree_mobilebert,
+            iree_mobilebert_fp16,
+            iree_mobilebert_padfuse,
+        ]
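
The hunks above show Black's default call splitting: once a call would exceed the default 88-character line length (or already ends in a trailing comma), each argument moves onto its own line, a trailing comma is added, and the closing parenthesis drops to its own line at the statement's indentation. A minimal sketch of that behavior with hypothetical names, not code from this diff:

```python
def build_command(binary, model_name, model_path, test_data_dir, driver="cpu"):
    """Hypothetical stand-in for the benchmark constructors reformatted above."""
    return [binary, model_name, model_path, test_data_dir, driver]


# yapf aligned continuation arguments with the opening parenthesis:
#   cmd = build_command("/tmp/benchmark_model", "mobilebert", "/tmp/model.tflite",
#                       "/tmp/test_data", driver="cpu")
# Black explodes the call instead: one argument per line, a trailing comma, and
# the closing parenthesis on its own line.
cmd = build_command(
    "/tmp/benchmark_model",
    "mobilebert",
    "/tmp/model.tflite",
    "/tmp/test_data",
    driver="cpu",
)
print(cmd)
```
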
diff --git a/build_tools/benchmarks/comparisons/mobilebert_int8_commands.py b/build_tools/benchmarks/comparisons/mobilebert_int8_commands.py
index eb78e41..309bffb 100644
--- a/build_tools/benchmarks/comparisons/mobilebert_int8_commands.py
+++ b/build_tools/benchmarks/comparisons/mobilebert_int8_commands.py
@@ -15,176 +15,212 @@
 
 
 class TfliteMobilebertInt8(TFLiteBenchmarkCommand):
-  """ Specializes the benchmark command to use TFLite. """
+    """Specializes the benchmark command to use TFLite."""
 
-  def __init__(self,
-               benchmark_binary: str,
-               model_name: str,
-               model_path: str,
-               test_data_dir: str,
-               driver: str = "cpu",
-               num_threads: int = _DEFAULT_NUM_THREADS,
-               num_runs: int = _DEFAULT_NUM_BENCHMARK_RUNS,
-               taskset: Optional[str] = None):
-    super().__init__(benchmark_binary,
-                     model_name,
-                     model_path,
-                     num_threads,
-                     num_runs,
-                     taskset=taskset)
-    self.driver = driver
-    self.args.append("--input_layer=input_ids,segment_ids,input_mask")
-    self.args.append("--input_layer_value_files=input_ids:" + test_data_dir +
-                     "/input_word_id.bin,segment_ids:" + test_data_dir +
-                     "/input_type_id.bin,input_mask:" + test_data_dir +
-                     "/input_mask.bin")
-    self.args.append("--input_layer_shape=1,384:1,384:1,384")
+    def __init__(
+        self,
+        benchmark_binary: str,
+        model_name: str,
+        model_path: str,
+        test_data_dir: str,
+        driver: str = "cpu",
+        num_threads: int = _DEFAULT_NUM_THREADS,
+        num_runs: int = _DEFAULT_NUM_BENCHMARK_RUNS,
+        taskset: Optional[str] = None,
+    ):
+        super().__init__(
+            benchmark_binary,
+            model_name,
+            model_path,
+            num_threads,
+            num_runs,
+            taskset=taskset,
+        )
+        self.driver = driver
+        self.args.append("--input_layer=input_ids,segment_ids,input_mask")
+        self.args.append(
+            "--input_layer_value_files=input_ids:"
+            + test_data_dir
+            + "/input_word_id.bin,segment_ids:"
+            + test_data_dir
+            + "/input_type_id.bin,input_mask:"
+            + test_data_dir
+            + "/input_mask.bin"
+        )
+        self.args.append("--input_layer_shape=1,384:1,384:1,384")
 
 
 class IreeMobilebertInt8(IreeBenchmarkCommand):
-  """ Specializes the benchmark command to use IREE. """
+    """Specializes the benchmark command to use IREE."""
 
-  def __init__(self,
-               benchmark_binary: str,
-               model_name: str,
-               model_path: str,
-               driver: str = "local-task",
-               num_threads: int = _DEFAULT_NUM_THREADS,
-               num_runs: int = _DEFAULT_NUM_BENCHMARK_RUNS,
-               taskset: Optional[str] = None):
-    super().__init__(benchmark_binary,
-                     model_name,
-                     model_path,
-                     num_threads,
-                     num_runs,
-                     taskset=taskset)
-    self.driver = driver
-    self.args.append("--function=main")
-    self.args.append(
-        '--input="1x384xi32=101 2129 2116 19576 2015 2106 3854 4679 2486 1029 102 1996 14169 2165 2019 2220 2599 1999 3565 4605 2753 1998 2196 11145 1012 8446 2001 3132 2011 7573 1005 1055 3639 1010 2029 14159 2032 2698 2335 1998 3140 2032 2046 2093 20991 2015 1010 2164 1037 19576 2029 2027 6757 2005 1037 7921 1012 7573 15674 3854 4679 2001 2315 3565 4605 12041 1010 3405 2274 3948 10455 1010 1016 13714 14918 1010 1998 2048 3140 19576 2015 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0"'
-    )
-    self.args.append(
-        '--input="1x384xi32=0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0"'
-    )
-    self.args.append(
-        '--input="1x384xi32=1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0"'
-    )
+    def __init__(
+        self,
+        benchmark_binary: str,
+        model_name: str,
+        model_path: str,
+        driver: str = "local-task",
+        num_threads: int = _DEFAULT_NUM_THREADS,
+        num_runs: int = _DEFAULT_NUM_BENCHMARK_RUNS,
+        taskset: Optional[str] = None,
+    ):
+        super().__init__(
+            benchmark_binary,
+            model_name,
+            model_path,
+            num_threads,
+            num_runs,
+            taskset=taskset,
+        )
+        self.driver = driver
+        self.args.append("--function=main")
+        self.args.append(
+            '--input="1x384xi32=101 2129 2116 19576 2015 2106 3854 4679 2486 1029 102 1996 14169 2165 2019 2220 2599 1999 3565 4605 2753 1998 2196 11145 1012 8446 2001 3132 2011 7573 1005 1055 3639 1010 2029 14159 2032 2698 2335 1998 3140 2032 2046 2093 20991 2015 1010 2164 1037 19576 2029 2027 6757 2005 1037 7921 1012 7573 15674 3854 4679 2001 2315 3565 4605 12041 1010 3405 2274 3948 10455 1010 1016 13714 14918 1010 1998 2048 3140 19576 2015 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0"'
+        )
+        self.args.append(
+            '--input="1x384xi32=0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0"'
+        )
+        self.args.append(
+            '--input="1x384xi32=1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0"'
+        )
 
 
 class MobilebertInt8CommandFactory(BenchmarkCommandFactory):
-  """ Generates `BenchmarkCommand` objects specific to running MobileBert."""
+    """Generates `BenchmarkCommand` objects specific to running MobileBert."""
 
-  def __init__(self, base_dir: str):
-    self._model_name = "mobilebert-baseline-tf2-quant"
-    self._base_dir = base_dir
-    self._iree_benchmark_binary_path = os.path.join(base_dir,
-                                                    "iree-benchmark-module")
-    self._tflite_benchmark_binary_path = os.path.join(base_dir,
-                                                      "benchmark_model")
-    self._tflite_model_path = os.path.join(self._base_dir, "models", "tflite",
-                                           self._model_name + ".tflite")
-    self._tflite_test_data_dir = os.path.join(self._base_dir, "test_data",
-                                              "squad")
+    def __init__(self, base_dir: str):
+        self._model_name = "mobilebert-baseline-tf2-quant"
+        self._base_dir = base_dir
+        self._iree_benchmark_binary_path = os.path.join(
+            base_dir, "iree-benchmark-module"
+        )
+        self._tflite_benchmark_binary_path = os.path.join(base_dir, "benchmark_model")
+        self._tflite_model_path = os.path.join(
+            self._base_dir, "models", "tflite", self._model_name + ".tflite"
+        )
+        self._tflite_test_data_dir = os.path.join(self._base_dir, "test_data", "squad")
 
-  def generate_benchmark_commands(self, device: str,
-                                  driver: str) -> list[BenchmarkCommand]:
-    if device == "desktop" and driver == "cpu":
-      return self._generate_cpu(device)
-    elif device == "desktop" and driver == "gpu":
-      return self._generate_gpu("cuda")
-    elif device == "mobile" and driver == "cpu":
-      return self._generate_cpu(device)
-    elif device == "mobile" and driver == "gpu":
-      return self._generate_gpu("vulkan")
-    else:
-      print("Warning! Not a valid configuration.")
-      return []
+    def generate_benchmark_commands(
+        self, device: str, driver: str
+    ) -> list[BenchmarkCommand]:
+        if device == "desktop" and driver == "cpu":
+            return self._generate_cpu(device)
+        elif device == "desktop" and driver == "gpu":
+            return self._generate_gpu("cuda")
+        elif device == "mobile" and driver == "cpu":
+            return self._generate_cpu(device)
+        elif device == "mobile" and driver == "gpu":
+            return self._generate_gpu("vulkan")
+        else:
+            print("Warning! Not a valid configuration.")
+            return []
 
-  def _generate_cpu(self, device: str):
-    # Generate TFLite benchmarks.
-    tflite_mobilebert = TfliteMobilebertInt8(self._tflite_benchmark_binary_path,
-                                             self._model_name,
-                                             self._tflite_model_path,
-                                             self._tflite_test_data_dir,
-                                             driver="cpu")
+    def _generate_cpu(self, device: str):
+        # Generate TFLite benchmarks.
+        tflite_mobilebert = TfliteMobilebertInt8(
+            self._tflite_benchmark_binary_path,
+            self._model_name,
+            self._tflite_model_path,
+            self._tflite_test_data_dir,
+            driver="cpu",
+        )
 
-    tflite_mobilebert_noxnn = TfliteMobilebertInt8(
-        self._tflite_benchmark_binary_path,
-        self._model_name + "_noxnn",
-        self._tflite_model_path,
-        self._tflite_test_data_dir,
-        driver="cpu")
-    tflite_mobilebert_noxnn.args.append("--use_xnnpack=false")
+        tflite_mobilebert_noxnn = TfliteMobilebertInt8(
+            self._tflite_benchmark_binary_path,
+            self._model_name + "_noxnn",
+            self._tflite_model_path,
+            self._tflite_test_data_dir,
+            driver="cpu",
+        )
+        tflite_mobilebert_noxnn.args.append("--use_xnnpack=false")
 
-    # Generate IREE benchmarks.
-    driver = "local-task"
-    backend = "llvm-cpu"
-    iree_model_path = os.path.join(self._base_dir, "models", "iree", backend,
-                                   self._model_name + ".vmfb")
-    iree_mobilebert = IreeMobilebertInt8(self._iree_benchmark_binary_path,
-                                         self._model_name,
-                                         iree_model_path,
-                                         driver=driver)
-    commands = [tflite_mobilebert, tflite_mobilebert_noxnn, iree_mobilebert]
+        # Generate IREE benchmarks.
+        driver = "local-task"
+        backend = "llvm-cpu"
+        iree_model_path = os.path.join(
+            self._base_dir, "models", "iree", backend, self._model_name + ".vmfb"
+        )
+        iree_mobilebert = IreeMobilebertInt8(
+            self._iree_benchmark_binary_path,
+            self._model_name,
+            iree_model_path,
+            driver=driver,
+        )
+        commands = [tflite_mobilebert, tflite_mobilebert_noxnn, iree_mobilebert]
 
-    # Test mmt4d only on mobile.
-    if device == "mobile":
-      model_mmt4d_name = self._model_name + "_mmt4d"
-      iree_mmt4d_model_path = os.path.join(self._base_dir, "models", "iree",
-                                           backend, model_mmt4d_name + ".vmfb")
-      iree_mmt4d_mobilebert = IreeMobilebertInt8(
-          self._iree_benchmark_binary_path,
-          model_mmt4d_name,
-          iree_mmt4d_model_path,
-          driver=driver)
-      commands.append(iree_mmt4d_mobilebert)
+        # Test mmt4d only on mobile.
+        if device == "mobile":
+            model_mmt4d_name = self._model_name + "_mmt4d"
+            iree_mmt4d_model_path = os.path.join(
+                self._base_dir, "models", "iree", backend, model_mmt4d_name + ".vmfb"
+            )
+            iree_mmt4d_mobilebert = IreeMobilebertInt8(
+                self._iree_benchmark_binary_path,
+                model_mmt4d_name,
+                iree_mmt4d_model_path,
+                driver=driver,
+            )
+            commands.append(iree_mmt4d_mobilebert)
 
-      model_im2col_mmt4d_name = self._model_name + "_im2col_mmt4d"
-      iree_im2col_mmt4d_model_path = os.path.join(
-          self._base_dir, "models", "iree", backend,
-          model_im2col_mmt4d_name + ".vmfb")
-      iree_im2col_mmt4d_mobilebert = IreeMobilebertInt8(
-          self._iree_benchmark_binary_path,
-          model_im2col_mmt4d_name,
-          iree_im2col_mmt4d_model_path,
-          driver=driver)
-      commands.append(iree_im2col_mmt4d_mobilebert)
+            model_im2col_mmt4d_name = self._model_name + "_im2col_mmt4d"
+            iree_im2col_mmt4d_model_path = os.path.join(
+                self._base_dir,
+                "models",
+                "iree",
+                backend,
+                model_im2col_mmt4d_name + ".vmfb",
+            )
+            iree_im2col_mmt4d_mobilebert = IreeMobilebertInt8(
+                self._iree_benchmark_binary_path,
+                model_im2col_mmt4d_name,
+                iree_im2col_mmt4d_model_path,
+                driver=driver,
+            )
+            commands.append(iree_im2col_mmt4d_mobilebert)
 
-    return commands
+        return commands
 
-  def _generate_gpu(self, driver: str):
-    tflite_mobilebert = TfliteMobilebertInt8(self._tflite_benchmark_binary_path,
-                                             self._model_name,
-                                             self._tflite_model_path,
-                                             self._tflite_test_data_dir,
-                                             driver="gpu")
-    tflite_mobilebert.args.append("--gpu_precision_loss_allowed=false")
+    def _generate_gpu(self, driver: str):
+        tflite_mobilebert = TfliteMobilebertInt8(
+            self._tflite_benchmark_binary_path,
+            self._model_name,
+            self._tflite_model_path,
+            self._tflite_test_data_dir,
+            driver="gpu",
+        )
+        tflite_mobilebert.args.append("--gpu_precision_loss_allowed=false")
 
-    tflite_mobilebert_noxnn = TfliteMobilebertInt8(
-        self._tflite_benchmark_binary_path,
-        self._model_name + "_noxnn",
-        self._tflite_model_path,
-        self._tflite_test_data_dir,
-        driver="gpu")
-    tflite_mobilebert_noxnn.args.append("--gpu_precision_loss_allowed=false")
-    tflite_mobilebert_noxnn.args.append("--use_xnnpack=false")
+        tflite_mobilebert_noxnn = TfliteMobilebertInt8(
+            self._tflite_benchmark_binary_path,
+            self._model_name + "_noxnn",
+            self._tflite_model_path,
+            self._tflite_test_data_dir,
+            driver="gpu",
+        )
+        tflite_mobilebert_noxnn.args.append("--gpu_precision_loss_allowed=false")
+        tflite_mobilebert_noxnn.args.append("--use_xnnpack=false")
 
-    iree_model_path = os.path.join(self._base_dir, "models", "iree", driver,
-                                   self._model_name + ".vmfb")
-    iree_mobilebert = IreeMobilebertInt8(self._iree_benchmark_binary_path,
-                                         self._model_name,
-                                         iree_model_path,
-                                         driver=driver)
+        iree_model_path = os.path.join(
+            self._base_dir, "models", "iree", driver, self._model_name + ".vmfb"
+        )
+        iree_mobilebert = IreeMobilebertInt8(
+            self._iree_benchmark_binary_path,
+            self._model_name,
+            iree_model_path,
+            driver=driver,
+        )
 
-    iree_padfuse_model_path = os.path.join(self._base_dir, "models", "iree",
-                                           driver,
-                                           self._model_name + "_padfuse.vmfb")
-    iree_padfuse_mobilebert = IreeMobilebertInt8(
-        self._iree_benchmark_binary_path,
-        self._model_name + "_padfuse",
-        iree_padfuse_model_path,
-        driver=driver)
-    return [
-        tflite_mobilebert, tflite_mobilebert_noxnn, iree_mobilebert,
-        iree_padfuse_mobilebert
-    ]
+        iree_padfuse_model_path = os.path.join(
+            self._base_dir, "models", "iree", driver, self._model_name + "_padfuse.vmfb"
+        )
+        iree_padfuse_mobilebert = IreeMobilebertInt8(
+            self._iree_benchmark_binary_path,
+            self._model_name + "_padfuse",
+            iree_padfuse_model_path,
+            driver=driver,
+        )
+        return [
+            tflite_mobilebert,
+            tflite_mobilebert_noxnn,
+            iree_mobilebert,
+            iree_padfuse_mobilebert,
+        ]
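
The `--input_layer_value_files` hunk above also illustrates how Black wraps long string concatenations: the expression stays inside the call's parentheses and is split with the `+` operator leading each continuation line rather than trailing the previous one. A small runnable sketch of the same pattern (the path below is a hypothetical placeholder):

```python
test_data_dir = "/tmp/test_data/squad"  # hypothetical path, for illustration only

# Black breaks long binary expressions before the operator, so each
# continuation line starts with "+".
arg = (
    "--input_layer_value_files=input_ids:"
    + test_data_dir
    + "/input_word_id.bin,segment_ids:"
    + test_data_dir
    + "/input_type_id.bin"
)
print(arg)
```
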
diff --git a/build_tools/benchmarks/comparisons/run_benchmarks.py b/build_tools/benchmarks/comparisons/run_benchmarks.py
index 00b0fb4..24d1bb5 100644
--- a/build_tools/benchmarks/comparisons/run_benchmarks.py
+++ b/build_tools/benchmarks/comparisons/run_benchmarks.py
@@ -31,178 +31,222 @@
 from simple_commands import *
 
 
-def benchmark_desktop_cpu(device_name: str,
-                          command_factories: list[BenchmarkCommandFactory],
-                          results_path: str):
-  benchmarks = []
-  for factory in command_factories:
-    benchmarks.extend(factory.generate_benchmark_commands("desktop", "cpu"))
+def benchmark_desktop_cpu(
+    device_name: str,
+    command_factories: list[BenchmarkCommandFactory],
+    results_path: str,
+):
+    benchmarks = []
+    for factory in command_factories:
+        benchmarks.extend(factory.generate_benchmark_commands("desktop", "cpu"))
 
-  for num_threads in [1, 2, 4, 8]:
+    for num_threads in [1, 2, 4, 8]:
+        for benchmark in benchmarks:
+            results_array = [
+                device_name,
+                benchmark.model_name,
+                benchmark.runtime,
+                benchmark.driver,
+                num_threads,
+            ]
+            benchmark.num_threads = num_threads
+            results_array.extend(run_command(benchmark))
+            write_benchmark_result(results_array, results_path)
+
+
+def benchmark_desktop_gpu(
+    device_name: str,
+    command_factories: list[BenchmarkCommandFactory],
+    results_path: str,
+):
+    benchmarks = []
+    for factory in command_factories:
+        benchmarks.extend(factory.generate_benchmark_commands("desktop", "gpu"))
     for benchmark in benchmarks:
-      results_array = [
-          device_name, benchmark.model_name, benchmark.runtime,
-          benchmark.driver, num_threads
-      ]
-      benchmark.num_threads = num_threads
-      results_array.extend(run_command(benchmark))
-      write_benchmark_result(results_array, results_path)
+        results_array = [
+            device_name,
+            benchmark.model_name,
+            benchmark.runtime,
+            benchmark.driver,
+            benchmark.num_threads,
+        ]
+        results_array.extend(run_command(benchmark))
+        write_benchmark_result(results_array, results_path)
 
 
-def benchmark_desktop_gpu(device_name: str,
-                          command_factories: list[BenchmarkCommandFactory],
-                          results_path: str):
-  benchmarks = []
-  for factory in command_factories:
-    benchmarks.extend(factory.generate_benchmark_commands("desktop", "gpu"))
-  for benchmark in benchmarks:
-    results_array = [
-        device_name, benchmark.model_name, benchmark.runtime, benchmark.driver,
-        benchmark.num_threads
-    ]
-    results_array.extend(run_command(benchmark))
-    write_benchmark_result(results_array, results_path)
+def benchmark_mobile_cpu(
+    device_name: str,
+    command_factories: list[BenchmarkCommandFactory],
+    results_path: str,
+):
+    benchmarks = []
+    for factory in command_factories:
+        benchmarks.extend(factory.generate_benchmark_commands("mobile", "cpu"))
+
+    for _, tuple in enumerate([("80", 1), ("C0", 2), ("F0", 4), ("0F", 4), ("FF", 8)]):
+        taskset = tuple[0]
+        num_threads = tuple[1]
+        for benchmark in benchmarks:
+            results_array = [
+                device_name,
+                benchmark.model_name,
+                benchmark.runtime,
+                benchmark.driver,
+                taskset,
+                num_threads,
+            ]
+            benchmark.taskset = taskset
+            benchmark.num_threads = num_threads
+            results_array.extend(run_command(benchmark))
+            write_benchmark_result(results_array, results_path)
 
 
-def benchmark_mobile_cpu(device_name: str,
-                         command_factories: list[BenchmarkCommandFactory],
-                         results_path: str):
-  benchmarks = []
-  for factory in command_factories:
-    benchmarks.extend(factory.generate_benchmark_commands("mobile", "cpu"))
+def benchmark_mobile_gpu(
+    device_name: str,
+    command_factories: list[BenchmarkCommandFactory],
+    results_path: str,
+):
+    benchmarks = []
+    for factory in command_factories:
+        benchmarks.extend(factory.generate_benchmark_commands("mobile", "gpu"))
 
-  for _, tuple in enumerate([("80", 1), ("C0", 2), ("F0", 4), ("0F", 4),
-                             ("FF", 8)]):
-    taskset = tuple[0]
-    num_threads = tuple[1]
+    taskset = "80"
+    num_threads = 1
     for benchmark in benchmarks:
-      results_array = [
-          device_name, benchmark.model_name, benchmark.runtime,
-          benchmark.driver, taskset, num_threads
-      ]
-      benchmark.taskset = taskset
-      benchmark.num_threads = num_threads
-      results_array.extend(run_command(benchmark))
-      write_benchmark_result(results_array, results_path)
-
-
-def benchmark_mobile_gpu(device_name: str,
-                         command_factories: list[BenchmarkCommandFactory],
-                         results_path: str):
-  benchmarks = []
-  for factory in command_factories:
-    benchmarks.extend(factory.generate_benchmark_commands("mobile", "gpu"))
-
-  taskset = "80"
-  num_threads = 1
-  for benchmark in benchmarks:
-    results_array = [
-        device_name, benchmark.model_name, benchmark.runtime, benchmark.driver,
-        taskset, num_threads
-    ]
-    benchmark.taskset = taskset
-    benchmark.num_threads = num_threads
-    results_array.extend(run_command(benchmark))
-    write_benchmark_result(results_array, results_path)
+        results_array = [
+            device_name,
+            benchmark.model_name,
+            benchmark.runtime,
+            benchmark.driver,
+            taskset,
+            num_threads,
+        ]
+        benchmark.taskset = taskset
+        benchmark.num_threads = num_threads
+        results_array.extend(run_command(benchmark))
+        write_benchmark_result(results_array, results_path)
 
 
 def main(args):
-  # Create factories for all models to be benchmarked.
-  command_factory = []
-  command_factory.append(
-      MobilebertFP32CommandFactory(args.base_dir, "mobilebert_float_384_gpu"))
-  command_factory.append(MobilebertInt8CommandFactory(args.base_dir))
-  command_factory.append(
-      MobilebertFP32CommandFactory(args.base_dir, "albert_lite_base_squadv1_1"))
-  command_factory.append(
-      SimpleCommandFactory(args.base_dir, "mobilenet_v2_1.0_224",
-                           "1x224x224x3xf32"))
-  command_factory.append(
-      SimpleCommandFactory(args.base_dir, "mobilenet_v2_224_1.0_uint8",
-                           "1x224x224x3xui8"))
-  command_factory.append(
-      SimpleCommandFactory(args.base_dir, "deeplabv3", "1x257x257x3xf32"))
-  command_factory.append(
-      SimpleCommandFactory(args.base_dir, "person_detect", "1x96x96x1xi8"))
-  command_factory.append(
-      SimpleCommandFactory(args.base_dir, "ssd_mobilenet_v2_static_1.0_int8",
-                           "1x320x320x3xi8"))
-  command_factory.append(
-      SimpleCommandFactory(args.base_dir, "resnet_v2_101_1_default_1",
-                           "1x299x299x3xf32"))
-  command_factory.append(
-      SimpleCommandFactory(args.base_dir, "ssd_mobilenet_v2_fpnlite_uint8",
-                           "1x320x320x3xui8"))
-  command_factory.append(
-      SimpleCommandFactory(args.base_dir, "ssd_mobilenet_v2_fpnlite_fp32",
-                           "1x320x320x3xf32"))
-  command_factory.append(
-      SimpleCommandFactory(args.base_dir, "efficientnet_lite0_int8_2",
-                           "1x224x224x3xui8"))
-  command_factory.append(
-      SimpleCommandFactory(args.base_dir, "efficientnet_lite0_fp32_2",
-                           "1x224x224x3xf32"))
-  command_factory.append(
-      SimpleCommandFactory(args.base_dir, "inception_v4_299_uint8",
-                           "1x299x299x3xui8"))
-  command_factory.append(
-      SimpleCommandFactory(args.base_dir, "inception_v4_299_fp32",
-                           "1x299x299x3xf32"))
+    # Create factories for all models to be benchmarked.
+    command_factory = []
+    command_factory.append(
+        MobilebertFP32CommandFactory(args.base_dir, "mobilebert_float_384_gpu")
+    )
+    command_factory.append(MobilebertInt8CommandFactory(args.base_dir))
+    command_factory.append(
+        MobilebertFP32CommandFactory(args.base_dir, "albert_lite_base_squadv1_1")
+    )
+    command_factory.append(
+        SimpleCommandFactory(args.base_dir, "mobilenet_v2_1.0_224", "1x224x224x3xf32")
+    )
+    command_factory.append(
+        SimpleCommandFactory(
+            args.base_dir, "mobilenet_v2_224_1.0_uint8", "1x224x224x3xui8"
+        )
+    )
+    command_factory.append(
+        SimpleCommandFactory(args.base_dir, "deeplabv3", "1x257x257x3xf32")
+    )
+    command_factory.append(
+        SimpleCommandFactory(args.base_dir, "person_detect", "1x96x96x1xi8")
+    )
+    command_factory.append(
+        SimpleCommandFactory(
+            args.base_dir, "ssd_mobilenet_v2_static_1.0_int8", "1x320x320x3xi8"
+        )
+    )
+    command_factory.append(
+        SimpleCommandFactory(
+            args.base_dir, "resnet_v2_101_1_default_1", "1x299x299x3xf32"
+        )
+    )
+    command_factory.append(
+        SimpleCommandFactory(
+            args.base_dir, "ssd_mobilenet_v2_fpnlite_uint8", "1x320x320x3xui8"
+        )
+    )
+    command_factory.append(
+        SimpleCommandFactory(
+            args.base_dir, "ssd_mobilenet_v2_fpnlite_fp32", "1x320x320x3xf32"
+        )
+    )
+    command_factory.append(
+        SimpleCommandFactory(
+            args.base_dir, "efficientnet_lite0_int8_2", "1x224x224x3xui8"
+        )
+    )
+    command_factory.append(
+        SimpleCommandFactory(
+            args.base_dir, "efficientnet_lite0_fp32_2", "1x224x224x3xf32"
+        )
+    )
+    command_factory.append(
+        SimpleCommandFactory(args.base_dir, "inception_v4_299_uint8", "1x299x299x3xui8")
+    )
+    command_factory.append(
+        SimpleCommandFactory(args.base_dir, "inception_v4_299_fp32", "1x299x299x3xf32")
+    )
 
-  if args.mode == "desktop":
-    results_path = os.path.join(args.output_dir, "results.csv")
-    with open(results_path, "w") as f:
-      f.write(
-          "device,model,runtime,driver/delegate,threads,latency (ms),vmhwm (KB),vmrss (KB),rssfile (KB)\n"
-      )
+    if args.mode == "desktop":
+        results_path = os.path.join(args.output_dir, "results.csv")
+        with open(results_path, "w") as f:
+            f.write(
+                "device,model,runtime,driver/delegate,threads,latency (ms),vmhwm (KB),vmrss (KB),rssfile (KB)\n"
+            )
 
-    if not args.disable_cpu:
-      benchmark_desktop_cpu(args.device_name, command_factory, results_path)
-    if not args.disable_gpu:
-      benchmark_desktop_gpu(args.device_name, command_factory, results_path)
-  else:
-    assert (args.mode == "mobile")
-    results_path = os.path.join(args.output_dir, "results.csv")
-    with open(results_path, "w") as f:
-      f.write(
-          "device,model,runtime,driver/delegate,taskset,threads,latency (ms),vmhwm (KB),vmrss (KB),rssfile (KB)\n"
-      )
-    if not args.disable_cpu:
-      benchmark_mobile_cpu(args.device_name, command_factory, results_path)
-    if not args.disable_gpu:
-      benchmark_mobile_gpu(args.device_name, command_factory, results_path)
+        if not args.disable_cpu:
+            benchmark_desktop_cpu(args.device_name, command_factory, results_path)
+        if not args.disable_gpu:
+            benchmark_desktop_gpu(args.device_name, command_factory, results_path)
+    else:
+        assert args.mode == "mobile"
+        results_path = os.path.join(args.output_dir, "results.csv")
+        with open(results_path, "w") as f:
+            f.write(
+                "device,model,runtime,driver/delegate,taskset,threads,latency (ms),vmhwm (KB),vmrss (KB),rssfile (KB)\n"
+            )
+        if not args.disable_cpu:
+            benchmark_mobile_cpu(args.device_name, command_factory, results_path)
+        if not args.disable_gpu:
+            benchmark_mobile_gpu(args.device_name, command_factory, results_path)
 
 
 def parse_args():
-  parser = argparse.ArgumentParser()
-  parser.add_argument(
-      "--device_name",
-      type=str,
-      default=None,
-      help="The name of the device the benchmark is running on e.g. Pixel 6")
-  parser.add_argument(
-      "--base_dir",
-      type=str,
-      default=None,
-      help="The directory where all benchmarking artifacts are located.")
-  parser.add_argument("--output_dir",
-                      type=str,
-                      default=None,
-                      help="The directory to save output artifacts into.")
-  parser.add_argument(
-      "--mode",
-      type=str,
-      choices=("desktop", "mobile"),
-      default="desktop",
-      help="The benchmarking mode to use. If mode is `mobile`, uses tasksets.")
-  parser.add_argument("--disable_cpu",
-                      action="store_true",
-                      help="Disables running benchmarks on CPU.")
-  parser.add_argument("--disable_gpu",
-                      action="store_true",
-                      help="Disables running benchmarks on GPU.")
-  return parser.parse_args()
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--device_name",
+        type=str,
+        default=None,
+        help="The name of the device the benchmark is running on e.g. Pixel 6",
+    )
+    parser.add_argument(
+        "--base_dir",
+        type=str,
+        default=None,
+        help="The directory where all benchmarking artifacts are located.",
+    )
+    parser.add_argument(
+        "--output_dir",
+        type=str,
+        default=None,
+        help="The directory to save output artifacts into.",
+    )
+    parser.add_argument(
+        "--mode",
+        type=str,
+        choices=("desktop", "mobile"),
+        default="desktop",
+        help="The benchmarking mode to use. If mode is `mobile`, uses tasksets.",
+    )
+    parser.add_argument(
+        "--disable_cpu", action="store_true", help="Disables running benchmarks on CPU."
+    )
+    parser.add_argument(
+        "--disable_gpu", action="store_true", help="Disables running benchmarks on GPU."
+    )
+    return parser.parse_args()
 
 
-if __name__ == '__main__':
-  main(parse_args())
+if __name__ == "__main__":
+    main(parse_args())
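
Two smaller Black behaviors are visible at the end of this file: redundant parentheses around the `assert` expression are dropped, and single-quoted strings such as `'__main__'` are normalized to double quotes. A tiny self-contained example of both, using a hypothetical variable:

```python
mode = "desktop"  # hypothetical value; Black normalizes string literals to double quotes

# Previously written as: assert (mode == "desktop")
assert mode == "desktop"

if __name__ == "__main__":  # quotes normalized from '__main__'
    print(mode)
```
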
diff --git a/build_tools/benchmarks/comparisons/simple_commands.py b/build_tools/benchmarks/comparisons/simple_commands.py
index 2e49c30..ef001bf 100644
--- a/build_tools/benchmarks/comparisons/simple_commands.py
+++ b/build_tools/benchmarks/comparisons/simple_commands.py
@@ -15,217 +15,263 @@
 
 
 class TfliteWrapper(TFLiteBenchmarkCommand):
-  """Specializes the benchmark command to use TFLite."""
+    """Specializes the benchmark command to use TFLite."""
 
-  def __init__(self,
-               benchmark_binary: str,
-               model_name: str,
-               model_path: str,
-               input_layer: Optional[str] = None,
-               input_shape: Optional[str] = None,
-               driver: str = "cpu",
-               num_threads: int = _DEFAULT_NUM_THREADS,
-               num_runs: int = _DEFAULT_NUM_BENCHMARK_RUNS,
-               taskset: Optional[str] = None):
-    super().__init__(benchmark_binary,
-                     model_name,
-                     model_path,
-                     num_threads,
-                     num_runs,
-                     taskset=taskset)
-    self.driver = driver
-    if input_layer and input_shape:
-      self.args.append("--input_layer=%s" % input_layer)
-      self.args.append("--input_layer_shape=%s" % input_shape)
+    def __init__(
+        self,
+        benchmark_binary: str,
+        model_name: str,
+        model_path: str,
+        input_layer: Optional[str] = None,
+        input_shape: Optional[str] = None,
+        driver: str = "cpu",
+        num_threads: int = _DEFAULT_NUM_THREADS,
+        num_runs: int = _DEFAULT_NUM_BENCHMARK_RUNS,
+        taskset: Optional[str] = None,
+    ):
+        super().__init__(
+            benchmark_binary,
+            model_name,
+            model_path,
+            num_threads,
+            num_runs,
+            taskset=taskset,
+        )
+        self.driver = driver
+        if input_layer and input_shape:
+            self.args.append("--input_layer=%s" % input_layer)
+            self.args.append("--input_layer_shape=%s" % input_shape)
 
 
 class IreeWrapper(IreeBenchmarkCommand):
-  """Specializes the benchmark command to use IREE."""
+    """Specializes the benchmark command to use IREE."""
 
-  def __init__(self,
-               benchmark_binary: str,
-               model_name: str,
-               model_path: str,
-               function_input: str,
-               driver: str = "local-task",
-               num_threads: int = _DEFAULT_NUM_THREADS,
-               num_runs: int = _DEFAULT_NUM_BENCHMARK_RUNS,
-               taskset: Optional[str] = None):
-    super().__init__(benchmark_binary,
-                     model_name,
-                     model_path,
-                     num_threads,
-                     num_runs,
-                     taskset=taskset)
-    self.driver = driver
-    self.args.append("--function=main")
-    self.args.append('--input="%s"' % function_input)
+    def __init__(
+        self,
+        benchmark_binary: str,
+        model_name: str,
+        model_path: str,
+        function_input: str,
+        driver: str = "local-task",
+        num_threads: int = _DEFAULT_NUM_THREADS,
+        num_runs: int = _DEFAULT_NUM_BENCHMARK_RUNS,
+        taskset: Optional[str] = None,
+    ):
+        super().__init__(
+            benchmark_binary,
+            model_name,
+            model_path,
+            num_threads,
+            num_runs,
+            taskset=taskset,
+        )
+        self.driver = driver
+        self.args.append("--function=main")
+        self.args.append('--input="%s"' % function_input)
 
 
 class SimpleCommandFactory(BenchmarkCommandFactory):
-  """
-  Generates `BenchmarkCommand` objects specific to running series of simple models.
+    """
+    Generates `BenchmarkCommand` objects specific to running series of simple models.
 
-  A model is considered simple if its inputs can be generically generated based
-  on expected signature only without affecting behavior.
-  """
+    A model is considered simple if its inputs can be generically generated based
+    on expected signature only without affecting behavior.
+    """
 
-  def __init__(self,
-               base_dir: str,
-               model_name: str,
-               function_input: str,
-               input_name: Optional[str] = None,
-               input_layer: Optional[str] = None):
-    self._model_name = model_name
-    self._function_input = function_input
-    self._input_name = input_name
-    self._input_layer = input_layer
-    self._base_dir = base_dir
-    self._iree_benchmark_binary_path = os.path.join(base_dir,
-                                                    "iree-benchmark-module")
-    self._tflite_benchmark_binary_path = os.path.join(base_dir,
-                                                      "benchmark_model")
-    # Required to be set, but no test data used yet.
-    self._tflite_test_data_dir = os.path.join(self._base_dir, "test_data")
+    def __init__(
+        self,
+        base_dir: str,
+        model_name: str,
+        function_input: str,
+        input_name: Optional[str] = None,
+        input_layer: Optional[str] = None,
+    ):
+        self._model_name = model_name
+        self._function_input = function_input
+        self._input_name = input_name
+        self._input_layer = input_layer
+        self._base_dir = base_dir
+        self._iree_benchmark_binary_path = os.path.join(
+            base_dir, "iree-benchmark-module"
+        )
+        self._tflite_benchmark_binary_path = os.path.join(base_dir, "benchmark_model")
+        # Required to be set, but no test data used yet.
+        self._tflite_test_data_dir = os.path.join(self._base_dir, "test_data")
 
-  def generate_benchmark_commands(self, device: str,
-                                  driver: str) -> list[BenchmarkCommand]:
-    if device == "desktop" and driver == "cpu":
-      return self._generate_cpu(device)
-    elif device == "desktop" and driver == "gpu":
-      return self._generate_gpu("cuda")
-    elif device == "mobile" and driver == "cpu":
-      return self._generate_cpu(device)
-    elif device == "mobile" and driver == "gpu":
-      return self._generate_gpu("vulkan")
-    else:
-      print("Warning! Not a valid configuration.")
-      return []
+    def generate_benchmark_commands(
+        self, device: str, driver: str
+    ) -> list[BenchmarkCommand]:
+        if device == "desktop" and driver == "cpu":
+            return self._generate_cpu(device)
+        elif device == "desktop" and driver == "gpu":
+            return self._generate_gpu("cuda")
+        elif device == "mobile" and driver == "cpu":
+            return self._generate_cpu(device)
+        elif device == "mobile" and driver == "gpu":
+            return self._generate_gpu("vulkan")
+        else:
+            print("Warning! Not a valid configuration.")
+            return []
 
-  def _generate_cpu(self, device: str):
-    commands = []
-    # Generate TFLite benchmarks.
-    tflite_model_path = os.path.join(self._base_dir, "models", "tflite",
-                                     self._model_name + ".tflite")
-    tflite = TfliteWrapper(self._tflite_benchmark_binary_path,
-                           self._model_name,
-                           tflite_model_path,
-                           self._input_name,
-                           driver="cpu")
-    commands.append(tflite)
+    def _generate_cpu(self, device: str):
+        commands = []
+        # Generate TFLite benchmarks.
+        tflite_model_path = os.path.join(
+            self._base_dir, "models", "tflite", self._model_name + ".tflite"
+        )
+        tflite = TfliteWrapper(
+            self._tflite_benchmark_binary_path,
+            self._model_name,
+            tflite_model_path,
+            self._input_name,
+            driver="cpu",
+        )
+        commands.append(tflite)
 
-    tflite_noxnn = TfliteWrapper(self._tflite_benchmark_binary_path,
-                                 self._model_name + "_noxnn",
-                                 tflite_model_path,
-                                 self._input_name,
-                                 driver="cpu")
-    tflite_noxnn.args.append("--use_xnnpack=false")
-    commands.append(tflite_noxnn)
+        tflite_noxnn = TfliteWrapper(
+            self._tflite_benchmark_binary_path,
+            self._model_name + "_noxnn",
+            tflite_model_path,
+            self._input_name,
+            driver="cpu",
+        )
+        tflite_noxnn.args.append("--use_xnnpack=false")
+        commands.append(tflite_noxnn)
 
-    # Generate IREE benchmarks.
-    driver = "local-task"
-    backend = "llvm-cpu"
+        # Generate IREE benchmarks.
+        driver = "local-task"
+        backend = "llvm-cpu"
 
-    iree_model_path = os.path.join(self._base_dir, "models", "iree", backend,
-                                   self._model_name + ".vmfb")
-    iree = IreeWrapper(self._iree_benchmark_binary_path,
-                       self._model_name,
-                       iree_model_path,
-                       self._function_input,
-                       driver=driver)
-    commands.append(iree)
+        iree_model_path = os.path.join(
+            self._base_dir, "models", "iree", backend, self._model_name + ".vmfb"
+        )
+        iree = IreeWrapper(
+            self._iree_benchmark_binary_path,
+            self._model_name,
+            iree_model_path,
+            self._function_input,
+            driver=driver,
+        )
+        commands.append(iree)
 
-    model_padfuse_name = self._model_name + "_padfuse"
-    iree_padfuse_model_path = os.path.join(self._base_dir, "models", "iree",
-                                           backend,
-                                           model_padfuse_name + ".vmfb")
-    iree_padfuse = IreeWrapper(self._iree_benchmark_binary_path,
-                               model_padfuse_name,
-                               iree_padfuse_model_path,
-                               self._function_input,
-                               driver=driver)
-    commands.append(iree_padfuse)
+        model_padfuse_name = self._model_name + "_padfuse"
+        iree_padfuse_model_path = os.path.join(
+            self._base_dir, "models", "iree", backend, model_padfuse_name + ".vmfb"
+        )
+        iree_padfuse = IreeWrapper(
+            self._iree_benchmark_binary_path,
+            model_padfuse_name,
+            iree_padfuse_model_path,
+            self._function_input,
+            driver=driver,
+        )
+        commands.append(iree_padfuse)
 
-    # Test mmt4d only on mobile.
-    if device == "mobile":
-      model_mmt4d_name = self._model_name + "_mmt4d"
-      iree_mmt4d_model_path = os.path.join(self._base_dir, "models", "iree",
-                                           backend, model_mmt4d_name + ".vmfb")
-      iree_mmt4d = IreeWrapper(self._iree_benchmark_binary_path,
-                               model_mmt4d_name,
-                               iree_mmt4d_model_path,
-                               self._function_input,
-                               driver=driver)
-      commands.append(iree_mmt4d)
+        # Test mmt4d only on mobile.
+        if device == "mobile":
+            model_mmt4d_name = self._model_name + "_mmt4d"
+            iree_mmt4d_model_path = os.path.join(
+                self._base_dir, "models", "iree", backend, model_mmt4d_name + ".vmfb"
+            )
+            iree_mmt4d = IreeWrapper(
+                self._iree_benchmark_binary_path,
+                model_mmt4d_name,
+                iree_mmt4d_model_path,
+                self._function_input,
+                driver=driver,
+            )
+            commands.append(iree_mmt4d)
 
-      model_im2col_mmt4d_name = self._model_name + "_im2col_mmt4d"
-      iree_im2col_mmt4d_model_path = os.path.join(
-          self._base_dir, "models", "iree", backend,
-          model_im2col_mmt4d_name + ".vmfb")
-      iree_im2col_mmt4d = IreeWrapper(self._iree_benchmark_binary_path,
-                                      model_im2col_mmt4d_name,
-                                      iree_im2col_mmt4d_model_path,
-                                      self._function_input,
-                                      driver=driver)
-      commands.append(iree_im2col_mmt4d)
+            model_im2col_mmt4d_name = self._model_name + "_im2col_mmt4d"
+            iree_im2col_mmt4d_model_path = os.path.join(
+                self._base_dir,
+                "models",
+                "iree",
+                backend,
+                model_im2col_mmt4d_name + ".vmfb",
+            )
+            iree_im2col_mmt4d = IreeWrapper(
+                self._iree_benchmark_binary_path,
+                model_im2col_mmt4d_name,
+                iree_im2col_mmt4d_model_path,
+                self._function_input,
+                driver=driver,
+            )
+            commands.append(iree_im2col_mmt4d)
 
-    return commands
+        return commands
 
-  def _generate_gpu(self, driver: str):
-    commands = []
-    tflite_model_path = os.path.join(self._base_dir, "models", "tflite",
-                                     self._model_name + ".tflite")
-    tflite = TfliteWrapper(self._tflite_benchmark_binary_path,
-                           self._model_name,
-                           tflite_model_path,
-                           self._input_name,
-                           self._input_layer,
-                           driver="gpu")
-    tflite.args.append("--gpu_precision_loss_allowed=false")
-    commands.append(tflite)
+    def _generate_gpu(self, driver: str):
+        commands = []
+        tflite_model_path = os.path.join(
+            self._base_dir, "models", "tflite", self._model_name + ".tflite"
+        )
+        tflite = TfliteWrapper(
+            self._tflite_benchmark_binary_path,
+            self._model_name,
+            tflite_model_path,
+            self._input_name,
+            self._input_layer,
+            driver="gpu",
+        )
+        tflite.args.append("--gpu_precision_loss_allowed=false")
+        commands.append(tflite)
 
-    tflite_noxnn = TfliteWrapper(self._tflite_benchmark_binary_path,
-                                 self._model_name + "_noxnn",
-                                 tflite_model_path,
-                                 self._input_name,
-                                 self._input_layer,
-                                 driver="gpu")
-    tflite.args.append("--use_xnnpack=false")
-    commands.append(tflite_noxnn)
+        tflite_noxnn = TfliteWrapper(
+            self._tflite_benchmark_binary_path,
+            self._model_name + "_noxnn",
+            tflite_model_path,
+            self._input_name,
+            self._input_layer,
+            driver="gpu",
+        )
+        tflite.args.append("--use_xnnpack=false")
+        commands.append(tflite_noxnn)
 
-    tflite_fp16 = TfliteWrapper(self._tflite_benchmark_binary_path,
-                                self._model_name + "_fp16",
-                                tflite_model_path,
-                                self._input_name,
-                                self._input_layer,
-                                driver="gpu")
-    tflite.args.append("--gpu_precision_loss_allowed=true")
-    commands.append(tflite_fp16)
+        tflite_fp16 = TfliteWrapper(
+            self._tflite_benchmark_binary_path,
+            self._model_name + "_fp16",
+            tflite_model_path,
+            self._input_name,
+            self._input_layer,
+            driver="gpu",
+        )
+        tflite.args.append("--gpu_precision_loss_allowed=true")
+        commands.append(tflite_fp16)
 
-    iree_model_path = os.path.join(self._base_dir, "models", "iree", driver,
-                                   self._model_name + ".vmfb")
-    iree = IreeWrapper(self._iree_benchmark_binary_path,
-                       self._model_name,
-                       iree_model_path,
-                       self._function_input,
-                       driver=driver)
-    commands.append(iree)
+        iree_model_path = os.path.join(
+            self._base_dir, "models", "iree", driver, self._model_name + ".vmfb"
+        )
+        iree = IreeWrapper(
+            self._iree_benchmark_binary_path,
+            self._model_name,
+            iree_model_path,
+            self._function_input,
+            driver=driver,
+        )
+        commands.append(iree)
 
-    iree_model_path = os.path.join(self._base_dir, "models", "iree", driver,
-                                   self._model_name + "_fp16.vmfb")
-    iree = IreeWrapper(self._iree_benchmark_binary_path,
-                       self._model_name + "_fp16",
-                       iree_model_path,
-                       self._function_input,
-                       driver=driver)
-    commands.append(iree)
+        iree_model_path = os.path.join(
+            self._base_dir, "models", "iree", driver, self._model_name + "_fp16.vmfb"
+        )
+        iree = IreeWrapper(
+            self._iree_benchmark_binary_path,
+            self._model_name + "_fp16",
+            iree_model_path,
+            self._function_input,
+            driver=driver,
+        )
+        commands.append(iree)
 
-    iree_model_path = os.path.join(self._base_dir, "models", "iree", driver,
-                                   self._model_name + "_padfuse.vmfb")
-    iree = IreeWrapper(self._iree_benchmark_binary_path,
-                       self._model_name + "_padfuse",
-                       iree_model_path,
-                       self._function_input,
-                       driver=driver)
-    commands.append(iree)
-    return commands
+        iree_model_path = os.path.join(
+            self._base_dir, "models", "iree", driver, self._model_name + "_padfuse.vmfb"
+        )
+        iree = IreeWrapper(
+            self._iree_benchmark_binary_path,
+            self._model_name + "_padfuse",
+            iree_model_path,
+            self._function_input,
+            driver=driver,
+        )
+        commands.append(iree)
+        return commands
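
The hunks above are typical of the whole reformat: call sites that yapf had aligned under the opening parenthesis are rewritten by Black to one argument per line whenever the joined call would exceed the default 88-column limit, and Black adds a trailing comma so the call stays exploded on later runs (the "magic trailing comma"). A minimal sketch of that behavior using Black's library API on a made-up snippet (the `BenchmarkWrapper` names are hypothetical, not code from this repo):

```python
import black

# A yapf-style call with continuation lines aligned under the opening paren.
# All names here are hypothetical; the snippet only exists to be reformatted.
SOURCE = '''\
command = BenchmarkWrapper(benchmark_binary_path, benchmark_model_name,
                           benchmark_model_path, benchmark_input_name,
                           driver="cpu")
'''

# black.format_str() applies the same rules as the `black` CLI. Because the
# joined call would exceed 88 columns, each argument is placed on its own
# line and a trailing comma is added.
print(black.format_str(SOURCE, mode=black.FileMode()))
```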
diff --git a/build_tools/benchmarks/diff_local_benchmarks.py b/build_tools/benchmarks/diff_local_benchmarks.py
index 6b458d7..eeb4333 100755
--- a/build_tools/benchmarks/diff_local_benchmarks.py
+++ b/build_tools/benchmarks/diff_local_benchmarks.py
@@ -29,89 +29,100 @@
     target_benchmark_file: Optional[pathlib.Path],
     base_compile_stats_file: Optional[pathlib.Path],
     target_compile_stats_file: Optional[pathlib.Path],
-    verbose: bool = False) -> str:
-  """Gets the full markdown summary of all benchmarks in files."""
-  base_benchmarks = {}
-  target_benchmarks = {}
-  base_compilation_metrics = {}
-  target_compilation_metrics = {}
-  if base_benchmark_file and target_benchmark_file:
-    base_benchmarks = aggregate_all_benchmarks([base_benchmark_file])
-    target_benchmarks = aggregate_all_benchmarks([target_benchmark_file])
-  if base_compile_stats_file and target_compile_stats_file:
-    base_compilation_metrics = collect_all_compilation_metrics(
-        [base_compile_stats_file])
-    target_compilation_metrics = collect_all_compilation_metrics(
-        [target_compile_stats_file])
+    verbose: bool = False,
+) -> str:
+    """Gets the full markdown summary of all benchmarks in files."""
+    base_benchmarks = {}
+    target_benchmarks = {}
+    base_compilation_metrics = {}
+    target_compilation_metrics = {}
+    if base_benchmark_file and target_benchmark_file:
+        base_benchmarks = aggregate_all_benchmarks([base_benchmark_file])
+        target_benchmarks = aggregate_all_benchmarks([target_benchmark_file])
+    if base_compile_stats_file and target_compile_stats_file:
+        base_compilation_metrics = collect_all_compilation_metrics(
+            [base_compile_stats_file]
+        )
+        target_compilation_metrics = collect_all_compilation_metrics(
+            [target_compile_stats_file]
+        )
 
-  # Update the target benchmarks with their corresponding base numbers.
-  for bench in base_benchmarks:
-    if bench in target_benchmarks:
-      target_benchmarks[bench].base_mean_time = base_benchmarks[bench].mean_time
+    # Update the target benchmarks with their corresponding base numbers.
+    for bench in base_benchmarks:
+        if bench in target_benchmarks:
+            target_benchmarks[bench].base_mean_time = base_benchmarks[bench].mean_time
 
-  for target_name, base_metrics in base_compilation_metrics.items():
-    updated_metrics = base_metrics
-    for mapper in COMPILATION_METRICS_TO_TABLE_MAPPERS:
-      metric_key = mapper.get_series_name(target_name)
-      base_value, _ = mapper.get_current_and_base_value(base_metrics)
-      updated_metrics = mapper.update_base_value(updated_metrics, base_value)
-    target_compilation_metrics[target_name] = updated_metrics
+    for target_name, base_metrics in base_compilation_metrics.items():
+        updated_metrics = base_metrics
+        for mapper in COMPILATION_METRICS_TO_TABLE_MAPPERS:
+            metric_key = mapper.get_series_name(target_name)
+            base_value, _ = mapper.get_current_and_base_value(base_metrics)
+            updated_metrics = mapper.update_base_value(updated_metrics, base_value)
+        target_compilation_metrics[target_name] = updated_metrics
 
-  # Compose the full benchmark tables.
-  full_table = [md.header("Full Benchmark Summary", 2)]
-  full_table.append(categorize_benchmarks_into_tables(target_benchmarks))
+    # Compose the full benchmark tables.
+    full_table = [md.header("Full Benchmark Summary", 2)]
+    full_table.append(categorize_benchmarks_into_tables(target_benchmarks))
 
-  # Compose the full compilation metrics tables.
-  full_table.append(
-      categorize_compilation_metrics_into_tables(target_compilation_metrics))
+    # Compose the full compilation metrics tables.
+    full_table.append(
+        categorize_compilation_metrics_into_tables(target_compilation_metrics)
+    )
 
-  return "\n\n".join(full_table)
+    return "\n\n".join(full_table)
 
 
 def parse_arguments():
-  """Parses command-line options."""
+    """Parses command-line options."""
 
-  def check_file_path(path):
-    path = pathlib.Path(path)
-    if path.is_file():
-      return path
-    else:
-      raise ValueError(path)
+    def check_file_path(path):
+        path = pathlib.Path(path)
+        if path.is_file():
+            return path
+        else:
+            raise ValueError(path)
 
-  parser = argparse.ArgumentParser()
-  parser.add_argument("--base",
-                      type=check_file_path,
-                      help="Base benchmark results")
-  parser.add_argument("--target",
-                      type=check_file_path,
-                      help="Target benchmark results")
-  parser.add_argument("--base-compile-stats",
-                      type=check_file_path,
-                      help="Base compilation statistics")
-  parser.add_argument("--target-compile-stats",
-                      type=check_file_path,
-                      help="Target compilation statistics")
-  parser.add_argument("--verbose",
-                      action="store_true",
-                      help="Print internal information during execution")
-  args = parser.parse_args()
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--base", type=check_file_path, help="Base benchmark results")
+    parser.add_argument(
+        "--target", type=check_file_path, help="Target benchmark results"
+    )
+    parser.add_argument(
+        "--base-compile-stats", type=check_file_path, help="Base compilation statistics"
+    )
+    parser.add_argument(
+        "--target-compile-stats",
+        type=check_file_path,
+        help="Target compilation statistics",
+    )
+    parser.add_argument(
+        "--verbose",
+        action="store_true",
+        help="Print internal information during execution",
+    )
+    args = parser.parse_args()
 
-  return args
+    return args
 
 
 if __name__ == "__main__":
-  args = parse_arguments()
-  if args.base or args.target:
-    if not args.base or not args.target:
-      raise ValueError("--base and --target must be used together.")
-  if args.base_compile_stats or args.target_compile_stats:
-    if not args.base_compile_stats or not args.target_compile_stats:
-      raise ValueError("--base-compile-stats and --target-compile-stats must "
-                       "be used together.")
+    args = parse_arguments()
+    if args.base or args.target:
+        if not args.base or not args.target:
+            raise ValueError("--base and --target must be used together.")
+    if args.base_compile_stats or args.target_compile_stats:
+        if not args.base_compile_stats or not args.target_compile_stats:
+            raise ValueError(
+                "--base-compile-stats and --target-compile-stats must "
+                "be used together."
+            )
 
-  print(
-      get_benchmark_result_markdown(args.base,
-                                    args.target,
-                                    args.base_compile_stats,
-                                    args.target_compile_stats,
-                                    verbose=args.verbose))
+    print(
+        get_benchmark_result_markdown(
+            args.base,
+            args.target,
+            args.base_compile_stats,
+            args.target_compile_stats,
+            verbose=args.verbose,
+        )
+    )
diff --git a/build_tools/benchmarks/export_benchmark_config.py b/build_tools/benchmarks/export_benchmark_config.py
index 2fbf270..1d0853b 100755
--- a/build_tools/benchmarks/export_benchmark_config.py
+++ b/build_tools/benchmarks/export_benchmark_config.py
@@ -47,172 +47,181 @@
 
 PresetMatcher = Callable[[Any], bool]
 EXECUTION_BENCHMARK_PRESET_MATCHERS: Dict[str, PresetMatcher] = {
-    "x86_64":
-        lambda config: (benchmark_tags.X86_64 in config.tags and benchmark_tags.
-                        LARGE not in config.tags),
-    "x86_64-large":
-        lambda config: (benchmark_tags.X86_64 in config.tags and benchmark_tags.
-                        LARGE in config.tags),
-    "cuda":
-        lambda config: (benchmark_tags.CUDA in config.tags and benchmark_tags.
-                        LARGE not in config.tags),
-    "cuda-large":
-        lambda config: (benchmark_tags.CUDA in config.tags and benchmark_tags.
-                        LARGE in config.tags),
-    "vulkan-nvidia":
-        lambda config: benchmark_tags.VULKAN_NVIDIA in config.tags,
-    "android-cpu":
-        lambda config:
-        (config.target_device_spec.architecture.type == common_definitions.
-         ArchitectureType.CPU and config.target_device_spec.host_environment.
-         platform == "android"),
-    "android-gpu":
-        lambda config:
-        (config.target_device_spec.architecture.type == common_definitions.
-         ArchitectureType.GPU and config.target_device_spec.host_environment.
-         platform == "android"),
+    "x86_64": lambda config: (
+        benchmark_tags.X86_64 in config.tags and benchmark_tags.LARGE not in config.tags
+    ),
+    "x86_64-large": lambda config: (
+        benchmark_tags.X86_64 in config.tags and benchmark_tags.LARGE in config.tags
+    ),
+    "cuda": lambda config: (
+        benchmark_tags.CUDA in config.tags and benchmark_tags.LARGE not in config.tags
+    ),
+    "cuda-large": lambda config: (
+        benchmark_tags.CUDA in config.tags and benchmark_tags.LARGE in config.tags
+    ),
+    "vulkan-nvidia": lambda config: benchmark_tags.VULKAN_NVIDIA in config.tags,
+    "android-cpu": lambda config: (
+        config.target_device_spec.architecture.type
+        == common_definitions.ArchitectureType.CPU
+        and config.target_device_spec.host_environment.platform == "android"
+    ),
+    "android-gpu": lambda config: (
+        config.target_device_spec.architecture.type
+        == common_definitions.ArchitectureType.GPU
+        and config.target_device_spec.host_environment.platform == "android"
+    ),
 }
 
 COMPILATION_BENCHMARK_PRESET_MATCHERS: Dict[str, PresetMatcher] = {
-    "comp-stats":
-        lambda gen_config: benchmark_tags.LARGE not in gen_config.tags,
-    "comp-stats-large":
-        lambda gen_config: benchmark_tags.LARGE in gen_config.tags,
+    "comp-stats": lambda gen_config: benchmark_tags.LARGE not in gen_config.tags,
+    "comp-stats-large": lambda gen_config: benchmark_tags.LARGE in gen_config.tags,
 }
 
 
 def filter_and_group_run_configs(
     run_configs: List[iree_definitions.E2EModelRunConfig],
     target_device_names: Optional[Set[str]] = None,
-    preset_matchers: Optional[Sequence[PresetMatcher]] = None
+    preset_matchers: Optional[Sequence[PresetMatcher]] = None,
 ) -> Dict[str, List[iree_definitions.E2EModelRunConfig]]:
-  """Filters run configs and groups by target device name.
-  
-  Args:
-    run_configs: source e2e model run configs.
-    target_device_names: list of target device names, includes all if not set.
-    preset_matchers: list of preset matcher, matches all if not set.
+    """Filters run configs and groups by target device name.
 
-  Returns:
-    A map of e2e model run configs keyed by target device name.
-  """
-  grouped_run_config_map = collections.defaultdict(list)
+    Args:
+      run_configs: source e2e model run configs.
+      target_device_names: list of target device names, includes all if not set.
+      preset_matchers: list of preset matcher, matches all if not set.
 
-  for run_config in run_configs:
-    device_name = run_config.target_device_spec.device_name
-    if (target_device_names is not None and
-        device_name not in target_device_names):
-      continue
-    if (preset_matchers is not None and
-        not any(matcher(run_config) for matcher in preset_matchers)):
-      continue
-    grouped_run_config_map[device_name].append(run_config)
+    Returns:
+      A map of e2e model run configs keyed by target device name.
+    """
+    grouped_run_config_map = collections.defaultdict(list)
 
-  return grouped_run_config_map
+    for run_config in run_configs:
+        device_name = run_config.target_device_spec.device_name
+        if target_device_names is not None and device_name not in target_device_names:
+            continue
+        if preset_matchers is not None and not any(
+            matcher(run_config) for matcher in preset_matchers
+        ):
+            continue
+        grouped_run_config_map[device_name].append(run_config)
+
+    return grouped_run_config_map
 
 
 def _get_distinct_module_dir_paths(
-    module_generation_configs: Iterable[
-        iree_definitions.ModuleGenerationConfig],
-    root_path: pathlib.PurePath = pathlib.PurePath()
+    module_generation_configs: Iterable[iree_definitions.ModuleGenerationConfig],
+    root_path: pathlib.PurePath = pathlib.PurePath(),
 ) -> List[str]:
-  module_dir_paths = (str(
-      iree_artifacts.get_module_dir_path(config, root_path=root_path))
-                      for config in module_generation_configs)
-  return sorted(set(module_dir_paths))
+    module_dir_paths = (
+        str(iree_artifacts.get_module_dir_path(config, root_path=root_path))
+        for config in module_generation_configs
+    )
+    return sorted(set(module_dir_paths))
 
 
 def _export_execution_handler(
     benchmark_presets: Optional[Sequence[PresetMatcher]] = None,
     target_device_names: Optional[Sequence[str]] = None,
-    **_unused_args):
-  _, all_run_configs = benchmark_collections.generate_benchmarks()
-  target_device_name_set = (None if target_device_names is None else
-                            set(target_device_names))
-  grouped_run_config_map = filter_and_group_run_configs(
-      all_run_configs,
-      target_device_names=target_device_name_set,
-      preset_matchers=benchmark_presets)
+    **_unused_args,
+):
+    _, all_run_configs = benchmark_collections.generate_benchmarks()
+    target_device_name_set = (
+        None if target_device_names is None else set(target_device_names)
+    )
+    grouped_run_config_map = filter_and_group_run_configs(
+        all_run_configs,
+        target_device_names=target_device_name_set,
+        preset_matchers=benchmark_presets,
+    )
 
-  output_map = {}
-  for device_name, run_configs in grouped_run_config_map.items():
-    host_environments = set(run_config.target_device_spec.host_environment
-                            for run_config in run_configs)
-    if len(host_environments) > 1:
-      raise ValueError(
-          "Device specs of the same device should have the same host environment."
-      )
-    host_environment = host_environments.pop()
+    output_map = {}
+    for device_name, run_configs in grouped_run_config_map.items():
+        host_environments = set(
+            run_config.target_device_spec.host_environment for run_config in run_configs
+        )
+        if len(host_environments) > 1:
+            raise ValueError(
+                "Device specs of the same device should have the same host environment."
+            )
+        host_environment = host_environments.pop()
 
-    distinct_module_dir_paths = _get_distinct_module_dir_paths(
-        config.module_generation_config for config in run_configs)
+        distinct_module_dir_paths = _get_distinct_module_dir_paths(
+            config.module_generation_config for config in run_configs
+        )
 
-    output_map[device_name] = {
-        "host_environment": dataclasses.asdict(host_environment),
-        "module_dir_paths": distinct_module_dir_paths,
-        "run_configs": serialization.serialize_and_pack(run_configs),
-    }
+        output_map[device_name] = {
+            "host_environment": dataclasses.asdict(host_environment),
+            "module_dir_paths": distinct_module_dir_paths,
+            "run_configs": serialization.serialize_and_pack(run_configs),
+        }
 
-  return output_map
+    return output_map
 
 
 def _export_compilation_handler(
-    benchmark_presets: Optional[Sequence[PresetMatcher]] = None,
-    **_unused_args):
-  all_gen_configs, _ = benchmark_collections.generate_benchmarks()
-  compile_stats_gen_configs = [
-      config for config in all_gen_configs
-      if benchmark_tags.COMPILE_STATS in config.compile_config.tags
-  ]
-
-  if benchmark_presets is not None:
-    match_predicate = lambda gen_config: any(
-        matcher(gen_config) for matcher in benchmark_presets)
+    benchmark_presets: Optional[Sequence[PresetMatcher]] = None, **_unused_args
+):
+    all_gen_configs, _ = benchmark_collections.generate_benchmarks()
     compile_stats_gen_configs = [
-        gen_config for gen_config in compile_stats_gen_configs
-        if match_predicate(gen_config)
+        config
+        for config in all_gen_configs
+        if benchmark_tags.COMPILE_STATS in config.compile_config.tags
     ]
 
-  distinct_module_dir_paths = _get_distinct_module_dir_paths(
-      compile_stats_gen_configs)
+    if benchmark_presets is not None:
+        match_predicate = lambda gen_config: any(
+            matcher(gen_config) for matcher in benchmark_presets
+        )
+        compile_stats_gen_configs = [
+            gen_config
+            for gen_config in compile_stats_gen_configs
+            if match_predicate(gen_config)
+        ]
 
-  return {
-      "module_dir_paths":
-          distinct_module_dir_paths,
-      "generation_configs":
-          serialization.serialize_and_pack(compile_stats_gen_configs)
-  }
+    distinct_module_dir_paths = _get_distinct_module_dir_paths(
+        compile_stats_gen_configs
+    )
+
+    return {
+        "module_dir_paths": distinct_module_dir_paths,
+        "generation_configs": serialization.serialize_and_pack(
+            compile_stats_gen_configs
+        ),
+    }
 
 
 def _parse_and_strip_list_argument(arg: str) -> List[str]:
-  return [part.strip() for part in arg.split(",") if part != ""]
+    return [part.strip() for part in arg.split(",") if part != ""]
 
 
 def _parse_benchmark_presets(
-    arg: str, matcher_map: Dict[str, PresetMatcher]) -> List[PresetMatcher]:
-  matchers = []
-  for preset in _parse_and_strip_list_argument(arg):
-    matcher = matcher_map.get(preset)
-    if matcher is None:
-      raise argparse.ArgumentTypeError(
-          f"Unrecognized benchmark preset: '{preset}'.")
-    matchers.append(matcher)
-  return matchers
+    arg: str, matcher_map: Dict[str, PresetMatcher]
+) -> List[PresetMatcher]:
+    matchers = []
+    for preset in _parse_and_strip_list_argument(arg):
+        matcher = matcher_map.get(preset)
+        if matcher is None:
+            raise argparse.ArgumentTypeError(
+                f"Unrecognized benchmark preset: '{preset}'."
+            )
+        matchers.append(matcher)
+    return matchers
 
 
 def _parse_arguments():
-  """Parses command-line options."""
+    """Parses command-line options."""
 
-  # Makes global options come *after* command.
-  # See https://stackoverflow.com/q/23296695
-  subparser_base = argparse.ArgumentParser(add_help=False)
-  subparser_base.add_argument("--output",
-                              type=pathlib.Path,
-                              help="Path to write the JSON output.")
+    # Makes global options come *after* command.
+    # See https://stackoverflow.com/q/23296695
+    subparser_base = argparse.ArgumentParser(add_help=False)
+    subparser_base.add_argument(
+        "--output", type=pathlib.Path, help="Path to write the JSON output."
+    )
 
-  parser = argparse.ArgumentParser(
-      formatter_class=argparse.RawDescriptionHelpFormatter,
-      description=textwrap.dedent("""
+    parser = argparse.ArgumentParser(
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        description=textwrap.dedent(
+            """
       Export type: "execution" outputs:
       [
         <target device name>: {
@@ -231,53 +240,70 @@
       }
       of generation configs defined for compilation statistics, to be used in
       build_tools/benchmarks/collect_compilation_statistics.py
-      """))
+      """
+        ),
+    )
 
-  subparser = parser.add_subparsers(required=True, title="export type")
-  execution_parser = subparser.add_parser(
-      "execution",
-      parents=[subparser_base],
-      help="Export execution config to run benchmarks.")
-  execution_parser.set_defaults(handler=_export_execution_handler)
-  execution_parser.add_argument(
-      "--target_device_names",
-      type=_parse_and_strip_list_argument,
-      help=("Target device names, separated by comma, not specified means "
-            "including all devices."))
-  execution_parser.add_argument(
-      "--benchmark_presets",
-      type=lambda arg: _parse_benchmark_presets(
-          arg, EXECUTION_BENCHMARK_PRESET_MATCHERS),
-      help=("Presets that select a bundle of benchmarks, separated by comma, "
+    subparser = parser.add_subparsers(required=True, title="export type")
+    execution_parser = subparser.add_parser(
+        "execution",
+        parents=[subparser_base],
+        help="Export execution config to run benchmarks.",
+    )
+    execution_parser.set_defaults(handler=_export_execution_handler)
+    execution_parser.add_argument(
+        "--target_device_names",
+        type=_parse_and_strip_list_argument,
+        help=(
+            "Target device names, separated by comma, not specified means "
+            "including all devices."
+        ),
+    )
+    execution_parser.add_argument(
+        "--benchmark_presets",
+        type=lambda arg: _parse_benchmark_presets(
+            arg, EXECUTION_BENCHMARK_PRESET_MATCHERS
+        ),
+        help=(
+            "Presets that select a bundle of benchmarks, separated by comma, "
             "multiple presets will be union. Available options: "
-            f"{','.join(EXECUTION_BENCHMARK_PRESET_MATCHERS.keys())}"))
+            f"{','.join(EXECUTION_BENCHMARK_PRESET_MATCHERS.keys())}"
+        ),
+    )
 
-  compilation_parser = subparser.add_parser(
-      "compilation",
-      parents=[subparser_base],
-      help=("Export serialized list of module generation configs defined for "
-            "compilation statistics."))
-  compilation_parser.set_defaults(handler=_export_compilation_handler)
-  compilation_parser.add_argument(
-      "--benchmark_presets",
-      type=lambda arg: _parse_benchmark_presets(
-          arg, COMPILATION_BENCHMARK_PRESET_MATCHERS),
-      help=("Presets `comp-stats*` that select a bundle of compilation"
+    compilation_parser = subparser.add_parser(
+        "compilation",
+        parents=[subparser_base],
+        help=(
+            "Export serialized list of module generation configs defined for "
+            "compilation statistics."
+        ),
+    )
+    compilation_parser.set_defaults(handler=_export_compilation_handler)
+    compilation_parser.add_argument(
+        "--benchmark_presets",
+        type=lambda arg: _parse_benchmark_presets(
+            arg, COMPILATION_BENCHMARK_PRESET_MATCHERS
+        ),
+        help=(
+            "Presets `comp-stats*` that select a bundle of compilation"
             " benchmarks, separated by comma, multiple presets will be union."
             " Available options: "
-            f"{','.join(COMPILATION_BENCHMARK_PRESET_MATCHERS.keys())}"))
+            f"{','.join(COMPILATION_BENCHMARK_PRESET_MATCHERS.keys())}"
+        ),
+    )
 
-  return parser.parse_args()
+    return parser.parse_args()
 
 
 def main(args: argparse.Namespace):
-  output_obj = args.handler(**vars(args))
-  json_data = json.dumps(output_obj, indent=2)
-  if args.output is None:
-    print(json_data)
-  else:
-    args.output.write_text(json_data)
+    output_obj = args.handler(**vars(args))
+    json_data = json.dumps(output_obj, indent=2)
+    if args.output is None:
+        print(json_data)
+    else:
+        args.output.write_text(json_data)
 
 
 if __name__ == "__main__":
-  main(_parse_arguments())
+    main(_parse_arguments())
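
One thing Black deliberately leaves alone, visible in the long `help=` arguments above, is string literals: it re-wraps the surrounding call as needed but does not merge, split, or reflow the strings themselves, so the manual implicit concatenation in those help texts carries over from the original source unchanged. A small illustrative snippet (the variable name and text are made up):

```python
# Black may re-indent and re-wrap the parentheses around this expression, but
# the two string pieces and their implicit concatenation are kept exactly as
# written; string literals are never merged, split, or re-wrapped.
HELP_TEXT = (
    "Presets that select a bundle of benchmarks, separated by comma; "
    "multiple presets are combined."
)
```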
diff --git a/build_tools/benchmarks/export_benchmark_config_test.py b/build_tools/benchmarks/export_benchmark_config_test.py
index 2ab342d..b186301 100644
--- a/build_tools/benchmarks/export_benchmark_config_test.py
+++ b/build_tools/benchmarks/export_benchmark_config_test.py
@@ -17,7 +17,8 @@
     source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
     source_url="",
     entry_function="predict",
-    input_types=["1xf32"])
+    input_types=["1xf32"],
+)
 COMMON_GEN_CONFIG = iree_definitions.ModuleGenerationConfig.build(
     imported_model=iree_definitions.ImportedModel.from_model(COMMON_MODEL),
     compile_config=iree_definitions.CompileConfig.build(
@@ -26,221 +27,273 @@
         compile_targets=[
             iree_definitions.CompileTarget(
                 target_backend=iree_definitions.TargetBackend.LLVM_CPU,
-                target_architecture=common_definitions.DeviceArchitecture.
-                RV64_GENERIC,
-                target_abi=iree_definitions.TargetABI.LINUX_GNU)
-        ]))
+                target_architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
+                target_abi=iree_definitions.TargetABI.LINUX_GNU,
+            )
+        ],
+    ),
+)
 COMMON_EXEC_CONFIG = iree_definitions.ModuleExecutionConfig.build(
     id="exec",
     tags=[],
     loader=iree_definitions.RuntimeLoader.EMBEDDED_ELF,
-    driver=iree_definitions.RuntimeDriver.LOCAL_SYNC)
+    driver=iree_definitions.RuntimeDriver.LOCAL_SYNC,
+)
 
 
 class ExportBenchmarkConfigTest(unittest.TestCase):
+    def test_filter_and_group_run_configs_set_all_filters(self):
+        device_spec_a = common_definitions.DeviceSpec.build(
+            id="dev_a_cpu",
+            device_name="dev_a_cpu",
+            architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
+            host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
+            tags=[],
+        )
+        device_spec_b = common_definitions.DeviceSpec.build(
+            id="dev_a_gpu",
+            device_name="dev_a_gpu",
+            architecture=common_definitions.DeviceArchitecture.ARM_VALHALL,
+            host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
+            tags=[],
+        )
+        device_spec_c = common_definitions.DeviceSpec.build(
+            id="dev_c",
+            device_name="dev_c",
+            architecture=common_definitions.DeviceArchitecture.CUDA_SM80,
+            host_environment=common_definitions.HostEnvironment.LINUX_X86_64,
+            tags=[],
+        )
+        matched_run_config_a = iree_definitions.E2EModelRunConfig.build(
+            module_generation_config=COMMON_GEN_CONFIG,
+            module_execution_config=COMMON_EXEC_CONFIG,
+            target_device_spec=device_spec_a,
+            input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
+            tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE,
+        )
+        unmatched_run_config_b = iree_definitions.E2EModelRunConfig.build(
+            module_generation_config=COMMON_GEN_CONFIG,
+            module_execution_config=COMMON_EXEC_CONFIG,
+            target_device_spec=device_spec_b,
+            input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
+            tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE,
+        )
+        matched_run_config_c = iree_definitions.E2EModelRunConfig.build(
+            module_generation_config=COMMON_GEN_CONFIG,
+            module_execution_config=COMMON_EXEC_CONFIG,
+            target_device_spec=device_spec_c,
+            input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
+            tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE,
+        )
+        matchers = [
+            (
+                lambda config: config.target_device_spec.architecture.architecture
+                == "cuda"
+            ),
+            (
+                lambda config: config.target_device_spec.host_environment.platform
+                == "android"
+            ),
+        ]
 
-  def test_filter_and_group_run_configs_set_all_filters(self):
-    device_spec_a = common_definitions.DeviceSpec.build(
-        id="dev_a_cpu",
-        device_name="dev_a_cpu",
-        architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
-        host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
-        tags=[])
-    device_spec_b = common_definitions.DeviceSpec.build(
-        id="dev_a_gpu",
-        device_name="dev_a_gpu",
-        architecture=common_definitions.DeviceArchitecture.ARM_VALHALL,
-        host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
-        tags=[])
-    device_spec_c = common_definitions.DeviceSpec.build(
-        id="dev_c",
-        device_name="dev_c",
-        architecture=common_definitions.DeviceArchitecture.CUDA_SM80,
-        host_environment=common_definitions.HostEnvironment.LINUX_X86_64,
-        tags=[])
-    matched_run_config_a = iree_definitions.E2EModelRunConfig.build(
-        module_generation_config=COMMON_GEN_CONFIG,
-        module_execution_config=COMMON_EXEC_CONFIG,
-        target_device_spec=device_spec_a,
-        input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
-        tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE)
-    unmatched_run_config_b = iree_definitions.E2EModelRunConfig.build(
-        module_generation_config=COMMON_GEN_CONFIG,
-        module_execution_config=COMMON_EXEC_CONFIG,
-        target_device_spec=device_spec_b,
-        input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
-        tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE)
-    matched_run_config_c = iree_definitions.E2EModelRunConfig.build(
-        module_generation_config=COMMON_GEN_CONFIG,
-        module_execution_config=COMMON_EXEC_CONFIG,
-        target_device_spec=device_spec_c,
-        input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
-        tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE)
-    matchers = [(lambda config: config.target_device_spec.architecture.
-                 architecture == "cuda"),
-                (lambda config: config.target_device_spec.host_environment.
-                 platform == "android")]
+        run_config_map = export_benchmark_config.filter_and_group_run_configs(
+            run_configs=[
+                matched_run_config_a,
+                unmatched_run_config_b,
+                matched_run_config_c,
+            ],
+            target_device_names={"dev_a_cpu", "dev_c"},
+            preset_matchers=matchers,
+        )
 
-    run_config_map = export_benchmark_config.filter_and_group_run_configs(
-        run_configs=[
-            matched_run_config_a, unmatched_run_config_b, matched_run_config_c
-        ],
-        target_device_names={"dev_a_cpu", "dev_c"},
-        preset_matchers=matchers)
+        self.assertEqual(
+            run_config_map,
+            {
+                "dev_a_cpu": [matched_run_config_a],
+                "dev_c": [matched_run_config_c],
+            },
+        )
 
-    self.assertEqual(run_config_map, {
-        "dev_a_cpu": [matched_run_config_a],
-        "dev_c": [matched_run_config_c],
-    })
+    def test_filter_and_group_run_configs_include_all(self):
+        device_spec_a = common_definitions.DeviceSpec.build(
+            id="dev_a_cpu",
+            device_name="dev_a_cpu",
+            architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
+            host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
+            tags=[],
+        )
+        device_spec_b = common_definitions.DeviceSpec.build(
+            id="dev_a_gpu",
+            device_name="dev_a_gpu",
+            architecture=common_definitions.DeviceArchitecture.ARM_VALHALL,
+            host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
+            tags=[],
+        )
+        device_spec_c = common_definitions.DeviceSpec.build(
+            id="dev_a_second_gpu",
+            device_name="dev_a_gpu",
+            architecture=common_definitions.DeviceArchitecture.QUALCOMM_ADRENO,
+            host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
+            tags=[],
+        )
+        run_config_a = iree_definitions.E2EModelRunConfig.build(
+            module_generation_config=COMMON_GEN_CONFIG,
+            module_execution_config=COMMON_EXEC_CONFIG,
+            target_device_spec=device_spec_a,
+            input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
+            tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE,
+        )
+        run_config_b = iree_definitions.E2EModelRunConfig.build(
+            module_generation_config=COMMON_GEN_CONFIG,
+            module_execution_config=COMMON_EXEC_CONFIG,
+            target_device_spec=device_spec_b,
+            input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
+            tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE,
+        )
+        run_config_c = iree_definitions.E2EModelRunConfig.build(
+            module_generation_config=COMMON_GEN_CONFIG,
+            module_execution_config=COMMON_EXEC_CONFIG,
+            target_device_spec=device_spec_c,
+            input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
+            tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE,
+        )
 
-  def test_filter_and_group_run_configs_include_all(self):
-    device_spec_a = common_definitions.DeviceSpec.build(
-        id="dev_a_cpu",
-        device_name="dev_a_cpu",
-        architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
-        host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
-        tags=[])
-    device_spec_b = common_definitions.DeviceSpec.build(
-        id="dev_a_gpu",
-        device_name="dev_a_gpu",
-        architecture=common_definitions.DeviceArchitecture.ARM_VALHALL,
-        host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
-        tags=[])
-    device_spec_c = common_definitions.DeviceSpec.build(
-        id="dev_a_second_gpu",
-        device_name="dev_a_gpu",
-        architecture=common_definitions.DeviceArchitecture.QUALCOMM_ADRENO,
-        host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
-        tags=[])
-    run_config_a = iree_definitions.E2EModelRunConfig.build(
-        module_generation_config=COMMON_GEN_CONFIG,
-        module_execution_config=COMMON_EXEC_CONFIG,
-        target_device_spec=device_spec_a,
-        input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
-        tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE)
-    run_config_b = iree_definitions.E2EModelRunConfig.build(
-        module_generation_config=COMMON_GEN_CONFIG,
-        module_execution_config=COMMON_EXEC_CONFIG,
-        target_device_spec=device_spec_b,
-        input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
-        tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE)
-    run_config_c = iree_definitions.E2EModelRunConfig.build(
-        module_generation_config=COMMON_GEN_CONFIG,
-        module_execution_config=COMMON_EXEC_CONFIG,
-        target_device_spec=device_spec_c,
-        input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
-        tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE)
+        run_config_map = export_benchmark_config.filter_and_group_run_configs(
+            run_configs=[run_config_a, run_config_b, run_config_c]
+        )
 
-    run_config_map = export_benchmark_config.filter_and_group_run_configs(
-        run_configs=[run_config_a, run_config_b, run_config_c])
+        self.maxDiff = 100000
 
-    self.maxDiff = 100000
+        self.assertEqual(
+            run_config_map,
+            {
+                "dev_a_cpu": [run_config_a],
+                "dev_a_gpu": [run_config_b, run_config_c],
+            },
+        )
 
-    self.assertEqual(run_config_map, {
-        "dev_a_cpu": [run_config_a],
-        "dev_a_gpu": [run_config_b, run_config_c],
-    })
+    def test_filter_and_group_run_configs_set_target_device_names(self):
+        device_spec_a = common_definitions.DeviceSpec.build(
+            id="dev_a",
+            device_name="dev_a",
+            architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
+            host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
+            tags=[],
+        )
+        device_spec_b = common_definitions.DeviceSpec.build(
+            id="dev_b",
+            device_name="dev_b",
+            architecture=common_definitions.DeviceArchitecture.ARM_VALHALL,
+            host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
+            tags=[],
+        )
+        run_config_a = iree_definitions.E2EModelRunConfig.build(
+            module_generation_config=COMMON_GEN_CONFIG,
+            module_execution_config=COMMON_EXEC_CONFIG,
+            target_device_spec=device_spec_a,
+            input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
+            tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE,
+        )
+        run_config_b = iree_definitions.E2EModelRunConfig.build(
+            module_generation_config=COMMON_GEN_CONFIG,
+            module_execution_config=COMMON_EXEC_CONFIG,
+            target_device_spec=device_spec_b,
+            input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
+            tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE,
+        )
 
-  def test_filter_and_group_run_configs_set_target_device_names(self):
-    device_spec_a = common_definitions.DeviceSpec.build(
-        id="dev_a",
-        device_name="dev_a",
-        architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
-        host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
-        tags=[])
-    device_spec_b = common_definitions.DeviceSpec.build(
-        id="dev_b",
-        device_name="dev_b",
-        architecture=common_definitions.DeviceArchitecture.ARM_VALHALL,
-        host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
-        tags=[])
-    run_config_a = iree_definitions.E2EModelRunConfig.build(
-        module_generation_config=COMMON_GEN_CONFIG,
-        module_execution_config=COMMON_EXEC_CONFIG,
-        target_device_spec=device_spec_a,
-        input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
-        tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE)
-    run_config_b = iree_definitions.E2EModelRunConfig.build(
-        module_generation_config=COMMON_GEN_CONFIG,
-        module_execution_config=COMMON_EXEC_CONFIG,
-        target_device_spec=device_spec_b,
-        input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
-        tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE)
+        run_config_map = export_benchmark_config.filter_and_group_run_configs(
+            run_configs=[run_config_a, run_config_b],
+            target_device_names={"dev_a", "dev_b"},
+        )
 
-    run_config_map = export_benchmark_config.filter_and_group_run_configs(
-        run_configs=[run_config_a, run_config_b],
-        target_device_names={"dev_a", "dev_b"})
+        self.assertEqual(
+            run_config_map,
+            {
+                "dev_a": [run_config_a],
+                "dev_b": [run_config_b],
+            },
+        )
 
-    self.assertEqual(run_config_map, {
-        "dev_a": [run_config_a],
-        "dev_b": [run_config_b],
-    })
+    def test_filter_and_group_run_configs_set_preset_matchers(self):
+        small_model = common_definitions.Model(
+            id="small_model",
+            name="small_model",
+            tags=[],
+            source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
+            source_url="",
+            entry_function="predict",
+            input_types=["1xf32"],
+        )
+        big_model = common_definitions.Model(
+            id="big_model",
+            name="big_model",
+            tags=[],
+            source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
+            source_url="",
+            entry_function="predict",
+            input_types=["1xf32"],
+        )
+        compile_target = iree_definitions.CompileTarget(
+            target_backend=iree_definitions.TargetBackend.LLVM_CPU,
+            target_architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
+            target_abi=iree_definitions.TargetABI.LINUX_GNU,
+        )
+        compile_config = iree_definitions.CompileConfig.build(
+            id="1", tags=[], compile_targets=[compile_target]
+        )
+        small_gen_config = iree_definitions.ModuleGenerationConfig.build(
+            imported_model=iree_definitions.ImportedModel.from_model(small_model),
+            compile_config=compile_config,
+        )
+        big_gen_config = iree_definitions.ModuleGenerationConfig.build(
+            imported_model=iree_definitions.ImportedModel.from_model(big_model),
+            compile_config=compile_config,
+        )
+        device_spec_a = common_definitions.DeviceSpec.build(
+            id="dev_a",
+            device_name="dev_a",
+            architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
+            host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
+            tags=[],
+        )
+        device_spec_b = common_definitions.DeviceSpec.build(
+            id="dev_b",
+            device_name="dev_b",
+            architecture=common_definitions.DeviceArchitecture.ARM_VALHALL,
+            host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
+            tags=[],
+        )
+        run_config_a = iree_definitions.E2EModelRunConfig.build(
+            module_generation_config=small_gen_config,
+            module_execution_config=COMMON_EXEC_CONFIG,
+            target_device_spec=device_spec_a,
+            input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
+            tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE,
+        )
+        run_config_b = iree_definitions.E2EModelRunConfig.build(
+            module_generation_config=big_gen_config,
+            module_execution_config=COMMON_EXEC_CONFIG,
+            target_device_spec=device_spec_b,
+            input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
+            tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE,
+        )
 
-  def test_filter_and_group_run_configs_set_preset_matchers(self):
-    small_model = common_definitions.Model(
-        id="small_model",
-        name="small_model",
-        tags=[],
-        source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
-        source_url="",
-        entry_function="predict",
-        input_types=["1xf32"])
-    big_model = common_definitions.Model(
-        id="big_model",
-        name="big_model",
-        tags=[],
-        source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
-        source_url="",
-        entry_function="predict",
-        input_types=["1xf32"])
-    compile_target = iree_definitions.CompileTarget(
-        target_backend=iree_definitions.TargetBackend.LLVM_CPU,
-        target_architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
-        target_abi=iree_definitions.TargetABI.LINUX_GNU)
-    compile_config = iree_definitions.CompileConfig.build(
-        id="1", tags=[], compile_targets=[compile_target])
-    small_gen_config = iree_definitions.ModuleGenerationConfig.build(
-        imported_model=iree_definitions.ImportedModel.from_model(small_model),
-        compile_config=compile_config)
-    big_gen_config = iree_definitions.ModuleGenerationConfig.build(
-        imported_model=iree_definitions.ImportedModel.from_model(big_model),
-        compile_config=compile_config)
-    device_spec_a = common_definitions.DeviceSpec.build(
-        id="dev_a",
-        device_name="dev_a",
-        architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
-        host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
-        tags=[])
-    device_spec_b = common_definitions.DeviceSpec.build(
-        id="dev_b",
-        device_name="dev_b",
-        architecture=common_definitions.DeviceArchitecture.ARM_VALHALL,
-        host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
-        tags=[])
-    run_config_a = iree_definitions.E2EModelRunConfig.build(
-        module_generation_config=small_gen_config,
-        module_execution_config=COMMON_EXEC_CONFIG,
-        target_device_spec=device_spec_a,
-        input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
-        tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE)
-    run_config_b = iree_definitions.E2EModelRunConfig.build(
-        module_generation_config=big_gen_config,
-        module_execution_config=COMMON_EXEC_CONFIG,
-        target_device_spec=device_spec_b,
-        input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
-        tool=iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE)
+        run_config_map = export_benchmark_config.filter_and_group_run_configs(
+            run_configs=[run_config_a, run_config_b],
+            preset_matchers=[
+                lambda config: config.module_generation_config.imported_model.model.id
+                == "small_model"
+            ],
+        )
 
-    run_config_map = export_benchmark_config.filter_and_group_run_configs(
-        run_configs=[run_config_a, run_config_b],
-        preset_matchers=[
-            lambda config: config.module_generation_config.imported_model.model.
-            id == "small_model"
-        ])
-
-    self.assertEqual(run_config_map, {
-        "dev_a": [run_config_a],
-    })
+        self.assertEqual(
+            run_config_map,
+            {
+                "dev_a": [run_config_a],
+            },
+        )
 
 
 if __name__ == "__main__":
-  unittest.main()
+    unittest.main()
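
The remaining churn is mostly mechanical: function and class bodies move from the old 2-space indentation to 4 spaces, and string quotes are normalized to double quotes, which is why `IREE_PROJECT_ID = 'IREE'` in the next file becomes `"IREE"`. A tiny before/after sketch (hypothetical code, not from the repo):

```python
# Before (old 2-space style): single-quoted strings and shallow indentation.
#
# def project_id():
#   return 'IREE'
#
# After Black: 4-space indentation and double quotes. Only the presentation
# changes; the behavior is identical.
def project_id():
    return "IREE"
```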
diff --git a/build_tools/benchmarks/generate_benchmark_comment.py b/build_tools/benchmarks/generate_benchmark_comment.py
index 4e2ab34..92f3263 100755
--- a/build_tools/benchmarks/generate_benchmark_comment.py
+++ b/build_tools/benchmarks/generate_benchmark_comment.py
@@ -28,7 +28,7 @@
 
 GITHUB_IREE_REPO_PREFIX = "https://github.com/openxla/iree"
 IREE_DASHBOARD_URL = "https://perf.iree.dev/apis/v2"
-IREE_PROJECT_ID = 'IREE'
+IREE_PROJECT_ID = "IREE"
 # The maximal numbers of trials when querying base commit benchmark results.
 MAX_BASE_COMMIT_QUERY_COUNT = 10
 # The max number of rows to show per table.
@@ -38,303 +38,340 @@
 
 @dataclasses.dataclass(frozen=True)
 class CommentDef(object):
-  title: str
-  type_id: str
+    title: str
+    type_id: str
 
 
 # Map from comment type to comment definition.
 COMMENT_DEF_MAP = {
-    "android-benchmark-summary":
-        CommentDef(title="Abbreviated Android Benchmark Summary",
-                   type_id="bf8cdf94-a992-466d-b11c-778cbd805a22"),
-    "linux-benchmark-summary":
-        CommentDef(title="Abbreviated Linux Benchmark Summary",
-                   type_id="37549014-3c67-4e74-8d88-8e929231abe3"),
-    "benchmark-summary":
-        CommentDef(title="Abbreviated Benchmark Summary",
-                   type_id="5b42cbfe-26a0-4164-a51c-07f06762e2dc")
+    "android-benchmark-summary": CommentDef(
+        title="Abbreviated Android Benchmark Summary",
+        type_id="bf8cdf94-a992-466d-b11c-778cbd805a22",
+    ),
+    "linux-benchmark-summary": CommentDef(
+        title="Abbreviated Linux Benchmark Summary",
+        type_id="37549014-3c67-4e74-8d88-8e929231abe3",
+    ),
+    "benchmark-summary": CommentDef(
+        title="Abbreviated Benchmark Summary",
+        type_id="5b42cbfe-26a0-4164-a51c-07f06762e2dc",
+    ),
 }
 
 
 def get_git_total_commit_count(commit: str, verbose: bool = False) -> int:
-  """Gets the total commit count in history ending with the given commit."""
-  # TODO(#11703): Should use --first-parent here. See issue for the required
-  # work.
-  count = benchmark_definition.execute_cmd_and_get_stdout(
-      ['git', 'rev-list', '--count', commit],
-      cwd=THIS_DIRECTORY,
-      verbose=verbose)
-  return int(count)
+    """Gets the total commit count in history ending with the given commit."""
+    # TODO(#11703): Should use --first-parent here. See issue for the required
+    # work.
+    count = benchmark_definition.execute_cmd_and_get_stdout(
+        ["git", "rev-list", "--count", commit], cwd=THIS_DIRECTORY, verbose=verbose
+    )
+    return int(count)
 
 
-def get_from_dashboard(url: str,
-                       payload: Dict[str, Any],
-                       verbose: bool = False) -> Dict[str, Dict[str, Any]]:
-  headers = {'Content-type': 'application/json'}
-  data = json.dumps(payload)
+def get_from_dashboard(
+    url: str, payload: Dict[str, Any], verbose: bool = False
+) -> Dict[str, Dict[str, Any]]:
+    headers = {"Content-type": "application/json"}
+    data = json.dumps(payload)
 
-  if verbose:
-    print(f'API request payload: {data}')
+    if verbose:
+        print(f"API request payload: {data}")
 
-  response = requests.get(url, data=data, headers=headers)
-  code = response.status_code
-  if code != 200:
-    raise requests.RequestException(
-        f'Failed to get from dashboard server with status code {code}')
+    response = requests.get(url, data=data, headers=headers)
+    code = response.status_code
+    if code != 200:
+        raise requests.RequestException(
+            f"Failed to get from dashboard server with status code {code}"
+        )
 
-  data = response.json()
-  if verbose:
-    print(f'Queried base benchmark data: {data}')
-  return data
+    data = response.json()
+    if verbose:
+        print(f"Queried base benchmark data: {data}")
+    return data
 
 
 BenchmarkQueryResults = Dict[str, Dict[str, Any]]
 
 
 def query_base_benchmark_results(
-    commit: str, verbose: bool = False) -> BenchmarkQueryResults:
-  """Queries the benchmark results for the given commit."""
-  build_id = get_git_total_commit_count(commit, verbose)
-  payload = {'projectId': IREE_PROJECT_ID, 'buildId': build_id}
-  return get_from_dashboard(f'{IREE_DASHBOARD_URL}/getBuild',
-                            payload,
-                            verbose=verbose)
+    commit: str, verbose: bool = False
+) -> BenchmarkQueryResults:
+    """Queries the benchmark results for the given commit."""
+    build_id = get_git_total_commit_count(commit, verbose)
+    payload = {"projectId": IREE_PROJECT_ID, "buildId": build_id}
+    return get_from_dashboard(
+        f"{IREE_DASHBOARD_URL}/getBuild", payload, verbose=verbose
+    )
 
 
 @dataclasses.dataclass(frozen=True)
 class ComparableBenchmarkResults(object):
-  commit_sha: str
-  benchmark_results: BenchmarkQueryResults
+    commit_sha: str
+    benchmark_results: BenchmarkQueryResults
 
 
 def _find_comparable_benchmark_results(
-    start_commit: str,
-    required_benchmark_keys: Set[str],
-    verbose: bool = False) -> Optional[ComparableBenchmarkResults]:
-  cmds = [
-      "git", "rev-list", "--first-parent",
-      f"--max-count={MAX_BASE_COMMIT_QUERY_COUNT}", start_commit
-  ]
-  output = benchmark_definition.execute_cmd_and_get_stdout(cmds,
-                                                           cwd=THIS_DIRECTORY,
-                                                           verbose=verbose)
-  previous_commits = output.splitlines()
-  # Try to query some base benchmark to diff against, from the top of the
-  # tree. Bail out if the maximal trial number is exceeded.
-  for base_commit in previous_commits:
-    base_benchmarks = query_base_benchmark_results(commit=base_commit,
-                                                   verbose=verbose)
-    base_benchmark_keys = set(base_benchmarks.keys())
-    if required_benchmark_keys <= base_benchmark_keys:
-      return ComparableBenchmarkResults(commit_sha=base_commit,
-                                        benchmark_results=base_benchmarks)
+    start_commit: str, required_benchmark_keys: Set[str], verbose: bool = False
+) -> Optional[ComparableBenchmarkResults]:
+    cmds = [
+        "git",
+        "rev-list",
+        "--first-parent",
+        f"--max-count={MAX_BASE_COMMIT_QUERY_COUNT}",
+        start_commit,
+    ]
+    output = benchmark_definition.execute_cmd_and_get_stdout(
+        cmds, cwd=THIS_DIRECTORY, verbose=verbose
+    )
+    previous_commits = output.splitlines()
+    # Try to query some base benchmark to diff against, from the top of the
+    # tree. Bail out if the maximal trial number is exceeded.
+    for base_commit in previous_commits:
+        base_benchmarks = query_base_benchmark_results(
+            commit=base_commit, verbose=verbose
+        )
+        base_benchmark_keys = set(base_benchmarks.keys())
+        if required_benchmark_keys <= base_benchmark_keys:
+            return ComparableBenchmarkResults(
+                commit_sha=base_commit, benchmark_results=base_benchmarks
+            )
 
-  return None
+    return None
 
 
 def _get_git_commit_hash(ref: str, verbose: bool = False) -> str:
-  """Gets the commit hash for the given commit."""
-  return benchmark_definition.execute_cmd_and_get_stdout(
-      ['git', 'rev-parse', ref], cwd=THIS_DIRECTORY, verbose=verbose)
+    """Gets the commit hash for the given commit."""
+    return benchmark_definition.execute_cmd_and_get_stdout(
+        ["git", "rev-parse", ref], cwd=THIS_DIRECTORY, verbose=verbose
+    )
 
 
-def _get_git_merge_base_commit(pr_commit: str,
-                               target_branch: str,
-                               verbose: bool = False) -> str:
-  return benchmark_definition.execute_cmd_and_get_stdout(
-      args=["git", "merge-base", target_branch, pr_commit],
-      cwd=THIS_DIRECTORY,
-      verbose=verbose)
+def _get_git_merge_base_commit(
+    pr_commit: str, target_branch: str, verbose: bool = False
+) -> str:
+    return benchmark_definition.execute_cmd_and_get_stdout(
+        args=["git", "merge-base", target_branch, pr_commit],
+        cwd=THIS_DIRECTORY,
+        verbose=verbose,
+    )
 
 
 def _get_benchmark_result_markdown(
-    execution_benchmarks: Dict[
-        str, benchmark_presentation.AggregateBenchmarkLatency],
+    execution_benchmarks: Dict[str, benchmark_presentation.AggregateBenchmarkLatency],
     compilation_metrics: Dict[str, benchmark_presentation.CompilationMetrics],
-    pr_url: str, build_url: str, comment_def: CommentDef,
-    commit_info_md: str) -> Tuple[str, str]:
-  """Gets the full/abbreviated markdown summary of all benchmarks in files."""
+    pr_url: str,
+    build_url: str,
+    comment_def: CommentDef,
+    commit_info_md: str,
+) -> Tuple[str, str]:
+    """Gets the full/abbreviated markdown summary of all benchmarks in files."""
 
-  pr_info = md.link("Pull request", pr_url)
-  build_info = md.link("Build", build_url)
+    pr_info = md.link("Pull request", pr_url)
+    build_info = md.link("Build", build_url)
 
-  # Compose the full benchmark tables.
-  full_table = [md.header("Full Benchmark Summary", 2)]
-  full_table.append(md.unordered_list([commit_info_md, pr_info, build_info]))
+    # Compose the full benchmark tables.
+    full_table = [md.header("Full Benchmark Summary", 2)]
+    full_table.append(md.unordered_list([commit_info_md, pr_info, build_info]))
 
-  # Compose the abbreviated benchmark tables.
-  abbr_table = [md.header(comment_def.title, 2)]
-  abbr_table.append(commit_info_md)
+    # Compose the abbreviated benchmark tables.
+    abbr_table = [md.header(comment_def.title, 2)]
+    abbr_table.append(commit_info_md)
 
-  if len(execution_benchmarks) > 0:
-    full_table.append(
-        benchmark_presentation.categorize_benchmarks_into_tables(
-            execution_benchmarks))
+    if len(execution_benchmarks) > 0:
+        full_table.append(
+            benchmark_presentation.categorize_benchmarks_into_tables(
+                execution_benchmarks
+            )
+        )
 
-    abbr_benchmarks_tables = benchmark_presentation.categorize_benchmarks_into_tables(
-        execution_benchmarks, TABLE_SIZE_CUT)
-    if len(abbr_benchmarks_tables) == 0:
-      abbr_table.append("No improved or regressed benchmarks 🏖️")
-    else:
-      abbr_table.append(abbr_benchmarks_tables)
+        abbr_benchmarks_tables = (
+            benchmark_presentation.categorize_benchmarks_into_tables(
+                execution_benchmarks, TABLE_SIZE_CUT
+            )
+        )
+        if len(abbr_benchmarks_tables) == 0:
+            abbr_table.append("No improved or regressed benchmarks 🏖️")
+        else:
+            abbr_table.append(abbr_benchmarks_tables)
 
-  # Compose the full compilation metrics tables.
-  if len(compilation_metrics) > 0:
-    full_table.append(
-        benchmark_presentation.categorize_compilation_metrics_into_tables(
-            compilation_metrics))
+    # Compose the full compilation metrics tables.
+    if len(compilation_metrics) > 0:
+        full_table.append(
+            benchmark_presentation.categorize_compilation_metrics_into_tables(
+                compilation_metrics
+            )
+        )
 
-    abbr_compilation_metrics_tables = benchmark_presentation.categorize_compilation_metrics_into_tables(
-        compilation_metrics, TABLE_SIZE_CUT)
-    if len(abbr_compilation_metrics_tables) == 0:
-      abbr_table.append("No improved or regressed compilation metrics 🏖️")
-    else:
-      abbr_table.append(abbr_compilation_metrics_tables)
+        abbr_compilation_metrics_tables = (
+            benchmark_presentation.categorize_compilation_metrics_into_tables(
+                compilation_metrics, TABLE_SIZE_CUT
+            )
+        )
+        if len(abbr_compilation_metrics_tables) == 0:
+            abbr_table.append("No improved or regressed compilation metrics 🏖️")
+        else:
+            abbr_table.append(abbr_compilation_metrics_tables)
 
-  abbr_table.append("For more information:")
-  # We don't know until a Gist is really created. Use a placeholder for now and
-  # replace later.
-  full_result_info = md.link("Full benchmark result tables",
-                             benchmark_comment.GIST_LINK_PLACEHORDER)
-  abbr_table.append(md.unordered_list([full_result_info, build_info]))
+    abbr_table.append("For more information:")
+    # We don't know until a Gist is really created. Use a placeholder for now and
+    # replace later.
+    full_result_info = md.link(
+        "Full benchmark result tables", benchmark_comment.GIST_LINK_PLACEHORDER
+    )
+    abbr_table.append(md.unordered_list([full_result_info, build_info]))
 
-  # Append the unique comment type id to help identify and update the existing
-  # comment.
-  abbr_table.append(f"<!--Comment type id: {comment_def.type_id}-->")
+    # Append the unique comment type id to help identify and update the existing
+    # comment.
+    abbr_table.append(f"<!--Comment type id: {comment_def.type_id}-->")
 
-  return "\n\n".join(full_table), "\n\n".join(abbr_table)
+    return "\n\n".join(full_table), "\n\n".join(abbr_table)
 
 
 def parse_arguments():
-  """Parses command-line options."""
+    """Parses command-line options."""
 
-  parser = argparse.ArgumentParser()
-  parser.add_argument(
-      "--benchmark_files",
-      metavar="<benchmark-json-files>",
-      default=[],
-      action="append",
-      help=("Paths to the JSON files containing benchmark results, "
-            "accepts wildcards"))
-  parser.add_argument(
-      "--compile_stats_files",
-      metavar="<compile-stats-json-files>",
-      default=[],
-      action="append",
-      help=("Paths to the JSON files containing compilation statistics, "
-            "accepts wildcards"))
-  parser.add_argument("--pr_number", required=True, type=int, help="PR number")
-  parser.add_argument("--pr_committish",
-                      type=str,
-                      default="HEAD",
-                      help="PR commit hash or ref")
-  parser.add_argument("--pr_base_branch",
-                      type=str,
-                      default=None,
-                      help="Base branch to merge the PR.")
-  parser.add_argument("--comment_type",
-                      required=True,
-                      choices=COMMENT_DEF_MAP.keys(),
-                      help="Type of summary comment")
-  parser.add_argument("--build_url",
-                      required=True,
-                      type=str,
-                      help="CI build page url to show in the report")
-  parser.add_argument("--output", type=pathlib.Path, default=None)
-  parser.add_argument("--verbose",
-                      action="store_true",
-                      help="Print internal information during execution")
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--benchmark_files",
+        metavar="<benchmark-json-files>",
+        default=[],
+        action="append",
+        help=(
+            "Paths to the JSON files containing benchmark results, " "accepts wildcards"
+        ),
+    )
+    parser.add_argument(
+        "--compile_stats_files",
+        metavar="<compile-stats-json-files>",
+        default=[],
+        action="append",
+        help=(
+            "Paths to the JSON files containing compilation statistics, "
+            "accepts wildcards"
+        ),
+    )
+    parser.add_argument("--pr_number", required=True, type=int, help="PR number")
+    parser.add_argument(
+        "--pr_committish", type=str, default="HEAD", help="PR commit hash or ref"
+    )
+    parser.add_argument(
+        "--pr_base_branch", type=str, default=None, help="Base branch to merge the PR."
+    )
+    parser.add_argument(
+        "--comment_type",
+        required=True,
+        choices=COMMENT_DEF_MAP.keys(),
+        help="Type of summary comment",
+    )
+    parser.add_argument(
+        "--build_url",
+        required=True,
+        type=str,
+        help="CI build page url to show in the report",
+    )
+    parser.add_argument("--output", type=pathlib.Path, default=None)
+    parser.add_argument(
+        "--verbose",
+        action="store_true",
+        help="Print internal information during execution",
+    )
 
-  return parser.parse_args()
+    return parser.parse_args()
 
 
 def main(args):
-  benchmark_files = common_arguments.expand_and_check_file_paths(
-      args.benchmark_files)
-  compile_stats_files = common_arguments.expand_and_check_file_paths(
-      args.compile_stats_files)
+    benchmark_files = common_arguments.expand_and_check_file_paths(args.benchmark_files)
+    compile_stats_files = common_arguments.expand_and_check_file_paths(
+        args.compile_stats_files
+    )
 
-  pr_commit = _get_git_commit_hash(ref=args.pr_committish, verbose=args.verbose)
-  execution_benchmarks = benchmark_presentation.aggregate_all_benchmarks(
-      benchmark_files=benchmark_files, expected_pr_commit=pr_commit)
-  compilation_metrics = benchmark_presentation.collect_all_compilation_metrics(
-      compile_stats_files=compile_stats_files, expected_pr_commit=pr_commit)
+    pr_commit = _get_git_commit_hash(ref=args.pr_committish, verbose=args.verbose)
+    execution_benchmarks = benchmark_presentation.aggregate_all_benchmarks(
+        benchmark_files=benchmark_files, expected_pr_commit=pr_commit
+    )
+    compilation_metrics = benchmark_presentation.collect_all_compilation_metrics(
+        compile_stats_files=compile_stats_files, expected_pr_commit=pr_commit
+    )
 
-  if args.pr_base_branch is None:
-    pr_base_commit = None
-  else:
-    pr_base_commit = _get_git_merge_base_commit(
-        pr_commit=pr_commit,
-        target_branch=args.pr_base_branch,
-        verbose=args.verbose)
+    if args.pr_base_branch is None:
+        pr_base_commit = None
+    else:
+        pr_base_commit = _get_git_merge_base_commit(
+            pr_commit=pr_commit, target_branch=args.pr_base_branch, verbose=args.verbose
+        )
 
-  if pr_base_commit is None:
-    comparable_results = None
-  else:
-    required_benchmark_keys = set(execution_benchmarks.keys())
-    for target_id in compilation_metrics:
-      for mapper in benchmark_presentation.COMPILATION_METRICS_TO_TABLE_MAPPERS:
-        required_benchmark_keys.add(mapper.get_series_id(target_id))
+    if pr_base_commit is None:
+        comparable_results = None
+    else:
+        required_benchmark_keys = set(execution_benchmarks.keys())
+        for target_id in compilation_metrics:
+            for mapper in benchmark_presentation.COMPILATION_METRICS_TO_TABLE_MAPPERS:
+                required_benchmark_keys.add(mapper.get_series_id(target_id))
 
-    comparable_results = _find_comparable_benchmark_results(
-        start_commit=pr_base_commit,
-        required_benchmark_keys=required_benchmark_keys,
-        verbose=args.verbose)
+        comparable_results = _find_comparable_benchmark_results(
+            start_commit=pr_base_commit,
+            required_benchmark_keys=required_benchmark_keys,
+            verbose=args.verbose,
+        )
 
-  if comparable_results is None:
-    comparable_commit = None
-  else:
-    comparable_commit = comparable_results.commit_sha
-    # Update the execution benchmarks with base numbers.
-    for bench in execution_benchmarks:
-      base_benchmark = comparable_results.benchmark_results[bench]
-      if base_benchmark["sampleUnit"] != "ns":
-        raise ValueError("Only support nanoseconds for latency sample.")
-      execution_benchmarks[bench].base_mean_time = base_benchmark["sample"]
+    if comparable_results is None:
+        comparable_commit = None
+    else:
+        comparable_commit = comparable_results.commit_sha
+        # Update the execution benchmarks with base numbers.
+        for bench in execution_benchmarks:
+            base_benchmark = comparable_results.benchmark_results[bench]
+            if base_benchmark["sampleUnit"] != "ns":
+                raise ValueError("Only support nanoseconds for latency sample.")
+            execution_benchmarks[bench].base_mean_time = base_benchmark["sample"]
 
-    # Update the compilation metrics with base numbers.
-    for target_id, metrics in compilation_metrics.items():
-      updated_metrics = metrics
-      for mapper in benchmark_presentation.COMPILATION_METRICS_TO_TABLE_MAPPERS:
-        base_benchmark = comparable_results.benchmark_results[
-            mapper.get_series_id(target_id)]
-        if base_benchmark["sampleUnit"] != mapper.get_unit():
-          raise ValueError("Unit of the queried sample is mismatched.")
-        updated_metrics = mapper.update_base_value(updated_metrics,
-                                                   base_benchmark["sample"])
-      compilation_metrics[target_id] = updated_metrics
+        # Update the compilation metrics with base numbers.
+        for target_id, metrics in compilation_metrics.items():
+            updated_metrics = metrics
+            for mapper in benchmark_presentation.COMPILATION_METRICS_TO_TABLE_MAPPERS:
+                base_benchmark = comparable_results.benchmark_results[
+                    mapper.get_series_id(target_id)
+                ]
+                if base_benchmark["sampleUnit"] != mapper.get_unit():
+                    raise ValueError("Unit of the queried sample is mismatched.")
+                updated_metrics = mapper.update_base_value(
+                    updated_metrics, base_benchmark["sample"]
+                )
+            compilation_metrics[target_id] = updated_metrics
 
-  pr_commit_link = md.link(pr_commit,
-                           f"{GITHUB_IREE_REPO_PREFIX}/commit/{pr_commit}")
-  commit_info_md = f"@ commit {pr_commit_link}"
-  if comparable_commit is not None:
-    baseline_commit_link = md.link(
-        comparable_commit,
-        f"{GITHUB_IREE_REPO_PREFIX}/commit/{comparable_commit}")
-    commit_info_md += f" (vs. base {baseline_commit_link})"
-  elif pr_base_commit is not None:
-    commit_info_md += " (no previous benchmark results to compare)"
+    pr_commit_link = md.link(pr_commit, f"{GITHUB_IREE_REPO_PREFIX}/commit/{pr_commit}")
+    commit_info_md = f"@ commit {pr_commit_link}"
+    if comparable_commit is not None:
+        baseline_commit_link = md.link(
+            comparable_commit, f"{GITHUB_IREE_REPO_PREFIX}/commit/{comparable_commit}"
+        )
+        commit_info_md += f" (vs. base {baseline_commit_link})"
+    elif pr_base_commit is not None:
+        commit_info_md += " (no previous benchmark results to compare)"
 
-  comment_def = COMMENT_DEF_MAP[args.comment_type]
-  full_md, abbr_md = _get_benchmark_result_markdown(
-      execution_benchmarks=execution_benchmarks,
-      compilation_metrics=compilation_metrics,
-      pr_url=f"{GITHUB_IREE_REPO_PREFIX}/pull/{args.pr_number}",
-      build_url=args.build_url,
-      comment_def=comment_def,
-      commit_info_md=commit_info_md)
+    comment_def = COMMENT_DEF_MAP[args.comment_type]
+    full_md, abbr_md = _get_benchmark_result_markdown(
+        execution_benchmarks=execution_benchmarks,
+        compilation_metrics=compilation_metrics,
+        pr_url=f"{GITHUB_IREE_REPO_PREFIX}/pull/{args.pr_number}",
+        build_url=args.build_url,
+        comment_def=comment_def,
+        commit_info_md=commit_info_md,
+    )
 
-  comment_data = benchmark_comment.CommentData(
-      type_id=comment_def.type_id,
-      abbr_md=abbr_md,
-      full_md=full_md,
-      unverified_pr_number=args.pr_number)
-  comment_json_data = json.dumps(dataclasses.asdict(comment_data), indent=2)
-  if args.output is None:
-    print(comment_json_data)
-  else:
-    args.output.write_text(comment_json_data)
+    comment_data = benchmark_comment.CommentData(
+        type_id=comment_def.type_id,
+        abbr_md=abbr_md,
+        full_md=full_md,
+        unverified_pr_number=args.pr_number,
+    )
+    comment_json_data = json.dumps(dataclasses.asdict(comment_data), indent=2)
+    if args.output is None:
+        print(comment_json_data)
+    else:
+        args.output.write_text(comment_json_data)
 
 
 if __name__ == "__main__":
-  main(parse_arguments())
+    main(parse_arguments())
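
Two other Black behaviors are visible throughout generate_benchmark_comment.py above: single-quoted strings are normalized to double quotes, and implicitly concatenated string literals are reflowed but never merged into one literal. A small illustrative sketch (the constant names here are made up):

```python
# Quote normalization: single quotes become double quotes.
DASHBOARD_PROJECT = "IREE"

# Implicit concatenation is preserved: the two fragments below remain separate
# literals; Black only reflows the parentheses around them.
WILDCARD_HELP = (
    "Paths to the JSON files containing benchmark results, " "accepts wildcards"
)
```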
diff --git a/build_tools/benchmarks/post_benchmark_comment.py b/build_tools/benchmarks/post_benchmark_comment.py
index 9322d85..8cd98da 100755
--- a/build_tools/benchmarks/post_benchmark_comment.py
+++ b/build_tools/benchmarks/post_benchmark_comment.py
@@ -39,221 +39,227 @@
 
 
 class APIRequester(object):
-  """REST API client that injects proper GitHub authentication headers."""
+    """REST API client that injects proper GitHub authentication headers."""
 
-  def __init__(self, github_token: str):
-    self._api_headers = {
-        "Accept": "application/vnd.github+json",
-        "Authorization": f"token {github_token}",
-        "X-GitHub-Api-Version": GITHUB_API_VERSION,
-    }
-    self._session = requests.session()
+    def __init__(self, github_token: str):
+        self._api_headers = {
+            "Accept": "application/vnd.github+json",
+            "Authorization": f"token {github_token}",
+            "X-GitHub-Api-Version": GITHUB_API_VERSION,
+        }
+        self._session = requests.session()
 
-  def get(self, endpoint: str, payload: Any = {}) -> requests.Response:
-    return self._session.get(endpoint,
-                             data=json.dumps(payload),
-                             headers=self._api_headers)
+    def get(self, endpoint: str, payload: Any = {}) -> requests.Response:
+        return self._session.get(
+            endpoint, data=json.dumps(payload), headers=self._api_headers
+        )
 
-  def post(self, endpoint: str, payload: Any = {}) -> requests.Response:
-    return self._session.post(endpoint,
-                              data=json.dumps(payload),
-                              headers=self._api_headers)
+    def post(self, endpoint: str, payload: Any = {}) -> requests.Response:
+        return self._session.post(
+            endpoint, data=json.dumps(payload), headers=self._api_headers
+        )
 
-  def patch(self, endpoint: str, payload: Any = {}) -> requests.Response:
-    return self._session.patch(endpoint,
-                               data=json.dumps(payload),
-                               headers=self._api_headers)
+    def patch(self, endpoint: str, payload: Any = {}) -> requests.Response:
+        return self._session.patch(
+            endpoint, data=json.dumps(payload), headers=self._api_headers
+        )
 
 
 class GithubClient(object):
-  """Helper to call Github REST APIs."""
+    """Helper to call Github REST APIs."""
 
-  def __init__(self, requester: APIRequester):
-    self._requester = requester
+    def __init__(self, requester: APIRequester):
+        self._requester = requester
 
-  def post_to_gist(self,
-                   filename: str,
-                   content: str,
-                   verbose: bool = False) -> str:
-    """Posts the given content to a new GitHub Gist and returns the URL to it."""
+    def post_to_gist(self, filename: str, content: str, verbose: bool = False) -> str:
+        """Posts the given content to a new GitHub Gist and returns the URL to it."""
 
-    response = self._requester.post(endpoint=GITHUB_GIST_API,
-                                    payload={
-                                        "public": True,
-                                        "files": {
-                                            filename: {
-                                                "content": content
-                                            }
-                                        }
-                                    })
-    if response.status_code != http.client.CREATED:
-      raise RuntimeError(
-          f"Failed to create on gist; error code: {response.status_code} - {response.text}"
-      )
-
-    response = response.json()
-    if verbose:
-      print(f"Gist posting response: {response}")
-
-    if response["truncated"]:
-      raise RuntimeError(f"Content is too large and was truncated")
-
-    return response["html_url"]
-
-  def get_previous_comment_on_pr(self,
-                                 pr_number: int,
-                                 comment_bot_user: str,
-                                 comment_type_id: str,
-                                 query_comment_per_page: int = 100,
-                                 max_pages_to_search: int = 10,
-                                 verbose: bool = False) -> Optional[int]:
-    """Gets the previous comment's id from GitHub."""
-
-    for page in range(1, max_pages_to_search + 1):
-      response = self._requester.get(
-          endpoint=f"{GITHUB_IREE_API_PREFIX}/issues/{pr_number}/comments",
-          payload={
-              "per_page": query_comment_per_page,
-              "page": page,
-              "sort": "updated",
-              "direction": "desc"
-          })
-      if response.status_code != http.client.OK:
-        raise RuntimeError(
-            f"Failed to get PR comments from GitHub; error code: {response.status_code} - {response.text}"
+        response = self._requester.post(
+            endpoint=GITHUB_GIST_API,
+            payload={"public": True, "files": {filename: {"content": content}}},
         )
+        if response.status_code != http.client.CREATED:
+            raise RuntimeError(
+                f"Failed to create on gist; error code: {response.status_code} - {response.text}"
+            )
 
-      comments = response.json()
-      if verbose:
-        print(f"Previous comment query response on page {page}: {comments}")
+        response = response.json()
+        if verbose:
+            print(f"Gist posting response: {response}")
 
-      # Find the most recently updated comment that matches.
-      for comment in comments:
-        if (comment["user"]["login"] == comment_bot_user and
-            comment_type_id in comment["body"]):
-          return comment["id"]
+        if response["truncated"]:
+            raise RuntimeError(f"Content is too large and was truncated")
 
-      if len(comments) < query_comment_per_page:
-        break
+        return response["html_url"]
 
-    return None
+    def get_previous_comment_on_pr(
+        self,
+        pr_number: int,
+        comment_bot_user: str,
+        comment_type_id: str,
+        query_comment_per_page: int = 100,
+        max_pages_to_search: int = 10,
+        verbose: bool = False,
+    ) -> Optional[int]:
+        """Gets the previous comment's id from GitHub."""
 
-  def update_comment_on_pr(self, comment_id: int, content: str):
-    """Updates the content of the given comment id."""
+        for page in range(1, max_pages_to_search + 1):
+            response = self._requester.get(
+                endpoint=f"{GITHUB_IREE_API_PREFIX}/issues/{pr_number}/comments",
+                payload={
+                    "per_page": query_comment_per_page,
+                    "page": page,
+                    "sort": "updated",
+                    "direction": "desc",
+                },
+            )
+            if response.status_code != http.client.OK:
+                raise RuntimeError(
+                    f"Failed to get PR comments from GitHub; error code: {response.status_code} - {response.text}"
+                )
 
-    response = self._requester.patch(
-        endpoint=f"{GITHUB_IREE_API_PREFIX}/issues/comments/{comment_id}",
-        payload={"body": content})
-    if response.status_code != http.client.OK:
-      raise RuntimeError(
-          f"Failed to comment on GitHub; error code: {response.status_code} - {response.text}"
-      )
+            comments = response.json()
+            if verbose:
+                print(f"Previous comment query response on page {page}: {comments}")
 
-  def create_comment_on_pr(self, pr_number: int, content: str):
-    """Posts the given content as comments to the current pull request."""
+            # Find the most recently updated comment that matches.
+            for comment in comments:
+                if (
+                    comment["user"]["login"] == comment_bot_user
+                    and comment_type_id in comment["body"]
+                ):
+                    return comment["id"]
 
-    response = self._requester.post(
-        endpoint=f"{GITHUB_IREE_API_PREFIX}/issues/{pr_number}/comments",
-        payload={"body": content})
-    if response.status_code != http.client.CREATED:
-      raise RuntimeError(
-          f"Failed to comment on GitHub; error code: {response.status_code} - {response.text}"
-      )
+            if len(comments) < query_comment_per_page:
+                break
 
-  def get_pull_request_head_commit(self, pr_number: int) -> str:
-    """Get pull request head commit SHA."""
+        return None
 
-    response = self._requester.get(
-        endpoint=f"{GITHUB_IREE_API_PREFIX}/pulls/{pr_number}")
-    if response.status_code != http.client.OK:
-      raise RuntimeError(
-          f"Failed to fetch the pull request: {pr_number}; "
-          f"error code: {response.status_code} - {response.text}")
+    def update_comment_on_pr(self, comment_id: int, content: str):
+        """Updates the content of the given comment id."""
 
-    return response.json()["head"]["sha"]
+        response = self._requester.patch(
+            endpoint=f"{GITHUB_IREE_API_PREFIX}/issues/comments/{comment_id}",
+            payload={"body": content},
+        )
+        if response.status_code != http.client.OK:
+            raise RuntimeError(
+                f"Failed to comment on GitHub; error code: {response.status_code} - {response.text}"
+            )
+
+    def create_comment_on_pr(self, pr_number: int, content: str):
+        """Posts the given content as comments to the current pull request."""
+
+        response = self._requester.post(
+            endpoint=f"{GITHUB_IREE_API_PREFIX}/issues/{pr_number}/comments",
+            payload={"body": content},
+        )
+        if response.status_code != http.client.CREATED:
+            raise RuntimeError(
+                f"Failed to comment on GitHub; error code: {response.status_code} - {response.text}"
+            )
+
+    def get_pull_request_head_commit(self, pr_number: int) -> str:
+        """Get pull request head commit SHA."""
+
+        response = self._requester.get(
+            endpoint=f"{GITHUB_IREE_API_PREFIX}/pulls/{pr_number}"
+        )
+        if response.status_code != http.client.OK:
+            raise RuntimeError(
+                f"Failed to fetch the pull request: {pr_number}; "
+                f"error code: {response.status_code} - {response.text}"
+            )
+
+        return response.json()["head"]["sha"]
 
 
 def _parse_arguments():
-  parser = argparse.ArgumentParser()
-  parser.add_argument("comment_json", type=pathlib.Path)
-  parser.add_argument("--verbose", action="store_true")
-  verification_parser = parser.add_mutually_exclusive_group(required=True)
-  verification_parser.add_argument("--github_event_json", type=pathlib.Path)
-  # Temporary option for buildkite pipeline.
-  verification_parser.add_argument("--no_verify_pr", action="store_true")
-  return parser.parse_args()
+    parser = argparse.ArgumentParser()
+    parser.add_argument("comment_json", type=pathlib.Path)
+    parser.add_argument("--verbose", action="store_true")
+    verification_parser = parser.add_mutually_exclusive_group(required=True)
+    verification_parser.add_argument("--github_event_json", type=pathlib.Path)
+    # Temporary option for buildkite pipeline.
+    verification_parser.add_argument("--no_verify_pr", action="store_true")
+    return parser.parse_args()
 
 
 def main(args: argparse.Namespace):
-  github_token = os.environ.get("GITHUB_TOKEN")
-  if github_token is None:
-    raise ValueError("GITHUB_TOKEN must be set.")
+    github_token = os.environ.get("GITHUB_TOKEN")
+    if github_token is None:
+        raise ValueError("GITHUB_TOKEN must be set.")
 
-  comment_bot_user = os.environ.get("COMMENT_BOT_USER")
-  if comment_bot_user is None:
-    raise ValueError("COMMENT_BOT_USER must be set.")
+    comment_bot_user = os.environ.get("COMMENT_BOT_USER")
+    if comment_bot_user is None:
+        raise ValueError("COMMENT_BOT_USER must be set.")
 
-  gist_bot_token = os.environ.get("GIST_BOT_TOKEN")
-  if gist_bot_token is None:
-    raise ValueError("GIST_BOT_TOKEN must be set.")
+    gist_bot_token = os.environ.get("GIST_BOT_TOKEN")
+    if gist_bot_token is None:
+        raise ValueError("GIST_BOT_TOKEN must be set.")
 
-  comment_data = benchmark_comment.CommentData(
-      **json.loads(args.comment_json.read_text()))
-  # Sanitize the pr number to make sure it is an integer.
-  pr_number = int(comment_data.unverified_pr_number)
+    comment_data = benchmark_comment.CommentData(
+        **json.loads(args.comment_json.read_text())
+    )
+    # Sanitize the pr number to make sure it is an integer.
+    pr_number = int(comment_data.unverified_pr_number)
 
-  pr_client = GithubClient(requester=APIRequester(github_token=github_token))
-  if args.github_event_json is None:
-    github_event = None
-  else:
-    github_event = json.loads(args.github_event_json.read_text())
-    workflow_run_sha = github_event["workflow_run"]["head_sha"]
-    pr_head_sha = pr_client.get_pull_request_head_commit(pr_number=pr_number)
-    # We can't get the trusted PR number of a workflow run from GitHub API. So we
-    # take the untrusted PR number from presubmit workflow and verify if the PR's
-    # current head SHA matches the commit SHA in the workflow run. It assumes
-    # that to generate the malicious comment data, attacker must modify the code
-    # and has a new commit SHA. So if the PR head commit matches the workflow
-    # run with attacker's commit, either the PR is created by the attacker or
-    # other's PR has the malicious commit. In both cases posting malicious
-    # comment is acceptable.
-    #
-    # Note that the collision of a target SHA1 is possible but GitHub has some
-    # protections (https://github.blog/2017-03-20-sha-1-collision-detection-on-github-com/).
-    # The assumption also only holds if files in GCS can't be overwritten (so the
-    # comment data can't be modified without changing the code).
-    # The check will also fail if the PR author pushes the new commit after the
-    # workflow is triggered. But pushing the new commit means to cancel the
-    # current CI run including the benchmarking. So it will unlikely fail for
-    # that reason.
-    if workflow_run_sha != pr_head_sha:
-      raise ValueError(
-          f"Workflow run SHA: {workflow_run_sha} does not match "
-          f"the head SHA: {pr_head_sha} of the pull request: {pr_number}.")
+    pr_client = GithubClient(requester=APIRequester(github_token=github_token))
+    if args.github_event_json is None:
+        github_event = None
+    else:
+        github_event = json.loads(args.github_event_json.read_text())
+        workflow_run_sha = github_event["workflow_run"]["head_sha"]
+        pr_head_sha = pr_client.get_pull_request_head_commit(pr_number=pr_number)
+        # We can't get the trusted PR number of a workflow run from GitHub API. So we
+        # take the untrusted PR number from presubmit workflow and verify if the PR's
+        # current head SHA matches the commit SHA in the workflow run. It assumes
+        # that to generate the malicious comment data, attacker must modify the code
+        # and has a new commit SHA. So if the PR head commit matches the workflow
+        # run with attacker's commit, either the PR is created by the attacker or
+        # other's PR has the malicious commit. In both cases posting malicious
+        # comment is acceptable.
+        #
+        # Note that the collision of a target SHA1 is possible but GitHub has some
+        # protections (https://github.blog/2017-03-20-sha-1-collision-detection-on-github-com/).
+        # The assumption also only holds if files in GCS can't be overwritten (so the
+        # comment data can't be modified without changing the code).
+        # The check will also fail if the PR author pushes the new commit after the
+        # workflow is triggered. But pushing the new commit means to cancel the
+        # current CI run including the benchmarking. So it will unlikely fail for
+        # that reason.
+        if workflow_run_sha != pr_head_sha:
+            raise ValueError(
+                f"Workflow run SHA: {workflow_run_sha} does not match "
+                f"the head SHA: {pr_head_sha} of the pull request: {pr_number}."
+            )
 
-  gist_client = GithubClient(requester=APIRequester(
-      github_token=gist_bot_token))
-  gist_url = gist_client.post_to_gist(
-      filename=f'iree-full-benchmark-results-{pr_number}.md',
-      content=comment_data.full_md,
-      verbose=args.verbose)
+    gist_client = GithubClient(requester=APIRequester(github_token=gist_bot_token))
+    gist_url = gist_client.post_to_gist(
+        filename=f"iree-full-benchmark-results-{pr_number}.md",
+        content=comment_data.full_md,
+        verbose=args.verbose,
+    )
 
-  previous_comment_id = pr_client.get_previous_comment_on_pr(
-      pr_number=pr_number,
-      comment_bot_user=comment_bot_user,
-      comment_type_id=comment_data.type_id,
-      verbose=args.verbose)
+    previous_comment_id = pr_client.get_previous_comment_on_pr(
+        pr_number=pr_number,
+        comment_bot_user=comment_bot_user,
+        comment_type_id=comment_data.type_id,
+        verbose=args.verbose,
+    )
 
-  abbr_md = comment_data.abbr_md.replace(
-      benchmark_comment.GIST_LINK_PLACEHORDER, gist_url)
-  if github_event is not None:
-    abbr_md += f'\n\n[Source Workflow Run]({github_event["workflow_run"]["html_url"]})'
-  if previous_comment_id is not None:
-    pr_client.update_comment_on_pr(comment_id=previous_comment_id,
-                                   content=abbr_md)
-  else:
-    pr_client.create_comment_on_pr(pr_number=pr_number, content=abbr_md)
+    abbr_md = comment_data.abbr_md.replace(
+        benchmark_comment.GIST_LINK_PLACEHORDER, gist_url
+    )
+    if github_event is not None:
+        abbr_md += (
+            f'\n\n[Source Workflow Run]({github_event["workflow_run"]["html_url"]})'
+        )
+    if previous_comment_id is not None:
+        pr_client.update_comment_on_pr(comment_id=previous_comment_id, content=abbr_md)
+    else:
+        pr_client.create_comment_on_pr(pr_number=pr_number, content=abbr_md)
 
 
 if __name__ == "__main__":
-  main(_parse_arguments())
+    main(_parse_arguments())
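
One more pattern worth noting from post_benchmark_comment.py above: when a boolean test grows past the line limit, Black parenthesizes it and starts each continuation line with the operator, as in the comment-matching check inside get_previous_comment_on_pr. A minimal, self-contained sketch of the same shape (the predicate itself is hypothetical):

```python
def _is_bot_comment(comment, comment_bot_user, comment_type_id):
    # Hypothetical predicate; the wrapped boolean mirrors the layout Black
    # produces when the single-line form would exceed 88 columns.
    return (
        comment["user"]["login"] == comment_bot_user
        and comment_type_id in comment["body"]
    )
```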
diff --git a/build_tools/benchmarks/post_benchmark_comment_test.py b/build_tools/benchmarks/post_benchmark_comment_test.py
index 6564b0d..d54fb8d 100644
--- a/build_tools/benchmarks/post_benchmark_comment_test.py
+++ b/build_tools/benchmarks/post_benchmark_comment_test.py
@@ -15,174 +15,146 @@
 
 
 class GithubClientTest(unittest.TestCase):
+    def setUp(self):
+        self._mock_response = mock.create_autospec(requests.Response)
+        self._mock_requester = mock.create_autospec(post_benchmark_comment.APIRequester)
+        self._mock_requester.get.return_value = self._mock_response
+        self._mock_requester.post.return_value = self._mock_response
+        self._mock_requester.patch.return_value = self._mock_response
 
-  def setUp(self):
-    self._mock_response = mock.create_autospec(requests.Response)
-    self._mock_requester = mock.create_autospec(
-        post_benchmark_comment.APIRequester)
-    self._mock_requester.get.return_value = self._mock_response
-    self._mock_requester.post.return_value = self._mock_response
-    self._mock_requester.patch.return_value = self._mock_response
+    def test_post_to_gist(self):
+        gist_url = "https://example.com/123455/1234.md"
+        self._mock_response.status_code = http.client.CREATED
+        self._mock_response.json.return_value = {
+            "html_url": gist_url,
+            "truncated": False,
+        }
+        client = post_benchmark_comment.GithubClient(self._mock_requester)
 
-  def test_post_to_gist(self):
-    gist_url = "https://example.com/123455/1234.md"
-    self._mock_response.status_code = http.client.CREATED
-    self._mock_response.json.return_value = {
-        "html_url": gist_url,
-        "truncated": False
-    }
-    client = post_benchmark_comment.GithubClient(self._mock_requester)
+        url = client.post_to_gist(filename="1234.md", content="xyz")
 
-    url = client.post_to_gist(filename="1234.md", content="xyz")
+        self.assertEqual(url, gist_url)
+        self._mock_requester.post.assert_called_once_with(
+            endpoint=post_benchmark_comment.GITHUB_GIST_API,
+            payload={"public": True, "files": {"1234.md": {"content": "xyz"}}},
+        )
 
-    self.assertEqual(url, gist_url)
-    self._mock_requester.post.assert_called_once_with(
-        endpoint=post_benchmark_comment.GITHUB_GIST_API,
-        payload={
-            "public": True,
-            "files": {
-                "1234.md": {
-                    "content": "xyz"
-                }
-            }
-        })
+    def test_post_to_gist_truncated(self):
+        gist_url = "example.com/123455/1234.md"
+        self._mock_response.status_code = http.client.CREATED
+        self._mock_response.json.return_value = {
+            "html_url": gist_url,
+            "truncated": True,
+        }
+        client = post_benchmark_comment.GithubClient(self._mock_requester)
 
-  def test_post_to_gist_truncated(self):
-    gist_url = "example.com/123455/1234.md"
-    self._mock_response.status_code = http.client.CREATED
-    self._mock_response.json.return_value = {
-        "html_url": gist_url,
-        "truncated": True
-    }
-    client = post_benchmark_comment.GithubClient(self._mock_requester)
+        with self.assertRaises(RuntimeError) as _:
+            client.post_to_gist(filename="1234.md", content="xyz")
 
-    with self.assertRaises(RuntimeError) as _:
-      client.post_to_gist(filename="1234.md", content="xyz")
+    def test_get_previous_comment_on_pr(self):
+        first_mock_response = mock.create_autospec(requests.Response)
+        first_mock_response.status_code = http.client.OK
+        first_mock_response.json.return_value = [
+            {"id": 1, "user": {"login": "bot"}, "body": "comment id: abcd"},
+            {"id": 2, "user": {"login": "user"}, "body": "comment id: 1234"},
+        ]
+        second_mock_response = mock.create_autospec(requests.Response)
+        second_mock_response.status_code = http.client.OK
+        second_mock_response.json.return_value = [
+            {"id": 3, "user": {"login": "bot"}, "body": "comment id: 1234"}
+        ]
+        mock_requester = mock.create_autospec(post_benchmark_comment.APIRequester)
+        mock_requester.get.side_effect = [first_mock_response, second_mock_response]
+        client = post_benchmark_comment.GithubClient(mock_requester)
 
-  def test_get_previous_comment_on_pr(self):
-    first_mock_response = mock.create_autospec(requests.Response)
-    first_mock_response.status_code = http.client.OK
-    first_mock_response.json.return_value = [{
-        "id": 1,
-        "user": {
-            "login": "bot"
-        },
-        "body": "comment id: abcd"
-    }, {
-        "id": 2,
-        "user": {
-            "login": "user"
-        },
-        "body": "comment id: 1234"
-    }]
-    second_mock_response = mock.create_autospec(requests.Response)
-    second_mock_response.status_code = http.client.OK
-    second_mock_response.json.return_value = [{
-        "id": 3,
-        "user": {
-            "login": "bot"
-        },
-        "body": "comment id: 1234"
-    }]
-    mock_requester = mock.create_autospec(post_benchmark_comment.APIRequester)
-    mock_requester.get.side_effect = [first_mock_response, second_mock_response]
-    client = post_benchmark_comment.GithubClient(mock_requester)
+        comment_id = client.get_previous_comment_on_pr(
+            pr_number=23,
+            comment_bot_user="bot",
+            comment_type_id="1234",
+            query_comment_per_page=2,
+            max_pages_to_search=10,
+        )
 
-    comment_id = client.get_previous_comment_on_pr(pr_number=23,
-                                                   comment_bot_user="bot",
-                                                   comment_type_id="1234",
-                                                   query_comment_per_page=2,
-                                                   max_pages_to_search=10)
+        self.assertEqual(comment_id, 3)
+        self.assertEqual(mock_requester.get.call_count, 2)
+        endpoint_url = (
+            f"{post_benchmark_comment.GITHUB_IREE_API_PREFIX}/issues/23/comments"
+        )
+        mock_requester.get.assert_any_call(
+            endpoint=endpoint_url,
+            payload={"per_page": 2, "page": 1, "sort": "updated", "direction": "desc"},
+        )
+        mock_requester.get.assert_any_call(
+            endpoint=endpoint_url,
+            payload={"per_page": 2, "page": 2, "sort": "updated", "direction": "desc"},
+        )
 
-    self.assertEqual(comment_id, 3)
-    self.assertEqual(mock_requester.get.call_count, 2)
-    endpoint_url = f"{post_benchmark_comment.GITHUB_IREE_API_PREFIX}/issues/23/comments"
-    mock_requester.get.assert_any_call(endpoint=endpoint_url,
-                                       payload={
-                                           "per_page": 2,
-                                           "page": 1,
-                                           "sort": "updated",
-                                           "direction": "desc"
-                                       })
-    mock_requester.get.assert_any_call(endpoint=endpoint_url,
-                                       payload={
-                                           "per_page": 2,
-                                           "page": 2,
-                                           "sort": "updated",
-                                           "direction": "desc"
-                                       })
+    def test_get_previous_comment_on_pr_not_found(self):
+        mock_response = mock.create_autospec(requests.Response)
+        mock_response.status_code = http.client.OK
+        mock_response.json.return_value = [
+            {"id": 1, "user": {"login": "bot"}, "body": "comment id: 5678"}
+        ]
+        mock_requester = mock.create_autospec(post_benchmark_comment.APIRequester)
+        mock_requester.get.side_effect = [mock_response] * 10
+        client = post_benchmark_comment.GithubClient(mock_requester)
 
-  def test_get_previous_comment_on_pr_not_found(self):
-    mock_response = mock.create_autospec(requests.Response)
-    mock_response.status_code = http.client.OK
-    mock_response.json.return_value = [{
-        "id": 1,
-        "user": {
-            "login": "bot"
-        },
-        "body": "comment id: 5678"
-    }]
-    mock_requester = mock.create_autospec(post_benchmark_comment.APIRequester)
-    mock_requester.get.side_effect = [mock_response] * 10
-    client = post_benchmark_comment.GithubClient(mock_requester)
+        comment_id = client.get_previous_comment_on_pr(
+            pr_number=23,
+            comment_bot_user="bot",
+            comment_type_id="1234",
+            query_comment_per_page=1,
+            max_pages_to_search=10,
+        )
 
-    comment_id = client.get_previous_comment_on_pr(pr_number=23,
-                                                   comment_bot_user="bot",
-                                                   comment_type_id="1234",
-                                                   query_comment_per_page=1,
-                                                   max_pages_to_search=10)
+        self.assertIsNone(comment_id)
+        self.assertEqual(mock_requester.get.call_count, 10)
+        endpoint_url = (
+            f"{post_benchmark_comment.GITHUB_IREE_API_PREFIX}/issues/23/comments"
+        )
+        mock_requester.get.assert_any_call(
+            endpoint=endpoint_url,
+            payload={"per_page": 1, "page": 1, "sort": "updated", "direction": "desc"},
+        )
+        mock_requester.get.assert_any_call(
+            endpoint=endpoint_url,
+            payload={"per_page": 1, "page": 10, "sort": "updated", "direction": "desc"},
+        )
 
-    self.assertIsNone(comment_id)
-    self.assertEqual(mock_requester.get.call_count, 10)
-    endpoint_url = f"{post_benchmark_comment.GITHUB_IREE_API_PREFIX}/issues/23/comments"
-    mock_requester.get.assert_any_call(endpoint=endpoint_url,
-                                       payload={
-                                           "per_page": 1,
-                                           "page": 1,
-                                           "sort": "updated",
-                                           "direction": "desc"
-                                       })
-    mock_requester.get.assert_any_call(endpoint=endpoint_url,
-                                       payload={
-                                           "per_page": 1,
-                                           "page": 10,
-                                           "sort": "updated",
-                                           "direction": "desc"
-                                       })
+    def test_update_comment_on_pr(self):
+        self._mock_response.status_code = http.client.OK
+        client = post_benchmark_comment.GithubClient(self._mock_requester)
 
-  def test_update_comment_on_pr(self):
-    self._mock_response.status_code = http.client.OK
-    client = post_benchmark_comment.GithubClient(self._mock_requester)
+        client.update_comment_on_pr(comment_id=123, content="xyz")
 
-    client.update_comment_on_pr(comment_id=123, content="xyz")
+        self._mock_requester.patch.assert_called_once_with(
+            endpoint=f"{post_benchmark_comment.GITHUB_IREE_API_PREFIX}/issues/comments/123",
+            payload={"body": "xyz"},
+        )
 
-    self._mock_requester.patch.assert_called_once_with(
-        endpoint=
-        f"{post_benchmark_comment.GITHUB_IREE_API_PREFIX}/issues/comments/123",
-        payload={"body": "xyz"})
+    def test_create_comment_on_pr(self):
+        self._mock_response.status_code = http.client.CREATED
+        client = post_benchmark_comment.GithubClient(self._mock_requester)
 
-  def test_create_comment_on_pr(self):
-    self._mock_response.status_code = http.client.CREATED
-    client = post_benchmark_comment.GithubClient(self._mock_requester)
+        client.create_comment_on_pr(pr_number=1234, content="xyz")
 
-    client.create_comment_on_pr(pr_number=1234, content="xyz")
+        self._mock_requester.post.assert_called_once_with(
+            endpoint=f"{post_benchmark_comment.GITHUB_IREE_API_PREFIX}/issues/1234/comments",
+            payload={"body": "xyz"},
+        )
 
-    self._mock_requester.post.assert_called_once_with(
-        endpoint=
-        f"{post_benchmark_comment.GITHUB_IREE_API_PREFIX}/issues/1234/comments",
-        payload={"body": "xyz"})
+    def test_get_pull_request_head_commit(self):
+        self._mock_response.status_code = http.client.OK
+        self._mock_response.json.return_value = {"head": {"sha": "sha123"}}
+        client = post_benchmark_comment.GithubClient(self._mock_requester)
 
-  def test_get_pull_request_head_commit(self):
-    self._mock_response.status_code = http.client.OK
-    self._mock_response.json.return_value = {"head": {"sha": "sha123"}}
-    client = post_benchmark_comment.GithubClient(self._mock_requester)
+        commit_sha = client.get_pull_request_head_commit(pr_number=123)
 
-    commit_sha = client.get_pull_request_head_commit(pr_number=123)
-
-    self.assertEqual(commit_sha, "sha123")
-    self._mock_requester.get.assert_called_once_with(
-        endpoint=f"{post_benchmark_comment.GITHUB_IREE_API_PREFIX}/pulls/123")
+        self.assertEqual(commit_sha, "sha123")
+        self._mock_requester.get.assert_called_once_with(
+            endpoint=f"{post_benchmark_comment.GITHUB_IREE_API_PREFIX}/pulls/123"
+        )
 
 
 if __name__ == "__main__":
-  unittest.main()
+    unittest.main()
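
The collapsed payload dictionaries in the test above also show Black's "magic trailing comma": a literal written without a trailing comma is packed onto one line whenever it fits in 88 columns, while a literal written with a trailing comma stays one element per line. A small sketch with made-up data:

```python
# No trailing comma in the source and it fits within 88 columns, so Black
# collapses the nested literal onto a single line.
collapsed = {"public": True, "files": {"report.md": {"content": "xyz"}}}

# A trailing comma after the last item is "magic": Black keeps the literal
# exploded even though it would fit on one line.
exploded = {
    "public": True,
    "files": {"report.md": {"content": "xyz"}},
}
```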
diff --git a/build_tools/benchmarks/reporting/parse_shark_benchmarks.py b/build_tools/benchmarks/reporting/parse_shark_benchmarks.py
index 2c85891..97fd5cd 100755
--- a/build_tools/benchmarks/reporting/parse_shark_benchmarks.py
+++ b/build_tools/benchmarks/reporting/parse_shark_benchmarks.py
@@ -51,287 +51,342 @@
 
 
 def _generate_table(df_iree, df_shark, df_baseline, title):
-  """Generates a table comparing latencies between IREE, SHARK and a baseline."""
-  summary = pd.DataFrame(columns=[
-      _MODEL, _BASELINE, _DATA_TYPE, _DIALECT, _DEVICE, _BASELINE_LATENCY,
-      _IREE_LATENCY, _SHARK_LATENCY, _IREE_VS_BASELINE, _SHARK_VS_BASELINE,
-      _IREE_VS_SHARK, _BASELINE_MEMORY, _IREE_MEMORY, _SHARK_MEMORY
-  ])
+    """Generates a table comparing latencies between IREE, SHARK and a baseline."""
+    summary = pd.DataFrame(
+        columns=[
+            _MODEL,
+            _BASELINE,
+            _DATA_TYPE,
+            _DIALECT,
+            _DEVICE,
+            _BASELINE_LATENCY,
+            _IREE_LATENCY,
+            _SHARK_LATENCY,
+            _IREE_VS_BASELINE,
+            _SHARK_VS_BASELINE,
+            _IREE_VS_SHARK,
+            _BASELINE_MEMORY,
+            _IREE_MEMORY,
+            _SHARK_MEMORY,
+        ]
+    )
 
-  models = df_iree.model.unique()
-  for model in models:
-    iree_results_per_model = df_iree.loc[df_iree.model == model]
-    dialects = iree_results_per_model.dialect.unique()
-    for dialect in dialects:
-      iree_results_per_dialect = iree_results_per_model.loc[
-          iree_results_per_model.dialect == dialect]
-      data_types = iree_results_per_dialect.data_type.unique()
-      for data_type in data_types:
-        iree_results_per_datatype = iree_results_per_dialect.loc[
-            iree_results_per_dialect.data_type == data_type]
-        device_types = iree_results_per_datatype.device.unique()
-        for device in device_types:
-          iree_results = iree_results_per_datatype.loc[
-              iree_results_per_datatype.device == device]
-          if len(iree_results) != 3:
-            print(f"Warning! Expected number of results to be 3. Got"
-                  f" {len(iree_results)}")
-            print(iree_results)
-            continue
+    models = df_iree.model.unique()
+    for model in models:
+        iree_results_per_model = df_iree.loc[df_iree.model == model]
+        dialects = iree_results_per_model.dialect.unique()
+        for dialect in dialects:
+            iree_results_per_dialect = iree_results_per_model.loc[
+                iree_results_per_model.dialect == dialect
+            ]
+            data_types = iree_results_per_dialect.data_type.unique()
+            for data_type in data_types:
+                iree_results_per_datatype = iree_results_per_dialect.loc[
+                    iree_results_per_dialect.data_type == data_type
+                ]
+                device_types = iree_results_per_datatype.device.unique()
+                for device in device_types:
+                    iree_results = iree_results_per_datatype.loc[
+                        iree_results_per_datatype.device == device
+                    ]
+                    if len(iree_results) != 3:
+                        print(
+                            f"Warning! Expected number of results to be 3. Got"
+                            f" {len(iree_results)}"
+                        )
+                        print(iree_results)
+                        continue
 
-          baseline_results = df_baseline.loc[(df_baseline.model == model) &
-                                             (df_baseline.dialect == dialect) &
-                                             (df_baseline.data_type
-                                              == data_type) &
-                                             (df_baseline.device == device)]
+                    baseline_results = df_baseline.loc[
+                        (df_baseline.model == model)
+                        & (df_baseline.dialect == dialect)
+                        & (df_baseline.data_type == data_type)
+                        & (df_baseline.device == device)
+                    ]
 
-          if baseline_results.empty:
-            # We use snapshots of latencies for baseline. If it is a new
-            # benchmark that is not included in the snapshot yet, emit a
-            # warning.
-            print(
-                f"Warning: No baseline results found for {model}, {dialect},"
-                f" {data_type}, {device}. Using IREE version as baseline. Please"
-                f" update baseline csv.")
-            engine = iree_results.engine.iloc[0]
-            baseline_df = iree_results.loc[iree_results.engine == engine]
-            baseline_latency = baseline_df.iloc[0]["ms/iter"]
-            baseline_device_mb = baseline_df.iloc[0]["device_memory_mb"]
-          else:
-            engine = baseline_results.engine.iloc[0]
-            baseline_df = baseline_results.loc[baseline_results.engine ==
-                                               engine]
-            baseline_latency = baseline_df.iloc[0]["ms/iter"]
-            baseline_device_mb = baseline_df.iloc[0]["device_memory_mb"]
+                    if baseline_results.empty:
+                        # We use snapshots of latencies for baseline. If it is a new
+                        # benchmark that is not included in the snapshot yet, emit a
+                        # warning.
+                        print(
+                            f"Warning: No baseline results found for {model}, {dialect},"
+                            f" {data_type}, {device}. Using IREE version as baseline. Please"
+                            f" update baseline csv."
+                        )
+                        engine = iree_results.engine.iloc[0]
+                        baseline_df = iree_results.loc[iree_results.engine == engine]
+                        baseline_latency = baseline_df.iloc[0]["ms/iter"]
+                        baseline_device_mb = baseline_df.iloc[0]["device_memory_mb"]
+                    else:
+                        engine = baseline_results.engine.iloc[0]
+                        baseline_df = baseline_results.loc[
+                            baseline_results.engine == engine
+                        ]
+                        baseline_latency = baseline_df.iloc[0]["ms/iter"]
+                        baseline_device_mb = baseline_df.iloc[0]["device_memory_mb"]
 
-          iree_df = iree_results.loc[iree_results.engine == "shark_iree_c"]
-          iree_latency = iree_df.iloc[0]["ms/iter"]
-          iree_device_mb = iree_df.iloc[0]["device_memory_mb"]
-          iree_vs_baseline = html_utils.format_latency_comparison(
-              iree_latency, baseline_latency)
+                    iree_df = iree_results.loc[iree_results.engine == "shark_iree_c"]
+                    iree_latency = iree_df.iloc[0]["ms/iter"]
+                    iree_device_mb = iree_df.iloc[0]["device_memory_mb"]
+                    iree_vs_baseline = html_utils.format_latency_comparison(
+                        iree_latency, baseline_latency
+                    )
 
-          if df_shark is not None:
-            shark_results = df_shark.loc[(df_shark.model == model) &
-                                         (df_shark.dialect == dialect) &
-                                         (df_shark.data_type == data_type) &
-                                         (df_shark.device == device)]
-            if shark_results.empty:
-              print(
-                  f"Warning: No SHARK results for {model}, {dialect}, {data_type}, {device}."
-              )
-              continue
+                    if df_shark is not None:
+                        shark_results = df_shark.loc[
+                            (df_shark.model == model)
+                            & (df_shark.dialect == dialect)
+                            & (df_shark.data_type == data_type)
+                            & (df_shark.device == device)
+                        ]
+                        if shark_results.empty:
+                            print(
+                                f"Warning: No SHARK results for {model}, {dialect}, {data_type}, {device}."
+                            )
+                            continue
 
-            shark_df = shark_results.loc[shark_results.engine == "shark_iree_c"]
-            shark_latency = shark_df.iloc[0]["ms/iter"]
-            shark_device_mb = shark_df.iloc[0]["device_memory_mb"]
-            shark_vs_baseline = html_utils.format_latency_comparison(
-                shark_latency, baseline_latency)
-            iree_vs_shark = html_utils.format_latency_comparison(
-                iree_latency, shark_latency)
-          else:
-            # If there are no SHARK benchmarks available, use default values.
-            # These columns will be hidden later.
-            shark_latency = 0
-            shark_vs_baseline = "<missing_comparison>"
-            iree_vs_shark = "<missing_comparison>"
+                        shark_df = shark_results.loc[
+                            shark_results.engine == "shark_iree_c"
+                        ]
+                        shark_latency = shark_df.iloc[0]["ms/iter"]
+                        shark_device_mb = shark_df.iloc[0]["device_memory_mb"]
+                        shark_vs_baseline = html_utils.format_latency_comparison(
+                            shark_latency, baseline_latency
+                        )
+                        iree_vs_shark = html_utils.format_latency_comparison(
+                            iree_latency, shark_latency
+                        )
+                    else:
+                        # If there are no SHARK benchmarks available, use default values.
+                        # These columns will be hidden later.
+                        shark_latency = 0
+                        shark_vs_baseline = "<missing_comparison>"
+                        iree_vs_shark = "<missing_comparison>"
 
-          summary.loc[len(summary)] = [
-              model,
-              engine,
-              data_type,
-              dialect,
-              device,
-              f"{baseline_latency:.1f}",
-              f"{iree_latency:.1f}",
-              f"{shark_latency:.1f}",
-              iree_vs_baseline,
-              shark_vs_baseline,
-              iree_vs_shark,
-              f"{baseline_device_mb:.3f}",
-              f"{iree_device_mb:.3f}",
-              f"{shark_device_mb:.3f}",
-          ]
+                    summary.loc[len(summary)] = [
+                        model,
+                        engine,
+                        data_type,
+                        dialect,
+                        device,
+                        f"{baseline_latency:.1f}",
+                        f"{iree_latency:.1f}",
+                        f"{shark_latency:.1f}",
+                        iree_vs_baseline,
+                        shark_vs_baseline,
+                        iree_vs_shark,
+                        f"{baseline_device_mb:.3f}",
+                        f"{iree_device_mb:.3f}",
+                        f"{shark_device_mb:.3f}",
+                    ]
 
-  summary = summary.round(2)
+    summary = summary.round(2)
 
-  st = summary.style.set_table_styles(html_utils.get_table_css())
-  st = st.hide(axis="index")
-  if df_shark is None:
-    st = st.hide_columns(
-        subset=[_SHARK_LATENCY, _SHARK_VS_BASELINE, _IREE_VS_SHARK])
-  st = st.set_caption(title)
-  st = st.applymap(html_utils.style_performance, subset=_PERF_COLUMNS)
-  st = st.set_properties(subset=[_MODEL],
-                         **{
-                             "width": "300px",
-                             "text-align": "left",
-                         })
-  st = st.set_properties(subset=[_BASELINE],
-                         **{
-                             "width": "140",
-                             "text-align": "center",
-                         })
-  st = st.set_properties(subset=[_DIALECT, _DATA_TYPE, _DEVICE],
-                         **{
-                             "width": "100",
-                             "text-align": "center",
-                         })
-  st = st.set_properties(subset=_LATENCY_COLUMNS,
-                         **{
-                             "width": "100",
-                             "text-align": "right",
-                         })
-  st = st.set_properties(subset=_PERF_COLUMNS,
-                         **{
-                             "width": "150px",
-                             "text-align": "right",
-                             "color": "#ffffff"
-                         })
-  st = st.set_properties(subset=_MEMORY_COLUMNS,
-                         **{
-                             "width": "100",
-                             "text-align": "right",
-                         })
+    st = summary.style.set_table_styles(html_utils.get_table_css())
+    st = st.hide(axis="index")
+    if df_shark is None:
+        st = st.hide_columns(
+            subset=[_SHARK_LATENCY, _SHARK_VS_BASELINE, _IREE_VS_SHARK]
+        )
+    st = st.set_caption(title)
+    st = st.applymap(html_utils.style_performance, subset=_PERF_COLUMNS)
+    st = st.set_properties(
+        subset=[_MODEL],
+        **{
+            "width": "300px",
+            "text-align": "left",
+        },
+    )
+    st = st.set_properties(
+        subset=[_BASELINE],
+        **{
+            "width": "140",
+            "text-align": "center",
+        },
+    )
+    st = st.set_properties(
+        subset=[_DIALECT, _DATA_TYPE, _DEVICE],
+        **{
+            "width": "100",
+            "text-align": "center",
+        },
+    )
+    st = st.set_properties(
+        subset=_LATENCY_COLUMNS,
+        **{
+            "width": "100",
+            "text-align": "right",
+        },
+    )
+    st = st.set_properties(
+        subset=_PERF_COLUMNS,
+        **{"width": "150px", "text-align": "right", "color": "#ffffff"},
+    )
+    st = st.set_properties(
+        subset=_MEMORY_COLUMNS,
+        **{
+            "width": "100",
+            "text-align": "right",
+        },
+    )
 
-  return st.to_html() + "<br/>"
+    return st.to_html() + "<br/>"
 
 
-def generate_table(iree_csv,
-                   baseline_csv,
-                   shark_csv=None,
-                   shape_type="static",
-                   device="cpu",
-                   title="Benchmarks"):
-  """Generates a table comparing latencies between IREE, SHARK and a baseline.
+def generate_table(
+    iree_csv,
+    baseline_csv,
+    shark_csv=None,
+    shape_type="static",
+    device="cpu",
+    title="Benchmarks",
+):
+    """Generates a table comparing latencies between IREE, SHARK and a baseline.
 
-  Args:
-    iree_csv: Path to the csv file containing IREE latencies.
-    baseline_csv: Path to the csv file containing baseline latencies.
-    shark_csv: Path to the csv file containing SHARK-Runtime latencies. This is optional.
-    shape_type: Currently either `static` or `dynamic`.
-    device: Device used to run the benchmarks.
-    title: The title of the generated table.
+    Args:
+      iree_csv: Path to the csv file containing IREE latencies.
+      baseline_csv: Path to the csv file containing baseline latencies.
+      shark_csv: Path to the csv file containing SHARK-Runtime latencies. This is optional.
+      shape_type: Currently either `static` or `dynamic`.
+      device: Device used to run the benchmarks.
+      title: The title of the generated table.
 
-  Returns:
-    An HTML string containing the summarized report.
-  """
-  shark_df = None
-  if shark_csv is not None:
-    shark_df = pd.read_csv(shark_csv)
-    shark_df = shark_df.loc[(shark_df.shape_type == shape_type) &
-                            (shark_df.device == device)]
+    Returns:
+      An HTML string containing the summarized report.
+    """
+    shark_df = None
+    if shark_csv is not None:
+        shark_df = pd.read_csv(shark_csv)
+        shark_df = shark_df.loc[
+            (shark_df.shape_type == shape_type) & (shark_df.device == device)
+        ]
 
-  iree_df = pd.read_csv(iree_csv)
-  iree_df = iree_df.loc[(iree_df.shape_type == shape_type) &
-                        (iree_df.device == device)]
+    iree_df = pd.read_csv(iree_csv)
+    iree_df = iree_df.loc[
+        (iree_df.shape_type == shape_type) & (iree_df.device == device)
+    ]
 
-  baseline_df = pd.read_csv(baseline_csv)
-  baseline_df = baseline_df.loc[(baseline_df.shape_type == shape_type) &
-                                (baseline_df.device == device)]
+    baseline_df = pd.read_csv(baseline_csv)
+    baseline_df = baseline_df.loc[
+        (baseline_df.shape_type == shape_type) & (baseline_df.device == device)
+    ]
 
-  return _generate_table(iree_df, shark_df, baseline_df, title)
+    return _generate_table(iree_df, shark_df, baseline_df, title)
 
 
 def main(args):
-  """Summarizes benchmark results generated by the SHARK Tank."""
-  version_html = f"<i>last updated: {date.today().isoformat()}</i><br/><br/>"
-  version_html += "<i><b>Version Info</b></i><br/>"
-  with open(args.version_info) as f:
-    version_info = dict(l.strip().split("=", 1) for l in f)
-  for key, value in version_info.items():
-    version_html += f"<i>{key}: {value}</i><br/>"
-  version_html += "<br/>"
+    """Summarizes benchmark results generated by the SHARK Tank."""
+    version_html = f"<i>last updated: {date.today().isoformat()}</i><br/><br/>"
+    version_html += "<i><b>Version Info</b></i><br/>"
+    with open(args.version_info) as f:
+        version_info = dict(l.strip().split("=", 1) for l in f)
+    for key, value in version_info.items():
+        version_html += f"<i>{key}: {value}</i><br/>"
+    version_html += "<br/>"
 
-  html = html_utils.generate_header_and_legend(version_html)
+    html = html_utils.generate_header_and_legend(version_html)
 
-  # Generate Server CPU Static.
-  if args.cpu_iree_csv is not None:
-    html += generate_table(args.cpu_iree_csv,
-                           args.cpu_baseline_csv,
-                           shark_csv=args.cpu_shark_csv,
-                           shape_type="static",
-                           device="cpu",
-                           title="Server Intel Ice Lake CPU (Static Shapes)")
+    # Generate Server CPU Static.
+    if args.cpu_iree_csv is not None:
+        html += generate_table(
+            args.cpu_iree_csv,
+            args.cpu_baseline_csv,
+            shark_csv=args.cpu_shark_csv,
+            shape_type="static",
+            device="cpu",
+            title="Server Intel Ice Lake CPU (Static Shapes)",
+        )
 
-  # Generate Server GPU Static.
-  if args.gpu_iree_csv is not None:
-    html += generate_table(args.gpu_iree_csv,
-                           args.gpu_baseline_csv,
-                           shark_csv=args.gpu_shark_csv,
-                           shape_type="static",
-                           device="cuda",
-                           title="Server NVIDIA Tesla A100 GPU (Static Shapes)")
+    # Generate Server GPU Static.
+    if args.gpu_iree_csv is not None:
+        html += generate_table(
+            args.gpu_iree_csv,
+            args.gpu_baseline_csv,
+            shark_csv=args.gpu_shark_csv,
+            shape_type="static",
+            device="cuda",
+            title="Server NVIDIA Tesla A100 GPU (Static Shapes)",
+        )
 
-  # Generate Server CPU Dynamic.
-  if args.cpu_iree_csv is not None:
-    html += generate_table(args.cpu_iree_csv,
-                           args.cpu_baseline_csv,
-                           shark_csv=args.cpu_shark_csv,
-                           shape_type="dynamic",
-                           device="cpu",
-                           title="Server Intel Ice Lake CPU (Dynamic Shapes)")
+    # Generate Server CPU Dynamic.
+    if args.cpu_iree_csv is not None:
+        html += generate_table(
+            args.cpu_iree_csv,
+            args.cpu_baseline_csv,
+            shark_csv=args.cpu_shark_csv,
+            shape_type="dynamic",
+            device="cpu",
+            title="Server Intel Ice Lake CPU (Dynamic Shapes)",
+        )
 
-  # Generate Server GPU Dynamic.
-  if args.gpu_iree_csv is not None:
-    html += generate_table(
-        args.gpu_iree_csv,
-        args.gpu_baseline_csv,
-        shark_csv=args.gpu_shark_csv,
-        shape_type="dynamic",
-        device="cuda",
-        title="Server NVIDIA Tesla A100 GPU (Dynamic Shapes)")
+    # Generate Server GPU Dynamic.
+    if args.gpu_iree_csv is not None:
+        html += generate_table(
+            args.gpu_iree_csv,
+            args.gpu_baseline_csv,
+            shark_csv=args.gpu_shark_csv,
+            shape_type="dynamic",
+            device="cuda",
+            title="Server NVIDIA Tesla A100 GPU (Dynamic Shapes)",
+        )
 
-  args.output_path.write_text(html)
+    args.output_path.write_text(html)
 
 
 def parse_args():
-  parser = argparse.ArgumentParser()
-  parser.add_argument(
-      "--cpu_shark_csv",
-      type=str,
-      default=None,
-      help="The path to the csv file with CPU benchmarking results from the "
-      "SHARK runtime.")
-  parser.add_argument(
-      "--cpu_iree_csv",
-      type=str,
-      default=None,
-      help="The path to the csv file with CPU benchmarking results from IREE.")
-  parser.add_argument(
-      "--cpu_baseline_csv",
-      type=str,
-      default="data/icelake_baseline_2022-09-19.csv",
-      help="The path to the csv file containing baseline CPU results.")
-  parser.add_argument(
-      "--gpu_shark_csv",
-      type=str,
-      default=None,
-      help="The path to the csv file with GPU benchmarking results from the "
-      "SHARK runtime.")
-  parser.add_argument(
-      "--gpu_iree_csv",
-      type=str,
-      default=None,
-      help="The path to the csv file with CPU benchmarking results from IREE.")
-  parser.add_argument(
-      "--gpu_baseline_csv",
-      type=str,
-      default="data/a100_baseline_2022-09-19.csv",
-      help="The path to the csv file containing baseline GPU results.")
-  parser.add_argument(
-      "--version_info",
-      type=pathlib.Path,
-      default=None,
-      help=
-      "The path to a text file containing version information of the frameworks tested."
-  )
-  parser.add_argument(
-      "--output_path",
-      type=pathlib.Path,
-      default="/tmp/summary.html",
-      help="The path to the output html file that summarizes results.")
-  return parser.parse_args()
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--cpu_shark_csv",
+        type=str,
+        default=None,
+        help="The path to the csv file with CPU benchmarking results from the "
+        "SHARK runtime.",
+    )
+    parser.add_argument(
+        "--cpu_iree_csv",
+        type=str,
+        default=None,
+        help="The path to the csv file with CPU benchmarking results from IREE.",
+    )
+    parser.add_argument(
+        "--cpu_baseline_csv",
+        type=str,
+        default="data/icelake_baseline_2022-09-19.csv",
+        help="The path to the csv file containing baseline CPU results.",
+    )
+    parser.add_argument(
+        "--gpu_shark_csv",
+        type=str,
+        default=None,
+        help="The path to the csv file with GPU benchmarking results from the "
+        "SHARK runtime.",
+    )
+    parser.add_argument(
+        "--gpu_iree_csv",
+        type=str,
+        default=None,
+        help="The path to the csv file with CPU benchmarking results from IREE.",
+    )
+    parser.add_argument(
+        "--gpu_baseline_csv",
+        type=str,
+        default="data/a100_baseline_2022-09-19.csv",
+        help="The path to the csv file containing baseline GPU results.",
+    )
+    parser.add_argument(
+        "--version_info",
+        type=pathlib.Path,
+        default=None,
+        help="The path to a text file containing version information of the frameworks tested.",
+    )
+    parser.add_argument(
+        "--output_path",
+        type=pathlib.Path,
+        default="/tmp/summary.html",
+        help="The path to the output html file that summarizes results.",
+    )
+    return parser.parse_args()
 
 
 if __name__ == "__main__":
-  main(parse_args())
+    main(parse_args())
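For readers less familiar with pandas, the repeated `df.loc[(...) & (...)]` selections above are boolean-mask filters; Black only changes their layout, moving each `&` to the start of its continuation line. Here is a small self-contained sketch of the pattern; the DataFrame contents are invented for illustration, while the real scripts read theirs from benchmark CSV files.

```python
# Self-contained sketch of the boolean-mask filtering used to select matching
# baseline/SHARK rows. Column values are placeholders, not real benchmark data.
import pandas as pd

df = pd.DataFrame(
    {
        "model": ["resnet50", "resnet50", "bert_base"],
        "device": ["cpu", "cuda", "cpu"],
        "ms/iter": [12.5, 3.2, 40.1],
    }
)

# Each comparison needs its own parentheses: `&` binds more tightly than `==`,
# so without them the expression would try to evaluate `"resnet50" & df.device`
# first and fail.
subset = df.loc[(df.model == "resnet50") & (df.device == "cpu")]
print(subset)
```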
diff --git a/build_tools/benchmarks/reporting/parse_tflite_benchmarks.py b/build_tools/benchmarks/reporting/parse_tflite_benchmarks.py
index d08af97..b9c67b0 100755
--- a/build_tools/benchmarks/reporting/parse_tflite_benchmarks.py
+++ b/build_tools/benchmarks/reporting/parse_tflite_benchmarks.py
@@ -55,7 +55,7 @@
     "ssd_mobilenet_v2_static_1.0_int8": "int8",
     "ssd_mobilenet_v2_fpnlite_fp32": "fp32",
     "ssd_mobilenet_v2_fpnlite_fp32_fp16": "fp16",
-    "ssd_mobilenet_v2_fpnlite_uint8": 'uint8',
+    "ssd_mobilenet_v2_fpnlite_uint8": "uint8",
 }
 
 # Column headers.
@@ -84,398 +84,450 @@
 
 
 def get_tflite_model_list(df):
-  """Retrieves the list of TFLite models, filtering out duplicates.
+    """Retrieves the list of TFLite models, filtering out duplicates.
 
-  The .csv file includes multiple entries of the same model but under a
-  different configuration (e.g. XNNPack enabled, XNNPack disabled).
-  """
-  df = df.loc[df.runtime == "tflite"]
-  # Remove rows where the model name ends with `noxnn` since this is a duplicate.
-  df = df[~df.model.str.endswith("noxnn")]
-  return df.model.unique()
+    The .csv file includes multiple entries of the same model but under a
+    different configuration (e.g. XNNPack enabled, XNNPack disabled).
+    """
+    df = df.loc[df.runtime == "tflite"]
+    # Remove rows where the model name ends with `noxnn` since this is a duplicate.
+    df = df[~df.model.str.endswith("noxnn")]
+    return df.model.unique()
 
 
 def get_fastest_result(model, df):
-  """Retrieves the lowest latency result from multiple configurations.
+    """Retrieves the lowest latency result from multiple configurations.
 
-  Benchmarks are run under different configurations (e.g. number of threads,
-  Big core, LITTLE core, etc). This method retrieves the fastest configuration
-  whilst ensuring apples to apples comparisons (e.g. FP16 results are not
-  considered when the model is FP32).
+    Benchmarks are run under different configurations (e.g. number of threads,
+    Big core, LITTLE core, etc). This method retrieves the fastest configuration
+    whilst ensuring apples to apples comparisons (e.g. FP16 results are not
+    considered when the model is FP32).
 
-  Args:
-    model: The model name.
-    df: The dataframe to filter through.
+    Args:
+      model: The model name.
+      df: The dataframe to filter through.
 
-  Returns:
-    A dataframe containing the lowest latency.
-  """
-  df = df[df.model.str.startswith(model)]
-  if not model.endswith("fp16"):
-    df = df[~df[_MODEL].str.endswith("fp16")]
-  df = df[df[_LATENCY] != 0]
-  df = df[df[_LATENCY] == df[_LATENCY].min()]
-  return df.head(1)
+    Returns:
+      A dataframe containing the lowest latency.
+    """
+    df = df[df.model.str.startswith(model)]
+    if not model.endswith("fp16"):
+        df = df[~df[_MODEL].str.endswith("fp16")]
+    df = df[df[_LATENCY] != 0]
+    df = df[df[_LATENCY] == df[_LATENCY].min()]
+    return df.head(1)
 
 
 def get_tflite_config(model, df):
-  """Generates a configuration string from TFLite config variables."""
-  config = []
-  if _TASKSET in df.columns:
-    taskset = df.taskset.iloc[0]
-    config.append(f"taskset {taskset}")
-  threads = df.threads.iloc[0]
-  config.append(f"{threads} threads" if threads > 1 else f"{threads} thread")
-  config.append("no xnnpack" if model.endswith("noxnn") else "xnnpack")
-  return ", ".join(config)
+    """Generates a configuration string from TFLite config variables."""
+    config = []
+    if _TASKSET in df.columns:
+        taskset = df.taskset.iloc[0]
+        config.append(f"taskset {taskset}")
+    threads = df.threads.iloc[0]
+    config.append(f"{threads} threads" if threads > 1 else f"{threads} thread")
+    config.append("no xnnpack" if model.endswith("noxnn") else "xnnpack")
+    return ", ".join(config)
 
 
 def generate_tflite_summary(dataframe):
-  """Generates a dataframe containing the fastest TFLite result for each model."""
-  summary = pd.DataFrame(columns=[_MODEL, _LATENCY, _MEMORY, _CONFIG])
-  tflite_df = dataframe[dataframe.runtime == "tflite"]
-  model_list = get_tflite_model_list(dataframe)
-  for model in model_list:
-    df = get_fastest_result(model, tflite_df)
-    if df.empty:
-      print(f"Warning: TFLite results invalid for {model}.")
-      continue
-    latency = df[_LATENCY].iloc[0]
-    full_model_name = df.model.iloc[0]
-    memory = df[_MEMORY].iloc[0]
-    config = get_tflite_config(full_model_name, df)
-    summary.loc[len(summary)] = [model, latency, memory, config]
-  return summary
+    """Generates a dataframe containing the fastest TFLite result for each model."""
+    summary = pd.DataFrame(columns=[_MODEL, _LATENCY, _MEMORY, _CONFIG])
+    tflite_df = dataframe[dataframe.runtime == "tflite"]
+    model_list = get_tflite_model_list(dataframe)
+    for model in model_list:
+        df = get_fastest_result(model, tflite_df)
+        if df.empty:
+            print(f"Warning: TFLite results invalid for {model}.")
+            continue
+        latency = df[_LATENCY].iloc[0]
+        full_model_name = df.model.iloc[0]
+        memory = df[_MEMORY].iloc[0]
+        config = get_tflite_config(full_model_name, df)
+        summary.loc[len(summary)] = [model, latency, memory, config]
+    return summary
 
 
 def get_iree_model_list(df):
-  """Retrieves the list of IREE models, filtering out duplicates.
+    """Retrieves the list of IREE models, filtering out duplicates.
 
-  The .csv file includes multiple entries of the same model but under a
-  different configuration (e.g. mmt4d).
-  """
-  df = df.loc[df.runtime == "iree"]
-  df = df[~df.model.str.endswith("mmt4d")]
-  df = df[~df.model.str.endswith("padfuse")]
-  return df.model.unique()
+    The .csv file includes multiple entries of the same model but under a
+    different configuration (e.g. mmt4d).
+    """
+    df = df.loc[df.runtime == "iree"]
+    df = df[~df.model.str.endswith("mmt4d")]
+    df = df[~df.model.str.endswith("padfuse")]
+    return df.model.unique()
 
 
 def get_iree_config(model, df):
-  """Generates a configuration string from IREE config variables.
+    """Generates a configuration string from IREE config variables.
 
-  The configuration is embedded in the model name.
-  """
-  config = []
-  if _TASKSET in df.columns:
-    taskset = df.taskset.iloc[0]
-    config.append(f"taskset {taskset}")
-  threads = df.threads.iloc[0]
-  config.append(f"{threads} threads" if threads > 1 else f"{threads} thread")
-  if model.endswith("im2col_mmt4d"):
-    config.append("im2col")
-    config.append("mmt4d")
-  elif model.endswith("mmt4d"):
-    config.append("mmt4d")
-  elif model.endswith("padfuse"):
-    config.append("fused pad")
-  return ", ".join(config)
+    The configuration is embedded in the model name.
+    """
+    config = []
+    if _TASKSET in df.columns:
+        taskset = df.taskset.iloc[0]
+        config.append(f"taskset {taskset}")
+    threads = df.threads.iloc[0]
+    config.append(f"{threads} threads" if threads > 1 else f"{threads} thread")
+    if model.endswith("im2col_mmt4d"):
+        config.append("im2col")
+        config.append("mmt4d")
+    elif model.endswith("mmt4d"):
+        config.append("mmt4d")
+    elif model.endswith("padfuse"):
+        config.append("fused pad")
+    return ", ".join(config)
 
 
 def generate_iree_summary(dataframe):
-  """Generates a dataframe containing the fastest IREE result for each model."""
-  summary = pd.DataFrame(columns=[_MODEL, _LATENCY, _MEMORY, _CONFIG])
-  iree_df = dataframe[dataframe.runtime == "iree"]
-  model_list = get_iree_model_list(dataframe)
-  for model in model_list:
-    df = get_fastest_result(model, iree_df)
-    if df.empty:
-      print(f"Warning: IREE results invalid for {model}.")
-      continue
-    latency = df[_LATENCY].iloc[0]
-    full_model_name = df.model.iloc[0]
-    memory = df[_MEMORY].iloc[0]
-    config = get_iree_config(full_model_name, df)
-    summary.loc[len(summary)] = [model, latency, memory, config]
-  return summary
+    """Generates a dataframe containing the fastest IREE result for each model."""
+    summary = pd.DataFrame(columns=[_MODEL, _LATENCY, _MEMORY, _CONFIG])
+    iree_df = dataframe[dataframe.runtime == "iree"]
+    model_list = get_iree_model_list(dataframe)
+    for model in model_list:
+        df = get_fastest_result(model, iree_df)
+        if df.empty:
+            print(f"Warning: IREE results invalid for {model}.")
+            continue
+        latency = df[_LATENCY].iloc[0]
+        full_model_name = df.model.iloc[0]
+        memory = df[_MEMORY].iloc[0]
+        config = get_iree_config(full_model_name, df)
+        summary.loc[len(summary)] = [model, latency, memory, config]
+    return summary
 
 
 def get_common_html_style(df, title):
-  """Returns HTML style attributes common to both server and mobile."""
-  st = df.style.set_table_styles(html_utils.get_table_css())
-  st = st.hide(axis="index")
-  st = st.set_caption(title)
-  st = st.set_properties(subset=[_MODEL],
-                         **{
-                             "width": "300px",
-                             "text-align": "left",
-                         })
-  st = st.set_properties(subset=[_DATA_TYPE],
-                         **{
-                             "width": "100",
-                             "text-align": "center",
-                         })
-  st = st.set_properties(subset=_NUMBER_COLUMNS,
-                         **{
-                             "width": "100",
-                             "text-align": "right",
-                         })
-  st = st.set_properties(subset=_PERF_COLUMNS,
-                         **{
-                             "width": "150px",
-                             "text-align": "right",
-                             "color": "#ffffff"
-                         })
-  st = st.applymap(html_utils.style_latency, subset=[_IREE_VS_TFLITE_LATENCY])
-  st = st.applymap(html_utils.style_memory, subset=[_IREE_VS_TFLITE_MEMORY])
-  return st
+    """Returns HTML style attributes common to both server and mobile."""
+    st = df.style.set_table_styles(html_utils.get_table_css())
+    st = st.hide(axis="index")
+    st = st.set_caption(title)
+    st = st.set_properties(
+        subset=[_MODEL],
+        **{
+            "width": "300px",
+            "text-align": "left",
+        },
+    )
+    st = st.set_properties(
+        subset=[_DATA_TYPE],
+        **{
+            "width": "100",
+            "text-align": "center",
+        },
+    )
+    st = st.set_properties(
+        subset=_NUMBER_COLUMNS,
+        **{
+            "width": "100",
+            "text-align": "right",
+        },
+    )
+    st = st.set_properties(
+        subset=_PERF_COLUMNS,
+        **{"width": "150px", "text-align": "right", "color": "#ffffff"},
+    )
+    st = st.applymap(html_utils.style_latency, subset=[_IREE_VS_TFLITE_LATENCY])
+    st = st.applymap(html_utils.style_memory, subset=[_IREE_VS_TFLITE_MEMORY])
+    return st
 
 
 def generate_summary(dataframe, title):
-  """Generates a table comparing latencies and memory usage between IREE and TFLite.
+    """Generates a table comparing latencies and memory usage between IREE and TFLite.
 
-  For each model, retrieves the lowest latency configuration from both IREE and TFLite.
+    For each model, retrieves the lowest latency configuration from both IREE and TFLite.
 
-  Args:
-    dataframe: The raw data to summarize.
-    title: The title of the table.
+    Args:
+      dataframe: The raw data to summarize.
+      title: The title of the table.
 
-  Returns:
-    An HTML string containing the summarized report.
-  """
-  summary = pd.DataFrame(columns=[
-      _MODEL, _DATA_TYPE, _TFLITE_CONFIG, _IREE_CONFIG, _TFLITE_LATENCY,
-      _IREE_LATENCY, _IREE_VS_TFLITE_LATENCY, _TFLITE_MEMORY, _IREE_MEMORY,
-      _IREE_VS_TFLITE_MEMORY
-  ])
+    Returns:
+      An HTML string containing the summarized report.
+    """
+    summary = pd.DataFrame(
+        columns=[
+            _MODEL,
+            _DATA_TYPE,
+            _TFLITE_CONFIG,
+            _IREE_CONFIG,
+            _TFLITE_LATENCY,
+            _IREE_LATENCY,
+            _IREE_VS_TFLITE_LATENCY,
+            _TFLITE_MEMORY,
+            _IREE_MEMORY,
+            _IREE_VS_TFLITE_MEMORY,
+        ]
+    )
 
-  tflite_df = generate_tflite_summary(dataframe)
-  iree_df = generate_iree_summary(dataframe)
-  model_list = tflite_df[_MODEL].unique()
+    tflite_df = generate_tflite_summary(dataframe)
+    iree_df = generate_iree_summary(dataframe)
+    model_list = tflite_df[_MODEL].unique()
 
-  for model in model_list:
-    tflite_results = tflite_df[tflite_df.model == model]
-    iree_results = iree_df[iree_df.model == model]
+    for model in model_list:
+        tflite_results = tflite_df[tflite_df.model == model]
+        iree_results = iree_df[iree_df.model == model]
 
-    if tflite_results.empty:
-      print(f"Warning: No TFLite results found for model {model}")
-      continue
-    if iree_results.empty:
-      print(f"Warning: No IREE results found for model {model}")
-      continue
+        if tflite_results.empty:
+            print(f"Warning: No TFLite results found for model {model}")
+            continue
+        if iree_results.empty:
+            print(f"Warning: No IREE results found for model {model}")
+            continue
 
-    iree_latency = iree_results[_LATENCY].iloc[0]
-    tflite_latency = tflite_results[_LATENCY].iloc[0]
-    latency_comparison = html_utils.format_latency_comparison(
-        iree_latency, tflite_latency)
+        iree_latency = iree_results[_LATENCY].iloc[0]
+        tflite_latency = tflite_results[_LATENCY].iloc[0]
+        latency_comparison = html_utils.format_latency_comparison(
+            iree_latency, tflite_latency
+        )
 
-    iree_memory = iree_results[_MEMORY].iloc[0]
-    tflite_memory = tflite_results[_MEMORY].iloc[0]
-    memory_comparison = html_utils.format_memory_comparison(
-        iree_memory, tflite_memory)
+        iree_memory = iree_results[_MEMORY].iloc[0]
+        tflite_memory = tflite_results[_MEMORY].iloc[0]
+        memory_comparison = html_utils.format_memory_comparison(
+            iree_memory, tflite_memory
+        )
 
-    iree_config = iree_results.config.iloc[0]
-    tflite_config = tflite_results.config.iloc[0]
-    summary.loc[len(summary)] = [
-        model,
-        _MODEL_TO_DATA_TYPE[model],
-        tflite_config,
-        iree_config,
-        f"{tflite_latency:.1f}",
-        f"{iree_latency:.1f}",
-        latency_comparison,
-        f"{tflite_memory:,.0f}",
-        f"{iree_memory:,.0f}",
-        memory_comparison,
-    ]
+        iree_config = iree_results.config.iloc[0]
+        tflite_config = tflite_results.config.iloc[0]
+        summary.loc[len(summary)] = [
+            model,
+            _MODEL_TO_DATA_TYPE[model],
+            tflite_config,
+            iree_config,
+            f"{tflite_latency:.1f}",
+            f"{iree_latency:.1f}",
+            latency_comparison,
+            f"{tflite_memory:,.0f}",
+            f"{iree_memory:,.0f}",
+            memory_comparison,
+        ]
 
-  summary = summary.round(2)
-  st = get_common_html_style(summary, title)
-  st = st.set_properties(subset=_CONFIG_COLUMNS,
-                         **{
-                             "width": "300px",
-                             "text-align": "left",
-                         })
-  return st.to_html().replace("\\n", "<br>") + "<br/>"
+    summary = summary.round(2)
+    st = get_common_html_style(summary, title)
+    st = st.set_properties(
+        subset=_CONFIG_COLUMNS,
+        **{
+            "width": "300px",
+            "text-align": "left",
+        },
+    )
+    return st.to_html().replace("\\n", "<br>") + "<br/>"
 
 
 def generate_detail(dataframe, title, platform):
-  """Generates a table comparing latencies and memory usage between IREE and TFLite.
+    """Generates a table comparing latencies and memory usage between IREE and TFLite.
 
-  The table generated is more detailed than `generate_summary`. It lists latencies
-  of all IREE configurations, using the fastest TFLite configuration as baseline.
+    The table generated is more detailed than `generate_summary`. It lists latencies
+    of all IREE configurations, using the fastest TFLite configuration as baseline.
 
-  Args:
-    dataframe: The raw data to summarize.
-    title: The title of the table.
-    platform: Either `server` or `mobile`.
+    Args:
+      dataframe: The raw data to summarize.
+      title: The title of the table.
+      platform: Either `server` or `mobile`.
 
-  Returns:
-    An HTML string containing the detailed report.
-  """
-  summary = pd.DataFrame(columns=[
-      _MODEL, _DATA_TYPE, _TFLITE_CONFIG, _IREE_CONFIG, _TASKSET, _THREADS,
-      _TFLITE_LATENCY, _IREE_LATENCY, _IREE_VS_TFLITE_LATENCY, _TFLITE_MEMORY,
-      _IREE_MEMORY, _IREE_VS_TFLITE_MEMORY
-  ])
+    Returns:
+      An HTML string containing the detailed report.
+    """
+    summary = pd.DataFrame(
+        columns=[
+            _MODEL,
+            _DATA_TYPE,
+            _TFLITE_CONFIG,
+            _IREE_CONFIG,
+            _TASKSET,
+            _THREADS,
+            _TFLITE_LATENCY,
+            _IREE_LATENCY,
+            _IREE_VS_TFLITE_LATENCY,
+            _TFLITE_MEMORY,
+            _IREE_MEMORY,
+            _IREE_VS_TFLITE_MEMORY,
+        ]
+    )
 
-  model_list = get_tflite_model_list(dataframe)
-  for model in model_list:
-    df = dataframe[dataframe.model.str.startswith(model)]
-    # If result does not use FP16, remove FP16 results from dataframe to
-    # maintain apples-to-apples comparisons.
-    if not model.endswith("fp16"):
-      df = df[~df.model.str.endswith("fp16")]
+    model_list = get_tflite_model_list(dataframe)
+    for model in model_list:
+        df = dataframe[dataframe.model.str.startswith(model)]
+        # If result does not use FP16, remove FP16 results from dataframe to
+        # maintain apples-to-apples comparisons.
+        if not model.endswith("fp16"):
+            df = df[~df.model.str.endswith("fp16")]
 
-    if _TASKSET in df.columns:
-      tasksets = df.taskset.unique()
-    else:
-      tasksets = ["none"]
+        if _TASKSET in df.columns:
+            tasksets = df.taskset.unique()
+        else:
+            tasksets = ["none"]
 
-    for taskset in tasksets:
-      per_taskset_df = df if taskset == "none" else df[df.taskset == taskset]
-      threads = per_taskset_df.threads.unique()
-      for thread in threads:
-        per_thread_df = per_taskset_df[per_taskset_df.threads == thread]
-        tflite_df = get_fastest_result(
-            model, per_thread_df[per_thread_df.runtime == "tflite"])
-        if tflite_df.empty:
-          continue
+        for taskset in tasksets:
+            per_taskset_df = df if taskset == "none" else df[df.taskset == taskset]
+            threads = per_taskset_df.threads.unique()
+            for thread in threads:
+                per_thread_df = per_taskset_df[per_taskset_df.threads == thread]
+                tflite_df = get_fastest_result(
+                    model, per_thread_df[per_thread_df.runtime == "tflite"]
+                )
+                if tflite_df.empty:
+                    continue
 
-        tflite_latency = tflite_df[_LATENCY].iloc[0]
-        tflite_memory = tflite_df[_MEMORY].iloc[0]
-        if tflite_latency == 0 or tflite_memory == 0:
-          continue
+                tflite_latency = tflite_df[_LATENCY].iloc[0]
+                tflite_memory = tflite_df[_MEMORY].iloc[0]
+                if tflite_latency == 0 or tflite_memory == 0:
+                    continue
 
-        full_model_name = tflite_df.model.iloc[0]
-        # For TFLite config, we only want to know if XNNPack was used. The other
-        # configuration settings are covered in other columns.
-        tflite_config = "no xnnpack" if full_model_name.endswith(
-            "noxnn") else "xnnpack"
+                full_model_name = tflite_df.model.iloc[0]
+                # For TFLite config, we only want to know if XNNPack was used. The other
+                # configuration settings are covered in other columns.
+                tflite_config = (
+                    "no xnnpack" if full_model_name.endswith("noxnn") else "xnnpack"
+                )
 
-        iree_df = per_thread_df[per_thread_df.runtime == "iree"]
-        for _, row in iree_df.iterrows():
-          iree_config = row[_DRIVER]
-          model_name = row[_MODEL]
-          if model_name.endswith("im2col_mmt4d"):
-            iree_config += ", im2col, mmt4d"
-          elif model_name.endswith("mmt4d"):
-            iree_config += ", mmt4d"
-          elif model_name.endswith("padfuse"):
-            iree_config += ", fused pad"
+                iree_df = per_thread_df[per_thread_df.runtime == "iree"]
+                for _, row in iree_df.iterrows():
+                    iree_config = row[_DRIVER]
+                    model_name = row[_MODEL]
+                    if model_name.endswith("im2col_mmt4d"):
+                        iree_config += ", im2col, mmt4d"
+                    elif model_name.endswith("mmt4d"):
+                        iree_config += ", mmt4d"
+                    elif model_name.endswith("padfuse"):
+                        iree_config += ", fused pad"
 
-          iree_latency = row[_LATENCY]
-          latency_comparison = html_utils.format_latency_comparison(
-              iree_latency, tflite_latency)
-          iree_memory = row[_MEMORY]
-          memory_comparison = html_utils.format_memory_comparison(
-              iree_memory, tflite_memory)
+                    iree_latency = row[_LATENCY]
+                    latency_comparison = html_utils.format_latency_comparison(
+                        iree_latency, tflite_latency
+                    )
+                    iree_memory = row[_MEMORY]
+                    memory_comparison = html_utils.format_memory_comparison(
+                        iree_memory, tflite_memory
+                    )
 
-          if iree_latency == 0 or iree_memory == 0:
-            continue
+                    if iree_latency == 0 or iree_memory == 0:
+                        continue
 
-          summary.loc[len(summary)] = [
-              model, _MODEL_TO_DATA_TYPE[model], tflite_config, iree_config,
-              taskset, thread, f"{tflite_latency:.1f}", f"{iree_latency:.1f}",
-              latency_comparison, f"{tflite_memory:,.0f}",
-              f"{iree_memory:,.0f}", memory_comparison
-          ]
+                    summary.loc[len(summary)] = [
+                        model,
+                        _MODEL_TO_DATA_TYPE[model],
+                        tflite_config,
+                        iree_config,
+                        taskset,
+                        thread,
+                        f"{tflite_latency:.1f}",
+                        f"{iree_latency:.1f}",
+                        latency_comparison,
+                        f"{tflite_memory:,.0f}",
+                        f"{iree_memory:,.0f}",
+                        memory_comparison,
+                    ]
 
-  summary = summary.round(2)
-  st = get_common_html_style(summary, title)
-  st = st.set_properties(subset=[_TASKSET, _THREADS],
-                         **{
-                             "width": "100",
-                             "text-align": "center",
-                         })
-  st = st.set_properties(subset=[_TFLITE_CONFIG],
-                         **{
-                             "width": "150px",
-                             "text-align": "left",
-                         })
-  st = st.set_properties(subset=[_IREE_CONFIG],
-                         **{
-                             "width": "300px",
-                             "text-align": "left",
-                         })
-  if platform != "mobile":
-    st.hide_columns(subset=[_TASKSET])
+    summary = summary.round(2)
+    st = get_common_html_style(summary, title)
+    st = st.set_properties(
+        subset=[_TASKSET, _THREADS],
+        **{
+            "width": "100",
+            "text-align": "center",
+        },
+    )
+    st = st.set_properties(
+        subset=[_TFLITE_CONFIG],
+        **{
+            "width": "150px",
+            "text-align": "left",
+        },
+    )
+    st = st.set_properties(
+        subset=[_IREE_CONFIG],
+        **{
+            "width": "300px",
+            "text-align": "left",
+        },
+    )
+    if platform != "mobile":
+        st.hide_columns(subset=[_TASKSET])
 
-  return st.to_html().replace("\\n", "<br>") + "<br/>"
+    return st.to_html().replace("\\n", "<br>") + "<br/>"
 
 
 def main(args):
-  """Summarizes IREE vs TFLite benchmark results."""
-  if args.platform == _PLATFORM_SERVER:
-    cpu_drivers = ["cpu", "local-task"]
-    gpu_drivers = ["gpu", "cuda"]
-  else:
-    cpu_drivers = ["cpu", "local-task"]
-    gpu_drivers = ["gpu", "vulkan", "adreno"]
+    """Summarizes IREE vs TFLite benchmark results."""
+    if args.platform == _PLATFORM_SERVER:
+        cpu_drivers = ["cpu", "local-task"]
+        gpu_drivers = ["gpu", "cuda"]
+    else:
+        cpu_drivers = ["cpu", "local-task"]
+        gpu_drivers = ["gpu", "vulkan", "adreno"]
 
-  version_html = (f"<i>IREE version: {args.iree_version}</i><br/>"
-                  f"<i>TFlite version: {args.tflite_version}</i><br/>"
-                  f"<i>last updated: {date.today().isoformat()}</i><br/><br/>")
-  html = html_utils.generate_header_and_legend(version_html)
+    version_html = (
+        f"<i>IREE version: {args.iree_version}</i><br/>"
+        f"<i>TFlite version: {args.tflite_version}</i><br/>"
+        f"<i>last updated: {date.today().isoformat()}</i><br/><br/>"
+    )
+    html = html_utils.generate_header_and_legend(version_html)
 
-  df = pd.read_csv(args.input_csv)
+    df = pd.read_csv(args.input_csv)
 
-  # Generate CPU Summary.
-  results = df[df[_DRIVER].isin(cpu_drivers)]
-  html += generate_summary(results, args.platform.capitalize() + " CPU Summary")
+    # Generate CPU Summary.
+    results = df[df[_DRIVER].isin(cpu_drivers)]
+    html += generate_summary(results, args.platform.capitalize() + " CPU Summary")
 
-  # Generate GPU Summary.
-  results = df[df[_DRIVER].isin(gpu_drivers)]
-  html += generate_summary(results, args.platform.capitalize() + " GPU Summary")
+    # Generate GPU Summary.
+    results = df[df[_DRIVER].isin(gpu_drivers)]
+    html += generate_summary(results, args.platform.capitalize() + " GPU Summary")
 
-  # Generate CPU Detailed View.
-  results = df[df[_DRIVER].isin(cpu_drivers)]
-  html += generate_detail(results,
-                          args.platform.capitalize() + " CPU Detailed",
-                          args.platform)
+    # Generate CPU Detailed View.
+    results = df[df[_DRIVER].isin(cpu_drivers)]
+    html += generate_detail(
+        results, args.platform.capitalize() + " CPU Detailed", args.platform
+    )
 
-  # Generate GPU Detailed View.
-  results = df[df[_DRIVER].isin(gpu_drivers)]
-  html += generate_detail(results,
-                          args.platform.capitalize() + " GPU Detailed",
-                          args.platform)
+    # Generate GPU Detailed View.
+    results = df[df[_DRIVER].isin(gpu_drivers)]
+    html += generate_detail(
+        results, args.platform.capitalize() + " GPU Detailed", args.platform
+    )
 
-  args.output_path.write_text(html)
+    args.output_path.write_text(html)
 
 
 def parse_args():
-  parser = argparse.ArgumentParser()
-  parser.add_argument("--iree_version",
-                      type=str,
-                      default=None,
-                      required=True,
-                      help="The IREE version.")
-  parser.add_argument("--tflite_version",
-                      type=str,
-                      default=None,
-                      required=True,
-                      help="The TFLite version.")
-  parser.add_argument(
-      "--platform",
-      action="store",
-      type=str.lower,
-      help=
-      "The platform the models were benchmarked on. Either server or mobile.",
-      required=True,
-      choices=[_PLATFORM_SERVER, _PLATFORM_MOBILE])
-  parser.add_argument(
-      "--input_csv",
-      type=str,
-      default=None,
-      help=
-      "The path to the csv file containing benchmark results for both IREE and TFLite."
-  )
-  parser.add_argument(
-      "--output_path",
-      type=pathlib.Path,
-      default="/tmp/summary.html",
-      help="The path to the output html file that summarizes results.")
-  return parser.parse_args()
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--iree_version",
+        type=str,
+        default=None,
+        required=True,
+        help="The IREE version.",
+    )
+    parser.add_argument(
+        "--tflite_version",
+        type=str,
+        default=None,
+        required=True,
+        help="The TFLite version.",
+    )
+    parser.add_argument(
+        "--platform",
+        action="store",
+        type=str.lower,
+        help="The platform the models were benchmarked on. Either server or mobile.",
+        required=True,
+        choices=[_PLATFORM_SERVER, _PLATFORM_MOBILE],
+    )
+    parser.add_argument(
+        "--input_csv",
+        type=str,
+        default=None,
+        help="The path to the csv file containing benchmark results for both IREE and TFLite.",
+    )
+    parser.add_argument(
+        "--output_path",
+        type=pathlib.Path,
+        default="/tmp/summary.html",
+        help="The path to the output html file that summarizes results.",
+    )
+    return parser.parse_args()
 
 
 if __name__ == "__main__":
-  main(parse_args())
+    main(parse_args())
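Both reporting scripts build their HTML output through pandas `Styler` chaining, which Black now formats with one keyword property per line. The sketch below shows the shape of that chain with placeholder data, caption, and CSS (the real scripts take their CSS from `html_utils.get_table_css()`); it assumes a reasonably recent pandas, since `Styler.hide` and `Styler.to_html` need pandas 1.4/1.3 or newer.

```python
# Minimal sketch of the pandas Styler chain used to emit the HTML report tables.
# All data and style values here are placeholders for illustration.
import pandas as pd

summary = pd.DataFrame(
    {
        "model": ["mobilenet_v2", "deeplab_v3"],
        "TFLite latency (ms)": [9.1, 19.7],
        "IREE latency (ms)": [8.4, 21.0],
    }
)

st = summary.style.set_table_styles(
    [{"selector": "caption", "props": [("font-weight", "bold")]}]
)
st = st.hide(axis="index")  # drop the integer index column from the rendered table
st = st.set_caption("Mobile CPU Summary (placeholder)")
st = st.set_properties(subset=["model"], **{"width": "300px", "text-align": "left"})

# to_html() renders the styled table; the scripts then append "<br/>" between tables.
html = st.to_html() + "<br/>"
print(html[:200])
```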
diff --git a/build_tools/benchmarks/run_benchmarks_on_android.py b/build_tools/benchmarks/run_benchmarks_on_android.py
index 5ffccd0..995b80e 100755
--- a/build_tools/benchmarks/run_benchmarks_on_android.py
+++ b/build_tools/benchmarks/run_benchmarks_on_android.py
@@ -45,15 +45,21 @@
 from common.benchmark_config import BenchmarkConfig
 from common.benchmark_driver import BenchmarkDriver
 from common.benchmark_definition import (
-    DriverInfo, execute_cmd, execute_cmd_and_get_stdout,
-    execute_cmd_and_get_output, get_git_commit_hash,
-    get_iree_benchmark_module_arguments, wait_for_iree_benchmark_module_start,
-    parse_iree_benchmark_metrics)
-from common.benchmark_suite import (MODEL_FLAGFILE_NAME, BenchmarkCase,
-                                    BenchmarkSuite)
-from common.android_device_utils import (get_android_device_model,
-                                         get_android_device_info,
-                                         get_android_gpu_name)
+    DriverInfo,
+    execute_cmd,
+    execute_cmd_and_get_stdout,
+    execute_cmd_and_get_output,
+    get_git_commit_hash,
+    get_iree_benchmark_module_arguments,
+    wait_for_iree_benchmark_module_start,
+    parse_iree_benchmark_metrics,
+)
+from common.benchmark_suite import MODEL_FLAGFILE_NAME, BenchmarkCase, BenchmarkSuite
+from common.android_device_utils import (
+    get_android_device_model,
+    get_android_device_info,
+    get_android_gpu_name,
+)
 import common.common_arguments
 from e2e_test_artifacts import iree_artifacts
 from e2e_test_framework import serialization
@@ -70,349 +76,395 @@
 def adb_push_to_tmp_dir(
     content: pathlib.Path,
     relative_dir: pathlib.PurePosixPath = pathlib.PurePosixPath(),
-    verbose: bool = False) -> pathlib.PurePosixPath:
-  """Pushes content onto the Android device.
+    verbose: bool = False,
+) -> pathlib.PurePosixPath:
+    """Pushes content onto the Android device.
 
-  Args:
-    content: the full path to the source file.
-    relative_dir: the directory to push to; relative to ANDROID_TMPDIR.
+    Args:
+      content: the full path to the source file.
+      relative_dir: the directory to push to; relative to ANDROID_TMPDIR.
 
-  Returns:
-    The full path to the content on the Android device.
-  """
-  filename = content.name
-  android_path = ANDROID_TMPDIR / relative_dir / filename
-  # When the output is a TTY, keep the default progress info output.
-  # In other cases, redirect progress info to null to avoid bloating log files.
-  stdout_redirect = None if sys.stdout.isatty() else subprocess.DEVNULL
-  execute_cmd(["adb", "push", content.resolve(), android_path],
-              verbose=verbose,
-              stdout=stdout_redirect)
-  return android_path
+    Returns:
+      The full path to the content on the Android device.
+    """
+    filename = content.name
+    android_path = ANDROID_TMPDIR / relative_dir / filename
+    # When the output is a TTY, keep the default progress info output.
+    # In other cases, redirect progress info to null to avoid bloating log files.
+    stdout_redirect = None if sys.stdout.isatty() else subprocess.DEVNULL
+    execute_cmd(
+        ["adb", "push", content.resolve(), android_path],
+        verbose=verbose,
+        stdout=stdout_redirect,
+    )
+    return android_path
 
 
 def adb_execute_and_get_output(
     cmd_args: Sequence[str],
     relative_dir: pathlib.PurePosixPath = pathlib.PurePosixPath(),
-    verbose: bool = False) -> Tuple[str, str]:
-  """Executes command with adb shell.
+    verbose: bool = False,
+) -> Tuple[str, str]:
+    """Executes command with adb shell.
 
-  Switches to `relative_dir` relative to the android tmp directory before
-  executing. Waits for completion and returns the command stdout.
+    Switches to `relative_dir` relative to the android tmp directory before
+    executing. Waits for completion and returns the command stdout.
 
-  Args:
-    cmd_args: a list containing the command to execute and its parameters
-    relative_dir: the directory to execute the command in; relative to
-      ANDROID_TMPDIR.
+    Args:
+      cmd_args: a list containing the command to execute and its parameters
+      relative_dir: the directory to execute the command in; relative to
+        ANDROID_TMPDIR.
 
-  Returns:
-    Strings for stdout and stderr.
-  """
-  cmd = ["adb", "shell", "cd", ANDROID_TMPDIR / relative_dir, "&&"]
-  cmd.extend(cmd_args)
-  return execute_cmd_and_get_output(cmd, verbose=verbose)
+    Returns:
+      Strings for stdout and stderr.
+    """
+    cmd = ["adb", "shell", "cd", ANDROID_TMPDIR / relative_dir, "&&"]
+    cmd.extend(cmd_args)
+    return execute_cmd_and_get_output(cmd, verbose=verbose)
 
 
-def adb_execute(cmd_args: Sequence[str],
-                relative_dir: pathlib.PurePosixPath = pathlib.PurePosixPath(),
-                verbose: bool = False) -> subprocess.CompletedProcess:
-  """Executes command with adb shell.
+def adb_execute(
+    cmd_args: Sequence[str],
+    relative_dir: pathlib.PurePosixPath = pathlib.PurePosixPath(),
+    verbose: bool = False,
+) -> subprocess.CompletedProcess:
+    """Executes command with adb shell.
 
-  Switches to `relative_dir` relative to the android tmp directory before
-  executing. Waits for completion. Output is streamed to the terminal.
+    Switches to `relative_dir` relative to the android tmp directory before
+    executing. Waits for completion. Output is streamed to the terminal.
 
-  Args:
-    cmd_args: a list containing the command to execute and its parameters
-    relative_dir: the directory to execute the command in; relative to
-      ANDROID_TMPDIR.
+    Args:
+      cmd_args: a list containing the command to execute and its parameters
+      relative_dir: the directory to execute the command in; relative to
+        ANDROID_TMPDIR.
 
-  Returns:
-    The completed process.
-  """
-  cmd = ["adb", "shell", "cd", ANDROID_TMPDIR / relative_dir, "&&"]
-  cmd.extend(cmd_args)
-  return execute_cmd(cmd, verbose=verbose)
+    Returns:
+      The completed process.
+    """
+    cmd = ["adb", "shell", "cd", ANDROID_TMPDIR / relative_dir, "&&"]
+    cmd.extend(cmd_args)
+    return execute_cmd(cmd, verbose=verbose)
 
 
 def is_magisk_su():
-  """Returns true if the Android device has a Magisk SU binary."""
-  stdout, _ = adb_execute_and_get_output(["su", "--help"])
-  return "MagiskSU" in stdout
+    """Returns true if the Android device has a Magisk SU binary."""
+    stdout, _ = adb_execute_and_get_output(["su", "--help"])
+    return "MagiskSU" in stdout
 
 
 def adb_execute_as_root(cmd_args: Sequence[Any]) -> subprocess.CompletedProcess:
-  """Executes the given command as root."""
-  cmd = ["su", "-c" if is_magisk_su() else "root"]
-  cmd.extend(cmd_args)
-  return adb_execute(cmd)
+    """Executes the given command as root."""
+    cmd = ["su", "-c" if is_magisk_su() else "root"]
+    cmd.extend(cmd_args)
+    return adb_execute(cmd)
 
 
-def adb_start_cmd(cmd_args: Sequence[str],
-                  relative_dir: pathlib.PurePosixPath = pathlib.PurePosixPath(),
-                  verbose: bool = False) -> subprocess.Popen:
-  """Executes command with adb shell in a directory and returns the handle
-  without waiting for completion.
+def adb_start_cmd(
+    cmd_args: Sequence[str],
+    relative_dir: pathlib.PurePosixPath = pathlib.PurePosixPath(),
+    verbose: bool = False,
+) -> subprocess.Popen:
+    """Executes command with adb shell in a directory and returns the handle
+    without waiting for completion.
 
-  Args:
-    cmd_args: a list containing the command to execute and its parameters
-    relative_dir: the directory to execute the command in; relative to
-      ANDROID_TMPDIR.
+    Args:
+      cmd_args: a list containing the command to execute and its parameters
+      relative_dir: the directory to execute the command in; relative to
+        ANDROID_TMPDIR.
 
-  Returns:
-    A Popen object for the started command.
-  """
-  cmd = ["adb", "shell", "cd", ANDROID_TMPDIR / relative_dir, "&&"]
-  cmd.extend(cmd_args)
+    Returns:
+      A Popen object for the started command.
+    """
+    cmd = ["adb", "shell", "cd", ANDROID_TMPDIR / relative_dir, "&&"]
+    cmd.extend(cmd_args)
 
-  if verbose:
-    print(f"cmd: {cmd}")
-  return subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True)
+    if verbose:
+        print(f"cmd: {cmd}")
+    return subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True)
 
 
 def get_vmfb_full_path_for_benchmark_case(
-    benchmark_case_dir: pathlib.Path) -> pathlib.Path:
-  flagfile = benchmark_case_dir / MODEL_FLAGFILE_NAME
-  for line in flagfile.read_text().splitlines():
-    flag_name, flag_value = line.strip().split("=")
-    if flag_name == "--module":
-      # Realpath canonicalization matters. The caller may rely on that to track
-      # which files it already pushed.
-      return (benchmark_case_dir / flag_value).resolve()
-  raise ValueError(f"{flagfile} does not contain a --module flag")
+    benchmark_case_dir: pathlib.Path,
+) -> pathlib.Path:
+    flagfile = benchmark_case_dir / MODEL_FLAGFILE_NAME
+    for line in flagfile.read_text().splitlines():
+        flag_name, flag_value = line.strip().split("=")
+        if flag_name == "--module":
+            # Realpath canonicalization matters. The caller may rely on that to track
+            # which files it already pushed.
+            return (benchmark_case_dir / flag_value).resolve()
+    raise ValueError(f"{flagfile} does not contain a --module flag")
 
 
 class AndroidBenchmarkDriver(BenchmarkDriver):
-  """Android benchmark driver."""
+    """Android benchmark driver."""
 
-  def __init__(self, *args, **kwargs):
-    super().__init__(*args, **kwargs)
-    self.already_pushed_files = {}
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.already_pushed_files = {}
 
-  def run_benchmark_case(self, benchmark_case: BenchmarkCase,
-                         benchmark_results_filename: Optional[pathlib.Path],
-                         capture_filename: Optional[pathlib.Path]) -> None:
-    benchmark_case_dir = benchmark_case.benchmark_case_dir
-    android_case_dir = pathlib.PurePosixPath(
-        benchmark_case_dir.relative_to(self.config.root_benchmark_dir))
+    def run_benchmark_case(
+        self,
+        benchmark_case: BenchmarkCase,
+        benchmark_results_filename: Optional[pathlib.Path],
+        capture_filename: Optional[pathlib.Path],
+    ) -> None:
+        benchmark_case_dir = benchmark_case.benchmark_case_dir
+        android_case_dir = pathlib.PurePosixPath(
+            benchmark_case_dir.relative_to(self.config.root_benchmark_dir)
+        )
 
-    run_config = benchmark_case.run_config
-    self.__check_and_push_file(
-        benchmark_case_dir / iree_artifacts.MODULE_FILENAME, android_case_dir)
-    taskset = self.__deduce_taskset_from_run_config(run_config)
-    run_args = run_config.materialize_run_flags()
-    run_args.append(f"--module={iree_artifacts.MODULE_FILENAME}")
+        run_config = benchmark_case.run_config
+        self.__check_and_push_file(
+            benchmark_case_dir / iree_artifacts.MODULE_FILENAME, android_case_dir
+        )
+        taskset = self.__deduce_taskset_from_run_config(run_config)
+        run_args = run_config.materialize_run_flags()
+        run_args.append(f"--module={iree_artifacts.MODULE_FILENAME}")
 
-    if benchmark_results_filename is not None:
-      self.__run_benchmark(android_case_dir=android_case_dir,
-                           tool_name=benchmark_case.benchmark_tool_name,
-                           driver_info=benchmark_case.driver_info,
-                           run_args=run_args,
-                           results_filename=benchmark_results_filename,
-                           taskset=taskset)
+        if benchmark_results_filename is not None:
+            self.__run_benchmark(
+                android_case_dir=android_case_dir,
+                tool_name=benchmark_case.benchmark_tool_name,
+                driver_info=benchmark_case.driver_info,
+                run_args=run_args,
+                results_filename=benchmark_results_filename,
+                taskset=taskset,
+            )
 
-    if capture_filename is not None:
-      self.__run_capture(android_case_dir=android_case_dir,
-                         tool_name=benchmark_case.benchmark_tool_name,
-                         run_args=run_args,
-                         capture_filename=capture_filename,
-                         taskset=taskset)
+        if capture_filename is not None:
+            self.__run_capture(
+                android_case_dir=android_case_dir,
+                tool_name=benchmark_case.benchmark_tool_name,
+                run_args=run_args,
+                capture_filename=capture_filename,
+                taskset=taskset,
+            )
 
-  def __run_benchmark(self, android_case_dir: pathlib.PurePosixPath,
-                      tool_name: str, driver_info: DriverInfo,
-                      run_args: Sequence[str], results_filename: pathlib.Path,
-                      taskset: str):
-    if self.config.normal_benchmark_tool_dir is None:
-      raise ValueError("normal_benchmark_tool_dir can't be None.")
+    def __run_benchmark(
+        self,
+        android_case_dir: pathlib.PurePosixPath,
+        tool_name: str,
+        driver_info: DriverInfo,
+        run_args: Sequence[str],
+        results_filename: pathlib.Path,
+        taskset: str,
+    ):
+        if self.config.normal_benchmark_tool_dir is None:
+            raise ValueError("normal_benchmark_tool_dir can't be None.")
 
-    host_tool_path = self.config.normal_benchmark_tool_dir / tool_name
-    android_tool = self.__check_and_push_file(host_tool_path,
-                                              NORMAL_TOOL_REL_DIR)
-    cmd = ["taskset", taskset, android_tool]
-    cmd += run_args
-    if tool_name == "iree-benchmark-module":
-      cmd += get_iree_benchmark_module_arguments(
-          results_filename=f"'{results_filename.name}'",
-          driver_info=driver_info,
-          benchmark_min_time=self.config.benchmark_min_time)
+        host_tool_path = self.config.normal_benchmark_tool_dir / tool_name
+        android_tool = self.__check_and_push_file(host_tool_path, NORMAL_TOOL_REL_DIR)
+        cmd = ["taskset", taskset, android_tool]
+        cmd += run_args
+        if tool_name == "iree-benchmark-module":
+            cmd += get_iree_benchmark_module_arguments(
+                results_filename=f"'{results_filename.name}'",
+                driver_info=driver_info,
+                benchmark_min_time=self.config.benchmark_min_time,
+            )
 
-    benchmark_stdout, benchmark_stderr = adb_execute_and_get_output(
-        cmd, android_case_dir, verbose=self.verbose)
-    benchmark_metrics = parse_iree_benchmark_metrics(benchmark_stdout,
-                                                     benchmark_stderr)
-    if self.verbose:
-      print(benchmark_metrics)
-    results_filename.write_text(json.dumps(benchmark_metrics.to_json_object()))
+        benchmark_stdout, benchmark_stderr = adb_execute_and_get_output(
+            cmd, android_case_dir, verbose=self.verbose
+        )
+        benchmark_metrics = parse_iree_benchmark_metrics(
+            benchmark_stdout, benchmark_stderr
+        )
+        if self.verbose:
+            print(benchmark_metrics)
+        results_filename.write_text(json.dumps(benchmark_metrics.to_json_object()))
 
-  def __run_capture(self, android_case_dir: pathlib.PurePosixPath,
-                    tool_name: str, capture_filename: pathlib.Path,
-                    run_args: Sequence[str], taskset: str):
-    capture_config = self.config.trace_capture_config
-    if capture_config is None:
-      raise ValueError("capture_config can't be None.")
+    def __run_capture(
+        self,
+        android_case_dir: pathlib.PurePosixPath,
+        tool_name: str,
+        capture_filename: pathlib.Path,
+        run_args: Sequence[str],
+        taskset: str,
+    ):
+        capture_config = self.config.trace_capture_config
+        if capture_config is None:
+            raise ValueError("capture_config can't be None.")
 
-    host_tool_path = capture_config.traced_benchmark_tool_dir / tool_name
-    android_tool = self.__check_and_push_file(host_tool_path,
-                                              TRACED_TOOL_REL_DIR)
-    run_cmd = [
-        "TRACY_NO_EXIT=1", f"IREE_PRESERVE_DYLIB_TEMP_FILES={ANDROID_TMPDIR}",
-        "taskset", taskset, android_tool
-    ]
-    run_cmd += run_args
+        host_tool_path = capture_config.traced_benchmark_tool_dir / tool_name
+        android_tool = self.__check_and_push_file(host_tool_path, TRACED_TOOL_REL_DIR)
+        run_cmd = [
+            "TRACY_NO_EXIT=1",
+            f"IREE_PRESERVE_DYLIB_TEMP_FILES={ANDROID_TMPDIR}",
+            "taskset",
+            taskset,
+            android_tool,
+        ]
+        run_cmd += run_args
 
-    # Just launch the traced benchmark tool with TRACY_NO_EXIT=1 without
-    # waiting for the adb command to complete as that won't happen.
-    process = adb_start_cmd(run_cmd, android_case_dir, verbose=self.verbose)
+        # Just launch the traced benchmark tool with TRACY_NO_EXIT=1 without
+        # waiting for the adb command to complete as that won't happen.
+        process = adb_start_cmd(run_cmd, android_case_dir, verbose=self.verbose)
 
-    wait_for_iree_benchmark_module_start(process, self.verbose)
+        wait_for_iree_benchmark_module_start(process, self.verbose)
 
-    # Now it's okay to collect the trace via the capture tool. This will
-    # send the signal to let the previously waiting benchmark tool
-    # complete.
-    capture_cmd = [
-        capture_config.trace_capture_tool, "-f", "-o", capture_filename
-    ]
-    # If verbose, just let the subprocess print its output. The subprocess
-    # may need to detect if the output is a TTY to decide whether to log
-    # verbose progress info and use ANSI colors, so it's better to use
-    # stdout redirection than to capture the output in a string.
-    stdout_redirect = None if self.verbose else subprocess.DEVNULL
-    execute_cmd(capture_cmd, verbose=self.verbose, stdout=stdout_redirect)
+        # Now it's okay to collect the trace via the capture tool. This will
+        # send the signal to let the previously waiting benchmark tool
+        # complete.
+        capture_cmd = [capture_config.trace_capture_tool, "-f", "-o", capture_filename]
+        # If verbose, just let the subprocess print its output. The subprocess
+        # may need to detect if the output is a TTY to decide whether to log
+        # verbose progress info and use ANSI colors, so it's better to use
+        # stdout redirection than to capture the output in a string.
+        stdout_redirect = None if self.verbose else subprocess.DEVNULL
+        execute_cmd(capture_cmd, verbose=self.verbose, stdout=stdout_redirect)
 
-  # TODO(#13187): This logic is inherited from the legacy benchmark suites,
-  # which only work for a few specific phones. We should define the topology
-  # in their device specs.
-  def __deduce_taskset_from_run_config(
-      self, run_config: iree_definitions.E2EModelRunConfig) -> str:
-    """Deduces the CPU mask according to device and execution config."""
+    # TODO(#13187): This logic is inherited from the legacy benchmark suites,
+    # which only work for a few specific phones. We should define the topology
+    # in their device specs.
+    def __deduce_taskset_from_run_config(
+        self, run_config: iree_definitions.E2EModelRunConfig
+    ) -> str:
+        """Deduces the CPU mask according to device and execution config."""
 
-    device_spec = run_config.target_device_spec
-    # For GPU benchmarks, use the most performant core.
-    if device_spec.architecture.type == common_definitions.ArchitectureType.GPU:
-      return "80"
+        device_spec = run_config.target_device_spec
+        # For GPU benchmarks, use the most performant core.
+        if device_spec.architecture.type == common_definitions.ArchitectureType.GPU:
+            return "80"
 
-    device_params = device_spec.device_parameters
-    single_thread = "1-thread" in run_config.module_execution_config.tags
-    if device_parameters.ARM_BIG_CORES in device_params:
-      return "80" if single_thread else "f0"
-    elif device_parameters.ARM_LITTLE_CORES in device_params:
-      return "08" if single_thread else "0f"
+        device_params = device_spec.device_parameters
+        single_thread = "1-thread" in run_config.module_execution_config.tags
+        if device_parameters.ARM_BIG_CORES in device_params:
+            return "80" if single_thread else "f0"
+        elif device_parameters.ARM_LITTLE_CORES in device_params:
+            return "08" if single_thread else "0f"
 
-    raise ValueError(f"Unsupported config to deduce taskset: '{run_config}'.")
+        raise ValueError(f"Unsupported config to deduce taskset: '{run_config}'.")
 
-  def __check_and_push_file(self, host_path: pathlib.Path,
-                            relative_dir: pathlib.PurePosixPath):
-    """Checks if the file has been pushed and pushes it if not."""
-    android_path = self.already_pushed_files.get(host_path)
-    if android_path is not None:
-      return android_path
+    def __check_and_push_file(
+        self, host_path: pathlib.Path, relative_dir: pathlib.PurePosixPath
+    ):
+        """Checks if the file has been pushed and pushes it if not."""
+        android_path = self.already_pushed_files.get(host_path)
+        if android_path is not None:
+            return android_path
 
-    android_path = adb_push_to_tmp_dir(host_path,
-                                       relative_dir=relative_dir,
-                                       verbose=self.verbose)
-    self.already_pushed_files[host_path] = android_path
-    return android_path
+        android_path = adb_push_to_tmp_dir(
+            host_path, relative_dir=relative_dir, verbose=self.verbose
+        )
+        self.already_pushed_files[host_path] = android_path
+        return android_path
 
 
 def set_cpu_frequency_scaling_governor(governor: str):
-  git_root = execute_cmd_and_get_stdout(["git", "rev-parse", "--show-toplevel"])
-  cpu_script = (pathlib.Path(git_root) / "build_tools" / "benchmarks" /
-                "set_android_scaling_governor.sh")
-  android_path = adb_push_to_tmp_dir(cpu_script)
-  adb_execute_as_root([android_path, governor])
+    git_root = execute_cmd_and_get_stdout(["git", "rev-parse", "--show-toplevel"])
+    cpu_script = (
+        pathlib.Path(git_root)
+        / "build_tools"
+        / "benchmarks"
+        / "set_android_scaling_governor.sh"
+    )
+    android_path = adb_push_to_tmp_dir(cpu_script)
+    adb_execute_as_root([android_path, governor])
 
 
 def set_gpu_frequency_scaling_policy(policy: str):
-  git_root = execute_cmd_and_get_stdout(["git", "rev-parse", "--show-toplevel"])
-  device_model = get_android_device_model()
-  gpu_name = get_android_gpu_name()
-  benchmarks_tool_dir = pathlib.Path(git_root) / "build_tools" / "benchmarks"
-  if device_model == "Pixel-6" or device_model == "Pixel-6-Pro":
-    gpu_script = benchmarks_tool_dir / "set_pixel6_gpu_scaling_policy.sh"
-  elif gpu_name.lower().startswith("adreno"):
-    gpu_script = benchmarks_tool_dir / "set_adreno_gpu_scaling_policy.sh"
-  else:
-    raise RuntimeError(
-        f"Unsupported device '{device_model}' for setting GPU scaling policy")
-  android_path = adb_push_to_tmp_dir(gpu_script)
-  adb_execute_as_root([android_path, policy])
+    git_root = execute_cmd_and_get_stdout(["git", "rev-parse", "--show-toplevel"])
+    device_model = get_android_device_model()
+    gpu_name = get_android_gpu_name()
+    benchmarks_tool_dir = pathlib.Path(git_root) / "build_tools" / "benchmarks"
+    if device_model == "Pixel-6" or device_model == "Pixel-6-Pro":
+        gpu_script = benchmarks_tool_dir / "set_pixel6_gpu_scaling_policy.sh"
+    elif gpu_name.lower().startswith("adreno"):
+        gpu_script = benchmarks_tool_dir / "set_adreno_gpu_scaling_policy.sh"
+    else:
+        raise RuntimeError(
+            f"Unsupported device '{device_model}' for setting GPU scaling policy"
+        )
+    android_path = adb_push_to_tmp_dir(gpu_script)
+    adb_execute_as_root([android_path, policy])
 
 
 def main(args):
-  device_info = get_android_device_info(args.verbose)
-  if args.verbose:
-    print(device_info)
+    device_info = get_android_device_info(args.verbose)
+    if args.verbose:
+        print(device_info)
 
-  commit = get_git_commit_hash("HEAD")
-  benchmark_config = BenchmarkConfig.build_from_args(args, commit)
-  benchmark_groups = json.loads(args.execution_benchmark_config.read_text())
-  benchmark_group = benchmark_groups.get(args.target_device_name)
-  if benchmark_group is None:
-    raise ValueError("Target device not found in the benchmark config.")
-  run_configs = serialization.unpack_and_deserialize(
-      data=benchmark_group["run_configs"],
-      root_type=List[iree_definitions.E2EModelRunConfig])
-  benchmark_suite = BenchmarkSuite.load_from_run_configs(
-      run_configs=run_configs,
-      root_benchmark_dir=benchmark_config.root_benchmark_dir)
+    commit = get_git_commit_hash("HEAD")
+    benchmark_config = BenchmarkConfig.build_from_args(args, commit)
+    benchmark_groups = json.loads(args.execution_benchmark_config.read_text())
+    benchmark_group = benchmark_groups.get(args.target_device_name)
+    if benchmark_group is None:
+        raise ValueError("Target device not found in the benchmark config.")
+    run_configs = serialization.unpack_and_deserialize(
+        data=benchmark_group["run_configs"],
+        root_type=List[iree_definitions.E2EModelRunConfig],
+    )
+    benchmark_suite = BenchmarkSuite.load_from_run_configs(
+        run_configs=run_configs, root_benchmark_dir=benchmark_config.root_benchmark_dir
+    )
 
-  benchmark_driver = AndroidBenchmarkDriver(device_info=device_info,
-                                            benchmark_config=benchmark_config,
-                                            benchmark_suite=benchmark_suite,
-                                            benchmark_grace_time=1.0,
-                                            verbose=args.verbose)
+    benchmark_driver = AndroidBenchmarkDriver(
+        device_info=device_info,
+        benchmark_config=benchmark_config,
+        benchmark_suite=benchmark_suite,
+        benchmark_grace_time=1.0,
+        verbose=args.verbose,
+    )
 
-  if args.pin_cpu_freq:
-    set_cpu_frequency_scaling_governor("performance")
-    atexit.register(set_cpu_frequency_scaling_governor, "schedutil")
-  if args.pin_gpu_freq:
-    set_gpu_frequency_scaling_policy("performance")
-    atexit.register(set_gpu_frequency_scaling_policy, "default")
+    if args.pin_cpu_freq:
+        set_cpu_frequency_scaling_governor("performance")
+        atexit.register(set_cpu_frequency_scaling_governor, "schedutil")
+    if args.pin_gpu_freq:
+        set_gpu_frequency_scaling_policy("performance")
+        atexit.register(set_gpu_frequency_scaling_policy, "default")
 
-  # Clear the benchmark directory on the Android device first just in case
-  # there are leftovers from manual or failed runs.
-  execute_cmd_and_get_stdout(["adb", "shell", "rm", "-rf", ANDROID_TMPDIR],
-                             verbose=args.verbose)
+    # Clear the benchmark directory on the Android device first just in case
+    # there are leftovers from manual or failed runs.
+    execute_cmd_and_get_stdout(
+        ["adb", "shell", "rm", "-rf", ANDROID_TMPDIR], verbose=args.verbose
+    )
 
-  if not args.no_clean:
-    # Clear the benchmark directory on the Android device.
-    atexit.register(execute_cmd_and_get_stdout,
-                    ["adb", "shell", "rm", "-rf", ANDROID_TMPDIR],
-                    verbose=args.verbose)
-    # Also clear temporary directory on the host device.
-    atexit.register(shutil.rmtree, args.tmp_dir)
+    if not args.no_clean:
+        # Clear the benchmark directory on the Android device.
+        atexit.register(
+            execute_cmd_and_get_stdout,
+            ["adb", "shell", "rm", "-rf", ANDROID_TMPDIR],
+            verbose=args.verbose,
+        )
+        # Also clear temporary directory on the host device.
+        atexit.register(shutil.rmtree, args.tmp_dir)
 
-  # Tracy client and server communicate over port 8086 by default. If we want
-  # to capture traces along the way, forward the port via adb.
-  trace_capture_config = benchmark_config.trace_capture_config
-  if trace_capture_config:
-    execute_cmd_and_get_stdout(["adb", "forward", "tcp:8086", "tcp:8086"],
-                               verbose=args.verbose)
-    atexit.register(execute_cmd_and_get_stdout,
-                    ["adb", "forward", "--remove", "tcp:8086"],
-                    verbose=args.verbose)
+    # Tracy client and server communicate over port 8086 by default. If we want
+    # to capture traces along the way, forward the port via adb.
+    trace_capture_config = benchmark_config.trace_capture_config
+    if trace_capture_config:
+        execute_cmd_and_get_stdout(
+            ["adb", "forward", "tcp:8086", "tcp:8086"], verbose=args.verbose
+        )
+        atexit.register(
+            execute_cmd_and_get_stdout,
+            ["adb", "forward", "--remove", "tcp:8086"],
+            verbose=args.verbose,
+        )
 
-  benchmark_driver.run()
+    benchmark_driver.run()
 
-  benchmark_results = benchmark_driver.get_benchmark_results()
-  if args.output is not None:
-    with open(args.output, "w") as f:
-      f.write(benchmark_results.to_json_str())
+    benchmark_results = benchmark_driver.get_benchmark_results()
+    if args.output is not None:
+        with open(args.output, "w") as f:
+            f.write(benchmark_results.to_json_str())
 
-  if args.verbose:
-    print(benchmark_results.commit)
-    print(benchmark_results.benchmarks)
+    if args.verbose:
+        print(benchmark_results.commit)
+        print(benchmark_results.benchmarks)
 
-  if trace_capture_config:
-    # Put all captures in a tarball and remove the original files.
-    with tarfile.open(trace_capture_config.capture_tarball, "w:gz") as tar:
-      for capture_filename in benchmark_driver.get_capture_filenames():
-        tar.add(capture_filename)
+    if trace_capture_config:
+        # Put all captures in a tarball and remove the original files.
+        with tarfile.open(trace_capture_config.capture_tarball, "w:gz") as tar:
+            for capture_filename in benchmark_driver.get_capture_filenames():
+                tar.add(capture_filename)
 
-  benchmark_errors = benchmark_driver.get_benchmark_errors()
-  if benchmark_errors:
-    print("Benchmarking completed with errors", file=sys.stderr)
-    raise RuntimeError(benchmark_errors)
+    benchmark_errors = benchmark_driver.get_benchmark_errors()
+    if benchmark_errors:
+        print("Benchmarking completed with errors", file=sys.stderr)
+        raise RuntimeError(benchmark_errors)
 
 
 if __name__ == "__main__":
-  main(common.common_arguments.Parser().parse_args())
+    main(common.common_arguments.Parser().parse_args())
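A pattern worth calling out before the next file: nearly every hunk above follows Black's default 88-column rule, where a call that fits on one line stays collapsed and one that does not gets split across lines with a trailing comma. The snippet below is a minimal, illustrative sketch (not part of this change) that reproduces the behavior through Black's Python API; the call strings and names such as `push_file` and `check_return_code` are invented for the example and do not come from these scripts.

```python
# Minimal sketch (illustration only): the line-length rule behind most hunks
# in this file. Requires `pip install black`.
import black

# Fits within the default 88-column limit, so Black keeps it on one line.
SHORT_CALL = (
    "android_tool = push_file(host_tool_path, relative_dir=rel_dir, verbose=verbose)\n"
)

# Far over 88 columns, so Black splits the arguments and adds a trailing comma.
LONG_CALL = (
    "execute_cmd(['adb', 'push', content.resolve(), android_path], "
    "verbose=verbose, stdout=stdout_redirect, check_return_code=True)\n"
)

mode = black.Mode()  # line_length defaults to 88
print(black.format_str(SHORT_CALL, mode=mode), end="")
print(black.format_str(LONG_CALL, mode=mode), end="")
```

Running the sketch prints the short call unchanged and the long one split across lines, matching the shape of the reformatted hunks above.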
diff --git a/build_tools/benchmarks/run_benchmarks_on_linux.py b/build_tools/benchmarks/run_benchmarks_on_linux.py
index 9554ccc..6820e0f 100755
--- a/build_tools/benchmarks/run_benchmarks_on_linux.py
+++ b/build_tools/benchmarks/run_benchmarks_on_linux.py
@@ -23,12 +23,14 @@
 from common.benchmark_driver import BenchmarkDriver
 from common.benchmark_suite import BenchmarkCase, BenchmarkSuite
 from common.benchmark_config import BenchmarkConfig
-from common.benchmark_definition import (execute_cmd,
-                                         execute_cmd_and_get_output,
-                                         get_git_commit_hash,
-                                         get_iree_benchmark_module_arguments,
-                                         wait_for_iree_benchmark_module_start,
-                                         parse_iree_benchmark_metrics)
+from common.benchmark_definition import (
+    execute_cmd,
+    execute_cmd_and_get_output,
+    get_git_commit_hash,
+    get_iree_benchmark_module_arguments,
+    wait_for_iree_benchmark_module_start,
+    parse_iree_benchmark_metrics,
+)
 from common.linux_device_utils import get_linux_device_info
 from e2e_test_framework.definitions import iree_definitions
 from e2e_test_framework import serialization
@@ -38,161 +40,174 @@
 
 
 class LinuxBenchmarkDriver(BenchmarkDriver):
-  """Linux benchmark driver."""
+    """Linux benchmark driver."""
 
-  def __init__(self, gpu_id: str, *args, **kwargs):
-    self.gpu_id = gpu_id
-    super().__init__(*args, **kwargs)
+    def __init__(self, gpu_id: str, *args, **kwargs):
+        self.gpu_id = gpu_id
+        super().__init__(*args, **kwargs)
 
-  def run_benchmark_case(self, benchmark_case: BenchmarkCase,
-                         benchmark_results_filename: Optional[pathlib.Path],
-                         capture_filename: Optional[pathlib.Path]) -> None:
+    def run_benchmark_case(
+        self,
+        benchmark_case: BenchmarkCase,
+        benchmark_results_filename: Optional[pathlib.Path],
+        capture_filename: Optional[pathlib.Path],
+    ) -> None:
+        if benchmark_results_filename:
+            self.__run_benchmark(
+                benchmark_case=benchmark_case,
+                results_filename=benchmark_results_filename,
+            )
 
-    if benchmark_results_filename:
-      self.__run_benchmark(benchmark_case=benchmark_case,
-                           results_filename=benchmark_results_filename)
+        if capture_filename:
+            self.__run_capture(
+                benchmark_case=benchmark_case, capture_filename=capture_filename
+            )
 
-    if capture_filename:
-      self.__run_capture(benchmark_case=benchmark_case,
-                         capture_filename=capture_filename)
+    def __build_tool_cmds(
+        self, benchmark_case: BenchmarkCase, tool_path: pathlib.Path
+    ) -> List[Any]:
+        run_config = benchmark_case.run_config
+        cmds: List[Any] = run_module_utils.build_linux_wrapper_cmds_for_device_spec(
+            run_config.target_device_spec
+        )
+        cmds.append(tool_path)
 
-  def __build_tool_cmds(self, benchmark_case: BenchmarkCase,
-                        tool_path: pathlib.Path) -> List[Any]:
-    run_config = benchmark_case.run_config
-    cmds: List[Any] = run_module_utils.build_linux_wrapper_cmds_for_device_spec(
-        run_config.target_device_spec)
-    cmds.append(tool_path)
+        module_dir_path = benchmark_case.benchmark_case_dir
+        cmds += [f"--module={module_dir_path / iree_artifacts.MODULE_FILENAME}"]
+        cmds += run_config.materialize_run_flags(gpu_id=self.gpu_id)
 
-    module_dir_path = benchmark_case.benchmark_case_dir
-    cmds += [f"--module={module_dir_path / iree_artifacts.MODULE_FILENAME}"]
-    cmds += run_config.materialize_run_flags(gpu_id=self.gpu_id)
+        return cmds
 
-    return cmds
+    def __run_benchmark(
+        self, benchmark_case: BenchmarkCase, results_filename: pathlib.Path
+    ):
+        if self.config.normal_benchmark_tool_dir is None:
+            raise ValueError("normal_benchmark_tool_dir can't be None.")
 
-  def __run_benchmark(self, benchmark_case: BenchmarkCase,
-                      results_filename: pathlib.Path):
-    if self.config.normal_benchmark_tool_dir is None:
-      raise ValueError("normal_benchmark_tool_dir can't be None.")
+        tool_name = benchmark_case.benchmark_tool_name
+        tool_path = self.config.normal_benchmark_tool_dir / tool_name
+        cmd = self.__build_tool_cmds(benchmark_case=benchmark_case, tool_path=tool_path)
 
-    tool_name = benchmark_case.benchmark_tool_name
-    tool_path = self.config.normal_benchmark_tool_dir / tool_name
-    cmd = self.__build_tool_cmds(benchmark_case=benchmark_case,
-                                 tool_path=tool_path)
+        if tool_name == "iree-benchmark-module":
+            cmd.extend(
+                get_iree_benchmark_module_arguments(
+                    results_filename=str(results_filename),
+                    driver_info=benchmark_case.driver_info,
+                    benchmark_min_time=self.config.benchmark_min_time,
+                )
+            )
 
-    if tool_name == "iree-benchmark-module":
-      cmd.extend(
-          get_iree_benchmark_module_arguments(
-              results_filename=str(results_filename),
-              driver_info=benchmark_case.driver_info,
-              benchmark_min_time=self.config.benchmark_min_time))
+        benchmark_stdout, benchmark_stderr = execute_cmd_and_get_output(
+            cmd, verbose=self.verbose
+        )
+        benchmark_metrics = parse_iree_benchmark_metrics(
+            benchmark_stdout, benchmark_stderr
+        )
+        if self.verbose:
+            print(benchmark_metrics)
+        results_filename.write_text(json.dumps(benchmark_metrics.to_json_object()))
 
-    benchmark_stdout, benchmark_stderr = execute_cmd_and_get_output(
-        cmd, verbose=self.verbose)
-    benchmark_metrics = parse_iree_benchmark_metrics(benchmark_stdout,
-                                                     benchmark_stderr)
-    if self.verbose:
-      print(benchmark_metrics)
-    results_filename.write_text(json.dumps(benchmark_metrics.to_json_object()))
+    def __run_capture(
+        self, benchmark_case: BenchmarkCase, capture_filename: pathlib.Path
+    ):
+        capture_config = self.config.trace_capture_config
+        if capture_config is None:
+            raise ValueError("capture_config can't be None.")
 
-  def __run_capture(self, benchmark_case: BenchmarkCase,
-                    capture_filename: pathlib.Path):
-    capture_config = self.config.trace_capture_config
-    if capture_config is None:
-      raise ValueError("capture_config can't be None.")
+        tool_path = (
+            capture_config.traced_benchmark_tool_dir
+            / benchmark_case.benchmark_tool_name
+        )
+        cmd = self.__build_tool_cmds(benchmark_case=benchmark_case, tool_path=tool_path)
 
-    tool_path = (capture_config.traced_benchmark_tool_dir /
-                 benchmark_case.benchmark_tool_name)
-    cmd = self.__build_tool_cmds(benchmark_case=benchmark_case,
-                                 tool_path=tool_path)
+        process = subprocess.Popen(
+            cmd, env={"TRACY_NO_EXIT": "1"}, stdout=subprocess.PIPE, text=True
+        )
 
-    process = subprocess.Popen(cmd,
-                               env={"TRACY_NO_EXIT": "1"},
-                               stdout=subprocess.PIPE,
-                               text=True)
+        wait_for_iree_benchmark_module_start(process, self.verbose)
 
-    wait_for_iree_benchmark_module_start(process, self.verbose)
-
-    capture_cmd = [
-        capture_config.trace_capture_tool, "-f", "-o", capture_filename
-    ]
-    stdout_redirect = None if self.verbose else subprocess.DEVNULL
-    execute_cmd(capture_cmd, verbose=self.verbose, stdout=stdout_redirect)
+        capture_cmd = [capture_config.trace_capture_tool, "-f", "-o", capture_filename]
+        stdout_redirect = None if self.verbose else subprocess.DEVNULL
+        execute_cmd(capture_cmd, verbose=self.verbose, stdout=stdout_redirect)
 
 
 def main(args):
-  device_info = get_linux_device_info(args.device_model, args.cpu_uarch,
-                                      args.gpu_id, args.verbose)
-  if args.verbose:
-    print(device_info)
+    device_info = get_linux_device_info(
+        args.device_model, args.cpu_uarch, args.gpu_id, args.verbose
+    )
+    if args.verbose:
+        print(device_info)
 
-  commit = get_git_commit_hash("HEAD")
-  benchmark_config = BenchmarkConfig.build_from_args(args, commit)
+    commit = get_git_commit_hash("HEAD")
+    benchmark_config = BenchmarkConfig.build_from_args(args, commit)
 
-  benchmark_groups = json.loads(args.execution_benchmark_config.read_text())
-  benchmark_group = benchmark_groups.get(args.target_device_name)
-  if benchmark_group is None:
-    raise ValueError("Target device not found in the benchmark config.")
-  run_configs = serialization.unpack_and_deserialize(
-      data=benchmark_group["run_configs"],
-      root_type=typing.List[iree_definitions.E2EModelRunConfig])
-  benchmark_suite = BenchmarkSuite.load_from_run_configs(
-      run_configs=run_configs,
-      root_benchmark_dir=benchmark_config.root_benchmark_dir)
+    benchmark_groups = json.loads(args.execution_benchmark_config.read_text())
+    benchmark_group = benchmark_groups.get(args.target_device_name)
+    if benchmark_group is None:
+        raise ValueError("Target device not found in the benchmark config.")
+    run_configs = serialization.unpack_and_deserialize(
+        data=benchmark_group["run_configs"],
+        root_type=typing.List[iree_definitions.E2EModelRunConfig],
+    )
+    benchmark_suite = BenchmarkSuite.load_from_run_configs(
+        run_configs=run_configs, root_benchmark_dir=benchmark_config.root_benchmark_dir
+    )
 
-  benchmark_driver = LinuxBenchmarkDriver(gpu_id=args.gpu_id,
-                                          device_info=device_info,
-                                          benchmark_config=benchmark_config,
-                                          benchmark_suite=benchmark_suite,
-                                          benchmark_grace_time=1.0,
-                                          verbose=args.verbose)
+    benchmark_driver = LinuxBenchmarkDriver(
+        gpu_id=args.gpu_id,
+        device_info=device_info,
+        benchmark_config=benchmark_config,
+        benchmark_suite=benchmark_suite,
+        benchmark_grace_time=1.0,
+        verbose=args.verbose,
+    )
 
-  if args.pin_cpu_freq:
-    raise NotImplementedError("CPU freq pinning is not supported yet.")
-  if args.pin_gpu_freq:
-    raise NotImplementedError("GPU freq pinning is not supported yet.")
-  if not args.no_clean:
-    atexit.register(shutil.rmtree, args.tmp_dir)
+    if args.pin_cpu_freq:
+        raise NotImplementedError("CPU freq pinning is not supported yet.")
+    if args.pin_gpu_freq:
+        raise NotImplementedError("GPU freq pinning is not supported yet.")
+    if not args.no_clean:
+        atexit.register(shutil.rmtree, args.tmp_dir)
 
-  benchmark_driver.run()
+    benchmark_driver.run()
 
-  benchmark_results = benchmark_driver.get_benchmark_results()
-  if args.output is not None:
-    with args.output.open("w") as f:
-      f.write(benchmark_results.to_json_str())
+    benchmark_results = benchmark_driver.get_benchmark_results()
+    if args.output is not None:
+        with args.output.open("w") as f:
+            f.write(benchmark_results.to_json_str())
 
-  if args.verbose:
-    print(benchmark_results.commit)
-    print(benchmark_results.benchmarks)
+    if args.verbose:
+        print(benchmark_results.commit)
+        print(benchmark_results.benchmarks)
 
-  trace_capture_config = benchmark_config.trace_capture_config
-  if trace_capture_config:
-    # Put all captures in a tarball and remove the original files.
-    with tarfile.open(trace_capture_config.capture_tarball, "w:gz") as tar:
-      for capture_filename in benchmark_driver.get_capture_filenames():
-        tar.add(capture_filename)
+    trace_capture_config = benchmark_config.trace_capture_config
+    if trace_capture_config:
+        # Put all captures in a tarball and remove the original files.
+        with tarfile.open(trace_capture_config.capture_tarball, "w:gz") as tar:
+            for capture_filename in benchmark_driver.get_capture_filenames():
+                tar.add(capture_filename)
 
-  benchmark_errors = benchmark_driver.get_benchmark_errors()
-  if benchmark_errors:
-    print("Benchmarking completed with errors", file=sys.stderr)
-    raise RuntimeError(benchmark_errors)
+    benchmark_errors = benchmark_driver.get_benchmark_errors()
+    if benchmark_errors:
+        print("Benchmarking completed with errors", file=sys.stderr)
+        raise RuntimeError(benchmark_errors)
 
 
 def parse_argument():
-  arg_parser = common.common_arguments.Parser()
-  arg_parser.add_argument("--device_model",
-                          default="Unknown",
-                          help="Device model")
-  arg_parser.add_argument("--cpu_uarch",
-                          default=None,
-                          help="CPU microarchitecture, e.g., CascadeLake")
-  arg_parser.add_argument(
-      "--gpu_id",
-      type=str,
-      default="0",
-      help="GPU ID to run the benchmark, e.g., '0' or 'GPU-<UUID>'")
+    arg_parser = common.common_arguments.Parser()
+    arg_parser.add_argument("--device_model", default="Unknown", help="Device model")
+    arg_parser.add_argument(
+        "--cpu_uarch", default=None, help="CPU microarchitecture, e.g., CascadeLake"
+    )
+    arg_parser.add_argument(
+        "--gpu_id",
+        type=str,
+        default="0",
+        help="GPU ID to run the benchmark, e.g., '0' or 'GPU-<UUID>'",
+    )
 
-  return arg_parser.parse_args()
+    return arg_parser.parse_args()
 
 
 if __name__ == "__main__":
-  main(parse_argument())
+    main(parse_argument())
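Related, and visible both in the files above and in the dashboard uploader that follows: once Black explodes a call or literal it leaves a trailing comma behind, and that comma then acts as a "magic trailing comma" that keeps the construct exploded on later runs even when it would otherwise fit on one line. The sketch below is again illustrative only; the dictionary is a trimmed-down stand-in for the uploader's payloads, not the real code.

```python
# Illustrative sketch of Black's "magic trailing comma": a pre-existing
# trailing comma keeps the brackets exploded, while the same dict without
# it collapses onto a single line (with quotes normalized to double quotes).
import black

WITH_COMMA = "payload = {'projectId': project_id, 'override': override,}\n"
WITHOUT_COMMA = "payload = {'projectId': project_id, 'override': override}\n"

print(black.format_str(WITH_COMMA, mode=black.Mode()), end="")
# payload = {
#     "projectId": project_id,
#     "override": override,
# }
print(black.format_str(WITHOUT_COMMA, mode=black.Mode()), end="")
# payload = {"projectId": project_id, "override": override}
```

One consequence is that the reformatting is stable: re-running `black` over the already formatted files produces no further changes.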
diff --git a/build_tools/benchmarks/upload_benchmarks_to_dashboard.py b/build_tools/benchmarks/upload_benchmarks_to_dashboard.py
index 89aaf2e..137b7c6 100755
--- a/build_tools/benchmarks/upload_benchmarks_to_dashboard.py
+++ b/build_tools/benchmarks/upload_benchmarks_to_dashboard.py
@@ -32,8 +32,8 @@
 from common import benchmark_definition, benchmark_presentation, benchmark_thresholds
 
 IREE_DASHBOARD_URL = "https://perf.iree.dev"
-IREE_GITHUB_COMMIT_URL_PREFIX = 'https://github.com/openxla/iree/commit'
-IREE_PROJECT_ID = 'IREE'
+IREE_GITHUB_COMMIT_URL_PREFIX = "https://github.com/openxla/iree/commit"
+IREE_PROJECT_ID = "IREE"
 THIS_DIRECTORY = pathlib.Path(__file__).resolve().parent
 
 COMMON_DESCRIPTION = """
@@ -50,353 +50,395 @@
 # For models listed here we can provide a nicer description for them on
 # the webpage.
 IREE_TF_MODEL_SOURCE_URL = {
-    'MobileBertSquad':
-        'https://github.com/google-research/google-research/tree/master/mobilebert',
-    'MobileNetV2':
-        'https://www.tensorflow.org/api_docs/python/tf/keras/applications/MobileNetV2',
-    'MobileNetV3Small':
-        'https://www.tensorflow.org/api_docs/python/tf/keras/applications/MobileNetV3Small',
+    "MobileBertSquad": "https://github.com/google-research/google-research/tree/master/mobilebert",
+    "MobileNetV2": "https://www.tensorflow.org/api_docs/python/tf/keras/applications/MobileNetV2",
+    "MobileNetV3Small": "https://www.tensorflow.org/api_docs/python/tf/keras/applications/MobileNetV3Small",
 }
 
 IREE_TFLITE_MODEL_SOURCE_URL = {
-    'DeepLabV3':
-        'https://tfhub.dev/tensorflow/lite-model/deeplabv3/1/default/1',
-    'MobileSSD':
-        'https://www.tensorflow.org/lite/performance/gpu#demo_app_tutorials',
-    'PoseNet':
-        'https://tfhub.dev/tensorflow/lite-model/posenet/mobilenet/float/075/1/default/1',
+    "DeepLabV3": "https://tfhub.dev/tensorflow/lite-model/deeplabv3/1/default/1",
+    "MobileSSD": "https://www.tensorflow.org/lite/performance/gpu#demo_app_tutorials",
+    "PoseNet": "https://tfhub.dev/tensorflow/lite-model/posenet/mobilenet/float/075/1/default/1",
 }
 
 
 def get_model_description(model_name: str, model_source: str) -> Optional[str]:
-  """Gets the model description for the given benchmark."""
-  url = None
-  if model_source == "TensorFlow":
-    url = IREE_TF_MODEL_SOURCE_URL.get(model_name)
-  elif model_source == "TFLite":
-    url = IREE_TFLITE_MODEL_SOURCE_URL.get(model_name)
-  if url is not None:
-    description = f'{model_name} from <a href="{url}">{url}</a>.'
-    return description
-  return None
+    """Gets the model description for the given benchmark."""
+    url = None
+    if model_source == "TensorFlow":
+        url = IREE_TF_MODEL_SOURCE_URL.get(model_name)
+    elif model_source == "TFLite":
+        url = IREE_TFLITE_MODEL_SOURCE_URL.get(model_name)
+    if url is not None:
+        description = f'{model_name} from <a href="{url}">{url}</a>.'
+        return description
+    return None
 
 
 def get_git_commit_hash(commit: str, verbose: bool = False) -> str:
-  """Gets the commit hash for the given commit."""
-  return benchmark_definition.execute_cmd_and_get_stdout(
-      ['git', 'rev-parse', commit], cwd=THIS_DIRECTORY, verbose=verbose)
+    """Gets the commit hash for the given commit."""
+    return benchmark_definition.execute_cmd_and_get_stdout(
+        ["git", "rev-parse", commit], cwd=THIS_DIRECTORY, verbose=verbose
+    )
 
 
 def get_git_total_commit_count(commit: str, verbose: bool = False) -> int:
-  """Gets the total commit count in history ending with the given commit."""
-  count = benchmark_definition.execute_cmd_and_get_stdout(
-      ['git', 'rev-list', '--count', commit],
-      cwd=THIS_DIRECTORY,
-      verbose=verbose)
-  return int(count)
+    """Gets the total commit count in history ending with the given commit."""
+    count = benchmark_definition.execute_cmd_and_get_stdout(
+        ["git", "rev-list", "--count", commit], cwd=THIS_DIRECTORY, verbose=verbose
+    )
+    return int(count)
 
 
 def get_git_commit_info(commit: str, verbose: bool = False) -> Dict[str, str]:
-  """Gets commit information dictionary for the given commit."""
-  cmd = [
-      'git', 'show', '--format=%H:::%h:::%an:::%ae:::%s', '--no-patch', commit
-  ]
-  info = benchmark_definition.execute_cmd_and_get_stdout(cmd,
-                                                         cwd=THIS_DIRECTORY,
-                                                         verbose=verbose)
-  segments = info.split(':::')
-  return {
-      'hash': segments[0],
-      'abbrevHash': segments[1],
-      'authorName': segments[2],
-      'authorEmail': segments[3],
-      'subject': segments[4],
-  }
+    """Gets commit information dictionary for the given commit."""
+    cmd = ["git", "show", "--format=%H:::%h:::%an:::%ae:::%s", "--no-patch", commit]
+    info = benchmark_definition.execute_cmd_and_get_stdout(
+        cmd, cwd=THIS_DIRECTORY, verbose=verbose
+    )
+    segments = info.split(":::")
+    return {
+        "hash": segments[0],
+        "abbrevHash": segments[1],
+        "authorName": segments[2],
+        "authorEmail": segments[3],
+        "subject": segments[4],
+    }
 
 
-def compose_series_payload(project_id: str,
-                           series_id: str,
-                           series_unit: str,
-                           series_name: Optional[str] = None,
-                           series_description: Optional[str] = None,
-                           average_range: Union[int, str] = '5%',
-                           average_min_count: int = 3,
-                           better_criterion: str = 'smaller',
-                           override: bool = False) -> Dict[str, Any]:
-  """Composes the payload dictionary for a series."""
-  payload = {
-      'projectId': project_id,
-      'serieId': series_id,
-      'serieUnit': series_unit,
-      'serieName': series_name,
-      'analyse': {
-          'benchmark': {
-              'range': average_range,
-              'required': average_min_count,
-              'trend': better_criterion,
-          }
-      },
-      'override': override,
-  }
-  if series_description is not None:
-    payload['description'] = series_description
-  return payload
+def compose_series_payload(
+    project_id: str,
+    series_id: str,
+    series_unit: str,
+    series_name: Optional[str] = None,
+    series_description: Optional[str] = None,
+    average_range: Union[int, str] = "5%",
+    average_min_count: int = 3,
+    better_criterion: str = "smaller",
+    override: bool = False,
+) -> Dict[str, Any]:
+    """Composes the payload dictionary for a series."""
+    payload = {
+        "projectId": project_id,
+        "serieId": series_id,
+        "serieUnit": series_unit,
+        "serieName": series_name,
+        "analyse": {
+            "benchmark": {
+                "range": average_range,
+                "required": average_min_count,
+                "trend": better_criterion,
+            }
+        },
+        "override": override,
+    }
+    if series_description is not None:
+        payload["description"] = series_description
+    return payload
 
 
-def compose_build_payload(project_id: str,
-                          project_github_commit_url: str,
-                          build_id: int,
-                          commit: str,
-                          override: bool = False) -> Dict[str, Any]:
-  """Composes the payload dictionary for a build."""
-  commit_info = get_git_commit_info(commit)
-  commit_info['url'] = f'{project_github_commit_url}/{commit_info["hash"]}'
-  return {
-      'projectId': project_id,
-      'build': {
-          'buildId': build_id,
-          'infos': commit_info,
-      },
-      'override': override,
-  }
+def compose_build_payload(
+    project_id: str,
+    project_github_commit_url: str,
+    build_id: int,
+    commit: str,
+    override: bool = False,
+) -> Dict[str, Any]:
+    """Composes the payload dictionary for a build."""
+    commit_info = get_git_commit_info(commit)
+    commit_info["url"] = f'{project_github_commit_url}/{commit_info["hash"]}'
+    return {
+        "projectId": project_id,
+        "build": {
+            "buildId": build_id,
+            "infos": commit_info,
+        },
+        "override": override,
+    }
 
 
-def compose_sample_payload(project_id: str,
-                           series_id: str,
-                           build_id: int,
-                           sample_unit: str,
-                           sample_value: int,
-                           override: bool = False) -> Dict[str, Any]:
-  """Composes the payload dictionary for a sample."""
-  return {
-      'projectId': project_id,
-      'serieId': series_id,
-      'sampleUnit': sample_unit,
-      'sample': {
-          'buildId': build_id,
-          'value': sample_value
-      },
-      'override': override
-  }
+def compose_sample_payload(
+    project_id: str,
+    series_id: str,
+    build_id: int,
+    sample_unit: str,
+    sample_value: int,
+    override: bool = False,
+) -> Dict[str, Any]:
+    """Composes the payload dictionary for a sample."""
+    return {
+        "projectId": project_id,
+        "serieId": series_id,
+        "sampleUnit": sample_unit,
+        "sample": {"buildId": build_id, "value": sample_value},
+        "override": override,
+    }
 
 
 def get_required_env_var(var: str) -> str:
-  """Gets the value for a required environment variable."""
-  value = os.getenv(var)
-  if value is None:
-    raise RuntimeError(f'Missing environment variable "{var}"')
-  return value
+    """Gets the value for a required environment variable."""
+    value = os.getenv(var)
+    if value is None:
+        raise RuntimeError(f'Missing environment variable "{var}"')
+    return value
 
 
-def post_to_dashboard(url: str,
-                      payload: Dict[str, Any],
-                      dry_run: bool = False,
-                      verbose: bool = False):
-  data = json.dumps(payload)
+def post_to_dashboard(
+    url: str, payload: Dict[str, Any], dry_run: bool = False, verbose: bool = False
+):
+    data = json.dumps(payload)
 
-  if dry_run or verbose:
-    print(f'API request payload: {data}')
+    if dry_run or verbose:
+        print(f"API request payload: {data}")
 
-  if dry_run:
-    return
+    if dry_run:
+        return
 
-  api_token = get_required_env_var('IREE_DASHBOARD_API_TOKEN')
-  headers = {
-      'Content-type': 'application/json',
-      'Authorization': f'Bearer {api_token}',
-  }
+    api_token = get_required_env_var("IREE_DASHBOARD_API_TOKEN")
+    headers = {
+        "Content-type": "application/json",
+        "Authorization": f"Bearer {api_token}",
+    }
 
-  response = requests.post(url, data=data, headers=headers)
-  code = response.status_code
-  if code != 200:
-    raise requests.RequestException(
-        f'Failed to post to dashboard server with {code} - {response.text}')
+    response = requests.post(url, data=data, headers=headers)
+    code = response.status_code
+    if code != 200:
+        raise requests.RequestException(
+            f"Failed to post to dashboard server with {code} - {response.text}"
+        )
 
 
-def add_new_iree_series(series_id: str,
-                        series_unit: str,
-                        series_name: str,
-                        series_description: Optional[str] = None,
-                        average_range: Optional[Union[str, int]] = None,
-                        override: bool = False,
-                        dry_run: bool = False,
-                        verbose: bool = False):
-  """Posts a new series to the dashboard."""
-  if average_range is None:
-    raise ValueError(f"no matched threshold setting for benchmark: {series_id}")
+def add_new_iree_series(
+    series_id: str,
+    series_unit: str,
+    series_name: str,
+    series_description: Optional[str] = None,
+    average_range: Optional[Union[str, int]] = None,
+    override: bool = False,
+    dry_run: bool = False,
+    verbose: bool = False,
+):
+    """Posts a new series to the dashboard."""
+    if average_range is None:
+        raise ValueError(f"no matched threshold setting for benchmark: {series_id}")
 
-  payload = compose_series_payload(IREE_PROJECT_ID,
-                                   series_id,
-                                   series_unit,
-                                   series_name,
-                                   series_description,
-                                   average_range=average_range,
-                                   override=override)
-  post_to_dashboard(f'{IREE_DASHBOARD_URL}/apis/v2/addSerie',
-                    payload,
-                    dry_run=dry_run,
-                    verbose=verbose)
+    payload = compose_series_payload(
+        IREE_PROJECT_ID,
+        series_id,
+        series_unit,
+        series_name,
+        series_description,
+        average_range=average_range,
+        override=override,
+    )
+    post_to_dashboard(
+        f"{IREE_DASHBOARD_URL}/apis/v2/addSerie",
+        payload,
+        dry_run=dry_run,
+        verbose=verbose,
+    )
 
 
-def add_new_iree_build(build_id: int,
-                       commit: str,
-                       override: bool = False,
-                       dry_run: bool = False,
-                       verbose: bool = False):
-  """Posts a new build to the dashboard."""
-  payload = compose_build_payload(IREE_PROJECT_ID,
-                                  IREE_GITHUB_COMMIT_URL_PREFIX, build_id,
-                                  commit, override)
-  post_to_dashboard(f'{IREE_DASHBOARD_URL}/apis/addBuild',
-                    payload,
-                    dry_run=dry_run,
-                    verbose=verbose)
+def add_new_iree_build(
+    build_id: int,
+    commit: str,
+    override: bool = False,
+    dry_run: bool = False,
+    verbose: bool = False,
+):
+    """Posts a new build to the dashboard."""
+    payload = compose_build_payload(
+        IREE_PROJECT_ID, IREE_GITHUB_COMMIT_URL_PREFIX, build_id, commit, override
+    )
+    post_to_dashboard(
+        f"{IREE_DASHBOARD_URL}/apis/addBuild", payload, dry_run=dry_run, verbose=verbose
+    )
 
 
-def add_new_sample(series_id: str,
-                   build_id: int,
-                   sample_unit: str,
-                   sample_value: int,
-                   override: bool = False,
-                   dry_run: bool = False,
-                   verbose: bool = False):
-  """Posts a new sample to the dashboard."""
-  payload = compose_sample_payload(IREE_PROJECT_ID, series_id, build_id,
-                                   sample_unit, sample_value, override)
-  post_to_dashboard(f'{IREE_DASHBOARD_URL}/apis/v2/addSample',
-                    payload,
-                    dry_run=dry_run,
-                    verbose=verbose)
+def add_new_sample(
+    series_id: str,
+    build_id: int,
+    sample_unit: str,
+    sample_value: int,
+    override: bool = False,
+    dry_run: bool = False,
+    verbose: bool = False,
+):
+    """Posts a new sample to the dashboard."""
+    payload = compose_sample_payload(
+        IREE_PROJECT_ID, series_id, build_id, sample_unit, sample_value, override
+    )
+    post_to_dashboard(
+        f"{IREE_DASHBOARD_URL}/apis/v2/addSample",
+        payload,
+        dry_run=dry_run,
+        verbose=verbose,
+    )
 
 
 def parse_arguments():
-  """Parses command-line options."""
+    """Parses command-line options."""
 
-  parser = argparse.ArgumentParser()
-  parser.add_argument(
-      '--benchmark_files',
-      metavar='<benchmark-json-files>',
-      default=[],
-      action="append",
-      help=("Paths to the JSON files containing benchmark results, "
-            "accepts wildcards"))
-  parser.add_argument(
-      "--compile_stats_files",
-      metavar="<compile-stats-json-files>",
-      default=[],
-      action="append",
-      help=("Paths to the JSON files containing compilation statistics, "
-            "accepts wildcards"))
-  parser.add_argument("--dry-run",
-                      action="store_true",
-                      help="Print the comment instead of posting to dashboard")
-  parser.add_argument('--verbose',
-                      action='store_true',
-                      help='Print internal information during execution')
-  args = parser.parse_args()
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--benchmark_files",
+        metavar="<benchmark-json-files>",
+        default=[],
+        action="append",
+        help=(
+            "Paths to the JSON files containing benchmark results, " "accepts wildcards"
+        ),
+    )
+    parser.add_argument(
+        "--compile_stats_files",
+        metavar="<compile-stats-json-files>",
+        default=[],
+        action="append",
+        help=(
+            "Paths to the JSON files containing compilation statistics, "
+            "accepts wildcards"
+        ),
+    )
+    parser.add_argument(
+        "--dry-run",
+        action="store_true",
+        help="Print the comment instead of posting to dashboard",
+    )
+    parser.add_argument(
+        "--verbose",
+        action="store_true",
+        help="Print internal information during execution",
+    )
+    args = parser.parse_args()
 
-  return args
+    return args
 
 
 def main(args):
-  benchmark_files = expand_and_check_file_paths(args.benchmark_files)
-  compile_stats_files = expand_and_check_file_paths(args.compile_stats_files)
+    benchmark_files = expand_and_check_file_paths(args.benchmark_files)
+    compile_stats_files = expand_and_check_file_paths(args.compile_stats_files)
 
-  if len(benchmark_files) > 0:
-    committish = benchmark_definition.BenchmarkResults.from_json_str(
-        benchmark_files[0].read_text()).commit
-  elif len(compile_stats_files) > 0:
-    committish = benchmark_definition.CompilationResults.from_json_object(
-        json.loads(compile_stats_files[0].read_text())).commit
-  else:
-    raise ValueError("No benchmark/compilation results.")
+    if len(benchmark_files) > 0:
+        committish = benchmark_definition.BenchmarkResults.from_json_str(
+            benchmark_files[0].read_text()
+        ).commit
+    elif len(compile_stats_files) > 0:
+        committish = benchmark_definition.CompilationResults.from_json_object(
+            json.loads(compile_stats_files[0].read_text())
+        ).commit
+    else:
+        raise ValueError("No benchmark/compilation results.")
 
-  # Register a new build for the current commit.
-  commit_hash = get_git_commit_hash(committish, verbose=args.verbose)
-  commit_count = get_git_total_commit_count(commit_hash, verbose=args.verbose)
+    # Register a new build for the current commit.
+    commit_hash = get_git_commit_hash(committish, verbose=args.verbose)
+    commit_count = get_git_total_commit_count(commit_hash, verbose=args.verbose)
 
-  aggregate_results = benchmark_presentation.aggregate_all_benchmarks(
-      benchmark_files=benchmark_files, expected_pr_commit=commit_hash)
+    aggregate_results = benchmark_presentation.aggregate_all_benchmarks(
+        benchmark_files=benchmark_files, expected_pr_commit=commit_hash
+    )
 
-  all_compilation_metrics = benchmark_presentation.collect_all_compilation_metrics(
-      compile_stats_files=compile_stats_files, expected_pr_commit=commit_hash)
+    all_compilation_metrics = benchmark_presentation.collect_all_compilation_metrics(
+        compile_stats_files=compile_stats_files, expected_pr_commit=commit_hash
+    )
 
-  # Allow override to support uploading data for the same build in
-  # different batches.
-  add_new_iree_build(commit_count,
-                     commit_hash,
-                     override=True,
-                     dry_run=args.dry_run,
-                     verbose=args.verbose)
+    # Allow override to support uploading data for the same build in
+    # different batches.
+    add_new_iree_build(
+        commit_count,
+        commit_hash,
+        override=True,
+        dry_run=args.dry_run,
+        verbose=args.verbose,
+    )
 
-  # Upload benchmark results to the dashboard.
-  for series_id, benchmark_latency in aggregate_results.items():
-    series_name = benchmark_latency.name
-    benchmark_info = benchmark_latency.benchmark_info
-    description = get_model_description(benchmark_info.model_name,
-                                        benchmark_info.model_source)
-    if description is None:
-      description = ""
-    description += COMMON_DESCRIPTION
+    # Upload benchmark results to the dashboard.
+    for series_id, benchmark_latency in aggregate_results.items():
+        series_name = benchmark_latency.name
+        benchmark_info = benchmark_latency.benchmark_info
+        description = get_model_description(
+            benchmark_info.model_name, benchmark_info.model_source
+        )
+        if description is None:
+            description = ""
+        description += COMMON_DESCRIPTION
 
-    threshold = next(
-        (threshold for threshold in benchmark_thresholds.BENCHMARK_THRESHOLDS
-         if threshold.regex.match(series_name)), None)
-    average_range = (threshold.get_threshold_str()
-                     if threshold is not None else None)
+        threshold = next(
+            (
+                threshold
+                for threshold in benchmark_thresholds.BENCHMARK_THRESHOLDS
+                if threshold.regex.match(series_name)
+            ),
+            None,
+        )
+        average_range = threshold.get_threshold_str() if threshold is not None else None
 
-    # Override by default to allow updates to the series.
-    add_new_iree_series(series_id=series_id,
-                        series_unit="ns",
-                        series_name=benchmark_latency.name,
-                        series_description=description,
-                        average_range=average_range,
-                        override=True,
-                        dry_run=args.dry_run,
-                        verbose=args.verbose)
-    add_new_sample(series_id=series_id,
-                   build_id=commit_count,
-                   sample_unit="ns",
-                   sample_value=benchmark_latency.mean_time,
-                   dry_run=args.dry_run,
-                   verbose=args.verbose)
+        # Override by default to allow updates to the series.
+        add_new_iree_series(
+            series_id=series_id,
+            series_unit="ns",
+            series_name=benchmark_latency.name,
+            series_description=description,
+            average_range=average_range,
+            override=True,
+            dry_run=args.dry_run,
+            verbose=args.verbose,
+        )
+        add_new_sample(
+            series_id=series_id,
+            build_id=commit_count,
+            sample_unit="ns",
+            sample_value=benchmark_latency.mean_time,
+            dry_run=args.dry_run,
+            verbose=args.verbose,
+        )
 
-  for target_id, compile_metrics in all_compilation_metrics.items():
-    description = get_model_description(
-        compile_metrics.compilation_info.model_name,
-        compile_metrics.compilation_info.model_source)
-    if description is None:
-      description = ""
-    description += COMMON_DESCRIPTION
+    for target_id, compile_metrics in all_compilation_metrics.items():
+        description = get_model_description(
+            compile_metrics.compilation_info.model_name,
+            compile_metrics.compilation_info.model_source,
+        )
+        if description is None:
+            description = ""
+        description += COMMON_DESCRIPTION
 
-    for mapper in benchmark_presentation.COMPILATION_METRICS_TO_TABLE_MAPPERS:
-      sample_value, _ = mapper.get_current_and_base_value(compile_metrics)
-      series_unit = mapper.get_unit()
-      series_id = mapper.get_series_id(target_id)
-      series_name = mapper.get_series_name(compile_metrics.name)
+        for mapper in benchmark_presentation.COMPILATION_METRICS_TO_TABLE_MAPPERS:
+            sample_value, _ = mapper.get_current_and_base_value(compile_metrics)
+            series_unit = mapper.get_unit()
+            series_id = mapper.get_series_id(target_id)
+            series_name = mapper.get_series_name(compile_metrics.name)
 
-      threshold = next(
-          (threshold for threshold in mapper.get_metric_thresholds()
-           if threshold.regex.match(series_name)), None)
-      average_range = (threshold.get_threshold_str()
-                       if threshold is not None else None)
+            threshold = next(
+                (
+                    threshold
+                    for threshold in mapper.get_metric_thresholds()
+                    if threshold.regex.match(series_name)
+                ),
+                None,
+            )
+            average_range = (
+                threshold.get_threshold_str() if threshold is not None else None
+            )
 
-      # Override by default to allow updates to the series.
-      add_new_iree_series(series_id=series_id,
-                          series_unit=series_unit,
-                          series_name=series_name,
-                          series_description=description,
-                          average_range=average_range,
-                          override=True,
-                          dry_run=args.dry_run,
-                          verbose=args.verbose)
-      add_new_sample(series_id=series_id,
-                     build_id=commit_count,
-                     sample_unit=series_unit,
-                     sample_value=sample_value,
-                     dry_run=args.dry_run,
-                     verbose=args.verbose)
+            # Override by default to allow updates to the series.
+            add_new_iree_series(
+                series_id=series_id,
+                series_unit=series_unit,
+                series_name=series_name,
+                series_description=description,
+                average_range=average_range,
+                override=True,
+                dry_run=args.dry_run,
+                verbose=args.verbose,
+            )
+            add_new_sample(
+                series_id=series_id,
+                build_id=commit_count,
+                sample_unit=series_unit,
+                sample_value=sample_value,
+                dry_run=args.dry_run,
+                verbose=args.verbose,
+            )
 
 
 if __name__ == "__main__":
-  main(parse_arguments())
+    main(parse_arguments())
diff --git a/build_tools/docker/get_image_name.py b/build_tools/docker/get_image_name.py
index ae8e180..993e00c 100755
--- a/build_tools/docker/get_image_name.py
+++ b/build_tools/docker/get_image_name.py
@@ -22,22 +22,23 @@
 
 
 def find_image_by_name(img_name):
-  this_dir = Path(__file__).resolve().parent
+    this_dir = Path(__file__).resolve().parent
 
-  with open(this_dir / "prod_digests.txt", "rt") as f:
-    for line in f.readlines():
-      line = line.strip()
-      if line.startswith(f"gcr.io/iree-oss/{img_name}@"):
-        return line
-    else:
-      raise ValueError(
-          f"ERROR: Image name {img_name} not found in prod_digests.txt")
+    with open(this_dir / "prod_digests.txt", "rt") as f:
+        for line in f.readlines():
+            line = line.strip()
+            if line.startswith(f"gcr.io/iree-oss/{img_name}@"):
+                return line
+        else:
+            raise ValueError(
+                f"ERROR: Image name {img_name} not found in prod_digests.txt"
+            )
 
 
 if __name__ == "__main__":
-  if len(sys.argv) != 2:
-    print("ERROR: Expected image short name", file=sys.stderr)
-    sys.exit(1)
-  short_name = sys.argv[1]
-  image_name = find_image_by_name(short_name)
-  print(image_name)
+    if len(sys.argv) != 2:
+        print("ERROR: Expected image short name", file=sys.stderr)
+        sys.exit(1)
+    short_name = sys.argv[1]
+    image_name = find_image_by_name(short_name)
+    print(image_name)
diff --git a/build_tools/docker/manage_images.py b/build_tools/docker/manage_images.py
index f10b433..3bde67a 100755
--- a/build_tools/docker/manage_images.py
+++ b/build_tools/docker/manage_images.py
@@ -67,193 +67,212 @@
 
 IMAGES_TO_DEPENDENT_IMAGES = {k: [] for k in IMAGES_TO_DEPENDENCIES}
 for image, dependencies in IMAGES_TO_DEPENDENCIES.items():
-  for dependency in dependencies:
-    IMAGES_TO_DEPENDENT_IMAGES[dependency].append(image)
+    for dependency in dependencies:
+        IMAGES_TO_DEPENDENT_IMAGES[dependency].append(image)
 
 IMAGES_HELP = [f"`{name}`" for name in IMAGES_TO_DEPENDENCIES]
 IMAGES_HELP = f"{', '.join(IMAGES_HELP)} or `all`"
 
 
 def parse_arguments():
-  """Parses command-line options."""
-  parser = argparse.ArgumentParser(
-      description="Build IREE's Docker images and optionally push them to GCR.")
-  parser.add_argument("--images",
-                      "--image",
-                      type=str,
-                      required=True,
-                      action="append",
-                      help=f"Name of the image to build: {IMAGES_HELP}.")
-  parser.add_argument(
-      "--dry_run",
-      "--dry-run",
-      "-n",
-      action="store_true",
-      help="Print output without building or pushing any images.")
-  parser.add_argument(
-      "--only_references",
-      "--only-references",
-      action="store_true",
-      help=
-      "Just update references to images using the digests in prod_digests.txt")
+    """Parses command-line options."""
+    parser = argparse.ArgumentParser(
+        description="Build IREE's Docker images and optionally push them to GCR."
+    )
+    parser.add_argument(
+        "--images",
+        "--image",
+        type=str,
+        required=True,
+        action="append",
+        help=f"Name of the image to build: {IMAGES_HELP}.",
+    )
+    parser.add_argument(
+        "--dry_run",
+        "--dry-run",
+        "-n",
+        action="store_true",
+        help="Print output without building or pushing any images.",
+    )
+    parser.add_argument(
+        "--only_references",
+        "--only-references",
+        action="store_true",
+        help="Just update references to images using the digests in prod_digests.txt",
+    )
 
-  args = parser.parse_args()
-  for image in args.images:
-    if image == "all":
-      # Sort for a determinstic order
-      args.images = sorted(IMAGES_TO_DEPENDENCIES.keys())
-    elif image not in IMAGES_TO_DEPENDENCIES:
-      raise parser.error("Expected --image to be one of:\n"
-                         f"  {IMAGES_HELP}\n"
-                         f"but got `{image}`.")
-  return args
+    args = parser.parse_args()
+    for image in args.images:
+        if image == "all":
+            # Sort for a deterministic order
+            args.images = sorted(IMAGES_TO_DEPENDENCIES.keys())
+        elif image not in IMAGES_TO_DEPENDENCIES:
+            raise parser.error(
+                "Expected --image to be one of:\n"
+                f"  {IMAGES_HELP}\n"
+                f"but got `{image}`."
+            )
+    return args
 
 
-def _dag_dfs(input_nodes: Sequence[str],
-             node_to_child_nodes: Dict[str, Sequence[str]]) -> List[str]:
-  # Python doesn't have a builtin OrderedSet, but we don't have many images, so
-  # we just use a list.
-  ordered_nodes = []
+def _dag_dfs(
+    input_nodes: Sequence[str], node_to_child_nodes: Dict[str, Sequence[str]]
+) -> List[str]:
+    # Python doesn't have a builtin OrderedSet, but we don't have many images, so
+    # we just use a list.
+    ordered_nodes = []
 
-  def add_children(parent_node: str):
-    if parent_node not in ordered_nodes:
-      for child_node in node_to_child_nodes[parent_node]:
-        add_children(child_node)
-      ordered_nodes.append(parent_node)
+    def add_children(parent_node: str):
+        if parent_node not in ordered_nodes:
+            for child_node in node_to_child_nodes[parent_node]:
+                add_children(child_node)
+            ordered_nodes.append(parent_node)
 
-  for node in input_nodes:
-    add_children(node)
-  return ordered_nodes
+    for node in input_nodes:
+        add_children(node)
+    return ordered_nodes
 
 
 def get_ordered_images_to_process(images: Sequence[str]) -> List[str]:
-  dependents = _dag_dfs(images, IMAGES_TO_DEPENDENT_IMAGES)
-  dependents.reverse()
-  return dependents
+    dependents = _dag_dfs(images, IMAGES_TO_DEPENDENT_IMAGES)
+    dependents.reverse()
+    return dependents
 
 
 def get_dependencies(images: Sequence[str]) -> List[str]:
-  return _dag_dfs(images, IMAGES_TO_DEPENDENCIES)
+    return _dag_dfs(images, IMAGES_TO_DEPENDENCIES)
 
 
 def get_repo_digest(tagged_image_url: str, dry_run: bool = False) -> str:
-  inspect_command = [
-      "docker",
-      "image",
-      "inspect",
-      tagged_image_url,
-      "-f",
-      "{{index .RepoDigests 0}}",
-  ]
-  try:
-    completed_process = utils.run_command(
-        inspect_command,
-        dry_run=False,  # Run even if --dry_run is True.
-        capture_output=True,
-        timeout=10)
-  except subprocess.CalledProcessError as error:
-    if dry_run:
-      return ""
-    else:
-      raise RuntimeError(
-          f"Computing the repository digest for {tagged_image_url} failed. Has "
-          "it been pushed to GCR?") from error
-  _, repo_digest = completed_process.stdout.strip().split("@")
-  return repo_digest
+    inspect_command = [
+        "docker",
+        "image",
+        "inspect",
+        tagged_image_url,
+        "-f",
+        "{{index .RepoDigests 0}}",
+    ]
+    try:
+        completed_process = utils.run_command(
+            inspect_command,
+            dry_run=False,  # Run even if --dry_run is True.
+            capture_output=True,
+            timeout=10,
+        )
+    except subprocess.CalledProcessError as error:
+        if dry_run:
+            return ""
+        else:
+            raise RuntimeError(
+                f"Computing the repository digest for {tagged_image_url} failed. Has "
+                "it been pushed to GCR?"
+            ) from error
+    _, repo_digest = completed_process.stdout.strip().split("@")
+    return repo_digest
 
 
 def update_references(image_url: str, digest: str, dry_run: bool = False):
-  """Updates all references to "image_url" with a sha256 digest."""
-  print(f"Updating references to {image_url}")
+    """Updates all references to "image_url" with a sha256 digest."""
+    print(f"Updating references to {image_url}")
 
-  grep_command = ["git", "grep", "-l", f"{image_url}@sha256"]
-  try:
-    completed_process = utils.run_command(grep_command,
-                                          capture_output=True,
-                                          timeout=5)
-  except subprocess.CalledProcessError as error:
-    if error.returncode == 1:
-      print(f"Found no references to {image_url}")
-      return
-    raise error
+    grep_command = ["git", "grep", "-l", f"{image_url}@sha256"]
+    try:
+        completed_process = utils.run_command(
+            grep_command, capture_output=True, timeout=5
+        )
+    except subprocess.CalledProcessError as error:
+        if error.returncode == 1:
+            print(f"Found no references to {image_url}")
+            return
+        raise error
 
-  # Update references in all grepped files.
-  files = completed_process.stdout.split()
-  print(f"Updating references in {len(files)} files: {files}")
-  if not dry_run:
-    for line in fileinput.input(files=files, inplace=True):
-      print(re.sub(f"{image_url}@{DIGEST_REGEX}", f"{image_url}@{digest}",
-                   line),
-            end="")
+    # Update references in all grepped files.
+    files = completed_process.stdout.split()
+    print(f"Updating references in {len(files)} files: {files}")
+    if not dry_run:
+        for line in fileinput.input(files=files, inplace=True):
+            print(
+                re.sub(f"{image_url}@{DIGEST_REGEX}", f"{image_url}@{digest}", line),
+                end="",
+            )
 
 
 def parse_prod_digests() -> Dict[str, str]:
-  image_urls_to_prod_digests = {}
-  with open(utils.PROD_DIGESTS_PATH, "r") as f:
-    for line in f:
-      image_url, digest = line.strip().split("@")
-      image_urls_to_prod_digests[image_url] = digest
-  return image_urls_to_prod_digests
+    image_urls_to_prod_digests = {}
+    with open(utils.PROD_DIGESTS_PATH, "r") as f:
+        for line in f:
+            image_url, digest = line.strip().split("@")
+            image_urls_to_prod_digests[image_url] = digest
+    return image_urls_to_prod_digests
 
 
 if __name__ == "__main__":
-  args = parse_arguments()
-  image_urls_to_prod_digests = parse_prod_digests()
-  images_to_process = get_ordered_images_to_process(args.images)
-  print(f"Also processing dependent images. Will process: {images_to_process}")
+    args = parse_arguments()
+    image_urls_to_prod_digests = parse_prod_digests()
+    images_to_process = get_ordered_images_to_process(args.images)
+    print(f"Also processing dependent images. Will process: {images_to_process}")
 
-  if not args.only_references:
-    # Ensure the user has the correct authorization to push to GCR.
-    utils.check_gcloud_auth(dry_run=args.dry_run)
+    if not args.only_references:
+        # Ensure the user has the correct authorization to push to GCR.
+        utils.check_gcloud_auth(dry_run=args.dry_run)
 
-    dependencies = get_dependencies(images_to_process)
-    print(f"Pulling image dependencies: {dependencies}")
-    for dependency in dependencies:
-      dependency_url = posixpath.join(IREE_GCR_URL, dependency)
-      # If `dependency` is a new image then it may not have a prod digest yet.
-      if dependency_url in image_urls_to_prod_digests:
-        digest = image_urls_to_prod_digests[dependency_url]
-        dependency_with_digest = f"{dependency_url}@{digest}"
-        utils.run_command(["docker", "pull", dependency_with_digest],
-                          dry_run=args.dry_run)
+        dependencies = get_dependencies(images_to_process)
+        print(f"Pulling image dependencies: {dependencies}")
+        for dependency in dependencies:
+            dependency_url = posixpath.join(IREE_GCR_URL, dependency)
+            # If `dependency` is a new image then it may not have a prod digest yet.
+            if dependency_url in image_urls_to_prod_digests:
+                digest = image_urls_to_prod_digests[dependency_url]
+                dependency_with_digest = f"{dependency_url}@{digest}"
+                utils.run_command(
+                    ["docker", "pull", dependency_with_digest], dry_run=args.dry_run
+                )
 
-  for image in images_to_process:
-    print("\n" * 5 + f"Processing image {image}")
-    image_url = posixpath.join(IREE_GCR_URL, image)
-    tagged_image_url = f"{image_url}"
-    image_path = os.path.join(DOCKER_DIR, "dockerfiles", f"{image}.Dockerfile")
+    for image in images_to_process:
+        print("\n" * 5 + f"Processing image {image}")
+        image_url = posixpath.join(IREE_GCR_URL, image)
+        tagged_image_url = f"{image_url}"
+        image_path = os.path.join(DOCKER_DIR, "dockerfiles", f"{image}.Dockerfile")
 
-    if args.only_references:
-      digest = image_urls_to_prod_digests[image_url]
-    else:
-      # We deliberately give the whole repository as context so we can reuse
-      # scripts and such. It would be nice if Docker gave us a way to make this
-      # more explicit, like symlinking files in the context, but they refuse to
-      # with the justification that it makes builds non-hermetic, a hilarious
-      # concern for something that allows and encourages arbitrary network
-      # access in builds.
-      # We're assuming this is being run from the root of the repository.
-      # FIXME: make this more robust to where it is run from.
-      utils.run_command([
-          "docker", "build", "--file", image_path, "--tag", tagged_image_url,
-          "."
-      ],
-                        dry_run=args.dry_run)
+        if args.only_references:
+            digest = image_urls_to_prod_digests[image_url]
+        else:
+            # We deliberately give the whole repository as context so we can reuse
+            # scripts and such. It would be nice if Docker gave us a way to make this
+            # more explicit, like symlinking files in the context, but they refuse to
+            # with the justification that it makes builds non-hermetic, a hilarious
+            # concern for something that allows and encourages arbitrary network
+            # access in builds.
+            # We're assuming this is being run from the root of the repository.
+            # FIXME: make this more robust to where it is run from.
+            utils.run_command(
+                [
+                    "docker",
+                    "build",
+                    "--file",
+                    image_path,
+                    "--tag",
+                    tagged_image_url,
+                    ".",
+                ],
+                dry_run=args.dry_run,
+            )
 
-      utils.run_command(["docker", "push", tagged_image_url],
-                        dry_run=args.dry_run)
+            utils.run_command(
+                ["docker", "push", tagged_image_url], dry_run=args.dry_run
+            )
 
-      digest = get_repo_digest(tagged_image_url, args.dry_run)
+            digest = get_repo_digest(tagged_image_url, args.dry_run)
 
-      # Check that the image is in "prod_digests.txt" and append it to the list
-      # in the file if it isn't.
-      if image_url not in image_urls_to_prod_digests:
-        image_with_digest = f"{image_url}@{digest}"
-        print(
-            f"Adding new image {image_with_digest} to {utils.PROD_DIGESTS_PATH}"
-        )
-        if not args.dry_run:
-          with open(utils.PROD_DIGESTS_PATH, "a") as f:
-            f.write(f"{image_with_digest}\n")
+            # Check that the image is in "prod_digests.txt" and append it to the list
+            # in the file if it isn't.
+            if image_url not in image_urls_to_prod_digests:
+                image_with_digest = f"{image_url}@{digest}"
+                print(
+                    f"Adding new image {image_with_digest} to {utils.PROD_DIGESTS_PATH}"
+                )
+                if not args.dry_run:
+                    with open(utils.PROD_DIGESTS_PATH, "a") as f:
+                        f.write(f"{image_with_digest}\n")
 
-    update_references(image_url, digest, dry_run=args.dry_run)
+        update_references(image_url, digest, dry_run=args.dry_run)
diff --git a/build_tools/docker/utils.py b/build_tools/docker/utils.py
index cec694f..cde92bc 100644
--- a/build_tools/docker/utils.py
+++ b/build_tools/docker/utils.py
@@ -13,32 +13,33 @@
 PROD_DIGESTS_PATH = "build_tools/docker/prod_digests.txt".replace("/", os.sep)
 
 
-def run_command(command: Sequence[str],
-                dry_run: bool = False,
-                check: bool = True,
-                capture_output: bool = False,
-                text: bool = True,
-                **run_kwargs) -> subprocess.CompletedProcess:
-  """Thin wrapper around subprocess.run"""
-  print(f"Running: `{' '.join(command)}`")
-  if dry_run:
-    # Dummy CompletedProess with successful returncode.
-    return subprocess.CompletedProcess(command, returncode=0)
+def run_command(
+    command: Sequence[str],
+    dry_run: bool = False,
+    check: bool = True,
+    capture_output: bool = False,
+    text: bool = True,
+    **run_kwargs,
+) -> subprocess.CompletedProcess:
+    """Thin wrapper around subprocess.run"""
+    print(f"Running: `{' '.join(command)}`")
+    if dry_run:
+        # Dummy CompletedProcess with a successful returncode.
+        return subprocess.CompletedProcess(command, returncode=0)
 
-  completed_process = subprocess.run(command,
-                                     text=text,
-                                     check=check,
-                                     capture_output=capture_output,
-                                     **run_kwargs)
-  return completed_process
+    completed_process = subprocess.run(
+        command, text=text, check=check, capture_output=capture_output, **run_kwargs
+    )
+    return completed_process
 
 
 def check_gcloud_auth(dry_run: bool = False):
-  # Ensure the user has the correct authorization if they try to push to GCR.
-  try:
-    run_command(['which', 'gcloud'])
-  except subprocess.CalledProcessError as error:
-    raise RuntimeError(
-        'gcloud not found. See https://cloud.google.com/sdk/install for '
-        'installation.') from error
-  run_command(["gcloud", "auth", "configure-docker"], dry_run)
+    # Ensure the user has the correct authorization if they try to push to GCR.
+    try:
+        run_command(["which", "gcloud"])
+    except subprocess.CalledProcessError as error:
+        raise RuntimeError(
+            "gcloud not found. See https://cloud.google.com/sdk/install for "
+            "installation."
+        ) from error
+    run_command(["gcloud", "auth", "configure-docker"], dry_run)
diff --git a/build_tools/github_actions/build_dist.py b/build_tools/github_actions/build_dist.py
index b067eba..19dbcc6 100644
--- a/build_tools/github_actions/build_dist.py
+++ b/build_tools/github_actions/build_dist.py
@@ -62,142 +62,157 @@
 TF_INTEGRATIONS_DIR = os.path.join(IREESRC_DIR, "integrations/tensorflow")
 BINDIST_DIR = os.environ.get("BINDIST_DIR")
 if BINDIST_DIR is None:
-  BINDIST_DIR = os.path.join(WORK_DIR, "bindist")
+    BINDIST_DIR = os.path.join(WORK_DIR, "bindist")
 THIS_DIR = os.path.realpath(os.path.dirname(__file__))
 CMAKE_CI_SCRIPT = os.path.join(THIS_DIR, "cmake_ci.py")
-BUILD_REQUIREMENTS_TXT = os.path.join(IREESRC_DIR, "runtime", "bindings",
-                                      "python", "iree", "runtime",
-                                      "build_requirements.txt")
+BUILD_REQUIREMENTS_TXT = os.path.join(
+    IREESRC_DIR,
+    "runtime",
+    "bindings",
+    "python",
+    "iree",
+    "runtime",
+    "build_requirements.txt",
+)
 CI_REQUIREMENTS_TXT = os.path.join(THIS_DIR, "ci_requirements.txt")
 CONFIGURE_BAZEL_PY = os.path.join(IREESRC_DIR, "configure_bazel.py")
-INSTALL_TARGET = ("install"
-                  if platform.system() == "Windows" else "install/strip")
+INSTALL_TARGET = "install" if platform.system() == "Windows" else "install/strip"
 
 
 # Load version info.
 def load_version_info():
-  with open(os.path.join(IREESRC_DIR, "version_info.json"), "rt") as f:
-    return json.load(f)
+    with open(os.path.join(IREESRC_DIR, "version_info.json"), "rt") as f:
+        return json.load(f)
 
 
 try:
-  version_info = load_version_info()
+    version_info = load_version_info()
 except FileNotFoundError:
-  print("version_info.json not found. Using defaults")
-  version_info = {
-      "package-version": "0.1dev1",
-      "package-suffix": "-dev",
-  }
+    print("version_info.json not found. Using defaults")
+    version_info = {
+        "package-version": "0.1dev1",
+        "package-suffix": "-dev",
+    }
 
 
 def remove_cmake_cache():
-  cache_file = os.path.join(BUILD_DIR, "CMakeCache.txt")
-  if os.path.exists(cache_file):
-    print(f"Removing {cache_file}")
-    os.remove(cache_file)
-  else:
-    print(f"Not removing cache file (does not exist): {cache_file}")
+    cache_file = os.path.join(BUILD_DIR, "CMakeCache.txt")
+    if os.path.exists(cache_file):
+        print(f"Removing {cache_file}")
+        os.remove(cache_file)
+    else:
+        print(f"Not removing cache file (does not exist): {cache_file}")
 
 
 def install_python_requirements():
-  print("Installing python requirements...")
-  subprocess.check_call(
-      [sys.executable, "-m", "pip", "install", "-r", BUILD_REQUIREMENTS_TXT])
-  subprocess.check_call(
-      [sys.executable, "-m", "pip", "install", "-r", CI_REQUIREMENTS_TXT])
+    print("Installing python requirements...")
+    subprocess.check_call(
+        [sys.executable, "-m", "pip", "install", "-r", BUILD_REQUIREMENTS_TXT]
+    )
+    subprocess.check_call(
+        [sys.executable, "-m", "pip", "install", "-r", CI_REQUIREMENTS_TXT]
+    )
 
 
 def configure_bazel():
-  print("Generating configured.bazelrc...")
-  subprocess.check_call([sys.executable, CONFIGURE_BAZEL_PY])
+    print("Generating configured.bazelrc...")
+    subprocess.check_call([sys.executable, CONFIGURE_BAZEL_PY])
 
 
 def build_main_dist():
-  """Builds the main distribution binaries.
+    """Builds the main distribution binaries.
 
-  Additional packages that are installable as part of a full build and do not
-  benefit from a more restricted build can be added here.
-  """
-  install_python_requirements()
+    Additional packages that are installable as part of a full build and do not
+    benefit from a more restricted build can be added here.
+    """
+    install_python_requirements()
 
-  # Clean up install and build trees.
-  shutil.rmtree(INSTALL_DIR, ignore_errors=True)
-  remove_cmake_cache()
+    # Clean up install and build trees.
+    shutil.rmtree(INSTALL_DIR, ignore_errors=True)
+    remove_cmake_cache()
 
-  # CMake configure.
-  print("*** Configuring ***")
-  subprocess.run(
-      [
-          sys.executable,
-          CMAKE_CI_SCRIPT,
-          f"-B{BUILD_DIR}",
-          "--log-level=VERBOSE",
-          f"-DCMAKE_INSTALL_PREFIX={INSTALL_DIR}",
-          # On some distributions, this will install to lib64. We would like
-          # consistency in built packages, so hard-code it.
-          "-DCMAKE_INSTALL_LIBDIR=lib",
-          f"-DCMAKE_BUILD_TYPE=Release",
-          f"-DIREE_BUILD_COMPILER=ON",
-          f"-DIREE_BUILD_PYTHON_BINDINGS=OFF",
-          f"-DIREE_BUILD_SAMPLES=OFF",
-      ],
-      check=True)
+    # CMake configure.
+    print("*** Configuring ***")
+    subprocess.run(
+        [
+            sys.executable,
+            CMAKE_CI_SCRIPT,
+            f"-B{BUILD_DIR}",
+            "--log-level=VERBOSE",
+            f"-DCMAKE_INSTALL_PREFIX={INSTALL_DIR}",
+            # On some distributions, this will install to lib64. We would like
+            # consistency in built packages, so hard-code it.
+            "-DCMAKE_INSTALL_LIBDIR=lib",
+            f"-DCMAKE_BUILD_TYPE=Release",
+            f"-DIREE_BUILD_COMPILER=ON",
+            f"-DIREE_BUILD_PYTHON_BINDINGS=OFF",
+            f"-DIREE_BUILD_SAMPLES=OFF",
+        ],
+        check=True,
+    )
 
-  print("*** Building ***")
-  subprocess.run([
-      sys.executable,
-      CMAKE_CI_SCRIPT,
-      "--build",
-      BUILD_DIR,
-      "--target",
-      INSTALL_TARGET,
-  ],
-                 check=True)
+    print("*** Building ***")
+    subprocess.run(
+        [
+            sys.executable,
+            CMAKE_CI_SCRIPT,
+            "--build",
+            BUILD_DIR,
+            "--target",
+            INSTALL_TARGET,
+        ],
+        check=True,
+    )
 
-  print("*** Packaging ***")
-  dist_entries = [
-      "bin",
-      "lib",
-  ]
-  dist_archive = os.path.join(
-      BINDIST_DIR, f"iree-dist{version_info['package-suffix']}"
-      f"-{version_info['package-version']}"
-      f"-{sysconfig.get_platform()}.tar.xz")
-  print(f"Creating archive {dist_archive}")
-  os.makedirs(os.path.dirname(dist_archive), exist_ok=True)
-  with tarfile.open(dist_archive, mode="w:xz") as tf:
-    for entry in dist_entries:
-      print(f"Adding entry: {entry}")
-      tf.add(os.path.join(INSTALL_DIR, entry), arcname=entry, recursive=True)
+    print("*** Packaging ***")
+    dist_entries = [
+        "bin",
+        "lib",
+    ]
+    dist_archive = os.path.join(
+        BINDIST_DIR,
+        f"iree-dist{version_info['package-suffix']}"
+        f"-{version_info['package-version']}"
+        f"-{sysconfig.get_platform()}.tar.xz",
+    )
+    print(f"Creating archive {dist_archive}")
+    os.makedirs(os.path.dirname(dist_archive), exist_ok=True)
+    with tarfile.open(dist_archive, mode="w:xz") as tf:
+        for entry in dist_entries:
+            print(f"Adding entry: {entry}")
+            tf.add(os.path.join(INSTALL_DIR, entry), arcname=entry, recursive=True)
 
 
 def build_py_tf_compiler_tools_pkg():
-  """Builds the iree-install/python_packages/iree_tools_tf package."""
-  install_python_requirements()
-  configure_bazel()
+    """Builds the iree-install/python_packages/iree_tools_tf package."""
+    install_python_requirements()
+    configure_bazel()
 
-  # Clean up install and build trees.
-  shutil.rmtree(INSTALL_DIR, ignore_errors=True)
-  remove_cmake_cache()
+    # Clean up install and build trees.
+    shutil.rmtree(INSTALL_DIR, ignore_errors=True)
+    remove_cmake_cache()
 
-  os.makedirs(BINDIST_DIR, exist_ok=True)
+    os.makedirs(BINDIST_DIR, exist_ok=True)
 
-  for project in ["iree_tflite", "iree_tf"]:
-    print(f"*** Building wheel for {project} ***")
-    subprocess.run(
-        [
-            sys.executable, "-m", "pip", "wheel",
-            os.path.join(TF_INTEGRATIONS_DIR, "python_projects", project)
-        ],
-        cwd=BINDIST_DIR,
-        check=True,
-    )
+    for project in ["iree_tflite", "iree_tf"]:
+        print(f"*** Building wheel for {project} ***")
+        subprocess.run(
+            [
+                sys.executable,
+                "-m",
+                "pip",
+                "wheel",
+                os.path.join(TF_INTEGRATIONS_DIR, "python_projects", project),
+            ],
+            cwd=BINDIST_DIR,
+            check=True,
+        )
 
 
 command = sys.argv[1]
 if command == "main-dist":
-  build_main_dist()
+    build_main_dist()
 elif command == "py-tf-compiler-tools-pkg":
-  build_py_tf_compiler_tools_pkg()
+    build_py_tf_compiler_tools_pkg()
 else:
-  print(f"Unrecognized command: {command}")
+    print(f"Unrecognized command: {command}")
diff --git a/build_tools/github_actions/cmake_ci.py b/build_tools/github_actions/cmake_ci.py
index 0e328bd..b01a7ef 100644
--- a/build_tools/github_actions/cmake_ci.py
+++ b/build_tools/github_actions/cmake_ci.py
@@ -10,14 +10,15 @@
 # This future is needed to print Python2 EOL message
 from __future__ import print_function
 import sys
+
 if sys.version_info < (3,):
-  print("Python 2 has reached end-of-life and is no longer supported.")
-  sys.exit(-1)
-if sys.platform == 'win32' and sys.maxsize.bit_length() == 31:
-  print(
-      "32-bit Windows Python runtime is not supported. Please switch to 64-bit Python."
-  )
-  sys.exit(-1)
+    print("Python 2 has reached end-of-life and is no longer supported.")
+    sys.exit(-1)
+if sys.platform == "win32" and sys.maxsize.bit_length() == 31:
+    print(
+        "32-bit Windows Python runtime is not supported. Please switch to 64-bit Python."
+    )
+    sys.exit(-1)
 
 import importlib
 import json
@@ -27,175 +28,177 @@
 import sysconfig
 import tempfile
 
-is_windows = platform.system() == 'Windows'
+is_windows = platform.system() == "Windows"
 
 
 def display_help():
-  print('Syntax: python build_tools/cmake/cmake_ci.py [--install|--build] ...')
-  print('If neither --install or --build are the first argument, then it is ')
-  print('assumed to be a generate invocation')
+    print("Syntax: python build_tools/cmake/cmake_ci.py [--install|--build] ...")
+    print("If neither --install or --build are the first argument, then it is ")
+    print("assumed to be a generate invocation")
 
 
-mode = 'generate'
+mode = "generate"
 if len(sys.argv) < 2:
-  display_help()
-  sys.exit(1)
-if sys.argv[1] == '--install':
-  mode = 'install'
-elif sys.argv[1] == '--build':
-  mode = 'build'
+    display_help()
+    sys.exit(1)
+if sys.argv[1] == "--install":
+    mode = "install"
+elif sys.argv[1] == "--build":
+    mode = "build"
 
 
 def report(*args):
-  print('--', *args)
+    print("--", *args)
 
 
 def get_setting(varname, default_value):
-  value = os.environ.get(varname)
-  if value is None:
-    return default_value
-  return value
+    value = os.environ.get(varname)
+    if value is None:
+        return default_value
+    return value
 
 
 def get_bool_setting(varname, default_value):
-  value = get_setting(varname, default_value)
-  if value is True or value is False:
-    return value
-  return value == '' or value == 'ON' or value == '1'
+    value = get_setting(varname, default_value)
+    if value is True or value is False:
+        return value
+    return value == "" or value == "ON" or value == "1"
 
 
 def which(thefile):
-  path = os.environ.get("PATH", os.defpath).split(os.pathsep)
-  for d in path:
-    fname = os.path.join(d, thefile)
-    fnames = [fname]
-    if sys.platform == 'win32':
-      exts = os.environ.get('PATHEXT', '').split(os.pathsep)
-      fnames += [fname + ext for ext in exts]
-    for name in fnames:
-      if os.access(name, os.F_OK | os.X_OK) and not os.path.isdir(name):
-        return name
-  return None
+    path = os.environ.get("PATH", os.defpath).split(os.pathsep)
+    for d in path:
+        fname = os.path.join(d, thefile)
+        fnames = [fname]
+        if sys.platform == "win32":
+            exts = os.environ.get("PATHEXT", "").split(os.pathsep)
+            fnames += [fname + ext for ext in exts]
+        for name in fnames:
+            if os.access(name, os.F_OK | os.X_OK) and not os.path.isdir(name):
+                return name
+    return None
 
 
 def use_tool_path(toolname, varname=None):
-  if not varname:
-    varname = toolname.upper()
-  value = get_setting(f'USE_{varname}', 'ON')
-  if value.upper() == 'OFF':
-    return None
-  if value.upper() == 'ON' or value == '':
-    return which(toolname)
-  if os.access(value, os.F_OK | os.X_OK) and not os.path.isdir(value):
-    return value
+    if not varname:
+        varname = toolname.upper()
+    value = get_setting(f"USE_{varname}", "ON")
+    if value.upper() == "OFF":
+        return None
+    if value.upper() == "ON" or value == "":
+        return which(toolname)
+    if os.access(value, os.F_OK | os.X_OK) and not os.path.isdir(value):
+        return value
 
 
 ### Detect cmake.
-use_cmake = use_tool_path('cmake') or 'cmake'
+use_cmake = use_tool_path("cmake") or "cmake"
 cmake_command_prefix = [use_cmake]
 cmake_environ = os.environ
 
 
 def cmake_commandline(args):
-  return cmake_command_prefix + args
+    return cmake_command_prefix + args
 
 
 if is_windows:
-  # Bazel needs msys bash and TensorFlow will melt down and cry if it finds
-  # system bash. Because, of course it will.
-  # Note that we don't set this as a CMake option because it may have spaces
-  # in the path, use backslashes or various other things that get corrupted
-  # in the five or six layers of shoddy string transformations between here
-  # and where it gets used.
-  bash_exe = which('bash')
-  report('Found Windows bash:', bash_exe)
-  report('NOTE: If the above is system32 bash and you are using bazel to build '
-         'TensorFlow, you are going to have a bad time. Suggest being explicit '
-         'adding the correct directory to your path. I\'m really sorry. '
-         'I didn\'t make this mess... just the messenger')
-  report(f'Full path = {os.environ.get("PATH")}')
+    # Bazel needs msys bash and TensorFlow will melt down and cry if it finds
+    # system bash. Because, of course it will.
+    # Note that we don't set this as a CMake option because it may have spaces
+    # in the path, use backslashes or various other things that get corrupted
+    # in the five or six layers of shoddy string transformations between here
+    # and where it gets used.
+    bash_exe = which("bash")
+    report("Found Windows bash:", bash_exe)
+    report(
+        "NOTE: If the above is system32 bash and you are using bazel to build "
+        "TensorFlow, you are going to have a bad time. Suggest being explicit "
+        "adding the correct directory to your path. I'm really sorry. "
+        "I didn't make this mess... just the messenger"
+    )
+    report(f'Full path = {os.environ.get("PATH")}')
 
 
 def invoke_generate():
-  ##############################################################################
-  # Figure out where we are and where we are going.
-  ##############################################################################
-  repo_root = os.path.abspath(
-      get_setting('REPO_DIR', os.path.join(os.path.dirname(__file__), '..',
-                                           '..')))
-  report(f'Using REPO_DIR = {repo_root}')
+    ##############################################################################
+    # Figure out where we are and where we are going.
+    ##############################################################################
+    repo_root = os.path.abspath(
+        get_setting("REPO_DIR", os.path.join(os.path.dirname(__file__), "..", ".."))
+    )
+    report(f"Using REPO_DIR = {repo_root}")
 
-  ##############################################################################
-  # Load version_info.json
-  ##############################################################################
+    ##############################################################################
+    # Load version_info.json
+    ##############################################################################
 
-  def load_version_info():
-    with open(os.path.join(repo_root, 'version_info.json'), 'rt') as f:
-      return json.load(f)
+    def load_version_info():
+        with open(os.path.join(repo_root, "version_info.json"), "rt") as f:
+            return json.load(f)
 
-  try:
-    version_info = load_version_info()
-  except FileNotFoundError:
-    report('version_info.json found')
-    version_info = {}
+    try:
+        version_info = load_version_info()
+    except FileNotFoundError:
+        report("version_info.json found")
+        version_info = {}
 
-  ##############################################################################
-  # CMake configure.
-  ##############################################################################
+    ##############################################################################
+    # CMake configure.
+    ##############################################################################
 
-  cmake_args = [
-      f'-S{repo_root}',
-      f'-DPython3_EXECUTABLE:FILEPATH={sys.executable}',
-      # The old python package settings should not be needed, but since there
-      # can be configuration races between packages that use both mechanisms,
-      # be explicit.
-      f'-DPYTHON_EXECUTABLE:FILEPATH={sys.executable}',
-      f'-DPython3_INCLUDE_DIR:PATH={sysconfig.get_path("include")}',
-      f'-DPYTHON_INCLUDE_DIR:PATH={sysconfig.get_path("include")}',
-      f'-DIREE_RELEASE_PACKAGE_SUFFIX:STRING={version_info.get("package-suffix") or ""}',
-      f'-DIREE_RELEASE_VERSION:STRING={version_info.get("package-version") or "0.0.1a1"}',
-      f'-DIREE_RELEASE_REVISION:STRING={version_info.get("iree-revision") or "HEAD"}',
-  ]
+    cmake_args = [
+        f"-S{repo_root}",
+        f"-DPython3_EXECUTABLE:FILEPATH={sys.executable}",
+        # The old python package settings should not be needed, but since there
+        # can be configuration races between packages that use both mechanisms,
+        # be explicit.
+        f"-DPYTHON_EXECUTABLE:FILEPATH={sys.executable}",
+        f'-DPython3_INCLUDE_DIR:PATH={sysconfig.get_path("include")}',
+        f'-DPYTHON_INCLUDE_DIR:PATH={sysconfig.get_path("include")}',
+        f'-DIREE_RELEASE_PACKAGE_SUFFIX:STRING={version_info.get("package-suffix") or ""}',
+        f'-DIREE_RELEASE_VERSION:STRING={version_info.get("package-version") or "0.0.1a1"}',
+        f'-DIREE_RELEASE_REVISION:STRING={version_info.get("iree-revision") or "HEAD"}',
+    ]
 
-  ### Detect generator.
-  if use_tool_path('ninja'):
-    report('Using ninja')
-    cmake_args.append('-GNinja')
-  elif is_windows:
-    cmake_args.extend(['-G', 'NMake Makefiles'])
+    ### Detect generator.
+    if use_tool_path("ninja"):
+        report("Using ninja")
+        cmake_args.append("-GNinja")
+    elif is_windows:
+        cmake_args.extend(["-G", "NMake Makefiles"])
 
-  # Detect other build tools.
-  use_ccache = use_tool_path('ccache')
-  if not is_windows and use_ccache:
-    report(f'Using ccache {use_ccache}')
-    cmake_args.append(f'-DCMAKE_CXX_COMPILER_LAUNCHER={use_ccache}')
+    # Detect other build tools.
+    use_ccache = use_tool_path("ccache")
+    if not is_windows and use_ccache:
+        report(f"Using ccache {use_ccache}")
+        cmake_args.append(f"-DCMAKE_CXX_COMPILER_LAUNCHER={use_ccache}")
 
-  # Clang
-  use_clang = use_tool_path('clang')
-  if not is_windows and use_clang:
-    report(f'Using clang {use_clang}')
-    cmake_args.append(f'-DCMAKE_C_COMPILER={use_clang}')
-  use_clangcpp = use_tool_path('clang++', 'CLANGCPP')
-  if not is_windows and use_clangcpp:
-    report(f'Using clang++ {use_clangcpp}')
-    cmake_args.append(f'-DCMAKE_CXX_COMPILER={use_clangcpp}')
+    # Clang
+    use_clang = use_tool_path("clang")
+    if not is_windows and use_clang:
+        report(f"Using clang {use_clang}")
+        cmake_args.append(f"-DCMAKE_C_COMPILER={use_clang}")
+    use_clangcpp = use_tool_path("clang++", "CLANGCPP")
+    if not is_windows and use_clangcpp:
+        report(f"Using clang++ {use_clangcpp}")
+        cmake_args.append(f"-DCMAKE_CXX_COMPILER={use_clangcpp}")
 
-  # LLD
-  use_lld = use_tool_path('lld')
-  if not is_windows and use_lld:
-    report(f'Using linker {use_lld}')
-    cmake_args.append('-DIREE_ENABLE_LLD=ON')
+    # LLD
+    use_lld = use_tool_path("lld")
+    if not is_windows and use_lld:
+        report(f"Using linker {use_lld}")
+        cmake_args.append("-DIREE_ENABLE_LLD=ON")
 
-  cmake_args.extend(sys.argv[1:])
-  report(f'Running cmake (generate): {" ".join(cmake_args)}')
-  subprocess.check_call(cmake_commandline(cmake_args), env=cmake_environ)
+    cmake_args.extend(sys.argv[1:])
+    report(f'Running cmake (generate): {" ".join(cmake_args)}')
+    subprocess.check_call(cmake_commandline(cmake_args), env=cmake_environ)
 
 
 # Select which mode.
-if mode == 'generate':
-  invoke_generate()
+if mode == "generate":
+    invoke_generate()
 else:
-  # Just pass-through.
-  cmake_args = cmake_commandline(sys.argv[1:])
-  report('Invoke CMake:', ' '.join(cmake_args))
-  subprocess.check_call(cmake_args, env=cmake_environ)
+    # Just pass-through.
+    cmake_args = cmake_commandline(sys.argv[1:])
+    report("Invoke CMake:", " ".join(cmake_args))
+    subprocess.check_call(cmake_args, env=cmake_environ)
diff --git a/build_tools/github_actions/configure_ci.py b/build_tools/github_actions/configure_ci.py
index 8c80110..0e009c2 100755
--- a/build_tools/github_actions/configure_ci.py
+++ b/build_tools/github_actions/configure_ci.py
@@ -78,14 +78,17 @@
 RUNNER_ENV_OPTIONS = [RUNNER_ENV_DEFAULT, "testing"]
 
 DEFAULT_BENCHMARK_PRESET_GROUP = [
-    "cuda", "x86_64", "android-cpu", "android-gpu", "vulkan-nvidia",
-    "comp-stats"
+    "cuda",
+    "x86_64",
+    "android-cpu",
+    "android-gpu",
+    "vulkan-nvidia",
+    "comp-stats",
 ]
 DEFAULT_BENCHMARK_PRESET = "default"
 LARGE_BENCHMARK_PRESET_GROUP = ["cuda-large", "x86_64-large"]
 # All available benchmark preset options including experimental presets.
-BENCHMARK_PRESET_OPTIONS = (DEFAULT_BENCHMARK_PRESET_GROUP +
-                            LARGE_BENCHMARK_PRESET_GROUP)
+BENCHMARK_PRESET_OPTIONS = DEFAULT_BENCHMARK_PRESET_GROUP + LARGE_BENCHMARK_PRESET_GROUP
 BENCHMARK_LABEL_PREFIX = "benchmarks"
 
 PR_DESCRIPTION_TEMPLATE = "{title}" "\n\n" "{body}"
@@ -95,67 +98,76 @@
 # intended to be merged and should exclude test/draft PRs as well as
 # PRs that include temporary patches to the submodule during review.
 # See also: https://github.com/openxla/iree/issues/12268
-LLVM_INTEGRATE_TITLE_PATTERN = re.compile("^integrate.+llvm-project",
-                                          re.IGNORECASE)
+LLVM_INTEGRATE_TITLE_PATTERN = re.compile("^integrate.+llvm-project", re.IGNORECASE)
 LLVM_INTEGRATE_BRANCH_PATTERN = re.compile("bump-llvm|llvm-bump", re.IGNORECASE)
 LLVM_INTEGRATE_LABEL = "llvm-integrate"
 
 
 def skip_path(path: str) -> bool:
-  return any(fnmatch.fnmatch(path, pattern) for pattern in SKIP_PATH_PATTERNS)
+    return any(fnmatch.fnmatch(path, pattern) for pattern in SKIP_PATH_PATTERNS)
 
 
 def set_output(d: Mapping[str, str]):
-  print(f"Setting outputs: {d}")
-  step_output_file = os.environ["GITHUB_OUTPUT"]
-  with open(step_output_file, "a") as f:
-    f.writelines(f"{k}={v}" + "\n" for k, v in d.items())
+    print(f"Setting outputs: {d}")
+    step_output_file = os.environ["GITHUB_OUTPUT"]
+    with open(step_output_file, "a") as f:
+        f.writelines(f"{k}={v}" + "\n" for k, v in d.items())
 
 
 def write_job_summary(summary: str):
-  """Write markdown messages on Github workflow UI.
-  See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-job-summary
-  """
-  step_summary_file = os.environ["GITHUB_STEP_SUMMARY"]
-  with open(step_summary_file, "a") as f:
-    # Use double newlines to split sections in markdown.
-    f.write(summary + "\n\n")
+    """Write markdown messages on Github workflow UI.
+    See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-job-summary
+    """
+    step_summary_file = os.environ["GITHUB_STEP_SUMMARY"]
+    with open(step_summary_file, "a") as f:
+        # Use double newlines to split sections in markdown.
+        f.write(summary + "\n\n")
 
 
-def check_description_and_show_diff(original_description: str,
-                                    original_labels: Sequence[str],
-                                    current_description: str,
-                                    current_labels: Sequence[str]):
-  original_labels = sorted(original_labels)
-  current_labels = sorted(current_labels)
-  if (original_description == current_description and
-      original_labels == current_labels):
-    return
+def check_description_and_show_diff(
+    original_description: str,
+    original_labels: Sequence[str],
+    current_description: str,
+    current_labels: Sequence[str],
+):
+    original_labels = sorted(original_labels)
+    current_labels = sorted(current_labels)
+    if (
+        original_description == current_description
+        and original_labels == current_labels
+    ):
+        return
 
-  description_diffs = difflib.unified_diff(
-      original_description.splitlines(keepends=True),
-      current_description.splitlines(keepends=True))
-  description_diffs = "".join(description_diffs)
+    description_diffs = difflib.unified_diff(
+        original_description.splitlines(keepends=True),
+        current_description.splitlines(keepends=True),
+    )
+    description_diffs = "".join(description_diffs)
 
-  if description_diffs != "":
-    description_diffs = textwrap.dedent("""\
+    if description_diffs != "":
+        description_diffs = textwrap.dedent(
+            """\
     ```diff
     {}
     ```
-    """).format(description_diffs)
+    """
+        ).format(description_diffs)
 
-  if original_labels == current_labels:
-    label_diffs = ""
-  else:
-    label_diffs = textwrap.dedent("""\
+    if original_labels == current_labels:
+        label_diffs = ""
+    else:
+        label_diffs = textwrap.dedent(
+            """\
     ```
     Original labels: {original_labels}
     Current labels: {current_labels}
     ```
-    """).format(original_labels=original_labels, current_labels=current_labels)
+    """
+        ).format(original_labels=original_labels, current_labels=current_labels)
 
-  write_job_summary(
-      textwrap.dedent("""\
+    write_job_summary(
+        textwrap.dedent(
+            """\
   :pushpin: Using the PR description and labels different from the original PR event that started this workflow.
 
   <details>
@@ -164,184 +176,203 @@
   {description_diffs}
 
   {label_diffs}
-  </details>""").format(description_diffs=description_diffs,
-                        label_diffs=label_diffs))
+  </details>"""
+        ).format(description_diffs=description_diffs, label_diffs=label_diffs)
+    )
 
 
 def get_trailers_and_labels(is_pr: bool) -> Tuple[Mapping[str, str], List[str]]:
-  if not is_pr:
-    return ({}, [])
+    if not is_pr:
+        return ({}, [])
 
-  title = os.environ["PR_TITLE"]
-  body = os.environ.get("PR_BODY", "")
-  labels = json.loads(os.environ.get("PR_LABELS", "[]"))
-  original_title = os.environ.get("ORIGINAL_PR_TITLE")
-  original_body = os.environ.get("ORIGINAL_PR_BODY", "")
-  original_labels = json.loads(os.environ.get("ORIGINAL_PR_LABELS", "[]"))
+    title = os.environ["PR_TITLE"]
+    body = os.environ.get("PR_BODY", "")
+    labels = json.loads(os.environ.get("PR_LABELS", "[]"))
+    original_title = os.environ.get("ORIGINAL_PR_TITLE")
+    original_body = os.environ.get("ORIGINAL_PR_BODY", "")
+    original_labels = json.loads(os.environ.get("ORIGINAL_PR_LABELS", "[]"))
 
-  description = PR_DESCRIPTION_TEMPLATE.format(title=title, body=body)
+    description = PR_DESCRIPTION_TEMPLATE.format(title=title, body=body)
 
-  # PR information can be fetched from API for the latest updates. If
-  # ORIGINAL_PR_TITLE is set, compare the current and original description and
-  # show a notice if they are different. This is mostly to inform users that the
-  # workflow might not parse the PR description they expect.
-  if original_title is not None:
-    original_description = PR_DESCRIPTION_TEMPLATE.format(title=original_title,
-                                                          body=original_body)
-    print("Original PR description and labels:",
-          original_description,
-          original_labels,
-          sep="\n")
-    check_description_and_show_diff(original_description=original_description,
-                                    original_labels=original_labels,
-                                    current_description=description,
-                                    current_labels=labels)
+    # PR information can be fetched from API for the latest updates. If
+    # ORIGINAL_PR_TITLE is set, compare the current and original description and
+    # show a notice if they are different. This is mostly to inform users that the
+    # workflow might not parse the PR description they expect.
+    if original_title is not None:
+        original_description = PR_DESCRIPTION_TEMPLATE.format(
+            title=original_title, body=original_body
+        )
+        print(
+            "Original PR description and labels:",
+            original_description,
+            original_labels,
+            sep="\n",
+        )
+        check_description_and_show_diff(
+            original_description=original_description,
+            original_labels=original_labels,
+            current_description=description,
+            current_labels=labels,
+        )
 
-  print("Parsing PR description and labels:", description, labels, sep="\n")
+    print("Parsing PR description and labels:", description, labels, sep="\n")
 
-  trailer_lines = subprocess.run(
-      ["git", "interpret-trailers", "--parse", "--no-divider"],
-      input=description,
-      stdout=subprocess.PIPE,
-      check=True,
-      text=True,
-      timeout=60).stdout.splitlines()
-  trailer_map = {
-      k.lower().strip(): v.strip()
-      for k, v in (line.split(":", maxsplit=1) for line in trailer_lines)
-  }
-  return (trailer_map, labels)
+    trailer_lines = subprocess.run(
+        ["git", "interpret-trailers", "--parse", "--no-divider"],
+        input=description,
+        stdout=subprocess.PIPE,
+        check=True,
+        text=True,
+        timeout=60,
+    ).stdout.splitlines()
+    trailer_map = {
+        k.lower().strip(): v.strip()
+        for k, v in (line.split(":", maxsplit=1) for line in trailer_lines)
+    }
+    return (trailer_map, labels)
 
 
 def get_modified_paths(base_ref: str) -> Iterable[str]:
-  return subprocess.run(["git", "diff", "--name-only", base_ref],
-                        stdout=subprocess.PIPE,
-                        check=True,
-                        text=True,
-                        timeout=60).stdout.splitlines()
+    return subprocess.run(
+        ["git", "diff", "--name-only", base_ref],
+        stdout=subprocess.PIPE,
+        check=True,
+        text=True,
+        timeout=60,
+    ).stdout.splitlines()
 
 
 def modifies_included_path(base_ref: str) -> bool:
-  return any(not skip_path(p) for p in get_modified_paths(base_ref))
+    return any(not skip_path(p) for p in get_modified_paths(base_ref))
 
 
 def should_run_ci(is_pr: bool, trailers: Mapping[str, str]) -> bool:
-  if not is_pr:
-    print("Running CI independent of diff because run was not triggered by a"
-          " pull request event.")
+    if not is_pr:
+        print(
+            "Running CI independent of diff because run was not triggered by a"
+            " pull request event."
+        )
+        return True
+
+    if SKIP_CI_KEY in trailers:
+        print(f"Not running CI because PR description has '{SKIP_CI_KEY}' trailer.")
+        return False
+
+    base_ref = os.environ["BASE_REF"]
+    try:
+        modifies = modifies_included_path(base_ref)
+    except TimeoutError as e:
+        print("Computing modified files timed out. Running the CI")
+        return True
+
+    if not modifies:
+        print("Skipping CI because all modified files are marked as excluded.")
+        return False
+
+    print("CI should run")
     return True
 
-  if SKIP_CI_KEY in trailers:
-    print(f"Not running CI because PR description has '{SKIP_CI_KEY}' trailer.")
-    return False
-
-  base_ref = os.environ["BASE_REF"]
-  try:
-    modifies = modifies_included_path(base_ref)
-  except TimeoutError as e:
-    print("Computing modified files timed out. Running the CI")
-    return True
-
-  if not modifies:
-    print("Skipping CI because all modified files are marked as excluded.")
-    return False
-
-  print("CI should run")
-  return True
-
 
 def get_runner_env(trailers: Mapping[str, str]) -> str:
-  runner_env = trailers.get(RUNNER_ENV_KEY)
-  if runner_env is None:
-    print(f"Using '{RUNNER_ENV_DEFAULT}' runners because '{RUNNER_ENV_KEY}'"
-          f" not found in {trailers}")
-    runner_env = RUNNER_ENV_DEFAULT
-  else:
-    print(
-        f"Using runner environment '{runner_env}' from PR description trailers")
-  return runner_env
+    runner_env = trailers.get(RUNNER_ENV_KEY)
+    if runner_env is None:
+        print(
+            f"Using '{RUNNER_ENV_DEFAULT}' runners because '{RUNNER_ENV_KEY}'"
+            f" not found in {trailers}"
+        )
+        runner_env = RUNNER_ENV_DEFAULT
+    else:
+        print(f"Using runner environment '{runner_env}' from PR description trailers")
+    return runner_env
 
 
-def get_benchmark_presets(trailers: Mapping[str, str], labels: Sequence[str],
-                          is_pr: bool, is_llvm_integrate_pr: bool) -> str:
-  """Parses and validates the benchmark presets from trailers.
+def get_benchmark_presets(
+    trailers: Mapping[str, str],
+    labels: Sequence[str],
+    is_pr: bool,
+    is_llvm_integrate_pr: bool,
+) -> str:
+    """Parses and validates the benchmark presets from trailers.
 
-  Args:
-    trailers: trailers from PR description.
-    labels: list of PR labels.
-    is_pr: is pull request event.
-    is_llvm_integrate_pr: is LLVM integration PR.
+    Args:
+      trailers: trailers from PR description.
+      labels: list of PR labels.
+      is_pr: is pull request event.
+      is_llvm_integrate_pr: is LLVM integration PR.
 
-  Returns:
-    A comma separated preset string, which later will be parsed by
-    build_tools/benchmarks/export_benchmark_config.py.
-  """
+    Returns:
+      A comma separated preset string, which later will be parsed by
+      build_tools/benchmarks/export_benchmark_config.py.
+    """
 
-  skip_llvm_integrate_benchmark = SKIP_LLVM_INTEGRATE_BENCHMARK_KEY in trailers
-  if skip_llvm_integrate_benchmark:
-    print("Skipping default benchmarking on LLVM integration because PR "
-          f"description has '{SKIP_LLVM_INTEGRATE_BENCHMARK_KEY}' trailer.")
+    skip_llvm_integrate_benchmark = SKIP_LLVM_INTEGRATE_BENCHMARK_KEY in trailers
+    if skip_llvm_integrate_benchmark:
+        print(
+            "Skipping default benchmarking on LLVM integration because PR "
+            f"description has '{SKIP_LLVM_INTEGRATE_BENCHMARK_KEY}' trailer."
+        )
 
-  if not is_pr:
-    preset_options = {DEFAULT_BENCHMARK_PRESET}
-    print(f"Using benchmark presets '{preset_options}' for non-PR run")
-  elif is_llvm_integrate_pr and not skip_llvm_integrate_benchmark:
-    # Run all benchmark presets for LLVM integration PRs.
-    preset_options = {DEFAULT_BENCHMARK_PRESET}
-    print(f"Using benchmark preset '{preset_options}' for LLVM integration PR")
-  else:
-    preset_options = set(
-        label.split(":", maxsplit=1)[1]
-        for label in labels
-        if label.startswith(BENCHMARK_LABEL_PREFIX + ":"))
-    trailer = trailers.get(BENCHMARK_EXTRA_KEY)
-    if trailer is not None:
-      preset_options = preset_options.union(
-          option.strip() for option in trailer.split(","))
-    print(f"Using benchmark preset '{preset_options}' from trailers and labels")
+    if not is_pr:
+        preset_options = {DEFAULT_BENCHMARK_PRESET}
+        print(f"Using benchmark presets '{preset_options}' for non-PR run")
+    elif is_llvm_integrate_pr and not skip_llvm_integrate_benchmark:
+        # Run all benchmark presets for LLVM integration PRs.
+        preset_options = {DEFAULT_BENCHMARK_PRESET}
+        print(f"Using benchmark preset '{preset_options}' for LLVM integration PR")
+    else:
+        preset_options = set(
+            label.split(":", maxsplit=1)[1]
+            for label in labels
+            if label.startswith(BENCHMARK_LABEL_PREFIX + ":")
+        )
+        trailer = trailers.get(BENCHMARK_EXTRA_KEY)
+        if trailer is not None:
+            preset_options = preset_options.union(
+                option.strip() for option in trailer.split(",")
+            )
+        print(f"Using benchmark preset '{preset_options}' from trailers and labels")
 
-  if DEFAULT_BENCHMARK_PRESET in preset_options:
-    preset_options.remove(DEFAULT_BENCHMARK_PRESET)
-    preset_options.update(DEFAULT_BENCHMARK_PRESET_GROUP)
+    if DEFAULT_BENCHMARK_PRESET in preset_options:
+        preset_options.remove(DEFAULT_BENCHMARK_PRESET)
+        preset_options.update(DEFAULT_BENCHMARK_PRESET_GROUP)
 
-  if preset_options.intersection(DEFAULT_BENCHMARK_PRESET_GROUP):
-    # The is a sugar to run the compilation benchmarks when any default
-    # benchmark preset is present.
-    preset_options.add("comp-stats")
+    if preset_options.intersection(DEFAULT_BENCHMARK_PRESET_GROUP):
+        # This is sugar to run the compilation benchmarks when any default
+        # benchmark preset is present.
+        preset_options.add("comp-stats")
 
-  preset_options = sorted(preset_options)
-  for preset_option in preset_options:
-    if preset_option not in BENCHMARK_PRESET_OPTIONS:
-      raise ValueError(f"Unknown benchmark preset option: '{preset_option}'.\n"
-                       f"Available options: '{BENCHMARK_PRESET_OPTIONS}'.")
+    preset_options = sorted(preset_options)
+    for preset_option in preset_options:
+        if preset_option not in BENCHMARK_PRESET_OPTIONS:
+            raise ValueError(
+                f"Unknown benchmark preset option: '{preset_option}'.\n"
+                f"Available options: '{BENCHMARK_PRESET_OPTIONS}'."
+            )
 
-  return ",".join(preset_options)
+    return ",".join(preset_options)
 
 
 def main():
-  is_pr = os.environ["GITHUB_EVENT_NAME"] == "pull_request"
-  trailers, labels = get_trailers_and_labels(is_pr)
-  is_llvm_integrate_pr = bool(
-      LLVM_INTEGRATE_TITLE_PATTERN.search(os.environ.get("PR_TITLE", "")) or
-      LLVM_INTEGRATE_BRANCH_PATTERN.search(os.environ.get("PR_BRANCH", "")) or
-      LLVM_INTEGRATE_LABEL in labels)
-  output = {
-      "should-run":
-          json.dumps(should_run_ci(is_pr, trailers)),
-      "is-pr":
-          json.dumps(is_pr),
-      "runner-env":
-          get_runner_env(trailers),
-      "runner-group":
-          "presubmit" if is_pr else "postsubmit",
-      "write-caches":
-          "0" if is_pr else "1",
-      "benchmark-presets":
-          get_benchmark_presets(trailers, labels, is_pr, is_llvm_integrate_pr),
-  }
+    is_pr = os.environ["GITHUB_EVENT_NAME"] == "pull_request"
+    trailers, labels = get_trailers_and_labels(is_pr)
+    is_llvm_integrate_pr = bool(
+        LLVM_INTEGRATE_TITLE_PATTERN.search(os.environ.get("PR_TITLE", ""))
+        or LLVM_INTEGRATE_BRANCH_PATTERN.search(os.environ.get("PR_BRANCH", ""))
+        or LLVM_INTEGRATE_LABEL in labels
+    )
+    output = {
+        "should-run": json.dumps(should_run_ci(is_pr, trailers)),
+        "is-pr": json.dumps(is_pr),
+        "runner-env": get_runner_env(trailers),
+        "runner-group": "presubmit" if is_pr else "postsubmit",
+        "write-caches": "0" if is_pr else "1",
+        "benchmark-presets": get_benchmark_presets(
+            trailers, labels, is_pr, is_llvm_integrate_pr
+        ),
+    }
 
-  set_output(output)
+    set_output(output)
 
 
 if __name__ == "__main__":
-  main()
+    main()
diff --git a/build_tools/github_actions/configure_ci_test.py b/build_tools/github_actions/configure_ci_test.py
index 04b6d4f..0640ec2 100644
--- a/build_tools/github_actions/configure_ci_test.py
+++ b/build_tools/github_actions/configure_ci_test.py
@@ -11,93 +11,99 @@
 import configure_ci
 
 SORTED_DEFAULT_BENCHMARK_PRESETS_STR = ",".join(
-    sorted(configure_ci.DEFAULT_BENCHMARK_PRESET_GROUP))
+    sorted(configure_ci.DEFAULT_BENCHMARK_PRESET_GROUP)
+)
 
 
 class GetBenchmarkPresetsTest(unittest.TestCase):
+    def test_get_benchmark_presets_no_preset(self):
+        presets_str = configure_ci.get_benchmark_presets(
+            trailers={},
+            labels=["unrelated-labels"],
+            is_pr=True,
+            is_llvm_integrate_pr=False,
+        )
 
-  def test_get_benchmark_presets_no_preset(self):
-    presets_str = configure_ci.get_benchmark_presets(
-        trailers={},
-        labels=["unrelated-labels"],
-        is_pr=True,
-        is_llvm_integrate_pr=False)
+        self.assertEqual(presets_str, "")
 
-    self.assertEqual(presets_str, "")
+    def test_get_benchmark_presets_from_pr_labels(self):
+        presets_str = configure_ci.get_benchmark_presets(
+            trailers={},
+            labels=["benchmarks:x86_64", "benchmarks:cuda"],
+            is_pr=True,
+            is_llvm_integrate_pr=False,
+        )
 
-  def test_get_benchmark_presets_from_pr_labels(self):
-    presets_str = configure_ci.get_benchmark_presets(
-        trailers={},
-        labels=["benchmarks:x86_64", "benchmarks:cuda"],
-        is_pr=True,
-        is_llvm_integrate_pr=False)
+        self.assertEqual(presets_str, "comp-stats,cuda,x86_64")
 
-    self.assertEqual(presets_str, "comp-stats,cuda,x86_64")
+    def test_get_benchmark_presets_from_trailers_and_labels(self):
+        presets_str = configure_ci.get_benchmark_presets(
+            trailers={"benchmark-extra": "android-cpu,cuda-large,x86_64-large"},
+            labels=["benchmarks:vulkan-nvidia"],
+            is_pr=True,
+            is_llvm_integrate_pr=False,
+        )
 
-  def test_get_benchmark_presets_from_trailers_and_labels(self):
-    presets_str = configure_ci.get_benchmark_presets(
-        trailers={"benchmark-extra": "android-cpu,cuda-large,x86_64-large"},
-        labels=["benchmarks:vulkan-nvidia"],
-        is_pr=True,
-        is_llvm_integrate_pr=False)
+        self.assertEqual(
+            presets_str, "android-cpu,comp-stats,cuda-large,vulkan-nvidia,x86_64-large"
+        )
 
-    self.assertEqual(
-        presets_str,
-        "android-cpu,comp-stats,cuda-large,vulkan-nvidia,x86_64-large")
-
-  def test_get_benchmark_presets_from_default_group(self):
-    presets_str = configure_ci.get_benchmark_presets(
-        trailers={"benchmark-extra": "default"},
-        labels=[],
-        is_pr=True,
-        is_llvm_integrate_pr=False)
-
-    self.assertEqual(presets_str, SORTED_DEFAULT_BENCHMARK_PRESETS_STR)
-    # Sanity check to ensure no `*-large` preset in the default group.
-    self.assertNotIn("-large", presets_str)
-
-  def test_get_benchmark_presets_for_non_pr(self):
-    presets_str = configure_ci.get_benchmark_presets(trailers={},
-                                                     labels=[],
-                                                     is_pr=False,
-                                                     is_llvm_integrate_pr=False)
-
-    self.assertEqual(presets_str, SORTED_DEFAULT_BENCHMARK_PRESETS_STR)
-
-  def test_get_benchmark_presets_for_llvm_integrate_pr(self):
-    presets_str = configure_ci.get_benchmark_presets(trailers={},
-                                                     labels=[],
-                                                     is_pr=True,
-                                                     is_llvm_integrate_pr=True)
-
-    self.assertEqual(presets_str, SORTED_DEFAULT_BENCHMARK_PRESETS_STR)
-
-  # Sample PR description:
-  # ```
-  # PR Title
-  #
-  # PR body...
-  #
-  # skip-llvm-integrate-benchmark: some good reasons
-  # ```
-  # Result: No benchmark is automatically enabled on the LLVM integrate PR.
-  def test_get_benchmark_presets_skip_llvm_integrate_benchmark(self):
-    presets_str = configure_ci.get_benchmark_presets(
-        trailers={"skip-llvm-integrate-benchmark": "some good reasons"},
-        labels=[],
-        is_pr=True,
-        is_llvm_integrate_pr=True)
-
-    self.assertEqual(presets_str, "")
-
-  def test_get_benchmark_presets_unknown_preset(self):
-    self.assertRaises(
-        ValueError, lambda: configure_ci.get_benchmark_presets(
-            trailers={"benchmark-extra": "unknown"},
+    def test_get_benchmark_presets_from_default_group(self):
+        presets_str = configure_ci.get_benchmark_presets(
+            trailers={"benchmark-extra": "default"},
             labels=[],
             is_pr=True,
-            is_llvm_integrate_pr=False))
+            is_llvm_integrate_pr=False,
+        )
+
+        self.assertEqual(presets_str, SORTED_DEFAULT_BENCHMARK_PRESETS_STR)
+        # Sanity check to ensure no `*-large` preset in the default group.
+        self.assertNotIn("-large", presets_str)
+
+    def test_get_benchmark_presets_for_non_pr(self):
+        presets_str = configure_ci.get_benchmark_presets(
+            trailers={}, labels=[], is_pr=False, is_llvm_integrate_pr=False
+        )
+
+        self.assertEqual(presets_str, SORTED_DEFAULT_BENCHMARK_PRESETS_STR)
+
+    def test_get_benchmark_presets_for_llvm_integrate_pr(self):
+        presets_str = configure_ci.get_benchmark_presets(
+            trailers={}, labels=[], is_pr=True, is_llvm_integrate_pr=True
+        )
+
+        self.assertEqual(presets_str, SORTED_DEFAULT_BENCHMARK_PRESETS_STR)
+
+    # Sample PR description:
+    # ```
+    # PR Title
+    #
+    # PR body...
+    #
+    # skip-llvm-integrate-benchmark: some good reasons
+    # ```
+    # Result: No benchmark is automatically enabled on the LLVM integrate PR.
+    def test_get_benchmark_presets_skip_llvm_integrate_benchmark(self):
+        presets_str = configure_ci.get_benchmark_presets(
+            trailers={"skip-llvm-integrate-benchmark": "some good reasons"},
+            labels=[],
+            is_pr=True,
+            is_llvm_integrate_pr=True,
+        )
+
+        self.assertEqual(presets_str, "")
+
+    def test_get_benchmark_presets_unknown_preset(self):
+        self.assertRaises(
+            ValueError,
+            lambda: configure_ci.get_benchmark_presets(
+                trailers={"benchmark-extra": "unknown"},
+                labels=[],
+                is_pr=True,
+                is_llvm_integrate_pr=False,
+            ),
+        )
 
 
 if __name__ == "__main__":
-  unittest.main()
+    unittest.main()
diff --git a/build_tools/github_actions/runner/config/health_server/health_server.py b/build_tools/github_actions/runner/config/health_server/health_server.py
index d62df74..626bb20 100755
--- a/build_tools/github_actions/runner/config/health_server/health_server.py
+++ b/build_tools/github_actions/runner/config/health_server/health_server.py
@@ -31,62 +31,62 @@
 
 
 class HealthCheckHandler(http.server.BaseHTTPRequestHandler):
+    def send_success(self, *, msg: Optional[str] = None, body: Optional[str] = None):
+        self.send_response(OK)
+        self.send_header("Content-type", "text/html")
+        self.end_headers()
+        if body is not None:
+            self.wfile.write(bytes(body, encoding="utf-8"))
 
-  def send_success(self,
-                   *,
-                   msg: Optional[str] = None,
-                   body: Optional[str] = None):
-    self.send_response(OK)
-    self.send_header("Content-type", "text/html")
-    self.end_headers()
-    if body is not None:
-      self.wfile.write(bytes(body, encoding="utf-8"))
+    def do_GET(self):
+        try:
+            subprocess.run(
+                CHECK_SERVICE_CMD,
+                check=True,
+                text=True,
+                stdout=subprocess.PIPE,
+                timeout=CHECK_SERVICE_TIMEOUT,
+            )
+        except subprocess.TimeoutExpired as e:
+            msg = f"'{' '.join(e.cmd)}' timed out: {e.stdout}"
+            return self.send_error(INTERNAL_SERVER_ERROR, msg)
+        except subprocess.CalledProcessError as e:
+            return self.send_error(
+                NOT_FOUND,
+                f"Runner service not found: '{' '.join(e.cmd)}' returned"
+                f" '{e.stdout.strip()}' (exit code {e.returncode})",
+            )
 
-  def do_GET(self):
-    try:
-      subprocess.run(CHECK_SERVICE_CMD,
-                     check=True,
-                     text=True,
-                     stdout=subprocess.PIPE,
-                     timeout=CHECK_SERVICE_TIMEOUT)
-    except subprocess.TimeoutExpired as e:
-      msg = f"'{' '.join(e.cmd)}' timed out: {e.stdout}"
-      return self.send_error(INTERNAL_SERVER_ERROR, msg)
-    except subprocess.CalledProcessError as e:
-      return self.send_error(
-          NOT_FOUND, f"Runner service not found: '{' '.join(e.cmd)}' returned"
-          f" '{e.stdout.strip()}' (exit code {e.returncode})")
+        # The runner writes a log file for each job it runs. In our case it only
+        # runs one, so we glob for anything matching that pattern. Yes that is an
+        # absolutely ludicrous way to get the runner's status. GitHub should really
+        # implement a proper health check so we don't have to hack around like this.
+        if glob.glob(RUNNER_WORK_LOG_PATTERN):
+            return self.send_success(body="active")
 
-    # The runner writes a log file for each job it runs. In our case it only
-    # runs one, so we glob for anything matching that pattern. Yes that is an
-    # absolutely ludicrous way to get the runner's status. GitHub should really
-    # implement a proper health check so we don't have to hack around like this.
-    if glob.glob(RUNNER_WORK_LOG_PATTERN):
-      return self.send_success(body="active")
-
-    return self.send_success(body="idle")
+        return self.send_success(body="idle")
 
 
 def main(args: argparse.Namespace):
-  webServer = http.server.HTTPServer(("", args.port), HealthCheckHandler)
-  print(f"Server started on port {args.port}. Ctrl+C to stop.")
+    webServer = http.server.HTTPServer(("", args.port), HealthCheckHandler)
+    print(f"Server started on port {args.port}. Ctrl+C to stop.")
 
-  try:
-    webServer.serve_forever()
-  except KeyboardInterrupt:
-    # Don't print an exception on interrupt. Add a newline to handle printing of
-    # "^C"
-    print()
+    try:
+        webServer.serve_forever()
+    except KeyboardInterrupt:
+        # Don't print an exception on interrupt. Add a newline to handle printing of
+        # "^C"
+        print()
 
-  webServer.server_close()
-  print("Server stopped.")
+    webServer.server_close()
+    print("Server stopped.")
 
 
 def parse_args():
-  parser = argparse.ArgumentParser()
-  parser.add_argument("--port", type=int, default=8080)
-  return parser.parse_args()
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--port", type=int, default=8080)
+    return parser.parse_args()
 
 
 if __name__ == "__main__":
-  main(parse_args())
+    main(parse_args())
diff --git a/build_tools/github_actions/runner/gcp/update_instance_groups.py b/build_tools/github_actions/runner/gcp/update_instance_groups.py
index 88f7eff..6835b5f 100755
--- a/build_tools/github_actions/runner/gcp/update_instance_groups.py
+++ b/build_tools/github_actions/runner/gcp/update_instance_groups.py
@@ -25,345 +25,415 @@
 
 
 def resource_basename(resource):
-  return os.path.basename(urllib.parse.urlparse(resource).path)
+    return os.path.basename(urllib.parse.urlparse(resource).path)
 
 
 def error(msg):
-  print("ERROR: ", msg, file=sys.stderr)
-  sys.exit(1)
+    print("ERROR: ", msg, file=sys.stderr)
+    sys.exit(1)
 
 
 def confirm(msg):
-  user_input = ""
-  while user_input.lower() not in ["yes", "no", "y", "n"]:
-    user_input = input(f"{msg} [y/n] ")
-  if user_input.lower() in ["n", "no"]:
-    print("Aborting")
-    sys.exit(1)
+    user_input = ""
+    while user_input.lower() not in ["yes", "no", "y", "n"]:
+        user_input = input(f"{msg} [y/n] ")
+    if user_input.lower() in ["n", "no"]:
+        print("Aborting")
+        sys.exit(1)
 
 
 def check_scary_action(action, skip_confirmation):
-  if skip_confirmation:
-    print(f"WARNING: Performing {action}.\n"
-          f"Proceeding because '--skip-confirmation' is set.")
-  else:
-    confirm(f"You are about to perform {action}.\n"
-            f" Are you sure you want to proceed?")
+    if skip_confirmation:
+        print(
+            f"WARNING: Performing {action}.\n"
+            f"Proceeding because '--skip-confirmation' is set."
+        )
+    else:
+        confirm(
+            f"You are about to perform {action}.\n"
+            f" Are you sure you want to proceed?"
+        )
 
 
 def summarize_versions(versions):
-  return {v.name: resource_basename(v.instance_template) for v in versions}
+    return {v.name: resource_basename(v.instance_template) for v in versions}
 
 
-class MigFetcher():
+class MigFetcher:
+    def __init__(self, *, migs_client, regions_client, project):
+        self._migs_client = migs_client
+        self._regions_client = regions_client
+        self._project = project
 
-  def __init__(self, *, migs_client, regions_client, project):
-    self._migs_client = migs_client
-    self._regions_client = regions_client
-    self._project = project
+    def get_migs(self, *, region, type, group, prefix, modifier=None):
+        print("Finding matching MIGs")
+        migs = []
 
-  def get_migs(self, *, region, type, group, prefix, modifier=None):
-    print("Finding matching MIGs")
-    migs = []
+        request = compute.ListRegionsRequest(project=self._project)
+        if region != "all":
+            request.filter = f"name eq {region}"
+        regions = [r.name for r in self._regions_client.list(request)]
 
-    request = compute.ListRegionsRequest(project=self._project)
-    if region != "all":
-      request.filter = f"name eq {region}"
-    regions = [r.name for r in self._regions_client.list(request)]
+        if type == "all":
+            type = r"\w+"
 
-    if type == "all":
-      type = r"\w+"
+        if group == "all":
+            group = r"\w+"
 
-    if group == "all":
-      group = r"\w+"
-
-    for region in regions:
-      filter_parts = [p for p in [prefix, modifier, group, type, region] if p]
-      filter = f"name eq '{'-'.join(filter_parts)}'"
-      list_mig_request = compute.ListRegionInstanceGroupManagersRequest(
-          project=self._project,
-          region=region,
-          filter=filter,
-      )
-      region_migs = self._migs_client.list(list_mig_request)
-      migs.extend([mig for mig in region_migs])
-    return migs
+        for region in regions:
+            filter_parts = [p for p in [prefix, modifier, group, type, region] if p]
+            filter = f"name eq '{'-'.join(filter_parts)}'"
+            list_mig_request = compute.ListRegionInstanceGroupManagersRequest(
+                project=self._project,
+                region=region,
+                filter=filter,
+            )
+            region_migs = self._migs_client.list(list_mig_request)
+            migs.extend([mig for mig in region_migs])
+        return migs
 
 
 def main(args):
-  templates_client = compute.InstanceTemplatesClient()
-  migs_client = compute.RegionInstanceGroupManagersClient()
-  updater = MigFetcher(
-      migs_client=migs_client,
-      regions_client=compute.RegionsClient(),
-      project=args.project,
-  )
+    templates_client = compute.InstanceTemplatesClient()
+    migs_client = compute.RegionInstanceGroupManagersClient()
+    updater = MigFetcher(
+        migs_client=migs_client,
+        regions_client=compute.RegionsClient(),
+        project=args.project,
+    )
 
-  # Prod instances just have the bare name
-  modifier = None if args.env == PROD_ENV_NAME else args.env
-  migs = updater.get_migs(region=args.region,
-                          type=args.type,
-                          group=args.group,
-                          prefix=args.name_prefix,
-                          modifier=modifier)
-  if len(migs) == 0:
-    error("arguments matched no instance groups")
-    sys.exit(1)
+    # Prod instances just have the bare name
+    modifier = None if args.env == PROD_ENV_NAME else args.env
+    migs = updater.get_migs(
+        region=args.region,
+        type=args.type,
+        group=args.group,
+        prefix=args.name_prefix,
+        modifier=modifier,
+    )
+    if len(migs) == 0:
+        error("arguments matched no instance groups")
+        sys.exit(1)
 
-  print(f"Found:\n  ", "\n  ".join([m.name for m in migs]), sep="")
-  if args.skip_confirmation:
-    print("Proceeding with update as --skip-confirmation is set")
-  else:
-    confirm("Proceed with updating these MIGs?")
+    print(f"Found:\n  ", "\n  ".join([m.name for m in migs]), sep="")
+    if args.skip_confirmation:
+        print("Proceeding with update as --skip-confirmation is set")
+    else:
+        confirm("Proceed with updating these MIGs?")
 
-  if args.mode == "proactive" and args.action != "refresh":
-    mig_desc = f"'{migs[0].name}'" if len(migs) == 1 else f"{len(migs)} groups"
-    scary_action = (
-        f"an update on {mig_desc} that will shut down instances even if"
-        f" they're in the middle of running a job")
-    check_scary_action(scary_action, args.skip_confirmation)
-
-  for mig in migs:
-    region = resource_basename(mig.region)
-    if args.command in [DIRECT_UPDATE_COMMAND_NAME, CANARY_COMMAND_NAME]:
-      if "testing" in args.version and args.env != TESTING_ENV_NAME:
-        scary_action = (f"using testing template version '{args.version}' in"
-                        f" environment '{args.env}'")
+    if args.mode == "proactive" and args.action != "refresh":
+        mig_desc = f"'{migs[0].name}'" if len(migs) == 1 else f"{len(migs)} groups"
+        scary_action = (
+            f"an update on {mig_desc} that will shut down instances even if"
+            f" they're in the middle of running a job"
+        )
         check_scary_action(scary_action, args.skip_confirmation)
 
-      strip = f"-{region}"
-      if not mig.name.endswith(strip):
-        raise ValueError(f"MIG name does not end with '{strip}' as expected")
-      template_name = f"{mig.name[:-len(strip)]}-{args.version}"
+    for mig in migs:
+        region = resource_basename(mig.region)
+        if args.command in [DIRECT_UPDATE_COMMAND_NAME, CANARY_COMMAND_NAME]:
+            if "testing" in args.version and args.env != TESTING_ENV_NAME:
+                scary_action = (
+                    f"using testing template version '{args.version}' in"
+                    f" environment '{args.env}'"
+                )
+                check_scary_action(scary_action, args.skip_confirmation)
 
-      # TODO(gcmn): Make template naming consistent (ran into length limits)
-      template_name = template_name.replace(f"-{args.env}-", "-")
-      template_url = templates_client.get(
-          project=args.project, instance_template=template_name).self_link
+            strip = f"-{region}"
+            if not mig.name.endswith(strip):
+                raise ValueError(f"MIG name does not end with '{strip}' as expected")
+            template_name = f"{mig.name[:-len(strip)]}-{args.version}"
 
-    current_templates = {v.name: v.instance_template for v in mig.versions}
+            # TODO(gcmn): Make template naming consistent (ran into length limits)
+            template_name = template_name.replace(f"-{args.env}-", "-")
+            template_url = templates_client.get(
+                project=args.project, instance_template=template_name
+            ).self_link
 
-    if not current_templates:
-      error(f"Found no template versions for '{mig.name}'."
-            f" This shouldn't be possible.")
+        current_templates = {v.name: v.instance_template for v in mig.versions}
 
-    # TODO(gcmn): These should probably be factored into functions
-    if args.command == CANARY_COMMAND_NAME:
-      if len(current_templates) > 1:
-        error(f"Instance group '{mig.name}' has multiple versions, but canary"
-              f" requires it start with exactly one. Current versions:"
-              f" {summarize_versions(mig.versions)}")
+        if not current_templates:
+            error(
+                f"Found no template versions for '{mig.name}'."
+                f" This shouldn't be possible."
+            )
 
-      base_template = current_templates.get(args.base_version_name)
-      if not base_template:
-        error(f"Instance group '{mig.name}' does not have a current version"
-              f" named '{args.base_version_name}', which is required for an"
-              f" automatic canary. Current versions:"
-              f" {summarize_versions(mig.versions)}")
+        # TODO(gcmn): These should probably be factored into functions
+        if args.command == CANARY_COMMAND_NAME:
+            if len(current_templates) > 1:
+                error(
+                    f"Instance group '{mig.name}' has multiple versions, but canary"
+                    f" requires it start with exactly one. Current versions:"
+                    f" {summarize_versions(mig.versions)}"
+                )
 
-      if base_template == template_url:
-        error(f"Instance group '{mig.name}' already has the requested canary"
-              f" version '{template_name}' as its base version. Current"
-              " versions:"
-              f" {summarize_versions(mig.versions)}")
-      new_versions = [
-          compute.InstanceGroupManagerVersion(name=args.base_version_name,
-                                              instance_template=base_template),
-          compute.InstanceGroupManagerVersion(name=args.canary_version_name,
-                                              instance_template=template_url,
-                                              target_size=CANARY_SIZE)
-      ]
-    elif args.command == DIRECT_UPDATE_COMMAND_NAME:
-      scary_action = (f"an update of all instances in '{mig.name}' directly"
-                      f" without doing a canary")
-      check_scary_action(scary_action, args.skip_confirmation)
+            base_template = current_templates.get(args.base_version_name)
+            if not base_template:
+                error(
+                    f"Instance group '{mig.name}' does not have a current version"
+                    f" named '{args.base_version_name}', which is required for an"
+                    f" automatic canary. Current versions:"
+                    f" {summarize_versions(mig.versions)}"
+                )
 
-      new_versions = [
-          compute.InstanceGroupManagerVersion(name=args.base_version_name,
-                                              instance_template=template_url)
-      ]
-    elif args.command == PROMOTE_CANARY_COMMAND_NAME:
-      new_base_template = current_templates.get(args.canary_version_name)
-      if new_base_template is None:
-        error(f"Instance group '{mig.name}' does not have a current version"
-              f" named '{args.canary_version_name}', which is required for an"
-              f" automatic canary promotion. Current versions:"
-              f" {summarize_versions(mig.versions)}")
-      new_versions = [
-          compute.InstanceGroupManagerVersion(
-              name=args.base_version_name, instance_template=new_base_template)
-      ]
-    elif args.command == ROLLBACK_CANARY_COMMAND_NAME:
-      base_template = current_templates.get(args.base_version_name)
-      if base_template is None:
-        error(f"Instance group '{mig.name}' does not have a current version"
-              f" named '{args.base_version_name}', which is required for an"
-              f" automatic canary rollback. Current versions:"
-              f" {summarize_versions(mig.versions)}")
-      new_versions = [
-          compute.InstanceGroupManagerVersion(name=args.base_version_name,
-                                              instance_template=base_template)
-      ]
-    else:
-      error(f"Unrecognized command '{args.command}'")
+            if base_template == template_url:
+                error(
+                    f"Instance group '{mig.name}' already has the requested canary"
+                    f" version '{template_name}' as its base version. Current"
+                    " versions:"
+                    f" {summarize_versions(mig.versions)}"
+                )
+            new_versions = [
+                compute.InstanceGroupManagerVersion(
+                    name=args.base_version_name, instance_template=base_template
+                ),
+                compute.InstanceGroupManagerVersion(
+                    name=args.canary_version_name,
+                    instance_template=template_url,
+                    target_size=CANARY_SIZE,
+                ),
+            ]
+        elif args.command == DIRECT_UPDATE_COMMAND_NAME:
+            scary_action = (
+                f"an update of all instances in '{mig.name}' directly"
+                f" without doing a canary"
+            )
+            check_scary_action(scary_action, args.skip_confirmation)
 
-    update_policy = compute.InstanceGroupManagerUpdatePolicy(
-        type_=args.mode,
-        minimal_action=args.action,
-        most_disruptive_allowed_action=args.action)
+            new_versions = [
+                compute.InstanceGroupManagerVersion(
+                    name=args.base_version_name, instance_template=template_url
+                )
+            ]
+        elif args.command == PROMOTE_CANARY_COMMAND_NAME:
+            new_base_template = current_templates.get(args.canary_version_name)
+            if new_base_template is None:
+                error(
+                    f"Instance group '{mig.name}' does not have a current version"
+                    f" named '{args.canary_version_name}', which is required for an"
+                    f" automatic canary promotion. Current versions:"
+                    f" {summarize_versions(mig.versions)}"
+                )
+            new_versions = [
+                compute.InstanceGroupManagerVersion(
+                    name=args.base_version_name, instance_template=new_base_template
+                )
+            ]
+        elif args.command == ROLLBACK_CANARY_COMMAND_NAME:
+            base_template = current_templates.get(args.base_version_name)
+            if base_template is None:
+                error(
+                    f"Instance group '{mig.name}' does not have a current version"
+                    f" named '{args.base_version_name}', which is required for an"
+                    f" automatic canary rollback. Current versions:"
+                    f" {summarize_versions(mig.versions)}"
+                )
+            new_versions = [
+                compute.InstanceGroupManagerVersion(
+                    name=args.base_version_name, instance_template=base_template
+                )
+            ]
+        else:
+            error(f"Unrecognized command '{args.command}'")
 
-    print(f"Updating {mig.name} to new versions:"
-          f" {summarize_versions(new_versions)}")
+        update_policy = compute.InstanceGroupManagerUpdatePolicy(
+            type_=args.mode,
+            minimal_action=args.action,
+            most_disruptive_allowed_action=args.action,
+        )
 
-    request = compute.PatchRegionInstanceGroupManagerRequest(
-        project=args.project,
-        region=region,
-        instance_group_manager=mig.name,
-        instance_group_manager_resource=compute.InstanceGroupManager(
-            versions=new_versions, update_policy=update_policy))
+        print(
+            f"Updating {mig.name} to new versions:"
+            f" {summarize_versions(new_versions)}"
+        )
 
-    if not args.dry_run:
-      migs_client.patch(request)
-    else:
-      print(f"Dry run, so not sending this patch request:\n```\n{request}```")
-    print(f"Successfully updated {mig.name}")
+        request = compute.PatchRegionInstanceGroupManagerRequest(
+            project=args.project,
+            region=region,
+            instance_group_manager=mig.name,
+            instance_group_manager_resource=compute.InstanceGroupManager(
+                versions=new_versions, update_policy=update_policy
+            ),
+        )
+
+        if not args.dry_run:
+            migs_client.patch(request)
+        else:
+            print(f"Dry run, so not sending this patch request:\n```\n{request}```")
+        print(f"Successfully updated {mig.name}")
 
 
 def parse_args():
-  parser = argparse.ArgumentParser(description=(
-      "Updates one or more GCP Managed Instance Groups (MIGs) to new"
-      " instance template versions. Wraps the GCP API with shortcuts for the"
-      " patterns we have in our MIGs. See the README and"
-      " https://cloud.google.com/compute/docs/instance-groups/updating-migs for"
-      " more details."))
+    parser = argparse.ArgumentParser(
+        description=(
+            "Updates one or more GCP Managed Instance Groups (MIGs) to new"
+            " instance template versions. Wraps the GCP API with shortcuts for the"
+            " patterns we have in our MIGs. See the README and"
+            " https://cloud.google.com/compute/docs/instance-groups/updating-migs for"
+            " more details."
+        )
+    )
 
-  # Makes global options come *after* command.
-  # See https://stackoverflow.com/q/23296695
-  subparser_base = argparse.ArgumentParser(add_help=False)
-  subparser_base.add_argument("--project",
-                              default="iree-oss",
-                              help="The cloud project for the MIGs.")
-  subparser_base.add_argument(
-      "--region",
-      "--regions",
-      required=True,
-      help=("The cloud region (e.g. 'us-west1') of the MIG to update, an RE2"
+    # Makes global options come *after* command.
+    # See https://stackoverflow.com/q/23296695
+    subparser_base = argparse.ArgumentParser(add_help=False)
+    subparser_base.add_argument(
+        "--project", default="iree-oss", help="The cloud project for the MIGs."
+    )
+    subparser_base.add_argument(
+        "--region",
+        "--regions",
+        required=True,
+        help=(
+            "The cloud region (e.g. 'us-west1') of the MIG to update, an RE2"
             " regex for matching region names (e.g. 'us-.*'), or 'all' to"
-            " search for MIGs in all regions."))
-  subparser_base.add_argument(
-      "--group",
-      "--groups",
-      required=True,
-      help=("The runner group of the MIGs to update, an RE2 regex for matching"
+            " search for MIGs in all regions."
+        ),
+    )
+    subparser_base.add_argument(
+        "--group",
+        "--groups",
+        required=True,
+        help=(
+            "The runner group of the MIGs to update, an RE2 regex for matching"
             " the group (e.g. 'cpu|gpu'), or 'all' to search for MIGs for all"
-            " groups."),
-  )
-  subparser_base.add_argument(
-      "--type",
-      "--types",
-      required=True,
-      help=("The runner type of the MIGs to update, an RE2 regex for matching"
+            " groups."
+        ),
+    )
+    subparser_base.add_argument(
+        "--type",
+        "--types",
+        required=True,
+        help=(
+            "The runner type of the MIGs to update, an RE2 regex for matching"
             " the type (e.g. 'presubmit|postsubmit'), or 'all' to search for"
-            " MIGs for all types."),
-  )
-  subparser_base.add_argument(
-      "--mode",
-      default="opportunistic",
-      choices=["opportunistic", "proactive"],
-      help=(
-          "The mode in which to update instances. See README and"
-          " https://cloud.google.com/compute/docs/instance-groups/updating-migs."
-      ))
-  subparser_base.add_argument(
-      "--action",
-      choices=["refresh", "restart", "replace"],
-      help=(
-          "What action to take when updating an instance. See README and"
-          " https://cloud.google.com/compute/docs/instance-groups/updating-migs."
-      ))
-  subparser_base.add_argument("--env",
-                              "--environment",
-                              default=TESTING_ENV_NAME,
-                              help="The environment for the MIGs.",
-                              choices=[PROD_ENV_NAME, TESTING_ENV_NAME])
-  subparser_base.add_argument(
-      "--dry-run",
-      action="store_true",
-      default=False,
-      help="Print all output but don't actually send the update request.")
+            " MIGs for all types."
+        ),
+    )
+    subparser_base.add_argument(
+        "--mode",
+        default="opportunistic",
+        choices=["opportunistic", "proactive"],
+        help=(
+            "The mode in which to update instances. See README and"
+            " https://cloud.google.com/compute/docs/instance-groups/updating-migs."
+        ),
+    )
+    subparser_base.add_argument(
+        "--action",
+        choices=["refresh", "restart", "replace"],
+        help=(
+            "What action to take when updating an instance. See README and"
+            " https://cloud.google.com/compute/docs/instance-groups/updating-migs."
+        ),
+    )
+    subparser_base.add_argument(
+        "--env",
+        "--environment",
+        default=TESTING_ENV_NAME,
+        help="The environment for the MIGs.",
+        choices=[PROD_ENV_NAME, TESTING_ENV_NAME],
+    )
+    subparser_base.add_argument(
+        "--dry-run",
+        action="store_true",
+        default=False,
+        help="Print all output but don't actually send the update request.",
+    )
 
-  # Defaulting to true for testing environment avoids people getting in the
-  # habit of routinely passing --force.
-  skip_confirmation = subparser_base.add_mutually_exclusive_group()
-  skip_confirmation.add_argument(
-      "--skip-confirmation",
-      "--force",
-      action="store_true",
-      default=None,
-      help=("Skip all confirmation prompts. Be careful."
-            " Defaults to True for testing environment"))
-  skip_confirmation.add_argument("--noskip-confirmation",
-                                 "--noforce",
-                                 action="store_false",
-                                 default=None,
-                                 dest="skip_confirmation")
+    # Defaulting to true for testing environment avoids people getting in the
+    # habit of routinely passing --force.
+    skip_confirmation = subparser_base.add_mutually_exclusive_group()
+    skip_confirmation.add_argument(
+        "--skip-confirmation",
+        "--force",
+        action="store_true",
+        default=None,
+        help=(
+            "Skip all confirmation prompts. Be careful."
+            " Defaults to True for testing environment"
+        ),
+    )
+    skip_confirmation.add_argument(
+        "--noskip-confirmation",
+        "--noforce",
+        action="store_false",
+        default=None,
+        dest="skip_confirmation",
+    )
 
-  # These shouldn't be set very often, but it's just as easy to make them flags
-  # as it is to make them global constants.
-  subparser_base.add_argument("--name-prefix",
-                              default="gh-runner",
-                              help="The first part of MIG and template names.")
-  subparser_base.add_argument(
-      "--base-version-name",
-      default="base",
-      help="The name given to the MIG instance version that isn't in canary.")
-  subparser_base.add_argument(
-      "--canary-version-name",
-      default="canary",
-      help="The name given to the MIG instance version that is being canaried.")
+    # These shouldn't be set very often, but it's just as easy to make them flags
+    # as it is to make them global constants.
+    subparser_base.add_argument(
+        "--name-prefix",
+        default="gh-runner",
+        help="The first part of MIG and template names.",
+    )
+    subparser_base.add_argument(
+        "--base-version-name",
+        default="base",
+        help="The name given to the MIG instance version that isn't in canary.",
+    )
+    subparser_base.add_argument(
+        "--canary-version-name",
+        default="canary",
+        help="The name given to the MIG instance version that is being canaried.",
+    )
 
-  subparsers = parser.add_subparsers(required=True, dest="command")
+    subparsers = parser.add_subparsers(required=True, dest="command")
 
-  canary_sp = subparsers.add_parser(CANARY_COMMAND_NAME,
-                                    parents=[subparser_base],
-                                    help="Canary a new template version.")
-  rollback_sp = subparsers.add_parser(
-      ROLLBACK_CANARY_COMMAND_NAME,
-      parents=[subparser_base],
-      help=("Rollback a previous canary, restoring all instances to the base"
-            " version."))
-  promote_sp = subparsers.add_parser(
-      PROMOTE_CANARY_COMMAND_NAME,
-      parents=[subparser_base],
-      help="Promote the current canary version to be the base version.")
-  direct_sp = subparsers.add_parser(
-      DIRECT_UPDATE_COMMAND_NAME,
-      parents=[subparser_base],
-      help=("Update all instances in the MIG to a new version. Generally should"
-            " not be used for prod."))
+    canary_sp = subparsers.add_parser(
+        CANARY_COMMAND_NAME,
+        parents=[subparser_base],
+        help="Canary a new template version.",
+    )
+    rollback_sp = subparsers.add_parser(
+        ROLLBACK_CANARY_COMMAND_NAME,
+        parents=[subparser_base],
+        help=(
+            "Rollback a previous canary, restoring all instances to the base"
+            " version."
+        ),
+    )
+    promote_sp = subparsers.add_parser(
+        PROMOTE_CANARY_COMMAND_NAME,
+        parents=[subparser_base],
+        help="Promote the current canary version to be the base version.",
+    )
+    direct_sp = subparsers.add_parser(
+        DIRECT_UPDATE_COMMAND_NAME,
+        parents=[subparser_base],
+        help=(
+            "Update all instances in the MIG to a new version. Generally should"
+            " not be used for prod."
+        ),
+    )
 
-  for sp in [canary_sp, direct_sp]:
-    sp.add_argument(
-        "--version",
-        help=("The new instance template version. Usually git hash +"
-              " 3-character uid, e.g. 56e40f6505-9lp"))
+    for sp in [canary_sp, direct_sp]:
+        sp.add_argument(
+            "--version",
+            help=(
+                "The new instance template version. Usually git hash +"
+                " 3-character uid, e.g. 56e40f6505-9lp"
+            ),
+        )
 
-  # TODO: Add this argument with a custom parser
-  # canary_sp.add_argument("--canary-size", type=int, default=1)
+    # TODO: Add this argument with a custom parser
+    # canary_sp.add_argument("--canary-size", type=int, default=1)
 
-  args = parser.parse_args()
+    args = parser.parse_args()
 
-  if args.skip_confirmation is None:
-    args.skip_confirmation = args.env == TESTING_ENV_NAME
+    if args.skip_confirmation is None:
+        args.skip_confirmation = args.env == TESTING_ENV_NAME
 
-  if args.action is None:
-    if args.mode == "proactive":
-      args.action = "refresh"
-    else:
-      args.action = "replace"
+    if args.action is None:
+        if args.mode == "proactive":
+            args.action = "refresh"
+        else:
+            args.action = "replace"
 
-  return args
+    return args
 
 
 if __name__ == "__main__":
-  main(parse_args())
+    main(parse_args())
diff --git a/build_tools/github_actions/runner/gcp/update_runner_version.py b/build_tools/github_actions/runner/gcp/update_runner_version.py
index f2e2ad4..4124288 100755
--- a/build_tools/github_actions/runner/gcp/update_runner_version.py
+++ b/build_tools/github_actions/runner/gcp/update_runner_version.py
@@ -27,12 +27,15 @@
 # This is using the old printf-style string formatting because we're creating
 # lines that have Bash substitutions using braces
 VERSION_LINE_FORMAT_STRING = 'GITHUB_RUNNER_VERSION="${GITHUB_RUNNER_VERSION:-%s}"'
-DIGEST_LINE_FORMAT_STRING = 'GITHUB_RUNNER_ARCHIVE_DIGEST="${GITHUB_RUNNER_ARCHIVE_DIGEST:-%s}"'
+DIGEST_LINE_FORMAT_STRING = (
+    'GITHUB_RUNNER_ARCHIVE_DIGEST="${GITHUB_RUNNER_ARCHIVE_DIGEST:-%s}"'
+)
 
-DIGEST_SEARCH_PATTERN = r"^.*\bBEGIN.SHA linux-x64\b.*\b([a-fA-F0-9]{64})\b.*END.SHA linux-x64\b.*$"
+DIGEST_SEARCH_PATTERN = (
+    r"^.*\bBEGIN.SHA linux-x64\b.*\b([a-fA-F0-9]{64})\b.*END.SHA linux-x64\b.*$"
+)
 
-RUNNER_ARCHIVE_TEMPLATE = string.Template(
-    "actions-runner-linux-x64-${version}.tar.gz")
+RUNNER_ARCHIVE_TEMPLATE = string.Template("actions-runner-linux-x64-${version}.tar.gz")
 ASSET_URL_TEMPLATE = string.Template(
     "https://github.com/actions/runner/releases/download/v${version}/${archive}"
 )
@@ -43,57 +46,61 @@
 
 
 def error(*msg):
-  print(*msg, file=sys.stderr)
-  sys.exit(1)
+    print(*msg, file=sys.stderr)
+    sys.exit(1)
 
 
 if __name__ == "__main__":
-  release = json.loads(
-      subprocess.run(["gh", "api", "/repos/actions/runner/releases?per_page=1"],
-                     check=True,
-                     text=True,
-                     stdout=subprocess.PIPE).stdout.strip())[0]
+    release = json.loads(
+        subprocess.run(
+            ["gh", "api", "/repos/actions/runner/releases?per_page=1"],
+            check=True,
+            text=True,
+            stdout=subprocess.PIPE,
+        ).stdout.strip()
+    )[0]
 
-  if not release["tag_name"].startswith("v"):
-    error(
-        f"ERROR: Release tag name '{release.tag_name}' does not start with 'v' as expected"
-    )
+    if not release["tag_name"].startswith("v"):
+        error(
+            f"ERROR: Release tag name '{release.tag_name}' does not start with 'v' as expected"
+        )
 
-  version = release["tag_name"][1:]
-  digest = None
+    version = release["tag_name"][1:]
+    digest = None
 
-  sha_pattern = re.compile(DIGEST_SEARCH_PATTERN, flags=re.MULTILINE)
-  matches = sha_pattern.findall(release["body"])
+    sha_pattern = re.compile(DIGEST_SEARCH_PATTERN, flags=re.MULTILINE)
+    matches = sha_pattern.findall(release["body"])
 
-  if not matches:
-    error(
-        f"ERROR: No lines match digest search regex: '{DIGEST_SEARCH_PATTERN}'")
+    if not matches:
+        error(f"ERROR: No lines match digest search regex: '{DIGEST_SEARCH_PATTERN}'")
 
-  if len(matches) > 1:
-    error(f"ERROR: Multiple lines match digest search regex:", matches)
+    if len(matches) > 1:
+        error(f"ERROR: Multiple lines match digest search regex:", matches)
 
-  digest = matches[0]
+    digest = matches[0]
 
-  archive = RUNNER_ARCHIVE_TEMPLATE.substitute(version=version)
-  asset_url = ASSET_URL_TEMPLATE.substitute(version=version, archive=archive)
+    archive = RUNNER_ARCHIVE_TEMPLATE.substitute(version=version)
+    asset_url = ASSET_URL_TEMPLATE.substitute(version=version, archive=archive)
 
-  # With Python 3.11 we could use hashlib.file_digest
-  hash = hashlib.sha256()
-  with urllib.request.urlopen(asset_url) as f:
-    hash.update(f.read())
+    # With Python 3.11 we could use hashlib.file_digest
+    hash = hashlib.sha256()
+    with urllib.request.urlopen(asset_url) as f:
+        hash.update(f.read())
 
-  actual_digest = hash.hexdigest()
+    actual_digest = hash.hexdigest()
 
-  if digest != actual_digest:
-    error(f"Digest extracted from release notes ('{digest}') does not match"
-          f" digest obtained from fetching '{asset_url}' ('{actual_digest}')")
+    if digest != actual_digest:
+        error(
+            f"Digest extracted from release notes ('{digest}') does not match"
+            f" digest obtained from fetching '{asset_url}' ('{actual_digest}')"
+        )
 
-  for line in fileinput.input(files=[TARGET_SCRIPT], inplace=True):
-    if line.startswith("GITHUB_RUNNER_VERSION"):
-      print(VERSION_LINE_FORMAT_STRING % (version,))
-    elif line.startswith("GITHUB_RUNNER_ARCHIVE_DIGEST"):
-      print(DIGEST_LINE_FORMAT_STRING % (digest,))
-    else:
-      print(line, end="")
+    for line in fileinput.input(files=[TARGET_SCRIPT], inplace=True):
+        if line.startswith("GITHUB_RUNNER_VERSION"):
+            print(VERSION_LINE_FORMAT_STRING % (version,))
+        elif line.startswith("GITHUB_RUNNER_ARCHIVE_DIGEST"):
+            print(DIGEST_LINE_FORMAT_STRING % (digest,))
+        else:
+            print(line, end="")
 
-  print(f"Successfully updated {TARGET_SCRIPT}")
+    print(f"Successfully updated {TARGET_SCRIPT}")
diff --git a/build_tools/github_actions/runner/instance_deleter/main.py b/build_tools/github_actions/runner/instance_deleter/main.py
index 9307879..5c05a0c 100644
--- a/build_tools/github_actions/runner/instance_deleter/main.py
+++ b/build_tools/github_actions/runner/instance_deleter/main.py
@@ -64,8 +64,14 @@
 import random
 import re
 import time
-from http.client import (BAD_REQUEST, FORBIDDEN, GATEWAY_TIMEOUT,
-                         INTERNAL_SERVER_ERROR, NOT_FOUND, UNAUTHORIZED)
+from http.client import (
+    BAD_REQUEST,
+    FORBIDDEN,
+    GATEWAY_TIMEOUT,
+    INTERNAL_SERVER_ERROR,
+    NOT_FOUND,
+    UNAUTHORIZED,
+)
 
 import flask
 import functions_framework
@@ -92,119 +98,133 @@
 
 
 def _verify_token(token: str) -> dict:
-  """Verify token signature and return the token payload"""
-  request = transport.requests.Request(session)
-  payload = id_token.verify_oauth2_token(token, request=request)
-  return payload
+    """Verify token signature and return the token payload"""
+    request = transport.requests.Request(session)
+    payload = id_token.verify_oauth2_token(token, request=request)
+    return payload
 
 
 def _get_region(zone: str) -> str:
-  """Extract region name from zone name"""
-  # Drop the trailing zone identifier to get the region. Yeah it kinda does seem
-  # like there should be a better way to do this...
-  region, _ = zone.rsplit("-", maxsplit=1)
-  return region
+    """Extract region name from zone name"""
+    # Drop the trailing zone identifier to get the region. Yeah it kinda does seem
+    # like there should be a better way to do this...
+    region, _ = zone.rsplit("-", maxsplit=1)
+    return region
 
 
 def _get_name_from_resource(resource: str) -> str:
-  """Extract just the final name component from a fully scoped resource name."""
-  _, name = resource.rsplit("/", maxsplit=1)
-  return name
+    """Extract just the final name component from a fully scoped resource name."""
+    _, name = resource.rsplit("/", maxsplit=1)
+    return name
 
 
 def _get_from_items(items: compute.Items, key: str):
-  # Why would the GCP Python API return something as silly as a dictionary?
-  return next((item.value for item in items if item.key == key), None)
+    # Why would the GCP Python API return something as silly as a dictionary?
+    return next((item.value for item in items if item.key == key), None)
 
 
-def delete_instance_from_mig(mig_name: str, project: str, region: str,
-                             instance: compute.Instance):
-  try:
-    operation = migs_client.delete_instances(
-        instance_group_manager=mig_name,
-        project=project,
-        region=region,
-        # For some reason we can't just use a list of instance names and need to
-        # build this RhymingRythmicJavaClasses proto. Also, unlike all the other
-        # parameters, the instance has to be a fully-specified URL for the
-        # instance, not just its name.
-        region_instance_group_managers_delete_instances_request_resource=(
-            compute.RegionInstanceGroupManagersDeleteInstancesRequest(
-                instances=[instance.self_link])))
-  except (google.api_core.exceptions.Forbidden,
-          google.api_core.exceptions.Unauthorized,
-          google.api_core.exceptions.NotFound) as e:
-    print(e)
-    return flask.abort(
-        e.code, f"Error requesting that {mig_name} delete {instance.name}.")
-  except Exception as e:
-    # We'll call any other error here a server error.
-    print(e)
-    return flask.abort(
-        INTERNAL_SERVER_ERROR,
-        f"Error requesting that {mig_name} delete {instance.name}.")
+def delete_instance_from_mig(
+    mig_name: str, project: str, region: str, instance: compute.Instance
+):
+    try:
+        operation = migs_client.delete_instances(
+            instance_group_manager=mig_name,
+            project=project,
+            region=region,
+            # For some reason we can't just use a list of instance names and need to
+            # build this RhymingRythmicJavaClasses proto. Also, unlike all the other
+            # parameters, the instance has to be a fully-specified URL for the
+            # instance, not just its name.
+            region_instance_group_managers_delete_instances_request_resource=(
+                compute.RegionInstanceGroupManagersDeleteInstancesRequest(
+                    instances=[instance.self_link]
+                )
+            ),
+        )
+    except (
+        google.api_core.exceptions.Forbidden,
+        google.api_core.exceptions.Unauthorized,
+        google.api_core.exceptions.NotFound,
+    ) as e:
+        print(e)
+        return flask.abort(
+            e.code, f"Error requesting that {mig_name} delete {instance.name}."
+        )
+    except Exception as e:
+        # We'll call any other error here a server error.
+        print(e)
+        return flask.abort(
+            INTERNAL_SERVER_ERROR,
+            f"Error requesting that {mig_name} delete {instance.name}.",
+        )
 
-  try:
-    # This is actually an extended operation that you have to poll to get its
-    # status, but we just check the status once because it appears that errors
-    # always show up here and all we just want to return success in marking for
-    # deletion. We don't need to wait for the deletion to actually take place.
-    operation.result()
-  except google.api_core.exceptions.ClientError as e:
-    print(e)
-    # Unpack the actual usable error message
-    msg = (
-        f"Error requesting that {mig_name} delete {instance.name}:"
-        "\n" + "\n".join(
-            [f"{err.code}: {err.message}" for err in e.response.error.errors]))
-    print(msg)
-    # We're not actually totally sure whether this is a client or server error
-    # for the overall request, but let's call it a client error (the only client
-    # here is our VM instances, so I think we can be a bit loose).
-    return flask.abort(BAD_REQUEST, msg)
+    try:
+        # This is actually an extended operation that you have to poll to get its
+        # status, but we just check the status once because it appears that errors
+        # always show up here and all we just want to return success in marking for
+        # deletion. We don't need to wait for the deletion to actually take place.
+        operation.result()
+    except google.api_core.exceptions.ClientError as e:
+        print(e)
+        # Unpack the actual usable error message
+        msg = (
+            f"Error requesting that {mig_name} delete {instance.name}:"
+            "\n"
+            + "\n".join(
+                [f"{err.code}: {err.message}" for err in e.response.error.errors]
+            )
+        )
+        print(msg)
+        # We're not actually totally sure whether this is a client or server error
+        # for the overall request, but let's call it a client error (the only client
+        # here is our VM instances, so I think we can be a bit loose).
+        return flask.abort(BAD_REQUEST, msg)
 
-  success_msg = f"{instance.name} has been marked for deletion by {mig_name}."
-  print(success_msg)
-  return success_msg
+    success_msg = f"{instance.name} has been marked for deletion by {mig_name}."
+    print(success_msg)
+    return success_msg
 
 
 def should_scale_down(mig_name: str, project: str, region: str):
-  start = time.time()
-  print(f"Polling {mig_name} for stability")
-  while time.time() - start < STABILIZE_TIMEOUT_SECONDS:
-    try:
-      mig = migs_client.get(project=project,
-                            region=region,
-                            instance_group_manager=mig_name)
-    except google.api_core.exceptions.NotFound as e:
-      print(e)
-      return flask.abort(
-          e.code,
-          f"Cannot find {mig_name} in region={region}, project={project}")
-    if mig.status.is_stable:
-      break
-    # We sleep for a random amount of time here to avoid synchronizing callers
-    # waiting for the MIG to be stable.
-    sleep_secs = random.randint(1, 15)
-    print(f"{mig_name} is not stable. Retrying in {sleep_secs} seconds")
-    time.sleep(sleep_secs)
-  else:
-    return flask.abort(GATEWAY_TIMEOUT,
-                       "Timed out waiting for the MIG to become stable")
-  autoscaler = autoscalers_client.get(project=project,
-                                      region=region,
-                                      autoscaler=_get_name_from_resource(
-                                          mig.status.autoscaler))
-  response = "true" if autoscaler.recommended_size < mig.target_size else "false"
-  print(
-      f"Autoscaler recommends size {autoscaler.recommended_size} and"
-      f" {mig_name} is targetting size {mig.target_size}. Sending: {response}")
-  return response
+    start = time.time()
+    print(f"Polling {mig_name} for stability")
+    while time.time() - start < STABILIZE_TIMEOUT_SECONDS:
+        try:
+            mig = migs_client.get(
+                project=project, region=region, instance_group_manager=mig_name
+            )
+        except google.api_core.exceptions.NotFound as e:
+            print(e)
+            return flask.abort(
+                e.code, f"Cannot find {mig_name} in region={region}, project={project}"
+            )
+        if mig.status.is_stable:
+            break
+        # We sleep for a random amount of time here to avoid synchronizing callers
+        # waiting for the MIG to be stable.
+        sleep_secs = random.randint(1, 15)
+        print(f"{mig_name} is not stable. Retrying in {sleep_secs} seconds")
+        time.sleep(sleep_secs)
+    else:
+        return flask.abort(
+            GATEWAY_TIMEOUT, "Timed out waiting for the MIG to become stable"
+        )
+    autoscaler = autoscalers_client.get(
+        project=project,
+        region=region,
+        autoscaler=_get_name_from_resource(mig.status.autoscaler),
+    )
+    response = "true" if autoscaler.recommended_size < mig.target_size else "false"
+    print(
+        f"Autoscaler recommends size {autoscaler.recommended_size} and"
+        f" {mig_name} is targetting size {mig.target_size}. Sending: {response}"
+    )
+    return response
 
 
 @functions_framework.http
 def delete_self(request: flask.Request):
-  """HTTP Cloud Function to delete the instance group making the request.
+    """HTTP Cloud Function to delete the instance group making the request.
     Args:
         request: The request object.
         https://flask.palletsprojects.com/en/1.1.x/api/#incoming-request-data
@@ -216,104 +236,117 @@
         For more information on how Flask integrates with Cloud
         Functions, see the `Writing HTTP functions` page.
         https://cloud.google.com/functions/docs/writing/http#http_frameworks
-  """
-  if request.method not in ALLOWED_HTTP_METHODS:
-    return flask.abort(
-        BAD_REQUEST, f"Invalid method {request.method}."
-        f" Allowed methods: {ALLOWED_HTTP_METHODS}")
+    """
+    if request.method not in ALLOWED_HTTP_METHODS:
+        return flask.abort(
+            BAD_REQUEST,
+            f"Invalid method {request.method}."
+            f" Allowed methods: {ALLOWED_HTTP_METHODS}",
+        )
 
-  # No path is needed, since the token and method contain all the information we
-  # need. Maybe that design was a mistake, but since the resource being operated
-  # on is always the instance making the call, it seemed handy.
-  if request.path != "/":
-    return flask.abort(
-        BAD_REQUEST,
-        f"Invalid request path {request.path}. Only root path is valid).")
+    # No path is needed, since the token and method contain all the information we
+    # need. Maybe that design was a mistake, but since the resource being operated
+    # on is always the instance making the call, it seemed handy.
+    if request.path != "/":
+        return flask.abort(
+            BAD_REQUEST,
+            f"Invalid request path {request.path}. Only root path is valid).",
+        )
 
-  auth_header = request.headers.get("Authorization")
-  if auth_header is None:
-    return flask.abort(UNAUTHORIZED, "Authorization header is missing")
-  if not auth_header.startswith(AUTH_HEADER_PREFIX):
-    return flask.abort(
-        UNAUTHORIZED,
-        f"Authorization header does not start with expected string"
-        f" {AUTH_HEADER_PREFIX}.")
+    auth_header = request.headers.get("Authorization")
+    if auth_header is None:
+        return flask.abort(UNAUTHORIZED, "Authorization header is missing")
+    if not auth_header.startswith(AUTH_HEADER_PREFIX):
+        return flask.abort(
+            UNAUTHORIZED,
+            f"Authorization header does not start with expected string"
+            f" {AUTH_HEADER_PREFIX}.",
+        )
 
-  token = auth_header[len(AUTH_HEADER_PREFIX):]
+    token = auth_header[len(AUTH_HEADER_PREFIX) :]
 
-  try:
-    # We don't verify audience here because Cloud IAM will have already done so
-    # and jwt's matching of audiences is exact, which means trailing slashes or
-    # http vs https matters and that's pretty brittle.
-    token_payload = _verify_token(token)
-  except (ValueError, google.auth.exceptions.GoogleAuthError) as e:
-    print(e)
-    return flask.abort(UNAUTHORIZED, "Decoding bearer token failed.")
+    try:
+        # We don't verify audience here because Cloud IAM will have already done so
+        # and jwt's matching of audiences is exact, which means trailing slashes or
+        # http vs https matters and that's pretty brittle.
+        token_payload = _verify_token(token)
+    except (ValueError, google.auth.exceptions.GoogleAuthError) as e:
+        print(e)
+        return flask.abort(UNAUTHORIZED, "Decoding bearer token failed.")
 
-  print(f"Token payload: {token_payload}")
+    print(f"Token payload: {token_payload}")
 
-  try:
-    compute_info = token_payload["google"]["compute_engine"]
-  except KeyError:
-    return flask.abort(
-        UNAUTHORIZED,
-        "Bearer token payload does not have expected field google.compute")
+    try:
+        compute_info = token_payload["google"]["compute_engine"]
+    except KeyError:
+        return flask.abort(
+            UNAUTHORIZED,
+            "Bearer token payload does not have expected field google.compute",
+        )
 
-  project = compute_info["project_id"]
-  zone = compute_info["zone"]
-  region = _get_region(zone)
-  instance_name = compute_info["instance_name"]
+    project = compute_info["project_id"]
+    zone = compute_info["zone"]
+    region = _get_region(zone)
+    instance_name = compute_info["instance_name"]
 
-  if request.method == "DELETE":
-    print(f"Received request to delete {instance_name}")
-  else:
+    if request.method == "DELETE":
+        print(f"Received request to delete {instance_name}")
+    else:
+        assert request.method == "GET"
+        print(f"Received inquiry whether to delete {instance_name}")
+    try:
+        instance = instances_client.get(
+            instance=instance_name, project=project, zone=zone
+        )
+    except (
+        google.api_core.exceptions.NotFound,
+        google.api_core.exceptions.Forbidden,
+    ) as e:
+        print(e)
+        return flask.abort(
+            e.code, f"Cannot view {instance_name} in zone={zone}, project={project}"
+        )
+
+    instance_id = int(compute_info["instance_id"])
+    # Verify it's *actually* the same instance. Names get reused, but IDs don't.
+    # For some reason you can't reference anything by ID in the API.
+    if instance.id != instance_id:
+        return flask.abort(
+            BAD_REQUEST,
+            f"Existing instance of the same name {instance.name} has a different"
+            f" ID {instance.id} than token specifies {instance_id}.",
+        )
+
+    mig_name = _get_from_items(instance.metadata.items, MIG_METADATA_KEY)
+
+    if mig_name is None:
+        return flask.abort(
+            BAD_REQUEST,
+            (
+                f"Instance is not part of a managed instance group."
+                f" Did not find {MIG_METADATA_KEY} in metadata."
+            ),
+        )
+    mig_name = _get_name_from_resource(mig_name)
+
+    # General good practice would be to compile the regex once, but the only way
+    # to do that is to make it a global, which makes this difficult to test and
+    # compiling this regex should not be expensive.
+    allowed_mig_pattern = os.environ.get(ALLOWED_MIG_PATTERN_ENV_VARIABLE)
+    if allowed_mig_pattern is None:
+        flask.abort(
+            INTERNAL_SERVER_ERROR,
+            f"Missing required environment variable"
+            f" {ALLOWED_MIG_PATTERN_ENV_VARIABLE}",
+        )
+
+    if not re.fullmatch(allowed_mig_pattern, mig_name):
+        return flask.abort(FORBIDDEN, f"No access to MIG {mig_name}")
+
+    if request.method == "DELETE":
+        return delete_instance_from_mig(
+            mig_name=mig_name, project=project, region=region, instance=instance
+        )
+
     assert request.method == "GET"
-    print(f"Received inquiry whether to delete {instance_name}")
-  try:
-    instance = instances_client.get(instance=instance_name,
-                                    project=project,
-                                    zone=zone)
-  except (google.api_core.exceptions.NotFound,
-          google.api_core.exceptions.Forbidden) as e:
-    print(e)
-    return flask.abort(
-        e.code,
-        f"Cannot view {instance_name} in zone={zone}, project={project}")
-
-  instance_id = int(compute_info["instance_id"])
-  # Verify it's *actually* the same instance. Names get reused, but IDs don't.
-  # For some reason you can't reference anything by ID in the API.
-  if instance.id != instance_id:
-    return flask.abort(
-        BAD_REQUEST,
-        f"Existing instance of the same name {instance.name} has a different"
-        f" ID {instance.id} than token specifies {instance_id}.")
-
-  mig_name = _get_from_items(instance.metadata.items, MIG_METADATA_KEY)
-
-  if mig_name is None:
-    return flask.abort(BAD_REQUEST,
-                       (f"Instance is not part of a managed instance group."
-                        f" Did not find {MIG_METADATA_KEY} in metadata."))
-  mig_name = _get_name_from_resource(mig_name)
-
-  # General good practice would be to compile the regex once, but the only way
-  # to do that is to make it a global, which makes this difficult to test and
-  # compiling this regex should not be expensive.
-  allowed_mig_pattern = os.environ.get(ALLOWED_MIG_PATTERN_ENV_VARIABLE)
-  if allowed_mig_pattern is None:
-    flask.abort(
-        INTERNAL_SERVER_ERROR, f"Missing required environment variable"
-        f" {ALLOWED_MIG_PATTERN_ENV_VARIABLE}")
-
-  if not re.fullmatch(allowed_mig_pattern, mig_name):
-    return flask.abort(FORBIDDEN, f"No access to MIG {mig_name}")
-
-  if request.method == "DELETE":
-    return delete_instance_from_mig(mig_name=mig_name,
-                                    project=project,
-                                    region=region,
-                                    instance=instance)
-
-  assert request.method == "GET"
-  return should_scale_down(mig_name=mig_name, project=project, region=region)
+    return should_scale_down(mig_name=mig_name, project=project, region=region)
diff --git a/build_tools/github_actions/runner/instance_deleter/main_test.py b/build_tools/github_actions/runner/instance_deleter/main_test.py
index 43397d6..535a59b 100644
--- a/build_tools/github_actions/runner/instance_deleter/main_test.py
+++ b/build_tools/github_actions/runner/instance_deleter/main_test.py
@@ -34,506 +34,564 @@
 
 
 def get_message(ctx):
-  return ctx.exception.get_response().get_data(as_text=True)
+    return ctx.exception.get_response().get_data(as_text=True)
 
 
 # A fake for oauth2 token verification that pretends the encoding scheme is just
 # JSON.
 def fake_verify_oauth2_token(token, request):
-  del request
-  return json.loads(token)
+    del request
+    return json.loads(token)
 
 
 def make_token(payload: dict):
-  return json.dumps(payload)
+    return json.dumps(payload)
 
 
-@mock.patch("google.oauth2.id_token.verify_oauth2_token",
-            fake_verify_oauth2_token)
+@mock.patch("google.oauth2.id_token.verify_oauth2_token", fake_verify_oauth2_token)
 class InstanceDeleterTest(unittest.TestCase):
+    def setUp(self):
+        self.addCleanup(mock.patch.stopall)
+        instances_client_patcher = mock.patch("main.instances_client", autospec=True)
+        self.instances_client = instances_client_patcher.start()
+        migs_client_patcher = mock.patch("main.migs_client", autospec=True)
+        self.migs_client = migs_client_patcher.start()
+        os_environ_patcher = mock.patch.dict(
+            "os.environ", {main.ALLOWED_MIG_PATTERN_ENV_VARIABLE: ".*"}
+        )
+        self.environ = os_environ_patcher.start()
+        autoscalers_client_patcher = mock.patch(
+            "main.autoscalers_client", autospec=True
+        )
+        self.autoscalers_client = autoscalers_client_patcher.start()
+        time_patcher = mock.patch("time.time", autospec=True)
+        self.time = time_patcher.start()
+        self.time.return_value = 0
+        # Just noop sleep
+        mock.patch("time.sleep", autospec=True).start()
 
-  def setUp(self):
-    self.addCleanup(mock.patch.stopall)
-    instances_client_patcher = mock.patch("main.instances_client",
-                                          autospec=True)
-    self.instances_client = instances_client_patcher.start()
-    migs_client_patcher = mock.patch("main.migs_client", autospec=True)
-    self.migs_client = migs_client_patcher.start()
-    os_environ_patcher = mock.patch.dict(
-        "os.environ", {main.ALLOWED_MIG_PATTERN_ENV_VARIABLE: ".*"})
-    self.environ = os_environ_patcher.start()
-    autoscalers_client_patcher = mock.patch("main.autoscalers_client",
-                                            autospec=True)
-    self.autoscalers_client = autoscalers_client_patcher.start()
-    time_patcher = mock.patch("time.time", autospec=True)
-    self.time = time_patcher.start()
-    self.time.return_value = 0
-    # Just noop sleep
-    mock.patch("time.sleep", autospec=True).start()
+    def test_delete_happy_path(self):
+        req = Request({}, populate_request=False, shallow=True)
+        req.method = "DELETE"
 
-  def test_delete_happy_path(self):
-    req = Request({}, populate_request=False, shallow=True)
-    req.method = "DELETE"
-
-    token = make_token({
-        "google": {
-            "compute_engine": {
-                "project_id": PROJECT,
-                "zone": f"{REGION}-a",
-                "instance_name": INSTANCE_NAME,
-                "instance_id": str(ID1),
+        token = make_token(
+            {
+                "google": {
+                    "compute_engine": {
+                        "project_id": PROJECT,
+                        "zone": f"{REGION}-a",
+                        "instance_name": INSTANCE_NAME,
+                        "instance_id": str(ID1),
+                    }
+                }
             }
-        }
-    })
+        )
 
-    req.headers = {"Authorization": f"Bearer {token}"}
+        req.headers = {"Authorization": f"Bearer {token}"}
 
-    self_link = f"{INSTANCE_LINK_PREFIX}{INSTANCE_NAME}"
-    instance = compute.Instance(
-        id=ID1,
-        name=INSTANCE_NAME,
-        zone=ZONE,
-        self_link=self_link,
-        metadata=compute.Metadata(items=[
-            compute.Items(key=main.MIG_METADATA_KEY,
-                          value=f"{MIG_PATH_PREFIX}{MIG_NAME}")
-        ]))
-    self.instances_client.get.return_value = instance
+        self_link = f"{INSTANCE_LINK_PREFIX}{INSTANCE_NAME}"
+        instance = compute.Instance(
+            id=ID1,
+            name=INSTANCE_NAME,
+            zone=ZONE,
+            self_link=self_link,
+            metadata=compute.Metadata(
+                items=[
+                    compute.Items(
+                        key=main.MIG_METADATA_KEY, value=f"{MIG_PATH_PREFIX}{MIG_NAME}"
+                    )
+                ]
+            ),
+        )
+        self.instances_client.get.return_value = instance
 
-    response = main.delete_self(req)
+        response = main.delete_self(req)
 
-    self.assertIn(MIG_NAME, response)
-    self.assertIn(INSTANCE_NAME, response)
+        self.assertIn(MIG_NAME, response)
+        self.assertIn(INSTANCE_NAME, response)
 
-    self.migs_client.delete_instances.assert_called_once_with(
-        instance_group_manager=MIG_NAME,
-        project=PROJECT,
-        region=REGION,
-        region_instance_group_managers_delete_instances_request_resource=compute
-        .RegionInstanceGroupManagersDeleteInstancesRequest(
-            instances=[instance.self_link]))
+        self.migs_client.delete_instances.assert_called_once_with(
+            instance_group_manager=MIG_NAME,
+            project=PROJECT,
+            region=REGION,
+            region_instance_group_managers_delete_instances_request_resource=compute.RegionInstanceGroupManagersDeleteInstancesRequest(
+                instances=[instance.self_link]
+            ),
+        )
 
-  def test_get_happy_path(self):
-    req = Request({}, populate_request=False, shallow=True)
-    req.method = "GET"
+    def test_get_happy_path(self):
+        req = Request({}, populate_request=False, shallow=True)
+        req.method = "GET"
 
-    token = make_token({
-        "google": {
-            "compute_engine": {
-                "project_id": PROJECT,
-                "zone": f"{REGION}-a",
-                "instance_name": INSTANCE_NAME,
-                "instance_id": str(ID1),
+        token = make_token(
+            {
+                "google": {
+                    "compute_engine": {
+                        "project_id": PROJECT,
+                        "zone": f"{REGION}-a",
+                        "instance_name": INSTANCE_NAME,
+                        "instance_id": str(ID1),
+                    }
+                }
             }
-        }
-    })
+        )
 
-    req.headers = {"Authorization": f"Bearer {token}"}
+        req.headers = {"Authorization": f"Bearer {token}"}
 
-    self_link = f"{INSTANCE_LINK_PREFIX}{INSTANCE_NAME}"
-    instance = compute.Instance(
-        id=ID1,
-        name=INSTANCE_NAME,
-        zone=ZONE,
-        self_link=self_link,
-        metadata=compute.Metadata(items=[
-            compute.Items(key=main.MIG_METADATA_KEY,
-                          value=f"{MIG_PATH_PREFIX}{MIG_NAME}")
-        ]))
-    self.instances_client.get.return_value = instance
+        self_link = f"{INSTANCE_LINK_PREFIX}{INSTANCE_NAME}"
+        instance = compute.Instance(
+            id=ID1,
+            name=INSTANCE_NAME,
+            zone=ZONE,
+            self_link=self_link,
+            metadata=compute.Metadata(
+                items=[
+                    compute.Items(
+                        key=main.MIG_METADATA_KEY, value=f"{MIG_PATH_PREFIX}{MIG_NAME}"
+                    )
+                ]
+            ),
+        )
+        self.instances_client.get.return_value = instance
 
-    mig = compute.InstanceGroupManager(
-        target_size=5,
-        status={
-            "is_stable": True,
-            "autoscaler": "autoscaler_link/autoscaler_name"
-        })
-    self.migs_client.get.return_value = mig
+        mig = compute.InstanceGroupManager(
+            target_size=5,
+            status={"is_stable": True, "autoscaler": "autoscaler_link/autoscaler_name"},
+        )
+        self.migs_client.get.return_value = mig
 
-    autoscaler = compute.Autoscaler(recommended_size=3)
-    self.autoscalers_client.get.return_value = autoscaler
+        autoscaler = compute.Autoscaler(recommended_size=3)
+        self.autoscalers_client.get.return_value = autoscaler
 
-    response = main.delete_self(req)
+        response = main.delete_self(req)
 
-    self.assertEqual(response, "true")
+        self.assertEqual(response, "true")
 
-  def test_get_timeout(self):
-    req = Request({}, populate_request=False, shallow=True)
-    req.method = "GET"
+    def test_get_timeout(self):
+        req = Request({}, populate_request=False, shallow=True)
+        req.method = "GET"
 
-    token = make_token({
-        "google": {
-            "compute_engine": {
-                "project_id": PROJECT,
-                "zone": f"{REGION}-a",
-                "instance_name": INSTANCE_NAME,
-                "instance_id": str(ID1),
+        token = make_token(
+            {
+                "google": {
+                    "compute_engine": {
+                        "project_id": PROJECT,
+                        "zone": f"{REGION}-a",
+                        "instance_name": INSTANCE_NAME,
+                        "instance_id": str(ID1),
+                    }
+                }
             }
-        }
-    })
+        )
 
-    req.headers = {"Authorization": f"Bearer {token}"}
+        req.headers = {"Authorization": f"Bearer {token}"}
 
-    self_link = f"{INSTANCE_LINK_PREFIX}{INSTANCE_NAME}"
-    instance = compute.Instance(
-        id=ID1,
-        name=INSTANCE_NAME,
-        zone=ZONE,
-        self_link=self_link,
-        metadata=compute.Metadata(items=[
-            compute.Items(key=main.MIG_METADATA_KEY,
-                          value=f"{MIG_PATH_PREFIX}{MIG_NAME}")
-        ]))
-    self.instances_client.get.return_value = instance
+        self_link = f"{INSTANCE_LINK_PREFIX}{INSTANCE_NAME}"
+        instance = compute.Instance(
+            id=ID1,
+            name=INSTANCE_NAME,
+            zone=ZONE,
+            self_link=self_link,
+            metadata=compute.Metadata(
+                items=[
+                    compute.Items(
+                        key=main.MIG_METADATA_KEY, value=f"{MIG_PATH_PREFIX}{MIG_NAME}"
+                    )
+                ]
+            ),
+        )
+        self.instances_client.get.return_value = instance
 
-    mig = compute.InstanceGroupManager(
-        target_size=5,
-        status={
-            "is_stable": False,
-            "autoscaler": "autoscaler_link/autoscaler_name"
-        })
-    self.migs_client.get.return_value = mig
-    self.time.side_effect = [0, main.STABILIZE_TIMEOUT_SECONDS + 1]
+        mig = compute.InstanceGroupManager(
+            target_size=5,
+            status={
+                "is_stable": False,
+                "autoscaler": "autoscaler_link/autoscaler_name",
+            },
+        )
+        self.migs_client.get.return_value = mig
+        self.time.side_effect = [0, main.STABILIZE_TIMEOUT_SECONDS + 1]
 
-    with self.assertRaises(werkzeug.exceptions.GatewayTimeout):
-      main.delete_self(req)
+        with self.assertRaises(werkzeug.exceptions.GatewayTimeout):
+            main.delete_self(req)
 
-  def test_narrow_allowed_migs(self):
-    req = Request({}, populate_request=False, shallow=True)
-    req.method = "DELETE"
+    def test_narrow_allowed_migs(self):
+        req = Request({}, populate_request=False, shallow=True)
+        req.method = "DELETE"
 
-    token = make_token({
-        "google": {
-            "compute_engine": {
-                "project_id": PROJECT,
-                "zone": f"{REGION}-a",
-                "instance_name": INSTANCE_NAME,
-                "instance_id": str(ID1),
+        token = make_token(
+            {
+                "google": {
+                    "compute_engine": {
+                        "project_id": PROJECT,
+                        "zone": f"{REGION}-a",
+                        "instance_name": INSTANCE_NAME,
+                        "instance_id": str(ID1),
+                    }
+                }
             }
-        }
-    })
+        )
 
-    req.headers = {"Authorization": f"Bearer {token}"}
+        req.headers = {"Authorization": f"Bearer {token}"}
 
-    mig_name = "github-runner-foo-bar"
-    self.environ[main.ALLOWED_MIG_PATTERN_ENV_VARIABLE] = "github-runner-.*"
-    self_link = f"{INSTANCE_LINK_PREFIX}{INSTANCE_NAME}"
-    instance = compute.Instance(
-        id=ID1,
-        name=INSTANCE_NAME,
-        zone=ZONE,
-        self_link=self_link,
-        metadata=compute.Metadata(items=[
-            compute.Items(key=main.MIG_METADATA_KEY,
-                          value=f"{MIG_PATH_PREFIX}{mig_name}")
-        ]))
-    self.instances_client.get.return_value = instance
+        mig_name = "github-runner-foo-bar"
+        self.environ[main.ALLOWED_MIG_PATTERN_ENV_VARIABLE] = "github-runner-.*"
+        self_link = f"{INSTANCE_LINK_PREFIX}{INSTANCE_NAME}"
+        instance = compute.Instance(
+            id=ID1,
+            name=INSTANCE_NAME,
+            zone=ZONE,
+            self_link=self_link,
+            metadata=compute.Metadata(
+                items=[
+                    compute.Items(
+                        key=main.MIG_METADATA_KEY, value=f"{MIG_PATH_PREFIX}{mig_name}"
+                    )
+                ]
+            ),
+        )
+        self.instances_client.get.return_value = instance
 
-    ext_operation = mock.MagicMock(
-        google.api_core.extended_operation.ExtendedOperation)
-    ext_operation.result.return_value = None
+        ext_operation = mock.MagicMock(
+            google.api_core.extended_operation.ExtendedOperation
+        )
+        ext_operation.result.return_value = None
 
-    response = main.delete_self(req)
+        response = main.delete_self(req)
 
-    self.assertIn(mig_name, response)
-    self.assertIn(INSTANCE_NAME, response)
+        self.assertIn(mig_name, response)
+        self.assertIn(INSTANCE_NAME, response)
 
-    self.migs_client.delete_instances.assert_called_once_with(
-        instance_group_manager=mig_name,
-        project=PROJECT,
-        region=REGION,
-        region_instance_group_managers_delete_instances_request_resource=compute
-        .RegionInstanceGroupManagersDeleteInstancesRequest(
-            instances=[instance.self_link]))
+        self.migs_client.delete_instances.assert_called_once_with(
+            instance_group_manager=mig_name,
+            project=PROJECT,
+            region=REGION,
+            region_instance_group_managers_delete_instances_request_resource=compute.RegionInstanceGroupManagersDeleteInstancesRequest(
+                instances=[instance.self_link]
+            ),
+        )
 
-  def test_bad_method(self):
-    req = Request({}, populate_request=False, shallow=True)
-    req.method = "POST"
+    def test_bad_method(self):
+        req = Request({}, populate_request=False, shallow=True)
+        req.method = "POST"
 
-    with self.assertRaises(werkzeug.exceptions.BadRequest) as ctx:
-      main.delete_self(req)
+        with self.assertRaises(werkzeug.exceptions.BadRequest) as ctx:
+            main.delete_self(req)
 
-    self.assertIn("Invalid method", get_message(ctx))
+        self.assertIn("Invalid method", get_message(ctx))
 
-  def test_bad_path(self):
-    req = Request({}, populate_request=False, shallow=True)
-    req.method = "DELETE"
-    req.path = "/foo/bar"
+    def test_bad_path(self):
+        req = Request({}, populate_request=False, shallow=True)
+        req.method = "DELETE"
+        req.path = "/foo/bar"
 
-    with self.assertRaises(werkzeug.exceptions.BadRequest) as ctx:
-      main.delete_self(req)
+        with self.assertRaises(werkzeug.exceptions.BadRequest) as ctx:
+            main.delete_self(req)
 
-    self.assertIn("Invalid request path", get_message(ctx))
+        self.assertIn("Invalid request path", get_message(ctx))
 
-  def test_missing_header(self):
-    req = Request({}, populate_request=False, shallow=True)
-    req.method = "DELETE"
+    def test_missing_header(self):
+        req = Request({}, populate_request=False, shallow=True)
+        req.method = "DELETE"
 
-    with self.assertRaises(werkzeug.exceptions.Unauthorized) as ctx:
-      main.delete_self(req)
+        with self.assertRaises(werkzeug.exceptions.Unauthorized) as ctx:
+            main.delete_self(req)
 
-    self.assertIn("Authorization header is missing", get_message(ctx))
+        self.assertIn("Authorization header is missing", get_message(ctx))
 
-  def test_malformed_header(self):
-    req = Request({}, populate_request=False, shallow=True)
-    req.method = "DELETE"
-    req.headers = {"Authorization": "UnknownScheme token"}
+    def test_malformed_header(self):
+        req = Request({}, populate_request=False, shallow=True)
+        req.method = "DELETE"
+        req.headers = {"Authorization": "UnknownScheme token"}
 
-    with self.assertRaises(werkzeug.exceptions.Unauthorized) as ctx:
-      main.delete_self(req)
+        with self.assertRaises(werkzeug.exceptions.Unauthorized) as ctx:
+            main.delete_self(req)
 
-    self.assertIn("Authorization header does not start", get_message(ctx))
+        self.assertIn("Authorization header does not start", get_message(ctx))
 
-  def test_invalid_token(self):
-    req = Request({}, populate_request=False, shallow=True)
-    req.method = "DELETE"
-    req.headers = {"Authorization": f"Bearer {INVALID_TOKEN}"}
+    def test_invalid_token(self):
+        req = Request({}, populate_request=False, shallow=True)
+        req.method = "DELETE"
+        req.headers = {"Authorization": f"Bearer {INVALID_TOKEN}"}
 
-    with self.assertRaises(werkzeug.exceptions.Unauthorized) as ctx:
-      main.delete_self(req)
+        with self.assertRaises(werkzeug.exceptions.Unauthorized) as ctx:
+            main.delete_self(req)
 
-    self.assertIn("token", get_message(ctx))
+        self.assertIn("token", get_message(ctx))
 
-  def test_bad_token_payload(self):
-    req = Request({}, populate_request=False, shallow=True)
-    req.method = "DELETE"
+    def test_bad_token_payload(self):
+        req = Request({}, populate_request=False, shallow=True)
+        req.method = "DELETE"
 
-    token = make_token({"aud": "localhost"})
+        token = make_token({"aud": "localhost"})
 
-    req.headers = {"Authorization": f"Bearer {token}"}
+        req.headers = {"Authorization": f"Bearer {token}"}
 
-    with self.assertRaises(werkzeug.exceptions.Unauthorized) as ctx:
-      main.delete_self(req)
+        with self.assertRaises(werkzeug.exceptions.Unauthorized) as ctx:
+            main.delete_self(req)
 
-    self.assertIn("token", get_message(ctx))
+        self.assertIn("token", get_message(ctx))
 
-  def test_nonexistent_instance(self):
-    req = Request({}, populate_request=False, shallow=True)
-    req.method = "DELETE"
+    def test_nonexistent_instance(self):
+        req = Request({}, populate_request=False, shallow=True)
+        req.method = "DELETE"
 
-    token = make_token({
-        "google": {
-            "compute_engine": {
-                "project_id": PROJECT,
-                "zone": ZONE,
-                "instance_name": INSTANCE_NAME,
-                "instance_id": str(ID1),
+        token = make_token(
+            {
+                "google": {
+                    "compute_engine": {
+                        "project_id": PROJECT,
+                        "zone": ZONE,
+                        "instance_name": INSTANCE_NAME,
+                        "instance_id": str(ID1),
+                    }
+                }
             }
-        }
-    })
+        )
 
-    req.headers = {"Authorization": f"Bearer {token}"}
+        req.headers = {"Authorization": f"Bearer {token}"}
 
-    self.instances_client.get.side_effect = google.api_core.exceptions.NotFound(
-        "Instance not found")
+        self.instances_client.get.side_effect = google.api_core.exceptions.NotFound(
+            "Instance not found"
+        )
 
-    with self.assertRaises(werkzeug.exceptions.NotFound) as ctx:
-      main.delete_self(req)
+        with self.assertRaises(werkzeug.exceptions.NotFound) as ctx:
+            main.delete_self(req)
 
-    self.assertIn(INSTANCE_NAME, get_message(ctx))
+        self.assertIn(INSTANCE_NAME, get_message(ctx))
 
-  def test_id_mismatch(self):
-    req = Request({}, populate_request=False, shallow=True)
-    req.method = "DELETE"
+    def test_id_mismatch(self):
+        req = Request({}, populate_request=False, shallow=True)
+        req.method = "DELETE"
 
-    token = make_token({
-        "google": {
-            "compute_engine": {
-                "project_id": PROJECT,
-                "zone": ZONE,
-                "instance_name": INSTANCE_NAME,
-                "instance_id": str(ID1),
+        token = make_token(
+            {
+                "google": {
+                    "compute_engine": {
+                        "project_id": PROJECT,
+                        "zone": ZONE,
+                        "instance_name": INSTANCE_NAME,
+                        "instance_id": str(ID1),
+                    }
+                }
             }
-        }
-    })
+        )
 
-    req.headers = {"Authorization": f"Bearer {token}"}
+        req.headers = {"Authorization": f"Bearer {token}"}
 
-    instance = compute.Instance(id=ID2, name=INSTANCE_NAME)
+        instance = compute.Instance(id=ID2, name=INSTANCE_NAME)
 
-    self.instances_client.get.return_value = instance
+        self.instances_client.get.return_value = instance
 
-    with self.assertRaises(werkzeug.exceptions.BadRequest) as ctx:
-      main.delete_self(req)
+        with self.assertRaises(werkzeug.exceptions.BadRequest) as ctx:
+            main.delete_self(req)
 
-    msg = get_message(ctx)
-    self.assertIn(str(ID1), msg)
-    self.assertIn(str(ID2), msg)
+        msg = get_message(ctx)
+        self.assertIn(str(ID1), msg)
+        self.assertIn(str(ID2), msg)
 
-  def test_missing_mig_metadata(self):
-    req = Request({}, populate_request=False, shallow=True)
-    req.method = "DELETE"
+    def test_missing_mig_metadata(self):
+        req = Request({}, populate_request=False, shallow=True)
+        req.method = "DELETE"
 
-    token = make_token({
-        "google": {
-            "compute_engine": {
-                "project_id": PROJECT,
-                "zone": ZONE,
-                "instance_name": INSTANCE_NAME,
-                "instance_id": str(ID1),
+        token = make_token(
+            {
+                "google": {
+                    "compute_engine": {
+                        "project_id": PROJECT,
+                        "zone": ZONE,
+                        "instance_name": INSTANCE_NAME,
+                        "instance_id": str(ID1),
+                    }
+                }
             }
-        }
-    })
+        )
 
-    req.headers = {"Authorization": f"Bearer {token}"}
+        req.headers = {"Authorization": f"Bearer {token}"}
 
-    instance = compute.Instance(id=ID1,
-                                name=INSTANCE_NAME,
-                                zone=ZONE,
-                                self_link=f"http://foo/bar/{INSTANCE_NAME}")
+        instance = compute.Instance(
+            id=ID1,
+            name=INSTANCE_NAME,
+            zone=ZONE,
+            self_link=f"http://foo/bar/{INSTANCE_NAME}",
+        )
 
-    self.instances_client.get.return_value = instance
+        self.instances_client.get.return_value = instance
 
-    with self.assertRaises(werkzeug.exceptions.BadRequest) as ctx:
-      main.delete_self(req)
+        with self.assertRaises(werkzeug.exceptions.BadRequest) as ctx:
+            main.delete_self(req)
 
-    self.assertIn(main.MIG_METADATA_KEY, get_message(ctx))
+        self.assertIn(main.MIG_METADATA_KEY, get_message(ctx))
 
-  def test_mig_pattern_unset(self):
-    req = Request({}, populate_request=False, shallow=True)
-    req.method = "DELETE"
+    def test_mig_pattern_unset(self):
+        req = Request({}, populate_request=False, shallow=True)
+        req.method = "DELETE"
 
-    token = make_token({
-        "google": {
-            "compute_engine": {
-                "project_id": PROJECT,
-                "zone": f"{REGION}-a",
-                "instance_name": INSTANCE_NAME,
-                "instance_id": str(ID1),
+        token = make_token(
+            {
+                "google": {
+                    "compute_engine": {
+                        "project_id": PROJECT,
+                        "zone": f"{REGION}-a",
+                        "instance_name": INSTANCE_NAME,
+                        "instance_id": str(ID1),
+                    }
+                }
             }
-        }
-    })
+        )
 
-    req.headers = {"Authorization": f"Bearer {token}"}
+        req.headers = {"Authorization": f"Bearer {token}"}
 
-    self_link = f"{INSTANCE_LINK_PREFIX}{INSTANCE_NAME}"
-    instance = compute.Instance(
-        id=ID1,
-        name=INSTANCE_NAME,
-        zone=ZONE,
-        self_link=self_link,
-        metadata=compute.Metadata(items=[
-            compute.Items(key=main.MIG_METADATA_KEY,
-                          value=f"{MIG_PATH_PREFIX}{MIG_NAME}")
-        ]))
-    self.instances_client.get.return_value = instance
+        self_link = f"{INSTANCE_LINK_PREFIX}{INSTANCE_NAME}"
+        instance = compute.Instance(
+            id=ID1,
+            name=INSTANCE_NAME,
+            zone=ZONE,
+            self_link=self_link,
+            metadata=compute.Metadata(
+                items=[
+                    compute.Items(
+                        key=main.MIG_METADATA_KEY, value=f"{MIG_PATH_PREFIX}{MIG_NAME}"
+                    )
+                ]
+            ),
+        )
+        self.instances_client.get.return_value = instance
 
-    del self.environ[main.ALLOWED_MIG_PATTERN_ENV_VARIABLE]
+        del self.environ[main.ALLOWED_MIG_PATTERN_ENV_VARIABLE]
 
-    with self.assertRaises(werkzeug.exceptions.InternalServerError) as ctx:
-      main.delete_self(req)
+        with self.assertRaises(werkzeug.exceptions.InternalServerError) as ctx:
+            main.delete_self(req)
 
-    self.assertIn(main.ALLOWED_MIG_PATTERN_ENV_VARIABLE, get_message(ctx))
+        self.assertIn(main.ALLOWED_MIG_PATTERN_ENV_VARIABLE, get_message(ctx))
 
-  def test_no_migs_allowed(self):
-    req = Request({}, populate_request=False, shallow=True)
-    req.method = "DELETE"
+    def test_no_migs_allowed(self):
+        req = Request({}, populate_request=False, shallow=True)
+        req.method = "DELETE"
 
-    token = make_token({
-        "google": {
-            "compute_engine": {
-                "project_id": PROJECT,
-                "zone": f"{REGION}-a",
-                "instance_name": INSTANCE_NAME,
-                "instance_id": str(ID1),
+        token = make_token(
+            {
+                "google": {
+                    "compute_engine": {
+                        "project_id": PROJECT,
+                        "zone": f"{REGION}-a",
+                        "instance_name": INSTANCE_NAME,
+                        "instance_id": str(ID1),
+                    }
+                }
             }
-        }
-    })
+        )
 
-    req.headers = {"Authorization": f"Bearer {token}"}
+        req.headers = {"Authorization": f"Bearer {token}"}
 
-    instance = compute.Instance(
-        id=ID1,
-        name=INSTANCE_NAME,
-        zone=ZONE,
-        self_link=f"{INSTANCE_LINK_PREFIX}{INSTANCE_NAME}",
-        metadata=compute.Metadata(items=[
-            compute.Items(key=main.MIG_METADATA_KEY,
-                          value=f"{MIG_PATH_PREFIX}{MIG_NAME}")
-        ]))
-    self.instances_client.get.return_value = instance
+        instance = compute.Instance(
+            id=ID1,
+            name=INSTANCE_NAME,
+            zone=ZONE,
+            self_link=f"{INSTANCE_LINK_PREFIX}{INSTANCE_NAME}",
+            metadata=compute.Metadata(
+                items=[
+                    compute.Items(
+                        key=main.MIG_METADATA_KEY, value=f"{MIG_PATH_PREFIX}{MIG_NAME}"
+                    )
+                ]
+            ),
+        )
+        self.instances_client.get.return_value = instance
 
-    self.environ[main.ALLOWED_MIG_PATTERN_ENV_VARIABLE] = ""
+        self.environ[main.ALLOWED_MIG_PATTERN_ENV_VARIABLE] = ""
 
-    with self.assertRaises(werkzeug.exceptions.Forbidden) as ctx:
-      main.delete_self(req)
+        with self.assertRaises(werkzeug.exceptions.Forbidden) as ctx:
+            main.delete_self(req)
 
-    self.assertIn(MIG_NAME, get_message((ctx)))
+        self.assertIn(MIG_NAME, get_message((ctx)))
 
-  def test_mig_not_allowed(self):
-    req = Request({}, populate_request=False, shallow=True)
-    req.method = "DELETE"
+    def test_mig_not_allowed(self):
+        req = Request({}, populate_request=False, shallow=True)
+        req.method = "DELETE"
 
-    token = make_token({
-        "google": {
-            "compute_engine": {
-                "project_id": PROJECT,
-                "zone": f"{REGION}-a",
-                "instance_name": INSTANCE_NAME,
-                "instance_id": str(ID1),
+        token = make_token(
+            {
+                "google": {
+                    "compute_engine": {
+                        "project_id": PROJECT,
+                        "zone": f"{REGION}-a",
+                        "instance_name": INSTANCE_NAME,
+                        "instance_id": str(ID1),
+                    }
+                }
             }
-        }
-    })
+        )
 
-    req.headers = {"Authorization": f"Bearer {token}"}
+        req.headers = {"Authorization": f"Bearer {token}"}
 
-    mig_name = "not-github-runner"
-    self.environ[main.ALLOWED_MIG_PATTERN_ENV_VARIABLE] = "github-runner-.*"
-    instance = compute.Instance(
-        id=ID1,
-        name=INSTANCE_NAME,
-        zone=ZONE,
-        self_link=f"{INSTANCE_LINK_PREFIX}{INSTANCE_NAME}",
-        metadata=compute.Metadata(items=[
-            compute.Items(key=main.MIG_METADATA_KEY,
-                          value=f"{MIG_PATH_PREFIX}{mig_name}")
-        ]))
-    self.instances_client.get.return_value = instance
+        mig_name = "not-github-runner"
+        self.environ[main.ALLOWED_MIG_PATTERN_ENV_VARIABLE] = "github-runner-.*"
+        instance = compute.Instance(
+            id=ID1,
+            name=INSTANCE_NAME,
+            zone=ZONE,
+            self_link=f"{INSTANCE_LINK_PREFIX}{INSTANCE_NAME}",
+            metadata=compute.Metadata(
+                items=[
+                    compute.Items(
+                        key=main.MIG_METADATA_KEY, value=f"{MIG_PATH_PREFIX}{mig_name}"
+                    )
+                ]
+            ),
+        )
+        self.instances_client.get.return_value = instance
 
-    with self.assertRaises(werkzeug.exceptions.Forbidden) as ctx:
-      main.delete_self(req)
+        with self.assertRaises(werkzeug.exceptions.Forbidden) as ctx:
+            main.delete_self(req)
 
-    self.assertIn(mig_name, get_message((ctx)))
+        self.assertIn(mig_name, get_message((ctx)))
 
-  def test_bad_deletion_request_server(self):
-    req = Request({}, populate_request=False, shallow=True)
-    req.method = "DELETE"
+    def test_bad_deletion_request_server(self):
+        req = Request({}, populate_request=False, shallow=True)
+        req.method = "DELETE"
 
-    token = make_token({
-        "google": {
-            "compute_engine": {
-                "project_id": PROJECT,
-                "zone": ZONE,
-                "instance_name": INSTANCE_NAME,
-                "instance_id": str(ID1),
+        token = make_token(
+            {
+                "google": {
+                    "compute_engine": {
+                        "project_id": PROJECT,
+                        "zone": ZONE,
+                        "instance_name": INSTANCE_NAME,
+                        "instance_id": str(ID1),
+                    }
+                }
             }
-        }
-    })
+        )
 
-    req.headers = {"Authorization": f"Bearer {token}"}
+        req.headers = {"Authorization": f"Bearer {token}"}
 
-    instance = compute.Instance(
-        id=ID1,
-        name=INSTANCE_NAME,
-        zone=ZONE,
-        self_link=f"{INSTANCE_LINK_PREFIX}{INSTANCE_NAME}",
-        metadata=compute.Metadata(items=[
-            compute.Items(key=main.MIG_METADATA_KEY,
-                          value=f"{MIG_PATH_PREFIX}{MIG_NAME}")
-        ]))
+        instance = compute.Instance(
+            id=ID1,
+            name=INSTANCE_NAME,
+            zone=ZONE,
+            self_link=f"{INSTANCE_LINK_PREFIX}{INSTANCE_NAME}",
+            metadata=compute.Metadata(
+                items=[
+                    compute.Items(
+                        key=main.MIG_METADATA_KEY, value=f"{MIG_PATH_PREFIX}{MIG_NAME}"
+                    )
+                ]
+            ),
+        )
 
-    self.instances_client.get.return_value = instance
-    self.migs_client.delete_instances.side_effect = ValueError("Bad request")
+        self.instances_client.get.return_value = instance
+        self.migs_client.delete_instances.side_effect = ValueError("Bad request")
 
-    with self.assertRaises(werkzeug.exceptions.InternalServerError) as ctx:
-      main.delete_self(req)
+        with self.assertRaises(werkzeug.exceptions.InternalServerError) as ctx:
+            main.delete_self(req)
 
-    self.assertIn(MIG_NAME, get_message(ctx))
+        self.assertIn(MIG_NAME, get_message(ctx))
 
-  # Testing of server errors is unimplemented. ExtendedOperation is not
-  # documented well enough for me to produce a reasonable fake and a bad fake is
-  # worse than nothing.
+    # Testing of server errors is unimplemented. ExtendedOperation is not
+    # documented well enough for me to produce a reasonable fake and a bad fake is
+    # worse than nothing.
 
 
 if __name__ == "__main__":
-  unittest.main()
+    unittest.main()
diff --git a/build_tools/python/benchmark_suites/iree/adreno_benchmarks.py b/build_tools/python/benchmark_suites/iree/adreno_benchmarks.py
index 9e4f6ae..07a7588 100644
--- a/build_tools/python/benchmark_suites/iree/adreno_benchmarks.py
+++ b/build_tools/python/benchmark_suites/iree/adreno_benchmarks.py
@@ -15,83 +15,100 @@
 
 
 class Android_Adreno_Benchmarks(object):
-  """Benchmarks on Android devices with Adreno GPU."""
+    """Benchmarks on Android devices with Adreno GPU."""
 
-  ADRENO_GPU_COMPILE_TARGET = iree_definitions.CompileTarget(
-      target_backend=iree_definitions.TargetBackend.VULKAN_SPIRV,
-      target_architecture=common_definitions.DeviceArchitecture.QUALCOMM_ADRENO,
-      target_abi=iree_definitions.TargetABI.VULKAN_ANDROID31)
-  DEFAULT_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
-      id=unique_ids.IREE_COMPILE_CONFIG_ANDROID_QUALCOMM_ADRENO_DEFAULTS,
-      tags=["default-flags"],
-      compile_targets=[ADRENO_GPU_COMPILE_TARGET])
-  FUSE_PADDING_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
-      id=unique_ids.IREE_COMPILE_CONFIG_ANDROID_QUALCOMM_ADRENO_FUSE_PADDING,
-      tags=["experimental-flags", "fuse-padding"],
-      compile_targets=[ADRENO_GPU_COMPILE_TARGET],
-      extra_flags=["--iree-flow-enable-fuse-padding-into-linalg-consumer-ops"])
-  FUSE_PADDING_REPEATED_KERNEL_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
-      id=unique_ids.
-      IREE_COMPILE_CONFIG_ANDROID_QUALCOMM_ADRENO_FUSE_PADDING_REPEATED_KERNEL,
-      tags=["experimental-flags", "fuse-padding", "repeated-kernel"],
-      compile_targets=[ADRENO_GPU_COMPILE_TARGET],
-      extra_flags=FUSE_PADDING_COMPILE_CONFIG.extra_flags +
-      ["--iree-hal-benchmark-dispatch-repeat-count=16"])
+    ADRENO_GPU_COMPILE_TARGET = iree_definitions.CompileTarget(
+        target_backend=iree_definitions.TargetBackend.VULKAN_SPIRV,
+        target_architecture=common_definitions.DeviceArchitecture.QUALCOMM_ADRENO,
+        target_abi=iree_definitions.TargetABI.VULKAN_ANDROID31,
+    )
+    DEFAULT_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
+        id=unique_ids.IREE_COMPILE_CONFIG_ANDROID_QUALCOMM_ADRENO_DEFAULTS,
+        tags=["default-flags"],
+        compile_targets=[ADRENO_GPU_COMPILE_TARGET],
+    )
+    FUSE_PADDING_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
+        id=unique_ids.IREE_COMPILE_CONFIG_ANDROID_QUALCOMM_ADRENO_FUSE_PADDING,
+        tags=["experimental-flags", "fuse-padding"],
+        compile_targets=[ADRENO_GPU_COMPILE_TARGET],
+        extra_flags=["--iree-flow-enable-fuse-padding-into-linalg-consumer-ops"],
+    )
+    FUSE_PADDING_REPEATED_KERNEL_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
+        id=unique_ids.IREE_COMPILE_CONFIG_ANDROID_QUALCOMM_ADRENO_FUSE_PADDING_REPEATED_KERNEL,
+        tags=["experimental-flags", "fuse-padding", "repeated-kernel"],
+        compile_targets=[ADRENO_GPU_COMPILE_TARGET],
+        extra_flags=FUSE_PADDING_COMPILE_CONFIG.extra_flags
+        + ["--iree-hal-benchmark-dispatch-repeat-count=16"],
+    )
 
-  def generate(
-      self
-  ) -> Tuple[List[iree_definitions.ModuleGenerationConfig],
-             List[iree_definitions.E2EModelRunConfig]]:
-    default_models = [
-        tflite_models.DEEPLABV3_FP32,
-        tflite_models.MOBILESSD_FP32,
-        tflite_models.POSENET_FP32,
-        tflite_models.MOBILEBERT_FP32,
-        tflite_models.MOBILENET_V2,
-        tflite_models.MOBILENET_V3SMALL,
-    ]
-    default_gen_configs = [
-        iree_definitions.ModuleGenerationConfig.build(
-            compile_config=self.DEFAULT_COMPILE_CONFIG,
-            imported_model=iree_definitions.ImportedModel.from_model(model))
-        for model in default_models
-    ]
-    fuse_padding_gen_configs = [
-        iree_definitions.ModuleGenerationConfig.build(
-            compile_config=self.FUSE_PADDING_COMPILE_CONFIG,
-            imported_model=iree_definitions.ImportedModel.from_model(model))
-        for model in default_models
-    ]
-    fuse_padding_repeated_kernel_gen_configs = [
-        iree_definitions.ModuleGenerationConfig.build(
-            compile_config=self.FUSE_PADDING_REPEATED_KERNEL_COMPILE_CONFIG,
-            imported_model=iree_definitions.ImportedModel.from_model(model))
-        for model in [
+    def generate(
+        self,
+    ) -> Tuple[
+        List[iree_definitions.ModuleGenerationConfig],
+        List[iree_definitions.E2EModelRunConfig],
+    ]:
+        default_models = [
+            tflite_models.DEEPLABV3_FP32,
             tflite_models.MOBILESSD_FP32,
             tflite_models.POSENET_FP32,
+            tflite_models.MOBILEBERT_FP32,
             tflite_models.MOBILENET_V2,
             tflite_models.MOBILENET_V3SMALL,
         ]
-    ]
+        default_gen_configs = [
+            iree_definitions.ModuleGenerationConfig.build(
+                compile_config=self.DEFAULT_COMPILE_CONFIG,
+                imported_model=iree_definitions.ImportedModel.from_model(model),
+            )
+            for model in default_models
+        ]
+        fuse_padding_gen_configs = [
+            iree_definitions.ModuleGenerationConfig.build(
+                compile_config=self.FUSE_PADDING_COMPILE_CONFIG,
+                imported_model=iree_definitions.ImportedModel.from_model(model),
+            )
+            for model in default_models
+        ]
+        fuse_padding_repeated_kernel_gen_configs = [
+            iree_definitions.ModuleGenerationConfig.build(
+                compile_config=self.FUSE_PADDING_REPEATED_KERNEL_COMPILE_CONFIG,
+                imported_model=iree_definitions.ImportedModel.from_model(model),
+            )
+            for model in [
+                tflite_models.MOBILESSD_FP32,
+                tflite_models.POSENET_FP32,
+                tflite_models.MOBILENET_V2,
+                tflite_models.MOBILENET_V3SMALL,
+            ]
+        ]
 
-    adreno_devices = device_collections.DEFAULT_DEVICE_COLLECTION.query_device_specs(
-        architecture=common_definitions.DeviceArchitecture.QUALCOMM_ADRENO,
-        host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A)
-    run_configs = benchmark_suites.iree.utils.generate_e2e_model_run_configs(
-        module_generation_configs=default_gen_configs,
-        module_execution_configs=[module_execution_configs.VULKAN_CONFIG],
-        device_specs=adreno_devices)
-    run_configs += benchmark_suites.iree.utils.generate_e2e_model_run_configs(
-        module_generation_configs=fuse_padding_gen_configs,
-        module_execution_configs=[module_execution_configs.VULKAN_CONFIG],
-        device_specs=adreno_devices)
-    run_configs += benchmark_suites.iree.utils.generate_e2e_model_run_configs(
-        module_generation_configs=fuse_padding_repeated_kernel_gen_configs,
-        module_execution_configs=[
-            module_execution_configs.VULKAN_BATCH_SIZE_16_CONFIG
-        ],
-        device_specs=adreno_devices)
+        adreno_devices = (
+            device_collections.DEFAULT_DEVICE_COLLECTION.query_device_specs(
+                architecture=common_definitions.DeviceArchitecture.QUALCOMM_ADRENO,
+                host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
+            )
+        )
+        run_configs = benchmark_suites.iree.utils.generate_e2e_model_run_configs(
+            module_generation_configs=default_gen_configs,
+            module_execution_configs=[module_execution_configs.VULKAN_CONFIG],
+            device_specs=adreno_devices,
+        )
+        run_configs += benchmark_suites.iree.utils.generate_e2e_model_run_configs(
+            module_generation_configs=fuse_padding_gen_configs,
+            module_execution_configs=[module_execution_configs.VULKAN_CONFIG],
+            device_specs=adreno_devices,
+        )
+        run_configs += benchmark_suites.iree.utils.generate_e2e_model_run_configs(
+            module_generation_configs=fuse_padding_repeated_kernel_gen_configs,
+            module_execution_configs=[
+                module_execution_configs.VULKAN_BATCH_SIZE_16_CONFIG
+            ],
+            device_specs=adreno_devices,
+        )
 
-    gen_configs = (default_gen_configs + fuse_padding_gen_configs +
-                   fuse_padding_repeated_kernel_gen_configs)
-    return (gen_configs, run_configs)
+        gen_configs = (
+            default_gen_configs
+            + fuse_padding_gen_configs
+            + fuse_padding_repeated_kernel_gen_configs
+        )
+        return (gen_configs, run_configs)
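
The hunk above is representative of most of this diff: wherever a call such as `CompileConfig.build(...)` no longer fits in Black's default 88-column line, every argument moves to its own line and the last argument gains a trailing comma, so the exploded layout stays stable on later runs. A minimal sketch of that behavior, using a hypothetical `build_config` helper rather than the real `iree_definitions` API:

```python
# Hypothetical helper, for illustration only; not part of the IREE benchmark suite.
def build_config(id: str, tags: list, compile_targets: list) -> dict:
    return {"id": id, "tags": tags, "compile_targets": compile_targets}


# A short call stays on one line.
small = build_config(id="a", tags=["t"], compile_targets=["x"])

# Once the call would exceed 88 columns, Black puts one argument per line and
# adds a "magic" trailing comma after the last one.
large = build_config(
    id="example-compile-config-android-qualcomm-adreno-fuse-padding-repeated-kernel",
    tags=["experimental-flags", "fuse-padding", "repeated-kernel"],
    compile_targets=["adreno-gpu-compile-target-placeholder"],
)
```

The trailing comma also means that adding or removing a single argument later produces a one-line diff.
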
diff --git a/build_tools/python/benchmark_suites/iree/armv8_a_benchmarks.py b/build_tools/python/benchmark_suites/iree/armv8_a_benchmarks.py
index 4d6279e..48d49d8 100644
--- a/build_tools/python/benchmark_suites/iree/armv8_a_benchmarks.py
+++ b/build_tools/python/benchmark_suites/iree/armv8_a_benchmarks.py
@@ -15,99 +15,115 @@
 
 
 class Android_ARMv8_A_Benchmarks(object):
-  """Benchmarks on ARMv8-A Android devices."""
-  NONQUANT_MODELS = [
-      tflite_models.DEEPLABV3_FP32,
-      tflite_models.MOBILESSD_FP32,
-      tflite_models.POSENET_FP32,
-      tflite_models.MOBILEBERT_FP32,
-      tflite_models.MOBILENET_V2,
-      tflite_models.MOBILENET_V3SMALL,
-  ]
-  QUANT_MODELS = [tflite_models.MOBILEBERT_INT8]
+    """Benchmarks on ARMv8-A Android devices."""
 
-  ARMV8_A_CPU_TARGET = iree_definitions.CompileTarget(
-      target_architecture=common_definitions.DeviceArchitecture.
-      ARMV8_2_A_GENERIC,
-      target_backend=iree_definitions.TargetBackend.LLVM_CPU,
-      target_abi=iree_definitions.TargetABI.LINUX_ANDROID29)
-
-  DEFAULT_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
-      id=unique_ids.IREE_COMPILE_CONFIG_ANDROID_ARMV8_2_A_GENERIC_DEFAULTS,
-      tags=["default-flags"],
-      compile_targets=[ARMV8_A_CPU_TARGET])
-  MMT4D_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
-      id=unique_ids.IREE_COMPILE_CONFIG_ANDROID_ARMV8_2_A_GENERIC_MMT4D,
-      tags=["experimental-flags", "mmt4d"],
-      compile_targets=[ARMV8_A_CPU_TARGET],
-      extra_flags=[
-          "--iree-flow-enable-data-tiling",
-          "--iree-flow-enable-fuse-padding-into-linalg-consumer-ops",
-          "--iree-llvmcpu-enable-pad-consumer-fusion"
-      ])
-  MMT4D_AND_DOTPROD_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
-      id=unique_ids.IREE_COMPILE_CONFIG_ANDROID_ARMV8_2_A_GENERIC_MMT4D_DOTPROD,
-      tags=["experimental-flags", "mmt4d", "dotprod"],
-      compile_targets=[ARMV8_A_CPU_TARGET],
-      extra_flags=[
-          "--iree-flow-enable-data-tiling",
-          "--iree-llvmcpu-target-cpu-features=+dotprod",
-          "--iree-flow-enable-fuse-padding-into-linalg-consumer-ops",
-          "--iree-llvmcpu-enable-pad-consumer-fusion"
-      ])
-
-  def generate(
-      self
-  ) -> Tuple[List[iree_definitions.ModuleGenerationConfig],
-             List[iree_definitions.E2EModelRunConfig]]:
-    """Generates IREE compile and run configs."""
-
-    local_sync_execution_configs = [
-        module_execution_configs.ELF_LOCAL_SYNC_CONFIG
+    NONQUANT_MODELS = [
+        tflite_models.DEEPLABV3_FP32,
+        tflite_models.MOBILESSD_FP32,
+        tflite_models.POSENET_FP32,
+        tflite_models.MOBILEBERT_FP32,
+        tflite_models.MOBILENET_V2,
+        tflite_models.MOBILENET_V3SMALL,
     ]
-    local_task_execution_configs = [
-        module_execution_configs.get_elf_system_scheduling_local_task_config(
-            thread_num) for thread_num in [1, 4]
-    ]
+    QUANT_MODELS = [tflite_models.MOBILEBERT_INT8]
 
-    default_gen_confings = [
-        iree_definitions.ModuleGenerationConfig.build(
-            compile_config=self.DEFAULT_COMPILE_CONFIG,
-            imported_model=iree_definitions.ImportedModel.from_model(model))
-        for model in self.NONQUANT_MODELS + self.QUANT_MODELS
-    ]
-    experimental_gen_confings = [
-        iree_definitions.ModuleGenerationConfig.build(
-            compile_config=self.MMT4D_COMPILE_CONFIG,
-            imported_model=iree_definitions.ImportedModel.from_model(model))
-        for model in self.NONQUANT_MODELS
-    ] + [
-        iree_definitions.ModuleGenerationConfig.build(
-            compile_config=self.MMT4D_AND_DOTPROD_COMPILE_CONFIG,
-            imported_model=iree_definitions.ImportedModel.from_model(model))
-        for model in self.QUANT_MODELS
-    ]
+    ARMV8_A_CPU_TARGET = iree_definitions.CompileTarget(
+        target_architecture=common_definitions.DeviceArchitecture.ARMV8_2_A_GENERIC,
+        target_backend=iree_definitions.TargetBackend.LLVM_CPU,
+        target_abi=iree_definitions.TargetABI.LINUX_ANDROID29,
+    )
 
-    all_devices = device_collections.DEFAULT_DEVICE_COLLECTION.query_device_specs(
-        architecture=common_definitions.DeviceArchitecture.ARMV8_2_A_GENERIC,
-        host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A)
-    big_cores_devices = device_collections.DEFAULT_DEVICE_COLLECTION.query_device_specs(
-        architecture=common_definitions.DeviceArchitecture.ARMV8_2_A_GENERIC,
-        host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
-        device_parameters={"big-cores"})
-    run_configs = benchmark_suites.iree.utils.generate_e2e_model_run_configs(
-        module_generation_configs=default_gen_confings,
-        module_execution_configs=local_sync_execution_configs +
-        local_task_execution_configs,
-        device_specs=all_devices)
-    run_configs += benchmark_suites.iree.utils.generate_e2e_model_run_configs(
-        module_generation_configs=experimental_gen_confings,
-        module_execution_configs=local_sync_execution_configs,
-        device_specs=all_devices)
-    run_configs += benchmark_suites.iree.utils.generate_e2e_model_run_configs(
-        module_generation_configs=experimental_gen_confings,
-        module_execution_configs=local_task_execution_configs,
-        device_specs=big_cores_devices)
+    DEFAULT_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
+        id=unique_ids.IREE_COMPILE_CONFIG_ANDROID_ARMV8_2_A_GENERIC_DEFAULTS,
+        tags=["default-flags"],
+        compile_targets=[ARMV8_A_CPU_TARGET],
+    )
+    MMT4D_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
+        id=unique_ids.IREE_COMPILE_CONFIG_ANDROID_ARMV8_2_A_GENERIC_MMT4D,
+        tags=["experimental-flags", "mmt4d"],
+        compile_targets=[ARMV8_A_CPU_TARGET],
+        extra_flags=[
+            "--iree-flow-enable-data-tiling",
+            "--iree-flow-enable-fuse-padding-into-linalg-consumer-ops",
+            "--iree-llvmcpu-enable-pad-consumer-fusion",
+        ],
+    )
+    MMT4D_AND_DOTPROD_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
+        id=unique_ids.IREE_COMPILE_CONFIG_ANDROID_ARMV8_2_A_GENERIC_MMT4D_DOTPROD,
+        tags=["experimental-flags", "mmt4d", "dotprod"],
+        compile_targets=[ARMV8_A_CPU_TARGET],
+        extra_flags=[
+            "--iree-flow-enable-data-tiling",
+            "--iree-llvmcpu-target-cpu-features=+dotprod",
+            "--iree-flow-enable-fuse-padding-into-linalg-consumer-ops",
+            "--iree-llvmcpu-enable-pad-consumer-fusion",
+        ],
+    )
 
-    gen_confings = (default_gen_confings + experimental_gen_confings)
-    return (gen_confings, run_configs)
+    def generate(
+        self,
+    ) -> Tuple[
+        List[iree_definitions.ModuleGenerationConfig],
+        List[iree_definitions.E2EModelRunConfig],
+    ]:
+        """Generates IREE compile and run configs."""
+
+        local_sync_execution_configs = [module_execution_configs.ELF_LOCAL_SYNC_CONFIG]
+        local_task_execution_configs = [
+            module_execution_configs.get_elf_system_scheduling_local_task_config(
+                thread_num
+            )
+            for thread_num in [1, 4]
+        ]
+
+        default_gen_confings = [
+            iree_definitions.ModuleGenerationConfig.build(
+                compile_config=self.DEFAULT_COMPILE_CONFIG,
+                imported_model=iree_definitions.ImportedModel.from_model(model),
+            )
+            for model in self.NONQUANT_MODELS + self.QUANT_MODELS
+        ]
+        experimental_gen_confings = [
+            iree_definitions.ModuleGenerationConfig.build(
+                compile_config=self.MMT4D_COMPILE_CONFIG,
+                imported_model=iree_definitions.ImportedModel.from_model(model),
+            )
+            for model in self.NONQUANT_MODELS
+        ] + [
+            iree_definitions.ModuleGenerationConfig.build(
+                compile_config=self.MMT4D_AND_DOTPROD_COMPILE_CONFIG,
+                imported_model=iree_definitions.ImportedModel.from_model(model),
+            )
+            for model in self.QUANT_MODELS
+        ]
+
+        all_devices = device_collections.DEFAULT_DEVICE_COLLECTION.query_device_specs(
+            architecture=common_definitions.DeviceArchitecture.ARMV8_2_A_GENERIC,
+            host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
+        )
+        big_cores_devices = (
+            device_collections.DEFAULT_DEVICE_COLLECTION.query_device_specs(
+                architecture=common_definitions.DeviceArchitecture.ARMV8_2_A_GENERIC,
+                host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
+                device_parameters={"big-cores"},
+            )
+        )
+        run_configs = benchmark_suites.iree.utils.generate_e2e_model_run_configs(
+            module_generation_configs=default_gen_confings,
+            module_execution_configs=local_sync_execution_configs
+            + local_task_execution_configs,
+            device_specs=all_devices,
+        )
+        run_configs += benchmark_suites.iree.utils.generate_e2e_model_run_configs(
+            module_generation_configs=experimental_gen_confings,
+            module_execution_configs=local_sync_execution_configs,
+            device_specs=all_devices,
+        )
+        run_configs += benchmark_suites.iree.utils.generate_e2e_model_run_configs(
+            module_generation_configs=experimental_gen_confings,
+            module_execution_configs=local_task_execution_configs,
+            device_specs=big_cores_devices,
+        )
+
+        gen_confings = default_gen_confings + experimental_gen_confings
+        return (gen_confings, run_configs)
diff --git a/build_tools/python/benchmark_suites/iree/benchmark_collections.py b/build_tools/python/benchmark_suites/iree/benchmark_collections.py
index a2f3a14..d2e1cc2 100644
--- a/build_tools/python/benchmark_suites/iree/benchmark_collections.py
+++ b/build_tools/python/benchmark_suites/iree/benchmark_collections.py
@@ -9,60 +9,73 @@
 
 from e2e_test_artifacts import iree_artifacts
 from e2e_test_framework.definitions import iree_definitions
-from benchmark_suites.iree import (benchmark_tags, riscv_benchmarks,
-                                   x86_64_benchmarks, adreno_benchmarks,
-                                   armv8_a_benchmarks, cuda_benchmarks,
-                                   mali_benchmarks, vulkan_nvidia_benchmarks,
-                                   vmvx_benchmarks)
+from benchmark_suites.iree import (
+    benchmark_tags,
+    riscv_benchmarks,
+    x86_64_benchmarks,
+    adreno_benchmarks,
+    armv8_a_benchmarks,
+    cuda_benchmarks,
+    mali_benchmarks,
+    vulkan_nvidia_benchmarks,
+    vmvx_benchmarks,
+)
 
 COMPILE_STATS_ID_SUFFIX = "-compile-stats"
 
 
-def generate_benchmarks(
-) -> Tuple[List[iree_definitions.ModuleGenerationConfig],
-           List[iree_definitions.E2EModelRunConfig]]:
-  benchmarks = [
-      x86_64_benchmarks.Linux_x86_64_Benchmarks(),
-      cuda_benchmarks.Linux_CUDA_Benchmarks(),
-      riscv_benchmarks.Linux_RV64_Benchmarks(),
-      riscv_benchmarks.Linux_RV32_Benchmarks(),
-      armv8_a_benchmarks.Android_ARMv8_A_Benchmarks(),
-      adreno_benchmarks.Android_Adreno_Benchmarks(),
-      mali_benchmarks.Android_Mali_Benchmarks(),
-      vulkan_nvidia_benchmarks.Linux_Vulkan_NVIDIA_Benchmarks(),
-      vmvx_benchmarks.Android_VMVX_Benchmarks()
-  ]
-  all_gen_configs: List[iree_definitions.ModuleGenerationConfig] = []
-  all_run_configs: List[iree_definitions.E2EModelRunConfig] = []
-  for benchmark in benchmarks:
-    module_generation_configs, run_configs = benchmark.generate()
-    all_gen_configs += module_generation_configs
-    all_run_configs += run_configs
+def generate_benchmarks() -> (
+    Tuple[
+        List[iree_definitions.ModuleGenerationConfig],
+        List[iree_definitions.E2EModelRunConfig],
+    ]
+):
+    benchmarks = [
+        x86_64_benchmarks.Linux_x86_64_Benchmarks(),
+        cuda_benchmarks.Linux_CUDA_Benchmarks(),
+        riscv_benchmarks.Linux_RV64_Benchmarks(),
+        riscv_benchmarks.Linux_RV32_Benchmarks(),
+        armv8_a_benchmarks.Android_ARMv8_A_Benchmarks(),
+        adreno_benchmarks.Android_Adreno_Benchmarks(),
+        mali_benchmarks.Android_Mali_Benchmarks(),
+        vulkan_nvidia_benchmarks.Linux_Vulkan_NVIDIA_Benchmarks(),
+        vmvx_benchmarks.Android_VMVX_Benchmarks(),
+    ]
+    all_gen_configs: List[iree_definitions.ModuleGenerationConfig] = []
+    all_run_configs: List[iree_definitions.E2EModelRunConfig] = []
+    for benchmark in benchmarks:
+        module_generation_configs, run_configs = benchmark.generate()
+        all_gen_configs += module_generation_configs
+        all_run_configs += run_configs
 
-  compile_stats_gen_configs = []
-  # For now we simply track compilation statistics of all modules.
-  for gen_config in all_gen_configs:
-    compile_config = gen_config.compile_config
-    # Use POSIX path, see the comment of iree_definitions.MODULE_DIR_VARIABLE.
-    scheduling_stats_path = f"{iree_definitions.MODULE_DIR_VARIABLE}/{iree_artifacts.SCHEDULING_STATS_FILENAME}"
-    compile_stats_config = iree_definitions.CompileConfig.build(
-        id=compile_config.id + COMPILE_STATS_ID_SUFFIX,
-        tags=compile_config.tags + [benchmark_tags.COMPILE_STATS],
-        compile_targets=compile_config.compile_targets,
-        extra_flags=compile_config.extra_flags + [
-            # Enable zip polyglot to provide component sizes.
-            "--iree-vm-emit-polyglot-zip=true",
-            # Disable debug symbols to provide correct component sizes.
-            "--iree-llvmcpu-debug-symbols=false",
-            # Dump scheduling statistics
-            "--iree-scheduling-dump-statistics-format=json",
-            f"--iree-scheduling-dump-statistics-file={scheduling_stats_path}"
-        ])
-    compile_stats_gen_configs.append(
-        iree_definitions.ModuleGenerationConfig.build(
-            imported_model=gen_config.imported_model,
-            compile_config=compile_stats_config,
-            tags=gen_config.tags))
-  all_gen_configs += compile_stats_gen_configs
+    compile_stats_gen_configs = []
+    # For now we simply track compilation statistics of all modules.
+    for gen_config in all_gen_configs:
+        compile_config = gen_config.compile_config
+        # Use POSIX path, see the comment of iree_definitions.MODULE_DIR_VARIABLE.
+        scheduling_stats_path = f"{iree_definitions.MODULE_DIR_VARIABLE}/{iree_artifacts.SCHEDULING_STATS_FILENAME}"
+        compile_stats_config = iree_definitions.CompileConfig.build(
+            id=compile_config.id + COMPILE_STATS_ID_SUFFIX,
+            tags=compile_config.tags + [benchmark_tags.COMPILE_STATS],
+            compile_targets=compile_config.compile_targets,
+            extra_flags=compile_config.extra_flags
+            + [
+                # Enable zip polyglot to provide component sizes.
+                "--iree-vm-emit-polyglot-zip=true",
+                # Disable debug symbols to provide correct component sizes.
+                "--iree-llvmcpu-debug-symbols=false",
+                # Dump scheduling statistics
+                "--iree-scheduling-dump-statistics-format=json",
+                f"--iree-scheduling-dump-statistics-file={scheduling_stats_path}",
+            ],
+        )
+        compile_stats_gen_configs.append(
+            iree_definitions.ModuleGenerationConfig.build(
+                imported_model=gen_config.imported_model,
+                compile_config=compile_stats_config,
+                tags=gen_config.tags,
+            )
+        )
+    all_gen_configs += compile_stats_gen_configs
 
-  return (all_gen_configs, all_run_configs)
+    return (all_gen_configs, all_run_configs)
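
The import hunk at the top of this file shows another common transformation: yapf's hanging-indent continuation becomes a parenthesized import list with one name per line and a trailing comma. A small sketch with standard-library names standing in for the `benchmark_suites.iree` submodules; with names this short Black would normally collapse the import onto one line, but the pre-existing trailing comma (the magic trailing comma) keeps it exploded:

```python
# Standard-library names used purely as placeholders for the real submodules.
from os.path import (
    basename,
    dirname,
    join,
    splitext,
)

print(join(dirname("/tmp/example.py"), splitext(basename("/tmp/example.py"))[0]))
```
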
diff --git a/build_tools/python/benchmark_suites/iree/cuda_benchmarks.py b/build_tools/python/benchmark_suites/iree/cuda_benchmarks.py
index 1174235..fd62c1a 100644
--- a/build_tools/python/benchmark_suites/iree/cuda_benchmarks.py
+++ b/build_tools/python/benchmark_suites/iree/cuda_benchmarks.py
@@ -15,81 +15,103 @@
 
 
 class Linux_CUDA_Benchmarks(object):
-  """Benchmarks on CUDA Linux devices."""
+    """Benchmarks on CUDA Linux devices."""
 
-  SM_80_GPU_TARGET = iree_definitions.CompileTarget(
-      target_architecture=common_definitions.DeviceArchitecture.CUDA_SM80,
-      target_backend=iree_definitions.TargetBackend.CUDA,
-      target_abi=iree_definitions.TargetABI.LINUX_GNU)
-  SM_80_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
-      id=unique_ids.IREE_COMPILE_CONFIG_LINUX_CUDA_SM80_DEFAULTS,
-      tags=["default-flags"],
-      compile_targets=[SM_80_GPU_TARGET])
-  SM_80_UBENCH_MATMUL_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
-      id=unique_ids.IREE_COMPILE_CONFIG_LINUX_CUDA_SM80_MATMUL_UBENCH,
-      tags=["ukernel", "matmul"],
-      compile_targets=[SM_80_GPU_TARGET],
-      extra_flags=["--iree-hal-benchmark-dispatch-repeat-count=100"])
-  SM_80_UBENCH_MATMUL_SPLITK_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
-      id=unique_ids.IREE_COMPILE_CONFIG_LINUX_CUDA_SM80_MATMUL_SPLITK_UBENCH,
-      tags=["ukernel", "matmul", "splitk"],
-      compile_targets=[SM_80_GPU_TARGET],
-      extra_flags=[
-          "--iree-hal-benchmark-dispatch-repeat-count=100",
-          "--iree-flow-split-matmul-reduction=4",
-          "--iree-codegen-llvmgpu-use-wmma"
-      ])
+    SM_80_GPU_TARGET = iree_definitions.CompileTarget(
+        target_architecture=common_definitions.DeviceArchitecture.CUDA_SM80,
+        target_backend=iree_definitions.TargetBackend.CUDA,
+        target_abi=iree_definitions.TargetABI.LINUX_GNU,
+    )
+    SM_80_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
+        id=unique_ids.IREE_COMPILE_CONFIG_LINUX_CUDA_SM80_DEFAULTS,
+        tags=["default-flags"],
+        compile_targets=[SM_80_GPU_TARGET],
+    )
+    SM_80_UBENCH_MATMUL_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
+        id=unique_ids.IREE_COMPILE_CONFIG_LINUX_CUDA_SM80_MATMUL_UBENCH,
+        tags=["ukernel", "matmul"],
+        compile_targets=[SM_80_GPU_TARGET],
+        extra_flags=["--iree-hal-benchmark-dispatch-repeat-count=100"],
+    )
+    SM_80_UBENCH_MATMUL_SPLITK_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
+        id=unique_ids.IREE_COMPILE_CONFIG_LINUX_CUDA_SM80_MATMUL_SPLITK_UBENCH,
+        tags=["ukernel", "matmul", "splitk"],
+        compile_targets=[SM_80_GPU_TARGET],
+        extra_flags=[
+            "--iree-hal-benchmark-dispatch-repeat-count=100",
+            "--iree-flow-split-matmul-reduction=4",
+            "--iree-codegen-llvmgpu-use-wmma",
+        ],
+    )
 
-  def _generate_configs(
-      self,
-      models: Sequence[common_definitions.Model],
-      compile_config: iree_definitions.CompileConfig,
-      execution_config: iree_definitions.
-      ModuleExecutionConfig = module_execution_configs.CUDA_CONFIG,
-      tags: Sequence[str] = (),
-  ) -> Tuple[List[iree_definitions.ModuleGenerationConfig],
-             List[iree_definitions.E2EModelRunConfig]]:
-    gen_configs = [
-        iree_definitions.ModuleGenerationConfig.build(
-            compile_config=compile_config,
-            imported_model=iree_definitions.ImportedModel.from_model(model),
-            tags=tags) for model in models
-    ]
-    sm80_devices = device_collections.DEFAULT_DEVICE_COLLECTION.query_device_specs(
-        architecture=common_definitions.DeviceArchitecture.NVIDIA_AMPERE,
-        host_environment=common_definitions.HostEnvironment.LINUX_X86_64)
-    run_module_configs = benchmark_suites.iree.utils.generate_e2e_model_run_configs(
-        module_generation_configs=gen_configs,
-        module_execution_configs=[execution_config],
-        device_specs=sm80_devices,
-        tags=tags)
+    def _generate_configs(
+        self,
+        models: Sequence[common_definitions.Model],
+        compile_config: iree_definitions.CompileConfig,
+        execution_config: iree_definitions.ModuleExecutionConfig = module_execution_configs.CUDA_CONFIG,
+        tags: Sequence[str] = (),
+    ) -> Tuple[
+        List[iree_definitions.ModuleGenerationConfig],
+        List[iree_definitions.E2EModelRunConfig],
+    ]:
+        gen_configs = [
+            iree_definitions.ModuleGenerationConfig.build(
+                compile_config=compile_config,
+                imported_model=iree_definitions.ImportedModel.from_model(model),
+                tags=tags,
+            )
+            for model in models
+        ]
+        sm80_devices = device_collections.DEFAULT_DEVICE_COLLECTION.query_device_specs(
+            architecture=common_definitions.DeviceArchitecture.NVIDIA_AMPERE,
+            host_environment=common_definitions.HostEnvironment.LINUX_X86_64,
+        )
+        run_module_configs = benchmark_suites.iree.utils.generate_e2e_model_run_configs(
+            module_generation_configs=gen_configs,
+            module_execution_configs=[execution_config],
+            device_specs=sm80_devices,
+            tags=tags,
+        )
 
-    return (gen_configs, run_module_configs)
+        return (gen_configs, run_module_configs)
 
-  def generate(
-      self
-  ) -> Tuple[List[iree_definitions.ModuleGenerationConfig],
-             List[iree_definitions.E2EModelRunConfig]]:
-    """Generates IREE compile and run configs."""
-    # The CUDA tag is required to put them into the CUDA benchmark preset.
-    gen_configs, run_configs = self._generate_configs(
-        model_groups.CUDA_MODELS,
-        self.SM_80_COMPILE_CONFIG,
-        tags=[benchmark_tags.CUDA])
-    ubench_gen_configs, ubench_run_configs = self._generate_configs(
-        model_groups.MICRO_MATMUL,
-        self.SM_80_UBENCH_MATMUL_COMPILE_CONFIG,
-        execution_config=module_execution_configs.CUDA_BATCH_SIZE_100_CONFIG,
-        tags=[benchmark_tags.CUDA])
-    ubench_splitk_gen_configs, ubench_splitk_run_configs = self._generate_configs(
-        model_groups.MICRO_MATMUL_SPLITK,
-        self.SM_80_UBENCH_MATMUL_SPLITK_COMPILE_CONFIG,
-        execution_config=module_execution_configs.CUDA_BATCH_SIZE_100_CONFIG,
-        tags=[benchmark_tags.CUDA])
-    large_gen_configs, large_module_configs = self._generate_configs(
-        model_groups.CUDA_MODELS_LONG,
-        self.SM_80_COMPILE_CONFIG,
-        tags=[benchmark_tags.CUDA, benchmark_tags.LARGE])
-    return (gen_configs + ubench_gen_configs + ubench_splitk_gen_configs +
-            large_gen_configs, run_configs + ubench_run_configs +
-            ubench_splitk_run_configs + large_module_configs)
+    def generate(
+        self,
+    ) -> Tuple[
+        List[iree_definitions.ModuleGenerationConfig],
+        List[iree_definitions.E2EModelRunConfig],
+    ]:
+        """Generates IREE compile and run configs."""
+        # The CUDA tag is required to put them into the CUDA benchmark preset.
+        gen_configs, run_configs = self._generate_configs(
+            model_groups.CUDA_MODELS,
+            self.SM_80_COMPILE_CONFIG,
+            tags=[benchmark_tags.CUDA],
+        )
+        ubench_gen_configs, ubench_run_configs = self._generate_configs(
+            model_groups.MICRO_MATMUL,
+            self.SM_80_UBENCH_MATMUL_COMPILE_CONFIG,
+            execution_config=module_execution_configs.CUDA_BATCH_SIZE_100_CONFIG,
+            tags=[benchmark_tags.CUDA],
+        )
+        ubench_splitk_gen_configs, ubench_splitk_run_configs = self._generate_configs(
+            model_groups.MICRO_MATMUL_SPLITK,
+            self.SM_80_UBENCH_MATMUL_SPLITK_COMPILE_CONFIG,
+            execution_config=module_execution_configs.CUDA_BATCH_SIZE_100_CONFIG,
+            tags=[benchmark_tags.CUDA],
+        )
+        large_gen_configs, large_module_configs = self._generate_configs(
+            model_groups.CUDA_MODELS_LONG,
+            self.SM_80_COMPILE_CONFIG,
+            tags=[benchmark_tags.CUDA, benchmark_tags.LARGE],
+        )
+        return (
+            gen_configs
+            + ubench_gen_configs
+            + ubench_splitk_gen_configs
+            + large_gen_configs,
+            run_configs
+            + ubench_run_configs
+            + ubench_splitk_run_configs
+            + large_module_configs,
+        )
diff --git a/build_tools/python/benchmark_suites/iree/mali_benchmarks.py b/build_tools/python/benchmark_suites/iree/mali_benchmarks.py
index 2cd371b..dd5e044 100644
--- a/build_tools/python/benchmark_suites/iree/mali_benchmarks.py
+++ b/build_tools/python/benchmark_suites/iree/mali_benchmarks.py
@@ -15,123 +15,146 @@
 
 
 class Android_Mali_Benchmarks(object):
-  """Benchmarks on Android devices with Mali GPU."""
+    """Benchmarks on Android devices with Mali GPU."""
 
-  ARM_VALHALL_GPU_TARGET = iree_definitions.CompileTarget(
-      target_backend=iree_definitions.TargetBackend.VULKAN_SPIRV,
-      target_architecture=common_definitions.DeviceArchitecture.ARM_VALHALL,
-      target_abi=iree_definitions.TargetABI.VULKAN_ANDROID31)
-  DEFAULT_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
-      id=unique_ids.IREE_COMPILE_CONFIG_ANDROID_ARM_VALHALL_DEFAULTS,
-      tags=["default-flags"],
-      compile_targets=[ARM_VALHALL_GPU_TARGET])
-  EXPERIMENTAL_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
-      id=unique_ids.IREE_COMPILE_CONFIG_ANDROID_ARM_VALHALL_EXPERIMENTAL,
-      tags=["experimental-flags", "fuse-padding", "max-concurrency"],
-      compile_targets=[ARM_VALHALL_GPU_TARGET],
-      extra_flags=[
-          "--iree-flow-enable-fuse-padding-into-linalg-consumer-ops",
-          "--iree-stream-partitioning-favor=max-concurrency"
-      ])
-  # Kernel execution
-  # Note that for kernel-execution benchmarks batch_size/repeat-count need to be
-  # low enough that the whole dispatch completes within an OS-specific timeout.
-  # Otherwise you'll get error like:
-  # ```
-  # INTERNAL; VK_ERROR_DEVICE_LOST; vkQueueSubmit; while invoking native function
-  # hal.fence.await; while calling import;
-  # ```
-  EXPERIMENTAL_REPEATED_KERNEL_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
-      id=unique_ids.
-      IREE_COMPILE_CONFIG_ANDROID_ARM_VALHALL_EXPERIMENTAL_REPEATED_KERNEL,
-      tags=[
-          "experimental-flags", "fuse-padding", "max-concurrency",
-          "repeated-kernel"
-      ],
-      compile_targets=[ARM_VALHALL_GPU_TARGET],
-      extra_flags=EXPERIMENTAL_COMPILE_CONFIG.extra_flags +
-      ["--iree-hal-benchmark-dispatch-repeat-count=32"])
-  EXPERIMENTAL_REPEATED_KERNEL_RUN_FLAGS = ["--batch_size=32"]
-
-  FP32_MODELS = [
-      tflite_models.DEEPLABV3_FP32,
-      tflite_models.MOBILESSD_FP32,
-      tflite_models.POSENET_FP32,
-      tflite_models.MOBILEBERT_FP32,
-      tflite_models.MOBILENET_V2,
-      tflite_models.MOBILENET_V3SMALL,
-  ]
-  FP16_MODELS = [tflite_models.MOBILEBERT_FP16]
-  QUANT_MODELS = [
-      tflite_models.MOBILEBERT_INT8,
-      tflite_models.EFFICIENTNET_INT8,
-      tflite_models.PERSON_DETECT_INT8,
-  ]
-
-  def generate(
-      self
-  ) -> Tuple[List[iree_definitions.ModuleGenerationConfig],
-             List[iree_definitions.E2EModelRunConfig]]:
-    default_gen_configs = self._get_module_generation_configs(
-        compile_config=self.DEFAULT_COMPILE_CONFIG,
-        fp32_models=self.FP32_MODELS,
-        fp16_models=self.FP16_MODELS,
-        quant_models=self.QUANT_MODELS)
-    experimental_gen_configs = self._get_module_generation_configs(
-        compile_config=self.EXPERIMENTAL_COMPILE_CONFIG,
-        fp32_models=self.FP32_MODELS,
-        fp16_models=self.FP16_MODELS,
-        quant_models=self.QUANT_MODELS)
-    experimental_repeated_kernel_gen_configs = self._get_module_generation_configs(
-        compile_config=self.EXPERIMENTAL_REPEATED_KERNEL_COMPILE_CONFIG,
-        fp32_models=self.FP32_MODELS,
-        fp16_models=self.FP16_MODELS,
-        quant_models=self.QUANT_MODELS)
-
-    mali_devices = device_collections.DEFAULT_DEVICE_COLLECTION.query_device_specs(
-        architecture=common_definitions.DeviceArchitecture.ARM_VALHALL,
-        host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A)
-    run_configs = benchmark_suites.iree.utils.generate_e2e_model_run_configs(
-        module_generation_configs=default_gen_configs +
-        experimental_gen_configs,
-        module_execution_configs=[module_execution_configs.VULKAN_CONFIG],
-        device_specs=mali_devices)
-    run_configs += benchmark_suites.iree.utils.generate_e2e_model_run_configs(
-        module_generation_configs=experimental_repeated_kernel_gen_configs,
-        module_execution_configs=[
-            module_execution_configs.VULKAN_BATCH_SIZE_32_CONFIG
+    ARM_VALHALL_GPU_TARGET = iree_definitions.CompileTarget(
+        target_backend=iree_definitions.TargetBackend.VULKAN_SPIRV,
+        target_architecture=common_definitions.DeviceArchitecture.ARM_VALHALL,
+        target_abi=iree_definitions.TargetABI.VULKAN_ANDROID31,
+    )
+    DEFAULT_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
+        id=unique_ids.IREE_COMPILE_CONFIG_ANDROID_ARM_VALHALL_DEFAULTS,
+        tags=["default-flags"],
+        compile_targets=[ARM_VALHALL_GPU_TARGET],
+    )
+    EXPERIMENTAL_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
+        id=unique_ids.IREE_COMPILE_CONFIG_ANDROID_ARM_VALHALL_EXPERIMENTAL,
+        tags=["experimental-flags", "fuse-padding", "max-concurrency"],
+        compile_targets=[ARM_VALHALL_GPU_TARGET],
+        extra_flags=[
+            "--iree-flow-enable-fuse-padding-into-linalg-consumer-ops",
+            "--iree-stream-partitioning-favor=max-concurrency",
         ],
-        device_specs=mali_devices)
+    )
+    # Kernel execution
+    # Note that for kernel-execution benchmarks batch_size/repeat-count need to be
+    # low enough that the whole dispatch completes within an OS-specific timeout.
+    # Otherwise you'll get error like:
+    # ```
+    # INTERNAL; VK_ERROR_DEVICE_LOST; vkQueueSubmit; while invoking native function
+    # hal.fence.await; while calling import;
+    # ```
+    EXPERIMENTAL_REPEATED_KERNEL_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
+        id=unique_ids.IREE_COMPILE_CONFIG_ANDROID_ARM_VALHALL_EXPERIMENTAL_REPEATED_KERNEL,
+        tags=[
+            "experimental-flags",
+            "fuse-padding",
+            "max-concurrency",
+            "repeated-kernel",
+        ],
+        compile_targets=[ARM_VALHALL_GPU_TARGET],
+        extra_flags=EXPERIMENTAL_COMPILE_CONFIG.extra_flags
+        + ["--iree-hal-benchmark-dispatch-repeat-count=32"],
+    )
+    EXPERIMENTAL_REPEATED_KERNEL_RUN_FLAGS = ["--batch_size=32"]
 
-    gen_configs = (default_gen_configs + experimental_gen_configs +
-                   experimental_repeated_kernel_gen_configs)
-    return (gen_configs, run_configs)
-
-  def _get_module_generation_configs(
-      self, compile_config: iree_definitions.CompileConfig,
-      fp32_models: Sequence[common_definitions.Model],
-      fp16_models: Sequence[common_definitions.Model],
-      quant_models: Sequence[common_definitions.Model]
-  ) -> List[iree_definitions.ModuleGenerationConfig]:
-    demote_compile_config = iree_definitions.CompileConfig.build(
-        id=compile_config.id + "-demote-f32-to-16",
-        tags=compile_config.tags + ["demote-f32-to-f16"],
-        compile_targets=compile_config.compile_targets,
-        extra_flags=compile_config.extra_flags +
-        ["--iree-flow-demote-f32-to-f16"])
-    return [
-        iree_definitions.ModuleGenerationConfig.build(
-            compile_config=compile_config,
-            imported_model=iree_definitions.ImportedModel.from_model(model))
-        for model in fp32_models
-    ] + [
-        iree_definitions.ModuleGenerationConfig.build(
-            compile_config=demote_compile_config,
-            imported_model=iree_definitions.ImportedModel.from_model(model))
-        for model in fp16_models
-    ] + [
-        iree_definitions.ModuleGenerationConfig.build(
-            compile_config=compile_config,
-            imported_model=iree_definitions.ImportedModel.from_model(model))
-        for model in quant_models
+    FP32_MODELS = [
+        tflite_models.DEEPLABV3_FP32,
+        tflite_models.MOBILESSD_FP32,
+        tflite_models.POSENET_FP32,
+        tflite_models.MOBILEBERT_FP32,
+        tflite_models.MOBILENET_V2,
+        tflite_models.MOBILENET_V3SMALL,
     ]
+    FP16_MODELS = [tflite_models.MOBILEBERT_FP16]
+    QUANT_MODELS = [
+        tflite_models.MOBILEBERT_INT8,
+        tflite_models.EFFICIENTNET_INT8,
+        tflite_models.PERSON_DETECT_INT8,
+    ]
+
+    def generate(
+        self,
+    ) -> Tuple[
+        List[iree_definitions.ModuleGenerationConfig],
+        List[iree_definitions.E2EModelRunConfig],
+    ]:
+        default_gen_configs = self._get_module_generation_configs(
+            compile_config=self.DEFAULT_COMPILE_CONFIG,
+            fp32_models=self.FP32_MODELS,
+            fp16_models=self.FP16_MODELS,
+            quant_models=self.QUANT_MODELS,
+        )
+        experimental_gen_configs = self._get_module_generation_configs(
+            compile_config=self.EXPERIMENTAL_COMPILE_CONFIG,
+            fp32_models=self.FP32_MODELS,
+            fp16_models=self.FP16_MODELS,
+            quant_models=self.QUANT_MODELS,
+        )
+        experimental_repeated_kernel_gen_configs = self._get_module_generation_configs(
+            compile_config=self.EXPERIMENTAL_REPEATED_KERNEL_COMPILE_CONFIG,
+            fp32_models=self.FP32_MODELS,
+            fp16_models=self.FP16_MODELS,
+            quant_models=self.QUANT_MODELS,
+        )
+
+        mali_devices = device_collections.DEFAULT_DEVICE_COLLECTION.query_device_specs(
+            architecture=common_definitions.DeviceArchitecture.ARM_VALHALL,
+            host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
+        )
+        run_configs = benchmark_suites.iree.utils.generate_e2e_model_run_configs(
+            module_generation_configs=default_gen_configs + experimental_gen_configs,
+            module_execution_configs=[module_execution_configs.VULKAN_CONFIG],
+            device_specs=mali_devices,
+        )
+        run_configs += benchmark_suites.iree.utils.generate_e2e_model_run_configs(
+            module_generation_configs=experimental_repeated_kernel_gen_configs,
+            module_execution_configs=[
+                module_execution_configs.VULKAN_BATCH_SIZE_32_CONFIG
+            ],
+            device_specs=mali_devices,
+        )
+
+        gen_configs = (
+            default_gen_configs
+            + experimental_gen_configs
+            + experimental_repeated_kernel_gen_configs
+        )
+        return (gen_configs, run_configs)
+
+    def _get_module_generation_configs(
+        self,
+        compile_config: iree_definitions.CompileConfig,
+        fp32_models: Sequence[common_definitions.Model],
+        fp16_models: Sequence[common_definitions.Model],
+        quant_models: Sequence[common_definitions.Model],
+    ) -> List[iree_definitions.ModuleGenerationConfig]:
+        demote_compile_config = iree_definitions.CompileConfig.build(
+            id=compile_config.id + "-demote-f32-to-16",
+            tags=compile_config.tags + ["demote-f32-to-f16"],
+            compile_targets=compile_config.compile_targets,
+            extra_flags=compile_config.extra_flags + ["--iree-flow-demote-f32-to-f16"],
+        )
+        return (
+            [
+                iree_definitions.ModuleGenerationConfig.build(
+                    compile_config=compile_config,
+                    imported_model=iree_definitions.ImportedModel.from_model(model),
+                )
+                for model in fp32_models
+            ]
+            + [
+                iree_definitions.ModuleGenerationConfig.build(
+                    compile_config=demote_compile_config,
+                    imported_model=iree_definitions.ImportedModel.from_model(model),
+                )
+                for model in fp16_models
+            ]
+            + [
+                iree_definitions.ModuleGenerationConfig.build(
+                    compile_config=compile_config,
+                    imported_model=iree_definitions.ImportedModel.from_model(model),
+                )
+                for model in quant_models
+            ]
+        )
diff --git a/build_tools/python/benchmark_suites/iree/module_execution_configs.py b/build_tools/python/benchmark_suites/iree/module_execution_configs.py
index a8ffcf7..3a0dc03 100644
--- a/build_tools/python/benchmark_suites/iree/module_execution_configs.py
+++ b/build_tools/python/benchmark_suites/iree/module_execution_configs.py
@@ -16,92 +16,107 @@
     tags: List[str],
     loader: iree_definitions.RuntimeLoader,
     driver: iree_definitions.RuntimeDriver,
-    extra_flags: Optional[Sequence[str]] = None
+    extra_flags: Optional[Sequence[str]] = None,
 ) -> iree_definitions.ModuleExecutionConfig:
-  extra_flags = [] if extra_flags is None else list(extra_flags)
-  return iree_definitions.ModuleExecutionConfig.build(
-      id=id,
-      tags=tags,
-      loader=loader,
-      driver=driver,
-      extra_flags=["--device_allocator=caching"] + extra_flags)
+    extra_flags = [] if extra_flags is None else list(extra_flags)
+    return iree_definitions.ModuleExecutionConfig.build(
+        id=id,
+        tags=tags,
+        loader=loader,
+        driver=driver,
+        extra_flags=["--device_allocator=caching"] + extra_flags,
+    )
 
 
 ELF_LOCAL_SYNC_CONFIG = _with_caching_allocator(
     id=unique_ids.IREE_MODULE_EXECUTION_CONFIG_LOCAL_SYNC,
     tags=["full-inference", "default-flags"],
     loader=iree_definitions.RuntimeLoader.EMBEDDED_ELF,
-    driver=iree_definitions.RuntimeDriver.LOCAL_SYNC)
+    driver=iree_definitions.RuntimeDriver.LOCAL_SYNC,
+)
 
 CUDA_CONFIG = _with_caching_allocator(
     id=unique_ids.IREE_MODULE_EXECUTION_CONFIG_CUDA,
     tags=["full-inference", "default-flags"],
     loader=iree_definitions.RuntimeLoader.NONE,
-    driver=iree_definitions.RuntimeDriver.CUDA)
+    driver=iree_definitions.RuntimeDriver.CUDA,
+)
 
 CUDA_BATCH_SIZE_100_CONFIG = _with_caching_allocator(
     id=unique_ids.IREE_MODULE_EXECUTION_CONFIG_CUDA,
     tags=["full-inference", "default-flags"],
     loader=iree_definitions.RuntimeLoader.NONE,
     driver=iree_definitions.RuntimeDriver.CUDA,
-    extra_flags=["--batch_size=100"])
+    extra_flags=["--batch_size=100"],
+)
 
 VULKAN_CONFIG = _with_caching_allocator(
     id=unique_ids.IREE_MODULE_EXECUTION_CONFIG_VULKAN,
     tags=["full-inference", "default-flags"],
     loader=iree_definitions.RuntimeLoader.NONE,
-    driver=iree_definitions.RuntimeDriver.VULKAN)
+    driver=iree_definitions.RuntimeDriver.VULKAN,
+)
 
 VULKAN_BATCH_SIZE_16_CONFIG = _with_caching_allocator(
     id=unique_ids.IREE_MODULE_EXECUTION_CONFIG_VULKAN_BATCH_SIZE_16,
     tags=["full-inference", "experimental-flags"],
     loader=iree_definitions.RuntimeLoader.NONE,
     driver=iree_definitions.RuntimeDriver.VULKAN,
-    extra_flags=["--batch_size=16"])
+    extra_flags=["--batch_size=16"],
+)
 
 VULKAN_BATCH_SIZE_32_CONFIG = _with_caching_allocator(
     id=unique_ids.IREE_MODULE_EXECUTION_CONFIG_VULKAN_BATCH_SIZE_32,
     tags=["full-inference", "experimental-flags"],
     loader=iree_definitions.RuntimeLoader.NONE,
     driver=iree_definitions.RuntimeDriver.VULKAN,
-    extra_flags=["--batch_size=32"])
+    extra_flags=["--batch_size=32"],
+)
 
 
 def get_elf_local_task_config(thread_num: int):
-  config_id = f"{unique_ids.IREE_MODULE_EXECUTION_CONFIG_LOCAL_TASK_BASE}-{thread_num}"
-  return _with_caching_allocator(
-      id=config_id,
-      tags=[f"{thread_num}-thread", "full-inference", "default-flags"],
-      loader=iree_definitions.RuntimeLoader.EMBEDDED_ELF,
-      driver=iree_definitions.RuntimeDriver.LOCAL_TASK,
-      extra_flags=[f"--task_topology_max_group_count={thread_num}"])
+    config_id = (
+        f"{unique_ids.IREE_MODULE_EXECUTION_CONFIG_LOCAL_TASK_BASE}-{thread_num}"
+    )
+    return _with_caching_allocator(
+        id=config_id,
+        tags=[f"{thread_num}-thread", "full-inference", "default-flags"],
+        loader=iree_definitions.RuntimeLoader.EMBEDDED_ELF,
+        driver=iree_definitions.RuntimeDriver.LOCAL_TASK,
+        extra_flags=[f"--task_topology_max_group_count={thread_num}"],
+    )
 
 
 def get_vmvx_local_task_config(thread_num: int):
-  config_id = f"{unique_ids.IREE_MODULE_EXECUTION_CONFIG_VMVX_LOCAL_TASK_BASE}-{thread_num}"
-  return _with_caching_allocator(
-      id=config_id,
-      tags=[f"{thread_num}-thread", "full-inference", "default-flags"],
-      loader=iree_definitions.RuntimeLoader.VMVX_MODULE,
-      driver=iree_definitions.RuntimeDriver.LOCAL_TASK,
-      extra_flags=[f"--task_topology_max_group_count={thread_num}"])
+    config_id = (
+        f"{unique_ids.IREE_MODULE_EXECUTION_CONFIG_VMVX_LOCAL_TASK_BASE}-{thread_num}"
+    )
+    return _with_caching_allocator(
+        id=config_id,
+        tags=[f"{thread_num}-thread", "full-inference", "default-flags"],
+        loader=iree_definitions.RuntimeLoader.VMVX_MODULE,
+        driver=iree_definitions.RuntimeDriver.LOCAL_TASK,
+        extra_flags=[f"--task_topology_max_group_count={thread_num}"],
+    )
 
 
 def get_elf_system_scheduling_local_task_config(thread_num: int):
-  config_id = f"{unique_ids.IREE_MODULE_EXECUTION_CONFIG_SYS_SCHED_LOCAL_TASK_BASE}-{thread_num}"
-  return _with_caching_allocator(
-      id=config_id,
-      tags=[f"{thread_num}-thread", "full-inference", "system-scheduling"],
-      loader=iree_definitions.RuntimeLoader.EMBEDDED_ELF,
-      driver=iree_definitions.RuntimeDriver.LOCAL_TASK,
-      extra_flags=[f"--task_topology_group_count={thread_num}"])
+    config_id = f"{unique_ids.IREE_MODULE_EXECUTION_CONFIG_SYS_SCHED_LOCAL_TASK_BASE}-{thread_num}"
+    return _with_caching_allocator(
+        id=config_id,
+        tags=[f"{thread_num}-thread", "full-inference", "system-scheduling"],
+        loader=iree_definitions.RuntimeLoader.EMBEDDED_ELF,
+        driver=iree_definitions.RuntimeDriver.LOCAL_TASK,
+        extra_flags=[f"--task_topology_group_count={thread_num}"],
+    )
 
 
 def get_vmvx_system_scheduling_local_task_config(thread_num: int):
-  config_id = f"{unique_ids.IREE_MODULE_EXECUTION_CONFIG_VMVX_SYS_SCHED_LOCAL_TASK_BASE}-{thread_num}"
-  return _with_caching_allocator(
-      id=config_id,
-      tags=[f"{thread_num}-thread", "full-inference", "system-scheduling"],
-      loader=iree_definitions.RuntimeLoader.VMVX_MODULE,
-      driver=iree_definitions.RuntimeDriver.LOCAL_TASK,
-      extra_flags=[f"--task_topology_group_count={thread_num}"])
+    config_id = f"{unique_ids.IREE_MODULE_EXECUTION_CONFIG_VMVX_SYS_SCHED_LOCAL_TASK_BASE}-{thread_num}"
+    return _with_caching_allocator(
+        id=config_id,
+        tags=[f"{thread_num}-thread", "full-inference", "system-scheduling"],
+        loader=iree_definitions.RuntimeLoader.VMVX_MODULE,
+        driver=iree_definitions.RuntimeDriver.LOCAL_TASK,
+        extra_flags=[f"--task_topology_group_count={thread_num}"],
+    )
diff --git a/build_tools/python/benchmark_suites/iree/riscv_benchmarks.py b/build_tools/python/benchmark_suites/iree/riscv_benchmarks.py
index 477fa46..c34339e 100644
--- a/build_tools/python/benchmark_suites/iree/riscv_benchmarks.py
+++ b/build_tools/python/benchmark_suites/iree/riscv_benchmarks.py
@@ -12,67 +12,77 @@
 
 
 class Linux_RV64_Benchmarks(object):
-  """Benchmarks RV64 on Linux devices."""
+    """Benchmarks RV64 on Linux devices."""
 
-  RV64_CPU_TARGET = iree_definitions.CompileTarget(
-      target_backend=iree_definitions.TargetBackend.LLVM_CPU,
-      target_architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
-      target_abi=iree_definitions.TargetABI.LINUX_GNU)
-  DEFAULT_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
-      id=unique_ids.IREE_COMPILE_CONFIG_LINUX_RV64_GENERIC_DEFAULTS,
-      tags=["default-flags"],
-      compile_targets=[RV64_CPU_TARGET])
-  MODELS = [
-      tflite_models.DEEPLABV3_FP32,
-      tflite_models.MOBILEBERT_FP32,
-      tflite_models.MOBILENET_V1,
-      tflite_models.MOBILEBERT_INT8,
-      tflite_models.PERSON_DETECT_INT8,
-      tflite_models.EFFICIENTNET_INT8,
-      tflite_models.MOBILENET_V2_INT8,
-  ]
-
-  def generate(
-      self
-  ) -> Tuple[List[iree_definitions.ModuleGenerationConfig],
-             List[iree_definitions.E2EModelRunConfig]]:
-    """Generates IREE compile and run configs."""
-    gen_configs = [
-        iree_definitions.ModuleGenerationConfig.build(
-            compile_config=self.DEFAULT_COMPILE_CONFIG,
-            imported_model=iree_definitions.ImportedModel.from_model(model))
-        for model in self.MODELS
+    RV64_CPU_TARGET = iree_definitions.CompileTarget(
+        target_backend=iree_definitions.TargetBackend.LLVM_CPU,
+        target_architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
+        target_abi=iree_definitions.TargetABI.LINUX_GNU,
+    )
+    DEFAULT_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
+        id=unique_ids.IREE_COMPILE_CONFIG_LINUX_RV64_GENERIC_DEFAULTS,
+        tags=["default-flags"],
+        compile_targets=[RV64_CPU_TARGET],
+    )
+    MODELS = [
+        tflite_models.DEEPLABV3_FP32,
+        tflite_models.MOBILEBERT_FP32,
+        tflite_models.MOBILENET_V1,
+        tflite_models.MOBILEBERT_INT8,
+        tflite_models.PERSON_DETECT_INT8,
+        tflite_models.EFFICIENTNET_INT8,
+        tflite_models.MOBILENET_V2_INT8,
     ]
-    return (gen_configs, [])
+
+    def generate(
+        self,
+    ) -> Tuple[
+        List[iree_definitions.ModuleGenerationConfig],
+        List[iree_definitions.E2EModelRunConfig],
+    ]:
+        """Generates IREE compile and run configs."""
+        gen_configs = [
+            iree_definitions.ModuleGenerationConfig.build(
+                compile_config=self.DEFAULT_COMPILE_CONFIG,
+                imported_model=iree_definitions.ImportedModel.from_model(model),
+            )
+            for model in self.MODELS
+        ]
+        return (gen_configs, [])
 
 
 class Linux_RV32_Benchmarks(object):
-  """Benchmarks RV32 on Linux devices."""
+    """Benchmarks RV32 on Linux devices."""
 
-  RV32_CPU_TARGET = iree_definitions.CompileTarget(
-      target_backend=iree_definitions.TargetBackend.LLVM_CPU,
-      target_architecture=common_definitions.DeviceArchitecture.RV32_GENERIC,
-      target_abi=iree_definitions.TargetABI.LINUX_GNU)
-  DEFAULT_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
-      id=unique_ids.IREE_COMPILE_CONFIG_LINUX_RV32_GENERIC_DEFAULTS,
-      tags=["default-flags"],
-      compile_targets=[RV32_CPU_TARGET])
-  MODELS = [
-      tflite_models.EFFICIENTNET_INT8,
-      tflite_models.MOBILEBERT_INT8,
-      tflite_models.PERSON_DETECT_INT8,
-      tflite_models.MOBILENET_V2_INT8,
-  ]
-
-  def generate(
-      self
-  ) -> Tuple[List[iree_definitions.ModuleGenerationConfig],
-             List[iree_definitions.E2EModelRunConfig]]:
-    """Generates IREE compile and run configs."""
-    gen_configs = [
-        iree_definitions.ModuleGenerationConfig.build(
-            compile_config=self.DEFAULT_COMPILE_CONFIG,
-            imported_model=iree_definitions.ImportedModel.from_model(model))
-        for model in self.MODELS
+    RV32_CPU_TARGET = iree_definitions.CompileTarget(
+        target_backend=iree_definitions.TargetBackend.LLVM_CPU,
+        target_architecture=common_definitions.DeviceArchitecture.RV32_GENERIC,
+        target_abi=iree_definitions.TargetABI.LINUX_GNU,
+    )
+    DEFAULT_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
+        id=unique_ids.IREE_COMPILE_CONFIG_LINUX_RV32_GENERIC_DEFAULTS,
+        tags=["default-flags"],
+        compile_targets=[RV32_CPU_TARGET],
+    )
+    MODELS = [
+        tflite_models.EFFICIENTNET_INT8,
+        tflite_models.MOBILEBERT_INT8,
+        tflite_models.PERSON_DETECT_INT8,
+        tflite_models.MOBILENET_V2_INT8,
     ]
-    return (gen_configs, [])
+
+    def generate(
+        self,
+    ) -> Tuple[
+        List[iree_definitions.ModuleGenerationConfig],
+        List[iree_definitions.E2EModelRunConfig],
+    ]:
+        """Generates IREE compile and run configs."""
+        gen_configs = [
+            iree_definitions.ModuleGenerationConfig.build(
+                compile_config=self.DEFAULT_COMPILE_CONFIG,
+                imported_model=iree_definitions.ImportedModel.from_model(model),
+            )
+            for model in self.MODELS
+        ]
+        return (gen_configs, [])
diff --git a/build_tools/python/benchmark_suites/iree/utils.py b/build_tools/python/benchmark_suites/iree/utils.py
index 0b614d1..c800cf0 100644
--- a/build_tools/python/benchmark_suites/iree/utils.py
+++ b/build_tools/python/benchmark_suites/iree/utils.py
@@ -11,24 +11,23 @@
 
 
 def generate_e2e_model_run_configs(
-    module_generation_configs: Sequence[
-        iree_definitions.ModuleGenerationConfig],
+    module_generation_configs: Sequence[iree_definitions.ModuleGenerationConfig],
     module_execution_configs: Sequence[iree_definitions.ModuleExecutionConfig],
     device_specs: Sequence[common_definitions.DeviceSpec],
     tags: Optional[Sequence[str]] = None,
-    tool: iree_definitions.E2EModelRunTool = iree_definitions.E2EModelRunTool.
-    IREE_BENCHMARK_MODULE
+    tool: iree_definitions.E2EModelRunTool = iree_definitions.E2EModelRunTool.IREE_BENCHMARK_MODULE,
 ) -> List[iree_definitions.E2EModelRunConfig]:
-  """Generates the run specs from the product of compile specs and run configs.
-  """
-  return [
-      iree_definitions.E2EModelRunConfig.build(
-          module_generation_config=module_generation_config,
-          module_execution_config=module_execution_config,
-          target_device_spec=device_spec,
-          input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
-          tool=tool,
-          tags=tags) for module_generation_config,
-      module_execution_config, device_spec in itertools.product(
-          module_generation_configs, module_execution_configs, device_specs)
-  ]
+    """Generates the run specs from the product of compile specs and run configs."""
+    return [
+        iree_definitions.E2EModelRunConfig.build(
+            module_generation_config=module_generation_config,
+            module_execution_config=module_execution_config,
+            target_device_spec=device_spec,
+            input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
+            tool=tool,
+            tags=tags,
+        )
+        for module_generation_config, module_execution_config, device_spec in itertools.product(
+            module_generation_configs, module_execution_configs, device_specs
+        )
+    ]
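
`generate_e2e_model_run_configs` above also shows two smaller points: a keyword default such as `tool=...IREE_BENCHMARK_MODULE` stays on a single long line because Black has no place to split it, and the comprehension's `for` clause over `itertools.product` remains on one line with only the product's arguments wrapped. The sketch below mirrors that comprehension shape with hypothetical config and device placeholders:

```python
import itertools

# Hypothetical stand-ins for generation configs, execution configs, and devices.
generation_configs = ["gen-default", "gen-experimental"]
execution_configs = ["exec-local-sync", "exec-local-task-4"]
device_specs = ["pixel-6-pro", "moto-edge-x30", "pixel-4"]

# Mirrors the comprehension in utils.py: one entry per element of the cartesian
# product, with the element expression exploded and the for-clause kept whole.
run_configs = [
    {"generation": gen, "execution": execution, "device": device}
    for gen, execution, device in itertools.product(
        generation_configs, execution_configs, device_specs
    )
]

print(len(run_configs))  # 2 * 2 * 3 = 12
```
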
diff --git a/build_tools/python/benchmark_suites/iree/vmvx_benchmarks.py b/build_tools/python/benchmark_suites/iree/vmvx_benchmarks.py
index 0bfd739..6461a5d 100644
--- a/build_tools/python/benchmark_suites/iree/vmvx_benchmarks.py
+++ b/build_tools/python/benchmark_suites/iree/vmvx_benchmarks.py
@@ -15,40 +15,50 @@
 
 
 class Android_VMVX_Benchmarks(object):
-  """Benchmarks VMVX on Android devices."""
+    """Benchmarks VMVX on Android devices."""
 
-  VMVX_CPU_TARGET = iree_definitions.CompileTarget(
-      target_backend=iree_definitions.TargetBackend.VMVX,
-      target_architecture=common_definitions.DeviceArchitecture.VMVX_GENERIC,
-      target_abi=iree_definitions.TargetABI.VMVX)
-  EXPERIMENTAL_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
-      id=unique_ids.IREE_COMPILE_CONFIG_VMVX_GENERIC_EXPERIMENTAL,
-      tags=["experimental-flags"],
-      compile_targets=[VMVX_CPU_TARGET])
+    VMVX_CPU_TARGET = iree_definitions.CompileTarget(
+        target_backend=iree_definitions.TargetBackend.VMVX,
+        target_architecture=common_definitions.DeviceArchitecture.VMVX_GENERIC,
+        target_abi=iree_definitions.TargetABI.VMVX,
+    )
+    EXPERIMENTAL_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
+        id=unique_ids.IREE_COMPILE_CONFIG_VMVX_GENERIC_EXPERIMENTAL,
+        tags=["experimental-flags"],
+        compile_targets=[VMVX_CPU_TARGET],
+    )
 
-  def generate(
-      self
-  ) -> Tuple[List[iree_definitions.ModuleGenerationConfig],
-             List[iree_definitions.E2EModelRunConfig]]:
-    """Generates IREE compile and run configs."""
+    def generate(
+        self,
+    ) -> Tuple[
+        List[iree_definitions.ModuleGenerationConfig],
+        List[iree_definitions.E2EModelRunConfig],
+    ]:
+        """Generates IREE compile and run configs."""
 
-    gen_configs = [
-        iree_definitions.ModuleGenerationConfig.build(
-            compile_config=self.EXPERIMENTAL_COMPILE_CONFIG,
-            imported_model=iree_definitions.ImportedModel.from_model(model)) for
-        model in [tflite_models.MOBILENET_V2, tflite_models.MOBILENET_V3SMALL]
-    ]
-    default_execution_configs = [
-        benchmark_suites.iree.module_execution_configs.
-        get_vmvx_system_scheduling_local_task_config(thread_num=4)
-    ]
-    big_cores_devices = device_collections.DEFAULT_DEVICE_COLLECTION.query_device_specs(
-        architecture=common_definitions.DeviceArchitecture.ARMV8_2_A_GENERIC,
-        host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
-        device_parameters={"big-cores"})
-    run_configs = benchmark_suites.iree.utils.generate_e2e_model_run_configs(
-        module_generation_configs=gen_configs,
-        module_execution_configs=default_execution_configs,
-        device_specs=big_cores_devices)
+        gen_configs = [
+            iree_definitions.ModuleGenerationConfig.build(
+                compile_config=self.EXPERIMENTAL_COMPILE_CONFIG,
+                imported_model=iree_definitions.ImportedModel.from_model(model),
+            )
+            for model in [tflite_models.MOBILENET_V2, tflite_models.MOBILENET_V3SMALL]
+        ]
+        default_execution_configs = [
+            benchmark_suites.iree.module_execution_configs.get_vmvx_system_scheduling_local_task_config(
+                thread_num=4
+            )
+        ]
+        big_cores_devices = (
+            device_collections.DEFAULT_DEVICE_COLLECTION.query_device_specs(
+                architecture=common_definitions.DeviceArchitecture.ARMV8_2_A_GENERIC,
+                host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
+                device_parameters={"big-cores"},
+            )
+        )
+        run_configs = benchmark_suites.iree.utils.generate_e2e_model_run_configs(
+            module_generation_configs=gen_configs,
+            module_execution_configs=default_execution_configs,
+            device_specs=big_cores_devices,
+        )
 
-    return (gen_configs, run_configs)
+        return (gen_configs, run_configs)
diff --git a/build_tools/python/benchmark_suites/iree/vulkan_nvidia_benchmarks.py b/build_tools/python/benchmark_suites/iree/vulkan_nvidia_benchmarks.py
index 1bf0794..80fa8be 100644
--- a/build_tools/python/benchmark_suites/iree/vulkan_nvidia_benchmarks.py
+++ b/build_tools/python/benchmark_suites/iree/vulkan_nvidia_benchmarks.py
@@ -15,88 +15,107 @@
 
 
 def _get_compile_flag():
-  preprocess_passes = [
-      "iree-flow-detach-elementwise-from-named-ops",
-      "iree-preprocessing-convert-conv2d-to-img2col",
-      "iree-flow-convert-1x1-filter-conv2d-to-matmul",
-      "iree-preprocessing-pad-linalg-ops{pad-size=32}",
-  ]
-  preprocess_flag_template = \
-    "--iree-preprocessing-pass-pipeline=builtin.module(func.func({}))"
-  return [
-      "--iree-stream-resource-index-bits=64", "--iree-vm-target-index-bits=64",
-      preprocess_flag_template.format(",".join(preprocess_passes))
-  ]
+    preprocess_passes = [
+        "iree-flow-detach-elementwise-from-named-ops",
+        "iree-preprocessing-convert-conv2d-to-img2col",
+        "iree-flow-convert-1x1-filter-conv2d-to-matmul",
+        "iree-preprocessing-pad-linalg-ops{pad-size=32}",
+    ]
+    preprocess_flag_template = (
+        "--iree-preprocessing-pass-pipeline=builtin.module(func.func({}))"
+    )
+    return [
+        "--iree-stream-resource-index-bits=64",
+        "--iree-vm-target-index-bits=64",
+        preprocess_flag_template.format(",".join(preprocess_passes)),
+    ]
 
 
 class Linux_Vulkan_NVIDIA_Benchmarks(object):
-  """Benchmarks on Linux Vulkan NVIDIA devices."""
+    """Benchmarks on Linux Vulkan NVIDIA devices."""
 
-  AMPERE_TARGET = iree_definitions.CompileTarget(
-      target_architecture=common_definitions.DeviceArchitecture.NVIDIA_AMPERE,
-      target_backend=iree_definitions.TargetBackend.VULKAN_SPIRV,
-      target_abi=iree_definitions.TargetABI.VULKAN_LINUX)
-  PASCAL_TARGET = iree_definitions.CompileTarget(
-      target_architecture=common_definitions.DeviceArchitecture.NVIDIA_PASCAL,
-      target_backend=iree_definitions.TargetBackend.VULKAN_SPIRV,
-      target_abi=iree_definitions.TargetABI.VULKAN_LINUX)
+    AMPERE_TARGET = iree_definitions.CompileTarget(
+        target_architecture=common_definitions.DeviceArchitecture.NVIDIA_AMPERE,
+        target_backend=iree_definitions.TargetBackend.VULKAN_SPIRV,
+        target_abi=iree_definitions.TargetABI.VULKAN_LINUX,
+    )
+    PASCAL_TARGET = iree_definitions.CompileTarget(
+        target_architecture=common_definitions.DeviceArchitecture.NVIDIA_PASCAL,
+        target_backend=iree_definitions.TargetBackend.VULKAN_SPIRV,
+        target_abi=iree_definitions.TargetABI.VULKAN_LINUX,
+    )
 
-  SIMT_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
-      id=unique_ids.IREE_COMPILE_CONFIG_LINUX_VULKAN_SD_SIMT,
-      tags=["experimental-flags", "simt"],
-      compile_targets=[PASCAL_TARGET],
-      extra_flags=_get_compile_flag())
-  TENSORCORE_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
-      id=unique_ids.IREE_COMPILE_CONFIG_LINUX_VULKAN_SD_TENSORCORE,
-      tags=["experimental-flags", "tensorcore"],
-      compile_targets=[AMPERE_TARGET],
-      extra_flags=_get_compile_flag())
+    SIMT_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
+        id=unique_ids.IREE_COMPILE_CONFIG_LINUX_VULKAN_SD_SIMT,
+        tags=["experimental-flags", "simt"],
+        compile_targets=[PASCAL_TARGET],
+        extra_flags=_get_compile_flag(),
+    )
+    TENSORCORE_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
+        id=unique_ids.IREE_COMPILE_CONFIG_LINUX_VULKAN_SD_TENSORCORE,
+        tags=["experimental-flags", "tensorcore"],
+        compile_targets=[AMPERE_TARGET],
+        extra_flags=_get_compile_flag(),
+    )
 
-  def _generate_configs(
-      self,
-      models: Sequence[common_definitions.Model],
-      compile_config: iree_definitions.CompileConfig,
-      execution_config: iree_definitions.
-      ModuleExecutionConfig = module_execution_configs.VULKAN_CONFIG,
-      tags: Sequence[str] = [],
-  ) -> Tuple[List[iree_definitions.ModuleGenerationConfig],
-             List[iree_definitions.E2EModelRunConfig]]:
-    gen_configs = [
-        iree_definitions.ModuleGenerationConfig.build(
-            compile_config=compile_config,
-            imported_model=iree_definitions.ImportedModel.from_model(model),
-            tags=tags) for model in models
-    ]
-    # We use the same NVIDIA Ampere GPU for benchmarking code generated for
-    # both Pascal and Ampere architectures. What we care is not exactly these
-    # two architectures per se; they represent SIMT and tensorcore CodeGen
-    # paths that we would want both to work. Ampere is able to run both SIMT
-    # and tensorcore cases.
-    ampere_devices = device_collections.DEFAULT_DEVICE_COLLECTION.query_device_specs(
-        architecture=common_definitions.DeviceArchitecture.NVIDIA_AMPERE,
-        host_environment=common_definitions.HostEnvironment.LINUX_X86_64)
-    run_module_configs = benchmark_suites.iree.utils.generate_e2e_model_run_configs(
-        module_generation_configs=gen_configs,
-        module_execution_configs=[execution_config],
-        device_specs=ampere_devices,
-        tags=tags)
+    def _generate_configs(
+        self,
+        models: Sequence[common_definitions.Model],
+        compile_config: iree_definitions.CompileConfig,
+        execution_config: iree_definitions.ModuleExecutionConfig = module_execution_configs.VULKAN_CONFIG,
+        tags: Sequence[str] = [],
+    ) -> Tuple[
+        List[iree_definitions.ModuleGenerationConfig],
+        List[iree_definitions.E2EModelRunConfig],
+    ]:
+        gen_configs = [
+            iree_definitions.ModuleGenerationConfig.build(
+                compile_config=compile_config,
+                imported_model=iree_definitions.ImportedModel.from_model(model),
+                tags=tags,
+            )
+            for model in models
+        ]
+        # We use the same NVIDIA Ampere GPU for benchmarking code generated for
+        # both Pascal and Ampere architectures. What we care about is not these
+        # two architectures per se; they represent the SIMT and tensorcore CodeGen
+        # paths, both of which we want to work. Ampere is able to run both SIMT
+        # and tensorcore cases.
+        ampere_devices = (
+            device_collections.DEFAULT_DEVICE_COLLECTION.query_device_specs(
+                architecture=common_definitions.DeviceArchitecture.NVIDIA_AMPERE,
+                host_environment=common_definitions.HostEnvironment.LINUX_X86_64,
+            )
+        )
+        run_module_configs = benchmark_suites.iree.utils.generate_e2e_model_run_configs(
+            module_generation_configs=gen_configs,
+            module_execution_configs=[execution_config],
+            device_specs=ampere_devices,
+            tags=tags,
+        )
 
-    return (gen_configs, run_module_configs)
+        return (gen_configs, run_module_configs)
 
-  def generate(
-      self
-  ) -> Tuple[List[iree_definitions.ModuleGenerationConfig],
-             List[iree_definitions.E2EModelRunConfig]]:
-    """Generates IREE compile and run configs."""
-    # The `vulkan-nvidia`` tag is required to put them into the Vulkan NVIDIA
-    # benchmark preset.
-    tensorcore_gen_configs, tensorcore_run_configs = self._generate_configs(
-        model_groups.VULKAN_MODELS,
-        self.TENSORCORE_COMPILE_CONFIG,
-        tags=[benchmark_tags.VULKAN_NVIDIA])
-    simt_gen_configs, simt_run_configs = self._generate_configs(
-        model_groups.VULKAN_MODELS,
-        self.SIMT_COMPILE_CONFIG,
-        tags=[benchmark_tags.VULKAN_NVIDIA])
-    return (tensorcore_gen_configs + simt_gen_configs,
-            tensorcore_run_configs + simt_run_configs)
+    def generate(
+        self,
+    ) -> Tuple[
+        List[iree_definitions.ModuleGenerationConfig],
+        List[iree_definitions.E2EModelRunConfig],
+    ]:
+        """Generates IREE compile and run configs."""
+        # The `vulkan-nvidia` tag is required to put them into the Vulkan NVIDIA
+        # benchmark preset.
+        tensorcore_gen_configs, tensorcore_run_configs = self._generate_configs(
+            model_groups.VULKAN_MODELS,
+            self.TENSORCORE_COMPILE_CONFIG,
+            tags=[benchmark_tags.VULKAN_NVIDIA],
+        )
+        simt_gen_configs, simt_run_configs = self._generate_configs(
+            model_groups.VULKAN_MODELS,
+            self.SIMT_COMPILE_CONFIG,
+            tags=[benchmark_tags.VULKAN_NVIDIA],
+        )
+        return (
+            tensorcore_gen_configs + simt_gen_configs,
+            tensorcore_run_configs + simt_run_configs,
+        )
diff --git a/build_tools/python/benchmark_suites/iree/x86_64_benchmarks.py b/build_tools/python/benchmark_suites/iree/x86_64_benchmarks.py
index 8bc7ebf..96be90e 100644
--- a/build_tools/python/benchmark_suites/iree/x86_64_benchmarks.py
+++ b/build_tools/python/benchmark_suites/iree/x86_64_benchmarks.py
@@ -16,101 +16,122 @@
 
 
 class Linux_x86_64_Benchmarks(object):
-  """Benchmarks on x86_64 linux devices."""
+    """Benchmarks on x86_64 linux devices."""
 
-  CASCADELAKE_CPU_TARGET = iree_definitions.CompileTarget(
-      target_architecture=common_definitions.DeviceArchitecture.
-      X86_64_CASCADELAKE,
-      target_backend=iree_definitions.TargetBackend.LLVM_CPU,
-      target_abi=iree_definitions.TargetABI.LINUX_GNU)
+    CASCADELAKE_CPU_TARGET = iree_definitions.CompileTarget(
+        target_architecture=common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
+        target_backend=iree_definitions.TargetBackend.LLVM_CPU,
+        target_abi=iree_definitions.TargetABI.LINUX_GNU,
+    )
 
-  CASCADELAKE_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
-      id=unique_ids.IREE_COMPILE_CONFIG_LINUX_CASCADELAKE,
-      tags=["default-flags"],
-      compile_targets=[CASCADELAKE_CPU_TARGET])
-  CASCADELAKE_FUSE_PADDING_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
-      id=unique_ids.IREE_COMPILE_CONFIG_LINUX_CASCADELAKE_FUSE_PADDING,
-      tags=["experimental-flags", "fuse-padding"],
-      compile_targets=[CASCADELAKE_CPU_TARGET],
-      extra_flags=[
-          "--iree-flow-enable-fuse-padding-into-linalg-consumer-ops",
-          "--iree-llvmcpu-enable-pad-consumer-fusion"
-      ])
+    CASCADELAKE_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
+        id=unique_ids.IREE_COMPILE_CONFIG_LINUX_CASCADELAKE,
+        tags=["default-flags"],
+        compile_targets=[CASCADELAKE_CPU_TARGET],
+    )
+    CASCADELAKE_FUSE_PADDING_COMPILE_CONFIG = iree_definitions.CompileConfig.build(
+        id=unique_ids.IREE_COMPILE_CONFIG_LINUX_CASCADELAKE_FUSE_PADDING,
+        tags=["experimental-flags", "fuse-padding"],
+        compile_targets=[CASCADELAKE_CPU_TARGET],
+        extra_flags=[
+            "--iree-flow-enable-fuse-padding-into-linalg-consumer-ops",
+            "--iree-llvmcpu-enable-pad-consumer-fusion",
+        ],
+    )
 
-  def _generate(
-      self,
-      benchmark_configs: List[common_definitions.CpuBenchmarkConfig],
-      compile_config: iree_definitions.CompileConfig,
-      device_specs: List[common_definitions.DeviceSpec],
-      tags: Sequence[str] = [],
-  ) -> Tuple[List[iree_definitions.ModuleGenerationConfig],
-             List[iree_definitions.E2EModelRunConfig]]:
-    gen_configs_all = []
-    run_configs_all = []
+    def _generate(
+        self,
+        benchmark_configs: List[common_definitions.CpuBenchmarkConfig],
+        compile_config: iree_definitions.CompileConfig,
+        device_specs: List[common_definitions.DeviceSpec],
+        tags: Sequence[str] = [],
+    ) -> Tuple[
+        List[iree_definitions.ModuleGenerationConfig],
+        List[iree_definitions.E2EModelRunConfig],
+    ]:
+        gen_configs_all = []
+        run_configs_all = []
 
-    # We avoid the full combinatorial explosion of testing all models with all
-    # thread counts and instead test each model with a number of threads
-    # appropriate for its size and configurations we're interested in.
-    for config in benchmark_configs:
-      gen_config = iree_definitions.ModuleGenerationConfig.build(
-          compile_config=compile_config,
-          imported_model=iree_definitions.ImportedModel.from_model(
-              config.model),
-          tags=tags)
+        # We avoid the full combinatorial explosion of testing all models with all
+        # thread counts and instead test each model with a number of threads
+        # appropriate for its size and configurations we're interested in.
+        for config in benchmark_configs:
+            gen_config = iree_definitions.ModuleGenerationConfig.build(
+                compile_config=compile_config,
+                imported_model=iree_definitions.ImportedModel.from_model(config.model),
+                tags=tags,
+            )
 
-      execution_configs = []
-      for thread in config.threads:
-        if thread == 0:
-          execution_configs.append(
-              module_execution_configs.ELF_LOCAL_SYNC_CONFIG)
-        else:
-          execution_configs.append(
-              module_execution_configs.get_elf_local_task_config(thread))
+            execution_configs = []
+            for thread in config.threads:
+                if thread == 0:
+                    execution_configs.append(
+                        module_execution_configs.ELF_LOCAL_SYNC_CONFIG
+                    )
+                else:
+                    execution_configs.append(
+                        module_execution_configs.get_elf_local_task_config(thread)
+                    )
 
-      run_configs = benchmark_suites.iree.utils.generate_e2e_model_run_configs(
-          module_generation_configs=[gen_config],
-          module_execution_configs=execution_configs,
-          device_specs=device_specs,
-          tags=tags)
+            run_configs = benchmark_suites.iree.utils.generate_e2e_model_run_configs(
+                module_generation_configs=[gen_config],
+                module_execution_configs=execution_configs,
+                device_specs=device_specs,
+                tags=tags,
+            )
 
-      gen_configs_all.append(gen_config)
-      run_configs_all.extend(run_configs)
+            gen_configs_all.append(gen_config)
+            run_configs_all.extend(run_configs)
 
-    return (gen_configs_all, run_configs_all)
+        return (gen_configs_all, run_configs_all)
 
-  def generate(
-      self
-  ) -> Tuple[List[iree_definitions.ModuleGenerationConfig],
-             List[iree_definitions.E2EModelRunConfig]]:
-    """Generates IREE compile and run configs."""
+    def generate(
+        self,
+    ) -> Tuple[
+        List[iree_definitions.ModuleGenerationConfig],
+        List[iree_definitions.E2EModelRunConfig],
+    ]:
+        """Generates IREE compile and run configs."""
 
-    cascadelake_devices = device_collections.DEFAULT_DEVICE_COLLECTION.query_device_specs(
-        architecture=common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
-        host_environment=common_definitions.HostEnvironment.LINUX_X86_64)
+        cascadelake_devices = (
+            device_collections.DEFAULT_DEVICE_COLLECTION.query_device_specs(
+                architecture=common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
+                host_environment=common_definitions.HostEnvironment.LINUX_X86_64,
+            )
+        )
 
-    # The X86_64 tag is required to put them into the X86_64 benchmark preset.
-    default_gen_configs, default_run_configs = self._generate(
-        model_groups.X86_64_BENCHMARK_CONFIG,
-        self.CASCADELAKE_COMPILE_CONFIG,
-        cascadelake_devices,
-        tags=[benchmark_tags.X86_64])
-    experimental_gen_configs, experimental_run_configs = self._generate(
-        model_groups.X86_64_BENCHMARK_CONFIG_EXPERIMENTAL,
-        self.CASCADELAKE_FUSE_PADDING_COMPILE_CONFIG,
-        cascadelake_devices,
-        tags=[benchmark_tags.X86_64])
+        # The X86_64 tag is required to put them into the X86_64 benchmark preset.
+        default_gen_configs, default_run_configs = self._generate(
+            model_groups.X86_64_BENCHMARK_CONFIG,
+            self.CASCADELAKE_COMPILE_CONFIG,
+            cascadelake_devices,
+            tags=[benchmark_tags.X86_64],
+        )
+        experimental_gen_configs, experimental_run_configs = self._generate(
+            model_groups.X86_64_BENCHMARK_CONFIG_EXPERIMENTAL,
+            self.CASCADELAKE_FUSE_PADDING_COMPILE_CONFIG,
+            cascadelake_devices,
+            tags=[benchmark_tags.X86_64],
+        )
 
-    large_gen_configs, large_run_configs = self._generate(
-        model_groups.X86_64_BENCHMARK_CONFIG_LONG,
-        self.CASCADELAKE_COMPILE_CONFIG,
-        cascadelake_devices,
-        tags=[benchmark_tags.X86_64, benchmark_tags.LARGE])
+        large_gen_configs, large_run_configs = self._generate(
+            model_groups.X86_64_BENCHMARK_CONFIG_LONG,
+            self.CASCADELAKE_COMPILE_CONFIG,
+            cascadelake_devices,
+            tags=[benchmark_tags.X86_64, benchmark_tags.LARGE],
+        )
 
-    return (default_gen_configs + experimental_gen_configs + large_gen_configs,
-            default_run_configs + experimental_run_configs + large_run_configs)
+        return (
+            default_gen_configs + experimental_gen_configs + large_gen_configs,
+            default_run_configs + experimental_run_configs + large_run_configs,
+        )
 
 
-def generate() -> Tuple[List[iree_definitions.ModuleGenerationConfig],
-                        List[iree_definitions.E2EModelRunConfig]]:
-  """Generates all compile and run configs for IREE benchmarks."""
-  return Linux_x86_64_Benchmarks().generate()
+def generate() -> (
+    Tuple[
+        List[iree_definitions.ModuleGenerationConfig],
+        List[iree_definitions.E2EModelRunConfig],
+    ]
+):
+    """Generates all compile and run configs for IREE benchmarks."""
+    return Linux_x86_64_Benchmarks().generate()
diff --git a/build_tools/python/cmake_builder/rules.py b/build_tools/python/cmake_builder/rules.py
index ece750d..33d1524 100644
--- a/build_tools/python/cmake_builder/rules.py
+++ b/build_tools/python/cmake_builder/rules.py
@@ -36,134 +36,157 @@
 
 
 def _get_string_list(values: Sequence[str], quote: bool = True) -> List[str]:
-  if quote:
-    return [f'"{value}"' for value in values]
-  return list(values)
+    if quote:
+        return [f'"{value}"' for value in values]
+    return list(values)
 
 
 def _get_block_body(body: List[str]) -> List[str]:
-  return [INDENT_SPACES + line for line in body]
+    return [INDENT_SPACES + line for line in body]
 
 
-def _get_string_arg_block(keyword: str,
-                          value: Optional[str],
-                          quote: bool = True) -> List[str]:
-  if value is None:
-    return []
-  if quote:
-    value = f'"{value}"'
-  return [f"{keyword} {value}"]
+def _get_string_arg_block(
+    keyword: str, value: Optional[str], quote: bool = True
+) -> List[str]:
+    if value is None:
+        return []
+    if quote:
+        value = f'"{value}"'
+    return [f"{keyword} {value}"]
 
 
-def _get_string_list_arg_block(keyword: str,
-                               values: Sequence[str],
-                               quote: bool = True) -> List[str]:
-  if len(values) == 0:
-    return []
-  body = _get_string_list(values, quote)
-  return [keyword] + _get_block_body(body)
+def _get_string_list_arg_block(
+    keyword: str, values: Sequence[str], quote: bool = True
+) -> List[str]:
+    if len(values) == 0:
+        return []
+    body = _get_string_list(values, quote)
+    return [keyword] + _get_block_body(body)
 
 
 def _get_option_arg_block(keyword: str, value: Optional[bool]) -> List[str]:
-  if value is True:
-    return [keyword]
-  return []
+    if value is True:
+        return [keyword]
+    return []
 
 
-def _build_call_rule(rule_name: str,
-                     parameter_blocks: Sequence[List[str]]) -> List[str]:
-  output = [f"{rule_name}("]
-  for block in parameter_blocks:
-    if len(block) == 0:
-      continue
-    output.extend(_get_block_body(block))
-  output.append(")")
-  return output
+def _build_call_rule(
+    rule_name: str, parameter_blocks: Sequence[List[str]]
+) -> List[str]:
+    output = [f"{rule_name}("]
+    for block in parameter_blocks:
+        if len(block) == 0:
+            continue
+        output.extend(_get_block_body(block))
+    output.append(")")
+    return output
 
 
 def _convert_block_to_string(block: List[str]) -> str:
-  # Hack to append the terminating newline and only copies the list instead of
-  # the whole string.
-  return "\n".join(block + [""])
+    # Hack to append the terminating newline; this only copies the list instead
+    # of copying the whole string.
+    return "\n".join(block + [""])
 
 
-def build_iree_bytecode_module(target_name: str,
-                               src: str,
-                               module_name: str,
-                               flags: List[str] = [],
-                               compile_tool_target: Optional[str] = None,
-                               c_identifier: Optional[str] = None,
-                               static_lib_path: Optional[str] = None,
-                               deps: List[str] = [],
-                               friendly_name: Optional[str] = None,
-                               testonly: bool = False,
-                               public: bool = True) -> str:
-  name_block = _get_string_arg_block("NAME", target_name)
-  src_block = _get_string_arg_block("SRC", src)
-  module_name_block = _get_string_arg_block("MODULE_FILE_NAME", module_name)
-  c_identifier_block = _get_string_arg_block("C_IDENTIFIER", c_identifier)
-  static_lib_block = _get_string_arg_block("STATIC_LIB_PATH", static_lib_path)
-  compile_tool_target_block = _get_string_arg_block("COMPILE_TOOL",
-                                                    compile_tool_target)
-  flags_block = _get_string_list_arg_block("FLAGS", flags)
-  deps_block = _get_string_list_arg_block("DEPS", deps)
-  friendly_name_block = _get_string_arg_block("FRIENDLY_NAME", friendly_name)
-  testonly_block = _get_option_arg_block("TESTONLY", testonly)
-  public_block = _get_option_arg_block("PUBLIC", public)
-  return _convert_block_to_string(
-      _build_call_rule(rule_name="iree_bytecode_module",
-                       parameter_blocks=[
-                           name_block, src_block, module_name_block,
-                           c_identifier_block, compile_tool_target_block,
-                           static_lib_block, flags_block, friendly_name_block,
-                           deps_block, testonly_block, public_block
-                       ]))
+def build_iree_bytecode_module(
+    target_name: str,
+    src: str,
+    module_name: str,
+    flags: List[str] = [],
+    compile_tool_target: Optional[str] = None,
+    c_identifier: Optional[str] = None,
+    static_lib_path: Optional[str] = None,
+    deps: List[str] = [],
+    friendly_name: Optional[str] = None,
+    testonly: bool = False,
+    public: bool = True,
+) -> str:
+    name_block = _get_string_arg_block("NAME", target_name)
+    src_block = _get_string_arg_block("SRC", src)
+    module_name_block = _get_string_arg_block("MODULE_FILE_NAME", module_name)
+    c_identifier_block = _get_string_arg_block("C_IDENTIFIER", c_identifier)
+    static_lib_block = _get_string_arg_block("STATIC_LIB_PATH", static_lib_path)
+    compile_tool_target_block = _get_string_arg_block(
+        "COMPILE_TOOL", compile_tool_target
+    )
+    flags_block = _get_string_list_arg_block("FLAGS", flags)
+    deps_block = _get_string_list_arg_block("DEPS", deps)
+    friendly_name_block = _get_string_arg_block("FRIENDLY_NAME", friendly_name)
+    testonly_block = _get_option_arg_block("TESTONLY", testonly)
+    public_block = _get_option_arg_block("PUBLIC", public)
+    return _convert_block_to_string(
+        _build_call_rule(
+            rule_name="iree_bytecode_module",
+            parameter_blocks=[
+                name_block,
+                src_block,
+                module_name_block,
+                c_identifier_block,
+                compile_tool_target_block,
+                static_lib_block,
+                flags_block,
+                friendly_name_block,
+                deps_block,
+                testonly_block,
+                public_block,
+            ],
+        )
+    )
 
 
-def build_iree_fetch_artifact(target_name: str, source_url: str, output: str,
-                              unpack: bool) -> str:
-  name_block = _get_string_arg_block("NAME", target_name)
-  source_url_block = _get_string_arg_block("SOURCE_URL", source_url)
-  output_block = _get_string_arg_block("OUTPUT", output)
-  unpack_block = _get_option_arg_block("UNPACK", unpack)
-  return _convert_block_to_string(
-      _build_call_rule(rule_name="iree_fetch_artifact",
-                       parameter_blocks=[
-                           name_block, source_url_block, output_block,
-                           unpack_block
-                       ]))
+def build_iree_fetch_artifact(
+    target_name: str, source_url: str, output: str, unpack: bool
+) -> str:
+    name_block = _get_string_arg_block("NAME", target_name)
+    source_url_block = _get_string_arg_block("SOURCE_URL", source_url)
+    output_block = _get_string_arg_block("OUTPUT", output)
+    unpack_block = _get_option_arg_block("UNPACK", unpack)
+    return _convert_block_to_string(
+        _build_call_rule(
+            rule_name="iree_fetch_artifact",
+            parameter_blocks=[name_block, source_url_block, output_block, unpack_block],
+        )
+    )
 
 
-def build_iree_import_tf_model(target_path: str, source: str,
-                               import_flags: List[str],
-                               output_mlir_file: str) -> str:
-  target_name_block = _get_string_arg_block("TARGET_NAME", target_path)
-  source_block = _get_string_arg_block("SOURCE", source)
-  import_flags_block = _get_string_list_arg_block("IMPORT_FLAGS", import_flags)
-  output_mlir_file_block = _get_string_arg_block("OUTPUT_MLIR_FILE",
-                                                 output_mlir_file)
-  return _convert_block_to_string(
-      _build_call_rule(rule_name="iree_import_tf_model",
-                       parameter_blocks=[
-                           target_name_block, source_block, import_flags_block,
-                           output_mlir_file_block
-                       ]))
+def build_iree_import_tf_model(
+    target_path: str, source: str, import_flags: List[str], output_mlir_file: str
+) -> str:
+    target_name_block = _get_string_arg_block("TARGET_NAME", target_path)
+    source_block = _get_string_arg_block("SOURCE", source)
+    import_flags_block = _get_string_list_arg_block("IMPORT_FLAGS", import_flags)
+    output_mlir_file_block = _get_string_arg_block("OUTPUT_MLIR_FILE", output_mlir_file)
+    return _convert_block_to_string(
+        _build_call_rule(
+            rule_name="iree_import_tf_model",
+            parameter_blocks=[
+                target_name_block,
+                source_block,
+                import_flags_block,
+                output_mlir_file_block,
+            ],
+        )
+    )
 
 
-def build_iree_import_tflite_model(target_path: str, source: str,
-                                   import_flags: List[str],
-                                   output_mlir_file: str) -> str:
-  target_name_block = _get_string_arg_block("TARGET_NAME", target_path)
-  source_block = _get_string_arg_block("SOURCE", source)
-  import_flags_block = _get_string_list_arg_block("IMPORT_FLAGS", import_flags)
-  output_mlir_file_block = _get_string_arg_block("OUTPUT_MLIR_FILE",
-                                                 output_mlir_file)
-  return _convert_block_to_string(
-      _build_call_rule(rule_name="iree_import_tflite_model",
-                       parameter_blocks=[
-                           target_name_block, source_block, import_flags_block,
-                           output_mlir_file_block
-                       ]))
+def build_iree_import_tflite_model(
+    target_path: str, source: str, import_flags: List[str], output_mlir_file: str
+) -> str:
+    target_name_block = _get_string_arg_block("TARGET_NAME", target_path)
+    source_block = _get_string_arg_block("SOURCE", source)
+    import_flags_block = _get_string_list_arg_block("IMPORT_FLAGS", import_flags)
+    output_mlir_file_block = _get_string_arg_block("OUTPUT_MLIR_FILE", output_mlir_file)
+    return _convert_block_to_string(
+        _build_call_rule(
+            rule_name="iree_import_tflite_model",
+            parameter_blocks=[
+                target_name_block,
+                source_block,
+                import_flags_block,
+                output_mlir_file_block,
+            ],
+        )
+    )
 
 
 def build_iree_benchmark_suite_module_test(
@@ -174,38 +197,50 @@
     runner_args: Sequence[str],
     timeout_secs: Optional[int] = None,
     labels: Sequence[str] = [],
-    xfail_platforms: Sequence[str] = []) -> str:
-  name_block = _get_string_arg_block("NAME", target_name)
-  driver_block = _get_string_arg_block("DRIVER", driver)
-  expected_output_block = _get_string_arg_block("EXPECTED_OUTPUT",
-                                                expected_output)
-  modules_block = _get_string_list_arg_block(
-      "MODULES",
-      [f"{platform}={path}" for platform, path in platform_module_map.items()])
-  timeout_block = _get_string_arg_block(
-      "TIMEOUT",
-      str(timeout_secs) if timeout_secs is not None else None)
-  runner_args_block = _get_string_list_arg_block("RUNNER_ARGS", runner_args)
-  labels_block = _get_string_list_arg_block("LABELS", labels)
-  xfail_platforms_block = _get_string_list_arg_block("XFAIL_PLATFORMS",
-                                                     xfail_platforms)
-  return _convert_block_to_string(
-      _build_call_rule(rule_name="iree_benchmark_suite_module_test",
-                       parameter_blocks=[
-                           name_block, driver_block, expected_output_block,
-                           timeout_block, modules_block, runner_args_block,
-                           labels_block, xfail_platforms_block
-                       ]))
+    xfail_platforms: Sequence[str] = [],
+) -> str:
+    name_block = _get_string_arg_block("NAME", target_name)
+    driver_block = _get_string_arg_block("DRIVER", driver)
+    expected_output_block = _get_string_arg_block("EXPECTED_OUTPUT", expected_output)
+    modules_block = _get_string_list_arg_block(
+        "MODULES",
+        [f"{platform}={path}" for platform, path in platform_module_map.items()],
+    )
+    timeout_block = _get_string_arg_block(
+        "TIMEOUT", str(timeout_secs) if timeout_secs is not None else None
+    )
+    runner_args_block = _get_string_list_arg_block("RUNNER_ARGS", runner_args)
+    labels_block = _get_string_list_arg_block("LABELS", labels)
+    xfail_platforms_block = _get_string_list_arg_block(
+        "XFAIL_PLATFORMS", xfail_platforms
+    )
+    return _convert_block_to_string(
+        _build_call_rule(
+            rule_name="iree_benchmark_suite_module_test",
+            parameter_blocks=[
+                name_block,
+                driver_block,
+                expected_output_block,
+                timeout_block,
+                modules_block,
+                runner_args_block,
+                labels_block,
+                xfail_platforms_block,
+            ],
+        )
+    )
 
 
 def build_add_dependencies(target: str, deps: List[str]) -> str:
-  if len(deps) == 0:
-    raise ValueError("Target dependencies can't be empty.")
-  deps_list = _get_string_list(deps, quote=False)
-  return _convert_block_to_string([f"add_dependencies({target}"] +
-                                  _get_block_body(deps_list) + [")"])
+    if len(deps) == 0:
+        raise ValueError("Target dependencies can't be empty.")
+    deps_list = _get_string_list(deps, quote=False)
+    return _convert_block_to_string(
+        [f"add_dependencies({target}"] + _get_block_body(deps_list) + [")"]
+    )
 
 
 def build_set(variable_name: str, value: str) -> str:
-  return _convert_block_to_string([f"set({variable_name}"] +
-                                  _get_block_body([value]) + [")"])
+    return _convert_block_to_string(
+        [f"set({variable_name}"] + _get_block_body([value]) + [")"]
+    )
diff --git a/build_tools/python/cmake_builder/rules_test.py b/build_tools/python/cmake_builder/rules_test.py
index be3f25f..ad44e7e 100644
--- a/build_tools/python/cmake_builder/rules_test.py
+++ b/build_tools/python/cmake_builder/rules_test.py
@@ -11,23 +11,24 @@
 
 
 class RulesTest(unittest.TestCase):
+    def test_build_iree_bytecode_module(self):
+        rule = cmake_builder.rules.build_iree_bytecode_module(
+            target_name="abcd",
+            src="abcd.mlir",
+            module_name="abcd.vmfb",
+            flags=["--backend=cpu", "--opt=3"],
+            compile_tool_target="iree_iree-compile2",
+            c_identifier="abcd.c",
+            static_lib_path="libx.a",
+            deps=["iree_libx", "iree_liby"],
+            testonly=True,
+            public=False,
+        )
 
-  def test_build_iree_bytecode_module(self):
-    rule = cmake_builder.rules.build_iree_bytecode_module(
-        target_name="abcd",
-        src="abcd.mlir",
-        module_name="abcd.vmfb",
-        flags=["--backend=cpu", "--opt=3"],
-        compile_tool_target="iree_iree-compile2",
-        c_identifier="abcd.c",
-        static_lib_path="libx.a",
-        deps=["iree_libx", "iree_liby"],
-        testonly=True,
-        public=False)
-
-    self.assertEqual(
-        rule,
-        textwrap.dedent("""\
+        self.assertEqual(
+            rule,
+            textwrap.dedent(
+                """\
         iree_bytecode_module(
           NAME "abcd"
           SRC "abcd.mlir"
@@ -43,18 +44,22 @@
             "iree_liby"
           TESTONLY
         )
-        """))
+        """
+            ),
+        )
 
-  def test_build_iree_bytecode_module_with_defaults(self):
-    rule = cmake_builder.rules.build_iree_bytecode_module(
-        target_name="abcd",
-        src="abcd.mlir",
-        module_name="abcd.vmfb",
-        flags=["--backend=cpu", "--opt=3"])
+    def test_build_iree_bytecode_module_with_defaults(self):
+        rule = cmake_builder.rules.build_iree_bytecode_module(
+            target_name="abcd",
+            src="abcd.mlir",
+            module_name="abcd.vmfb",
+            flags=["--backend=cpu", "--opt=3"],
+        )
 
-    self.assertEqual(
-        rule,
-        textwrap.dedent("""\
+        self.assertEqual(
+            rule,
+            textwrap.dedent(
+                """\
         iree_bytecode_module(
           NAME "abcd"
           SRC "abcd.mlir"
@@ -64,39 +69,47 @@
             "--opt=3"
           PUBLIC
         )
-        """))
+        """
+            ),
+        )
 
-  def test_build_iree_fetch_artifact(self):
-    rule = cmake_builder.rules.build_iree_fetch_artifact(
-        target_name="abcd",
-        source_url="https://example.com/abcd.tflite",
-        output="./abcd.tflite",
-        unpack=True)
+    def test_build_iree_fetch_artifact(self):
+        rule = cmake_builder.rules.build_iree_fetch_artifact(
+            target_name="abcd",
+            source_url="https://example.com/abcd.tflite",
+            output="./abcd.tflite",
+            unpack=True,
+        )
 
-    self.assertEqual(
-        rule,
-        textwrap.dedent("""\
+        self.assertEqual(
+            rule,
+            textwrap.dedent(
+                """\
         iree_fetch_artifact(
           NAME "abcd"
           SOURCE_URL "https://example.com/abcd.tflite"
           OUTPUT "./abcd.tflite"
           UNPACK
         )
-        """))
+        """
+            ),
+        )
 
-  def test_build_iree_import_tf_model(self):
-    rule = cmake_builder.rules.build_iree_import_tf_model(
-        target_path="pkg_abcd",
-        source="abcd/model",
-        import_flags=[
-            "--tf-savedmodel-exported-names=main",
-            "--tf-import-type=savedmodel_v1"
-        ],
-        output_mlir_file="abcd.mlir")
+    def test_build_iree_import_tf_model(self):
+        rule = cmake_builder.rules.build_iree_import_tf_model(
+            target_path="pkg_abcd",
+            source="abcd/model",
+            import_flags=[
+                "--tf-savedmodel-exported-names=main",
+                "--tf-import-type=savedmodel_v1",
+            ],
+            output_mlir_file="abcd.mlir",
+        )
 
-    self.assertEqual(
-        rule,
-        textwrap.dedent("""\
+        self.assertEqual(
+            rule,
+            textwrap.dedent(
+                """\
         iree_import_tf_model(
           TARGET_NAME "pkg_abcd"
           SOURCE "abcd/model"
@@ -105,18 +118,22 @@
             "--tf-import-type=savedmodel_v1"
           OUTPUT_MLIR_FILE "abcd.mlir"
         )
-        """))
+        """
+            ),
+        )
 
-  def test_build_iree_import_tflite_model(self):
-    rule = cmake_builder.rules.build_iree_import_tflite_model(
-        target_path="pkg_abcd",
-        source="abcd.tflite",
-        import_flags=["--fake-flag=abcd"],
-        output_mlir_file="abcd.mlir")
+    def test_build_iree_import_tflite_model(self):
+        rule = cmake_builder.rules.build_iree_import_tflite_model(
+            target_path="pkg_abcd",
+            source="abcd.tflite",
+            import_flags=["--fake-flag=abcd"],
+            output_mlir_file="abcd.mlir",
+        )
 
-    self.assertEqual(
-        rule,
-        textwrap.dedent("""\
+        self.assertEqual(
+            rule,
+            textwrap.dedent(
+                """\
         iree_import_tflite_model(
           TARGET_NAME "pkg_abcd"
           SOURCE "abcd.tflite"
@@ -124,25 +141,26 @@
             "--fake-flag=abcd"
           OUTPUT_MLIR_FILE "abcd.mlir"
         )
-        """))
+        """
+            ),
+        )
 
-  def test_build_iree_benchmark_suite_module_test(self):
-    rule = cmake_builder.rules.build_iree_benchmark_suite_module_test(
-        target_name="model_test",
-        driver="LOCAL_TASK",
-        expected_output="xyz",
-        platform_module_map={
-            "x86_64": "a.vmfb",
-            "arm": "b.vmfb"
-        },
-        runner_args=["--x=0", "--y=1"],
-        timeout_secs=10,
-        labels=["defaults", "e2e"],
-        xfail_platforms=["arm_64-Android", "riscv_32-Linux"])
+    def test_build_iree_benchmark_suite_module_test(self):
+        rule = cmake_builder.rules.build_iree_benchmark_suite_module_test(
+            target_name="model_test",
+            driver="LOCAL_TASK",
+            expected_output="xyz",
+            platform_module_map={"x86_64": "a.vmfb", "arm": "b.vmfb"},
+            runner_args=["--x=0", "--y=1"],
+            timeout_secs=10,
+            labels=["defaults", "e2e"],
+            xfail_platforms=["arm_64-Android", "riscv_32-Linux"],
+        )
 
-    self.assertEqual(
-        rule,
-        textwrap.dedent("""\
+        self.assertEqual(
+            rule,
+            textwrap.dedent(
+                """\
         iree_benchmark_suite_module_test(
           NAME "model_test"
           DRIVER "LOCAL_TASK"
@@ -161,32 +179,41 @@
             "arm_64-Android"
             "riscv_32-Linux"
         )
-        """))
+        """
+            ),
+        )
 
-  def test_build_add_dependencies(self):
-    rule = cmake_builder.rules.build_add_dependencies(
-        target="iree_mlir_suites", deps=["pkg_abcd", "pkg_efgh"])
+    def test_build_add_dependencies(self):
+        rule = cmake_builder.rules.build_add_dependencies(
+            target="iree_mlir_suites", deps=["pkg_abcd", "pkg_efgh"]
+        )
 
-    self.assertEqual(
-        rule,
-        textwrap.dedent("""\
+        self.assertEqual(
+            rule,
+            textwrap.dedent(
+                """\
         add_dependencies(iree_mlir_suites
           pkg_abcd
           pkg_efgh
         )
-        """))
+        """
+            ),
+        )
 
-  def test_build_set(self):
-    rule = cmake_builder.rules.build_set(variable_name="_ABC", value="123")
+    def test_build_set(self):
+        rule = cmake_builder.rules.build_set(variable_name="_ABC", value="123")
 
-    self.assertEqual(
-        rule,
-        textwrap.dedent("""\
+        self.assertEqual(
+            rule,
+            textwrap.dedent(
+                """\
         set(_ABC
           123
         )
-        """))
+        """
+            ),
+        )
 
 
 if __name__ == "__main__":
-  unittest.main()
+    unittest.main()
diff --git a/build_tools/python/e2e_model_tests/cmake_generator.py b/build_tools/python/e2e_model_tests/cmake_generator.py
index e890f44..5b0bb1a 100644
--- a/build_tools/python/e2e_model_tests/cmake_generator.py
+++ b/build_tools/python/e2e_model_tests/cmake_generator.py
@@ -14,52 +14,62 @@
 
 
 def generate_rules(
-    module_generation_configs: List[iree_definitions.ModuleGenerationConfig]
+    module_generation_configs: List[iree_definitions.ModuleGenerationConfig],
 ) -> List[str]:
-  """Generates CMake rules for e2e model tests."""
+    """Generates CMake rules for e2e model tests."""
 
-  # ModelTestConfig uses (imported_model, compile_config (mapped from platform))
-  # to define the required module. Collect module paths indexed by the pair.
-  all_module_path_map = {}
-  for gen_config in module_generation_configs:
-    module_path = iree_artifacts.get_module_dir_path(
-        gen_config) / iree_artifacts.MODULE_FILENAME
-    all_module_path_map[(gen_config.imported_model.composite_id,
-                         gen_config.compile_config.id)] = module_path
+    # ModelTestConfig uses (imported_model, compile_config (mapped from platform))
+    # to define the required module. Collect module paths indexed by the pair.
+    all_module_path_map = {}
+    for gen_config in module_generation_configs:
+        module_path = (
+            iree_artifacts.get_module_dir_path(gen_config)
+            / iree_artifacts.MODULE_FILENAME
+        )
+        all_module_path_map[
+            (gen_config.imported_model.composite_id, gen_config.compile_config.id)
+        ] = module_path
 
-  cmake_rules = []
-  for test_config in test_definitions.TEST_CONFIGS:
-    imported_model = test_config.imported_model
-    platform_module_map = {}
-    for platform in test_definitions.CMakePlatform:
-      if platform in test_config.unsupported_platforms:
-        continue
+    cmake_rules = []
+    for test_config in test_definitions.TEST_CONFIGS:
+        imported_model = test_config.imported_model
+        platform_module_map = {}
+        for platform in test_definitions.CMakePlatform:
+            if platform in test_config.unsupported_platforms:
+                continue
 
-      compile_config = test_definitions.PLATFORM_COMPILE_CONFIG_MAP[platform]
-      module_path = all_module_path_map.get(
-          (imported_model.composite_id, compile_config.id))
-      if module_path is None:
-        raise ValueError(
-            f"Module for {test_config.name} on {platform} not found.")
-      platform_module_map[platform.value] = module_path
+            compile_config = test_definitions.PLATFORM_COMPILE_CONFIG_MAP[platform]
+            module_path = all_module_path_map.get(
+                (imported_model.composite_id, compile_config.id)
+            )
+            if module_path is None:
+                raise ValueError(
+                    f"Module for {test_config.name} on {platform} not found."
+                )
+            platform_module_map[platform.value] = module_path
 
-    # TODO(#11136): Currently the DRIVER is a separate field in the CMake rule (
-    # and has effect on test labels). Rules should be generated in another way
-    # to avoid that. Generates the flags without the driver for now.
-    runner_args = iree_definitions.generate_run_flags(
-        imported_model=imported_model,
-        input_data=test_config.input_data,
-        module_execution_config=test_config.execution_config,
-        with_driver=False) + test_config.extra_test_flags
-    cmake_rule = cmake_builder.rules.build_iree_benchmark_suite_module_test(
-        target_name=test_config.name,
-        driver=test_config.execution_config.driver.value,
-        expected_output=test_config.expected_output,
-        platform_module_map=platform_module_map,
-        runner_args=runner_args,
-        xfail_platforms=[
-            platform.value for platform in test_config.xfail_platforms
-        ])
-    cmake_rules.append(cmake_rule)
+        # TODO(#11136): Currently the DRIVER is a separate field in the CMake rule (
+        # and has effect on test labels). Rules should be generated in another way
+        # to avoid that. Generates the flags without the driver for now.
+        runner_args = (
+            iree_definitions.generate_run_flags(
+                imported_model=imported_model,
+                input_data=test_config.input_data,
+                module_execution_config=test_config.execution_config,
+                with_driver=False,
+            )
+            + test_config.extra_test_flags
+        )
+        cmake_rule = cmake_builder.rules.build_iree_benchmark_suite_module_test(
+            target_name=test_config.name,
+            driver=test_config.execution_config.driver.value,
+            expected_output=test_config.expected_output,
+            platform_module_map=platform_module_map,
+            runner_args=runner_args,
+            xfail_platforms=[
+                platform.value for platform in test_config.xfail_platforms
+            ],
+        )
+        cmake_rules.append(cmake_rule)
 
-  return cmake_rules
+    return cmake_rules
diff --git a/build_tools/python/e2e_model_tests/run_module_utils.py b/build_tools/python/e2e_model_tests/run_module_utils.py
index 3842a11..eac9ad3 100644
--- a/build_tools/python/e2e_model_tests/run_module_utils.py
+++ b/build_tools/python/e2e_model_tests/run_module_utils.py
@@ -12,16 +12,17 @@
 
 
 def build_linux_wrapper_cmds_for_device_spec(
-    device_spec: common_definitions.DeviceSpec) -> List[str]:
-  """Builds the commands with tools to create the execution environment."""
+    device_spec: common_definitions.DeviceSpec,
+) -> List[str]:
+    """Builds the commands with tools to create the execution environment."""
 
-  affinity_mask = None
-  for param in device_spec.device_parameters:
-    if param != device_parameters.ALL_CORES:
-      raise ValueError(f"Unsupported device parameter: {param}.")
+    affinity_mask = None
+    for param in device_spec.device_parameters:
+        if param != device_parameters.ALL_CORES:
+            raise ValueError(f"Unsupported device parameter: {param}.")
 
-  cmds = []
-  if affinity_mask is not None:
-    cmds += ["taskset", affinity_mask]
+    cmds = []
+    if affinity_mask is not None:
+        cmds += ["taskset", affinity_mask]
 
-  return cmds
+    return cmds
diff --git a/build_tools/python/e2e_model_tests/run_module_utils_test.py b/build_tools/python/e2e_model_tests/run_module_utils_test.py
index 2bbebf4..f86ddac 100644
--- a/build_tools/python/e2e_model_tests/run_module_utils_test.py
+++ b/build_tools/python/e2e_model_tests/run_module_utils_test.py
@@ -12,21 +12,20 @@
 
 
 class RunModuleUtilsTest(unittest.TestCase):
+    def test_build_linux_wrapper_cmds_for_device_spec(self):
+        device_spec = common_definitions.DeviceSpec.build(
+            id="abc",
+            device_name="test-device",
+            architecture=common_definitions.DeviceArchitecture.VMVX_GENERIC,
+            host_environment=common_definitions.HostEnvironment.LINUX_X86_64,
+            device_parameters=[device_parameters.ALL_CORES],
+            tags=[],
+        )
 
-  def test_build_linux_wrapper_cmds_for_device_spec(self):
-    device_spec = common_definitions.DeviceSpec.build(
-        id="abc",
-        device_name="test-device",
-        architecture=common_definitions.DeviceArchitecture.VMVX_GENERIC,
-        host_environment=common_definitions.HostEnvironment.LINUX_X86_64,
-        device_parameters=[device_parameters.ALL_CORES],
-        tags=[])
+        flags = run_module_utils.build_linux_wrapper_cmds_for_device_spec(device_spec)
 
-    flags = run_module_utils.build_linux_wrapper_cmds_for_device_spec(
-        device_spec)
-
-    self.assertEqual(flags, [])
+        self.assertEqual(flags, [])
 
 
 if __name__ == "__main__":
-  unittest.main()
+    unittest.main()
diff --git a/build_tools/python/e2e_model_tests/test_definitions.py b/build_tools/python/e2e_model_tests/test_definitions.py
index 599de4f..def7dd3 100644
--- a/build_tools/python/e2e_model_tests/test_definitions.py
+++ b/build_tools/python/e2e_model_tests/test_definitions.py
@@ -12,50 +12,53 @@
 
 from e2e_test_framework.definitions import common_definitions, iree_definitions
 from e2e_test_framework.models import tflite_models
-from benchmark_suites.iree import (riscv_benchmarks, x86_64_benchmarks,
-                                   armv8_a_benchmarks, module_execution_configs)
+from benchmark_suites.iree import (
+    riscv_benchmarks,
+    x86_64_benchmarks,
+    armv8_a_benchmarks,
+    module_execution_configs,
+)
 
 
 class CMakePlatform(enum.Enum):
-  """Enum of CMake system platform string."""
-  ANDROID_ARMV8_A = "arm_64-Android"
-  LINUX_RISCV32 = "riscv_32-Linux"
-  LINUX_RISCV64 = "riscv_64-Linux"
-  LINUX_X86_64 = "x86_64-Linux"
+    """Enum of CMake system platform string."""
+
+    ANDROID_ARMV8_A = "arm_64-Android"
+    LINUX_RISCV32 = "riscv_32-Linux"
+    LINUX_RISCV64 = "riscv_64-Linux"
+    LINUX_X86_64 = "x86_64-Linux"
 
 
 # Compile config used for each CMake system platform.
 PLATFORM_COMPILE_CONFIG_MAP = {
-    CMakePlatform.ANDROID_ARMV8_A:
-        armv8_a_benchmarks.Android_ARMv8_A_Benchmarks.DEFAULT_COMPILE_CONFIG,
-    CMakePlatform.LINUX_RISCV32:
-        riscv_benchmarks.Linux_RV32_Benchmarks.DEFAULT_COMPILE_CONFIG,
-    CMakePlatform.LINUX_RISCV64:
-        riscv_benchmarks.Linux_RV64_Benchmarks.DEFAULT_COMPILE_CONFIG,
-    CMakePlatform.LINUX_X86_64:
-        x86_64_benchmarks.Linux_x86_64_Benchmarks.CASCADELAKE_COMPILE_CONFIG
+    CMakePlatform.ANDROID_ARMV8_A: armv8_a_benchmarks.Android_ARMv8_A_Benchmarks.DEFAULT_COMPILE_CONFIG,
+    CMakePlatform.LINUX_RISCV32: riscv_benchmarks.Linux_RV32_Benchmarks.DEFAULT_COMPILE_CONFIG,
+    CMakePlatform.LINUX_RISCV64: riscv_benchmarks.Linux_RV64_Benchmarks.DEFAULT_COMPILE_CONFIG,
+    CMakePlatform.LINUX_X86_64: x86_64_benchmarks.Linux_x86_64_Benchmarks.CASCADELAKE_COMPILE_CONFIG,
 }
 
 
 @dataclass(frozen=True)
 class ModelTestConfig(object):
-  """Defines an e2e model test to run by iree-run-module."""
-  # Test name shown in the test rule.
-  name: str
-  imported_model: iree_definitions.ImportedModel
-  execution_config: iree_definitions.ModuleExecutionConfig
+    """Defines an e2e model test to be run by iree-run-module."""
 
-  # Either a string literal or a file path.
-  expected_output: str
-  input_data: common_definitions.ModelInputData = common_definitions.ZEROS_MODEL_INPUT_DATA
+    # Test name shown in the test rule.
+    name: str
+    imported_model: iree_definitions.ImportedModel
+    execution_config: iree_definitions.ModuleExecutionConfig
 
-  # Platforms to ignore this test.
-  unsupported_platforms: List[CMakePlatform] = dataclasses.field(
-      default_factory=list)
-  # Platforms to expect this test failed.
-  xfail_platforms: List[CMakePlatform] = dataclasses.field(default_factory=list)
-  # Extra flags for `iree-run-module`.
-  extra_test_flags: List[str] = dataclasses.field(default_factory=list)
+    # Either a string literal or a file path.
+    expected_output: str
+    input_data: common_definitions.ModelInputData = (
+        common_definitions.ZEROS_MODEL_INPUT_DATA
+    )
+
+    # Platforms to ignore this test.
+    unsupported_platforms: List[CMakePlatform] = dataclasses.field(default_factory=list)
+    # Platforms on which this test is expected to fail.
+    xfail_platforms: List[CMakePlatform] = dataclasses.field(default_factory=list)
+    # Extra flags for `iree-run-module`.
+    extra_test_flags: List[str] = dataclasses.field(default_factory=list)
 
 
 TEST_CONFIGS = [
@@ -63,40 +66,51 @@
     ModelTestConfig(
         name="mobilenet_v1_fp32_correctness_test",
         imported_model=iree_definitions.ImportedModel.from_model(
-            tflite_models.MOBILENET_V1),
+            tflite_models.MOBILENET_V1
+        ),
         execution_config=module_execution_configs.ELF_LOCAL_SYNC_CONFIG,
         expected_output="mobilenet_v1_fp32_expected_output.txt",
         unsupported_platforms=[
-            CMakePlatform.LINUX_RISCV32, CMakePlatform.ANDROID_ARMV8_A
-        ]),
+            CMakePlatform.LINUX_RISCV32,
+            CMakePlatform.ANDROID_ARMV8_A,
+        ],
+    ),
     # efficientnet_int8_correctness_test
     ModelTestConfig(
         name="efficientnet_int8_correctness_test",
         imported_model=iree_definitions.ImportedModel.from_model(
-            tflite_models.EFFICIENTNET_INT8),
+            tflite_models.EFFICIENTNET_INT8
+        ),
         execution_config=module_execution_configs.ELF_LOCAL_SYNC_CONFIG,
         expected_output="efficientnet_int8_expected_output.txt",
         unsupported_platforms=[
-            CMakePlatform.ANDROID_ARMV8_A, CMakePlatform.LINUX_RISCV32,
-            CMakePlatform.LINUX_RISCV64
-        ]),
+            CMakePlatform.ANDROID_ARMV8_A,
+            CMakePlatform.LINUX_RISCV32,
+            CMakePlatform.LINUX_RISCV64,
+        ],
+    ),
     # deeplab_v3_fp32_correctness_test
     ModelTestConfig(
         name="deeplab_v3_fp32_correctness_test",
         imported_model=iree_definitions.ImportedModel.from_model(
-            tflite_models.DEEPLABV3_FP32),
+            tflite_models.DEEPLABV3_FP32
+        ),
         execution_config=module_execution_configs.ELF_LOCAL_SYNC_CONFIG,
         expected_output="deeplab_v3_fp32_input_0_expected_output.npy",
         extra_test_flags=["--expected_f32_threshold=0.001"],
         unsupported_platforms=[
-            CMakePlatform.LINUX_RISCV32, CMakePlatform.LINUX_RISCV64
-        ]),
+            CMakePlatform.LINUX_RISCV32,
+            CMakePlatform.LINUX_RISCV64,
+        ],
+    ),
     # person_detect_int8_correctness_test
     ModelTestConfig(
         name="person_detect_int8_correctness_test",
         imported_model=iree_definitions.ImportedModel.from_model(
-            tflite_models.PERSON_DETECT_INT8),
+            tflite_models.PERSON_DETECT_INT8
+        ),
         execution_config=module_execution_configs.ELF_LOCAL_SYNC_CONFIG,
         expected_output="1x2xi8=[72 -72]",
-        unsupported_platforms=[CMakePlatform.ANDROID_ARMV8_A])
+        unsupported_platforms=[CMakePlatform.ANDROID_ARMV8_A],
+    ),
 ]
diff --git a/build_tools/python/e2e_test_artifacts/cmake_generator/iree_rule_generator.py b/build_tools/python/e2e_test_artifacts/cmake_generator/iree_rule_generator.py
index 1610a24..3e060f1 100644
--- a/build_tools/python/e2e_test_artifacts/cmake_generator/iree_rule_generator.py
+++ b/build_tools/python/e2e_test_artifacts/cmake_generator/iree_rule_generator.py
@@ -32,181 +32,204 @@
 
 @dataclass(frozen=True)
 class IreeModelImportRule(object):
-  target_name: str
-  output_file_path: pathlib.PurePath
-  cmake_rules: List[str]
+    target_name: str
+    output_file_path: pathlib.PurePath
+    cmake_rules: List[str]
 
 
 @dataclass(frozen=True)
 class IreeModuleCompileRule(object):
-  target_name: str
-  output_module_path: pathlib.PurePath
-  cmake_rules: List[str]
+    target_name: str
+    output_module_path: pathlib.PurePath
+    cmake_rules: List[str]
 
 
 class IreeRuleBuilder(object):
-  """Builder to generate IREE CMake rules."""
+    """Builder to generate IREE CMake rules."""
 
-  _package_name: str
+    _package_name: str
 
-  def __init__(self, package_name: str):
-    self._package_name = package_name
+    def __init__(self, package_name: str):
+        self._package_name = package_name
 
-  def build_model_import_rule(
-      self, source_model_rule: model_rule_generator.ModelRule,
-      imported_model: iree_definitions.ImportedModel,
-      output_file_path: pathlib.PurePath) -> IreeModelImportRule:
+    def build_model_import_rule(
+        self,
+        source_model_rule: model_rule_generator.ModelRule,
+        imported_model: iree_definitions.ImportedModel,
+        output_file_path: pathlib.PurePath,
+    ) -> IreeModelImportRule:
+        model = imported_model.model
+        import_config = imported_model.import_config
+        if import_config.tool == iree_definitions.ImportTool.NONE:
+            if source_model_rule.file_path != output_file_path:
+                raise ValueError(
+                    f"Separate path for MLIR model isn't supported yet: "
+                    f"('{source_model_rule.file_path }' != '{output_file_path}')"
+                )
+            return IreeModelImportRule(
+                target_name=source_model_rule.target_name,
+                output_file_path=output_file_path,
+                cmake_rules=[],
+            )
 
-    model = imported_model.model
-    import_config = imported_model.import_config
-    if import_config.tool == iree_definitions.ImportTool.NONE:
-      if source_model_rule.file_path != output_file_path:
-        raise ValueError(
-            f"Separate path for MLIR model isn't supported yet: "
-            f"('{source_model_rule.file_path }' != '{output_file_path}')")
-      return IreeModelImportRule(target_name=source_model_rule.target_name,
-                                 output_file_path=output_file_path,
-                                 cmake_rules=[])
+        # Import target name: iree-imported-model-<imported_model_id>
+        target_name = f"iree-imported-model-{imported_model.composite_id}"
 
-    # Import target name: iree-imported-model-<imported_model_id>
-    target_name = f"iree-imported-model-{imported_model.composite_id}"
+        import_flags = import_config.materialize_import_flags(model)
+        if import_config.tool == iree_definitions.ImportTool.TFLITE_IMPORTER:
+            cmake_rules = [
+                cmake_builder.rules.build_iree_import_tflite_model(
+                    target_path=self.build_target_path(target_name),
+                    source=str(source_model_rule.file_path),
+                    import_flags=import_flags,
+                    output_mlir_file=str(output_file_path),
+                )
+            ]
+        elif import_config.tool == iree_definitions.ImportTool.TF_IMPORTER:
+            cmake_rules = [
+                cmake_builder.rules.build_iree_import_tf_model(
+                    target_path=self.build_target_path(target_name),
+                    source=str(source_model_rule.file_path),
+                    import_flags=import_flags,
+                    output_mlir_file=str(output_file_path),
+                )
+            ]
+        else:
+            raise ValueError(
+                f"Unsupported import tool '{import_config.tool}' of the model '{model.id}'."
+            )
 
-    import_flags = import_config.materialize_import_flags(model)
-    if import_config.tool == iree_definitions.ImportTool.TFLITE_IMPORTER:
-      cmake_rules = [
-          cmake_builder.rules.build_iree_import_tflite_model(
-              target_path=self.build_target_path(target_name),
-              source=str(source_model_rule.file_path),
-              import_flags=import_flags,
-              output_mlir_file=str(output_file_path))
-      ]
-    elif import_config.tool == iree_definitions.ImportTool.TF_IMPORTER:
-      cmake_rules = [
-          cmake_builder.rules.build_iree_import_tf_model(
-              target_path=self.build_target_path(target_name),
-              source=str(source_model_rule.file_path),
-              import_flags=import_flags,
-              output_mlir_file=str(output_file_path))
-      ]
-    else:
-      raise ValueError(
-          f"Unsupported import tool '{import_config.tool}' of the model '{model.id}'."
-      )
-
-    return IreeModelImportRule(target_name=target_name,
-                               output_file_path=output_file_path,
-                               cmake_rules=cmake_rules)
-
-  def build_module_compile_rule(
-      self, model_import_rule: IreeModelImportRule,
-      module_generation_config: iree_definitions.ModuleGenerationConfig,
-      output_file_path: pathlib.PurePath) -> IreeModuleCompileRule:
-
-    compile_flags = module_generation_config.materialize_compile_flags(
-        module_dir_path=output_file_path.parent)
-
-    # Module target name: iree-module-<gen_config_id>
-    target_name = f"iree-module-{module_generation_config.composite_id}"
-
-    cmake_rules = [
-        cmake_builder.rules.build_iree_bytecode_module(
+        return IreeModelImportRule(
             target_name=target_name,
-            src=str(model_import_rule.output_file_path),
-            module_name=str(output_file_path),
-            flags=compile_flags,
-            friendly_name=str(module_generation_config))
-    ]
+            output_file_path=output_file_path,
+            cmake_rules=cmake_rules,
+        )
 
-    # TODO(#10155): Dump the compile flags from iree_bytecode_module into a flagfile.
+    def build_module_compile_rule(
+        self,
+        model_import_rule: IreeModelImportRule,
+        module_generation_config: iree_definitions.ModuleGenerationConfig,
+        output_file_path: pathlib.PurePath,
+    ) -> IreeModuleCompileRule:
+        compile_flags = module_generation_config.materialize_compile_flags(
+            module_dir_path=output_file_path.parent
+        )
 
-    return IreeModuleCompileRule(target_name=target_name,
-                                 output_module_path=output_file_path,
-                                 cmake_rules=cmake_rules)
+        # Module target name: iree-module-<gen_config_id>
+        target_name = f"iree-module-{module_generation_config.composite_id}"
 
-  def build_target_path(self, target_name: str):
-    """Returns the full target path by combining the package name and the target
-    name.
-    """
-    return f"{self._package_name}_{target_name}"
+        cmake_rules = [
+            cmake_builder.rules.build_iree_bytecode_module(
+                target_name=target_name,
+                src=str(model_import_rule.output_file_path),
+                module_name=str(output_file_path),
+                flags=compile_flags,
+                friendly_name=str(module_generation_config),
+            )
+        ]
+
+        # TODO(#10155): Dump the compile flags from iree_bytecode_module into a flagfile.
+
+        return IreeModuleCompileRule(
+            target_name=target_name,
+            output_module_path=output_file_path,
+            cmake_rules=cmake_rules,
+        )
+
+    def build_target_path(self, target_name: str):
+        """Returns the full target path by combining the package name and the target
+        name.
+        """
+        return f"{self._package_name}_{target_name}"
 
 
 def generate_rules(
-    package_name: str, root_path: pathlib.PurePath,
-    module_generation_configs: Sequence[
-        iree_definitions.ModuleGenerationConfig],
-    model_rule_map: Dict[str, model_rule_generator.ModelRule]) -> List[str]:
-  """Generates all rules to build IREE artifacts.
+    package_name: str,
+    root_path: pathlib.PurePath,
+    module_generation_configs: Sequence[iree_definitions.ModuleGenerationConfig],
+    model_rule_map: Dict[str, model_rule_generator.ModelRule],
+) -> List[str]:
+    """Generates all rules to build IREE artifacts.
 
-  Args:
-    package_name: CMake package name for rules.
-    root_path: path of the root artifact directory.
-    module_generation_configs: list of IREE module generation configs.
-    model_rule_map: map of generated model rules keyed by model id, it must
-      cover all models referenced in module_generation_configs.
-  Returns:
-    List of cmake rules.
-  """
+    Args:
+      package_name: CMake package name for rules.
+      root_path: path of the root artifact directory.
+      module_generation_configs: list of IREE module generation configs.
+      model_rule_map: map of generated model rules keyed by model id, it must
+        cover all models referenced in module_generation_configs.
+    Returns:
+      List of cmake rules.
+    """
 
-  rule_builder = IreeRuleBuilder(package_name=package_name)
+    rule_builder = IreeRuleBuilder(package_name=package_name)
 
-  all_imported_models = dict(
-      (config.imported_model.composite_id, config.imported_model)
-      for config in module_generation_configs)
+    all_imported_models = dict(
+        (config.imported_model.composite_id, config.imported_model)
+        for config in module_generation_configs
+    )
 
-  cmake_rules = []
-  model_import_rule_map = {}
-  for imported_model_id, imported_model in all_imported_models.items():
-    model_rule = model_rule_map.get(imported_model.model.id)
-    if model_rule is None:
-      raise ValueError(f"Model rule not found for {imported_model.model.id}.")
+    cmake_rules = []
+    model_import_rule_map = {}
+    for imported_model_id, imported_model in all_imported_models.items():
+        model_rule = model_rule_map.get(imported_model.model.id)
+        if model_rule is None:
+            raise ValueError(f"Model rule not found for {imported_model.model.id}.")
 
-    imported_model_path = iree_artifacts.get_imported_model_path(
-        imported_model=imported_model, root_path=root_path)
-    model_import_rule = rule_builder.build_model_import_rule(
-        source_model_rule=model_rule,
-        imported_model=imported_model,
-        output_file_path=imported_model_path)
-    model_import_rule_map[imported_model_id] = model_import_rule
-    cmake_rules.extend(model_import_rule.cmake_rules)
+        imported_model_path = iree_artifacts.get_imported_model_path(
+            imported_model=imported_model, root_path=root_path
+        )
+        model_import_rule = rule_builder.build_model_import_rule(
+            source_model_rule=model_rule,
+            imported_model=imported_model,
+            output_file_path=imported_model_path,
+        )
+        model_import_rule_map[imported_model_id] = model_import_rule
+        cmake_rules.extend(model_import_rule.cmake_rules)
 
-  cmake_target_names = collections.defaultdict(set)
-  for gen_config in module_generation_configs:
-    model_import_rule = model_import_rule_map[
-        gen_config.imported_model.composite_id]
-    module_dir_path = iree_artifacts.get_module_dir_path(
-        module_generation_config=gen_config, root_path=root_path)
-    module_compile_rule = rule_builder.build_module_compile_rule(
-        model_import_rule=model_import_rule,
-        module_generation_config=gen_config,
-        output_file_path=module_dir_path / iree_artifacts.MODULE_FILENAME)
+    cmake_target_names = collections.defaultdict(set)
+    for gen_config in module_generation_configs:
+        model_import_rule = model_import_rule_map[
+            gen_config.imported_model.composite_id
+        ]
+        module_dir_path = iree_artifacts.get_module_dir_path(
+            module_generation_config=gen_config, root_path=root_path
+        )
+        module_compile_rule = rule_builder.build_module_compile_rule(
+            model_import_rule=model_import_rule,
+            module_generation_config=gen_config,
+            output_file_path=module_dir_path / iree_artifacts.MODULE_FILENAME,
+        )
 
-    is_compile_stats = (benchmark_tags.COMPILE_STATS
-                        in gen_config.compile_config.tags)
-    if benchmark_tags.LARGE in gen_config.tags:
-      import_target = LARGE_BENCHMARK_IMPORT_MODELS_CMAKE_TARGET
-      if is_compile_stats:
-        suite_target = LARGE_E2E_COMPILE_STATS_SUITES_CMAKE_TARGET
-      else:
-        suite_target = LARGE_BENCHMARK_SUITES_CMAKE_TARGET
-    else:
-      import_target = BENCHMARK_IMPORT_MODELS_CMAKE_TARGET
-      if is_compile_stats:
-        suite_target = E2E_COMPILE_STATS_SUITES
-      else:
-        suite_target = BENCHMARK_SUITES_CMAKE_TARGET
+        is_compile_stats = (
+            benchmark_tags.COMPILE_STATS in gen_config.compile_config.tags
+        )
+        if benchmark_tags.LARGE in gen_config.tags:
+            import_target = LARGE_BENCHMARK_IMPORT_MODELS_CMAKE_TARGET
+            if is_compile_stats:
+                suite_target = LARGE_E2E_COMPILE_STATS_SUITES_CMAKE_TARGET
+            else:
+                suite_target = LARGE_BENCHMARK_SUITES_CMAKE_TARGET
+        else:
+            import_target = BENCHMARK_IMPORT_MODELS_CMAKE_TARGET
+            if is_compile_stats:
+                suite_target = E2E_COMPILE_STATS_SUITES
+            else:
+                suite_target = BENCHMARK_SUITES_CMAKE_TARGET
 
-    cmake_target_names[import_target].add(model_import_rule.target_name)
-    cmake_target_names[suite_target].add(module_compile_rule.target_name)
-    cmake_rules.extend(module_compile_rule.cmake_rules)
+        cmake_target_names[import_target].add(model_import_rule.target_name)
+        cmake_target_names[suite_target].add(module_compile_rule.target_name)
+        cmake_rules.extend(module_compile_rule.cmake_rules)
 
-  for cmake_target, module_target_names in cmake_target_names.items():
-    module_target_names = sorted(module_target_names)
-    cmake_rules.append(
-        cmake_builder.rules.build_add_dependencies(
-            target=cmake_target,
-            deps=[
-                rule_builder.build_target_path(target_name)
-                for target_name in module_target_names
-            ]))
+    for cmake_target, module_target_names in cmake_target_names.items():
+        module_target_names = sorted(module_target_names)
+        cmake_rules.append(
+            cmake_builder.rules.build_add_dependencies(
+                target=cmake_target,
+                deps=[
+                    rule_builder.build_target_path(target_name)
+                    for target_name in module_target_names
+                ],
+            )
+        )
 
-  return cmake_rules
+    return cmake_rules
diff --git a/build_tools/python/e2e_test_artifacts/cmake_generator/iree_rule_generator_test.py b/build_tools/python/e2e_test_artifacts/cmake_generator/iree_rule_generator_test.py
index ea9d23a..44fb1e8 100644
--- a/build_tools/python/e2e_test_artifacts/cmake_generator/iree_rule_generator_test.py
+++ b/build_tools/python/e2e_test_artifacts/cmake_generator/iree_rule_generator_test.py
@@ -12,184 +12,206 @@
 
 
 class IreeRuleBuilderTest(unittest.TestCase):
+    def setUp(self):
+        self._builder = iree_rule_generator.IreeRuleBuilder(package_name="${package}")
 
-  def setUp(self):
-    self._builder = iree_rule_generator.IreeRuleBuilder(
-        package_name="${package}")
+    def test_build_model_import_rule_tflite(self):
+        tflite_model = common_definitions.Model(
+            id="1234",
+            name="tflite_m",
+            tags=[],
+            source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
+            source_url="https://example.com/xyz.tflite",
+            entry_function="main",
+            input_types=["1xf32"],
+        )
+        tflite_imported_model = iree_definitions.ImportedModel.from_model(tflite_model)
+        model_rule = model_rule_generator.ModelRule(
+            target_name="model-1234",
+            file_path=pathlib.PurePath("root/models/x.tflite"),
+            cmake_rules=["abc"],
+        )
+        output_file_path = pathlib.PurePath(
+            "root", "iree", tflite_model.id, f"{tflite_model.name}.mlir"
+        )
 
-  def test_build_model_import_rule_tflite(self):
-    tflite_model = common_definitions.Model(
-        id="1234",
-        name="tflite_m",
-        tags=[],
-        source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
-        source_url="https://example.com/xyz.tflite",
-        entry_function="main",
-        input_types=["1xf32"])
-    tflite_imported_model = iree_definitions.ImportedModel.from_model(
-        tflite_model)
-    model_rule = model_rule_generator.ModelRule(
-        target_name="model-1234",
-        file_path=pathlib.PurePath("root/models/x.tflite"),
-        cmake_rules=["abc"])
-    output_file_path = pathlib.PurePath("root", "iree", tflite_model.id,
-                                        f"{tflite_model.name}.mlir")
+        rule = self._builder.build_model_import_rule(
+            source_model_rule=model_rule,
+            imported_model=tflite_imported_model,
+            output_file_path=output_file_path,
+        )
 
-    rule = self._builder.build_model_import_rule(
-        source_model_rule=model_rule,
-        imported_model=tflite_imported_model,
-        output_file_path=output_file_path)
+        self.assertEqual(
+            rule.target_name,
+            f"iree-imported-model-{tflite_imported_model.composite_id}",
+        )
+        self.assertEqual(rule.output_file_path, output_file_path)
 
-    self.assertEqual(
-        rule.target_name,
-        f"iree-imported-model-{tflite_imported_model.composite_id}")
-    self.assertEqual(rule.output_file_path, output_file_path)
+    def test_build_model_import_rule_linalg(self):
+        linalg_model = common_definitions.Model(
+            id="9012",
+            name="linalg_m",
+            tags=[],
+            source_type=common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
+            source_url="https://example.com/xyz.mlir",
+            entry_function="main",
+            input_types=["3xf32"],
+        )
+        linalg_imported_model = iree_definitions.ImportedModel.from_model(linalg_model)
+        model_rule = model_rule_generator.ModelRule(
+            target_name="model-5678",
+            file_path=pathlib.PurePath("root/models/y.mlir"),
+            cmake_rules=["abc"],
+        )
 
-  def test_build_model_import_rule_linalg(self):
-    linalg_model = common_definitions.Model(
-        id="9012",
-        name="linalg_m",
-        tags=[],
-        source_type=common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
-        source_url="https://example.com/xyz.mlir",
-        entry_function="main",
-        input_types=["3xf32"])
-    linalg_imported_model = iree_definitions.ImportedModel.from_model(
-        linalg_model)
-    model_rule = model_rule_generator.ModelRule(
-        target_name="model-5678",
-        file_path=pathlib.PurePath("root/models/y.mlir"),
-        cmake_rules=["abc"])
+        rule = self._builder.build_model_import_rule(
+            source_model_rule=model_rule,
+            imported_model=linalg_imported_model,
+            output_file_path=pathlib.PurePath(model_rule.file_path),
+        )
 
-    rule = self._builder.build_model_import_rule(
-        source_model_rule=model_rule,
-        imported_model=linalg_imported_model,
-        output_file_path=pathlib.PurePath(model_rule.file_path))
+        self.assertEqual(rule.target_name, model_rule.target_name)
+        self.assertEqual(
+            pathlib.PurePath(rule.output_file_path),
+            pathlib.PurePath(model_rule.file_path),
+        )
 
-    self.assertEqual(rule.target_name, model_rule.target_name)
-    self.assertEqual(pathlib.PurePath(rule.output_file_path),
-                     pathlib.PurePath(model_rule.file_path))
+    def test_build_module_compile_rule(self):
+        model = common_definitions.Model(
+            id="1234",
+            name="tflite_m",
+            tags=[],
+            source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
+            source_url="https://example.com/xyz.tflite",
+            entry_function="main",
+            input_types=["1xf32"],
+        )
+        imported_model = iree_definitions.ImportedModel.from_model(model)
+        compile_config = iree_definitions.CompileConfig.build(
+            id="config_a",
+            tags=["defaults"],
+            compile_targets=[
+                iree_definitions.CompileTarget(
+                    target_architecture=common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
+                    target_backend=iree_definitions.TargetBackend.LLVM_CPU,
+                    target_abi=iree_definitions.TargetABI.LINUX_GNU,
+                )
+            ],
+        )
+        gen_config = iree_definitions.ModuleGenerationConfig.build(
+            imported_model=imported_model, compile_config=compile_config
+        )
+        model_import_rule = iree_rule_generator.IreeModelImportRule(
+            target_name=f"iree-import-model-abcd",
+            output_file_path=pathlib.PurePath("root/iree/abcd/1234.mlir"),
+            cmake_rules=["abc"],
+        )
+        output_file_path = pathlib.PurePath("root/iree/test_output")
 
-  def test_build_module_compile_rule(self):
-    model = common_definitions.Model(
-        id="1234",
-        name="tflite_m",
-        tags=[],
-        source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
-        source_url="https://example.com/xyz.tflite",
-        entry_function="main",
-        input_types=["1xf32"])
-    imported_model = iree_definitions.ImportedModel.from_model(model)
-    compile_config = iree_definitions.CompileConfig.build(
-        id="config_a",
-        tags=["defaults"],
-        compile_targets=[
-            iree_definitions.CompileTarget(
-                target_architecture=common_definitions.DeviceArchitecture.
-                X86_64_CASCADELAKE,
-                target_backend=iree_definitions.TargetBackend.LLVM_CPU,
-                target_abi=iree_definitions.TargetABI.LINUX_GNU)
-        ])
-    gen_config = iree_definitions.ModuleGenerationConfig.build(
-        imported_model=imported_model, compile_config=compile_config)
-    model_import_rule = iree_rule_generator.IreeModelImportRule(
-        target_name=f"iree-import-model-abcd",
-        output_file_path=pathlib.PurePath("root/iree/abcd/1234.mlir"),
-        cmake_rules=["abc"])
-    output_file_path = pathlib.PurePath("root/iree/test_output")
+        rule = self._builder.build_module_compile_rule(
+            model_import_rule=model_import_rule,
+            module_generation_config=gen_config,
+            output_file_path=output_file_path,
+        )
 
-    rule = self._builder.build_module_compile_rule(
-        model_import_rule=model_import_rule,
-        module_generation_config=gen_config,
-        output_file_path=output_file_path)
+        self.assertEqual(rule.target_name, f"iree-module-{gen_config.composite_id}")
+        self.assertEqual(rule.output_module_path, output_file_path)
 
-    self.assertEqual(rule.target_name, f"iree-module-{gen_config.composite_id}")
-    self.assertEqual(rule.output_module_path, output_file_path)
+    def test_build_target_path(self):
+        builder = iree_rule_generator.IreeRuleBuilder(package_name="xyz")
 
-  def test_build_target_path(self):
-    builder = iree_rule_generator.IreeRuleBuilder(package_name="xyz")
+        path = builder.build_target_path("target-abc")
 
-    path = builder.build_target_path("target-abc")
-
-    self.assertEqual(path, f"xyz_target-abc")
+        self.assertEqual(path, f"xyz_target-abc")
 
 
 class IreeGeneratorTest(unittest.TestCase):
-
-  def test_generate_rules(self):
-    model_a = common_definitions.Model(
-        id="1234",
-        name="tflite_m",
-        tags=[],
-        source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
-        source_url="https://example.com/xyz.tflite",
-        entry_function="main",
-        input_types=["1xf32"])
-    model_b = common_definitions.Model(
-        id="5678",
-        name="stablehlo_m",
-        tags=[],
-        source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
-        source_url="https://example.com/xyz_stablehlo.mlir",
-        entry_function="predict",
-        input_types=["2xf32"])
-    imported_model_a = iree_definitions.ImportedModel.from_model(model_a)
-    imported_model_b = iree_definitions.ImportedModel.from_model(model_b)
-    compile_config_a = iree_definitions.CompileConfig.build(
-        id="config_a",
-        tags=["defaults"],
-        compile_targets=[
-            iree_definitions.CompileTarget(
-                target_architecture=common_definitions.DeviceArchitecture.
-                X86_64_CASCADELAKE,
-                target_backend=iree_definitions.TargetBackend.LLVM_CPU,
-                target_abi=iree_definitions.TargetABI.LINUX_GNU)
-        ])
-    compile_config_b = iree_definitions.CompileConfig.build(
-        id="config_b",
-        tags=["defaults"],
-        compile_targets=[
-            iree_definitions.CompileTarget(
-                target_architecture=common_definitions.DeviceArchitecture.
-                RV64_GENERIC,
-                target_backend=iree_definitions.TargetBackend.LLVM_CPU,
-                target_abi=iree_definitions.TargetABI.LINUX_GNU)
-        ])
-    gen_config_a = iree_definitions.ModuleGenerationConfig.build(
-        imported_model=imported_model_a, compile_config=compile_config_a)
-    gen_config_b = iree_definitions.ModuleGenerationConfig.build(
-        imported_model=imported_model_b, compile_config=compile_config_a)
-    gen_config_c = iree_definitions.ModuleGenerationConfig.build(
-        imported_model=imported_model_b, compile_config=compile_config_b)
-    model_rule_map = {
-        model_a.id:
-            model_rule_generator.ModelRule(
+    def test_generate_rules(self):
+        model_a = common_definitions.Model(
+            id="1234",
+            name="tflite_m",
+            tags=[],
+            source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
+            source_url="https://example.com/xyz.tflite",
+            entry_function="main",
+            input_types=["1xf32"],
+        )
+        model_b = common_definitions.Model(
+            id="5678",
+            name="stablehlo_m",
+            tags=[],
+            source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
+            source_url="https://example.com/xyz_stablehlo.mlir",
+            entry_function="predict",
+            input_types=["2xf32"],
+        )
+        imported_model_a = iree_definitions.ImportedModel.from_model(model_a)
+        imported_model_b = iree_definitions.ImportedModel.from_model(model_b)
+        compile_config_a = iree_definitions.CompileConfig.build(
+            id="config_a",
+            tags=["defaults"],
+            compile_targets=[
+                iree_definitions.CompileTarget(
+                    target_architecture=common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
+                    target_backend=iree_definitions.TargetBackend.LLVM_CPU,
+                    target_abi=iree_definitions.TargetABI.LINUX_GNU,
+                )
+            ],
+        )
+        compile_config_b = iree_definitions.CompileConfig.build(
+            id="config_b",
+            tags=["defaults"],
+            compile_targets=[
+                iree_definitions.CompileTarget(
+                    target_architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
+                    target_backend=iree_definitions.TargetBackend.LLVM_CPU,
+                    target_abi=iree_definitions.TargetABI.LINUX_GNU,
+                )
+            ],
+        )
+        gen_config_a = iree_definitions.ModuleGenerationConfig.build(
+            imported_model=imported_model_a, compile_config=compile_config_a
+        )
+        gen_config_b = iree_definitions.ModuleGenerationConfig.build(
+            imported_model=imported_model_b, compile_config=compile_config_a
+        )
+        gen_config_c = iree_definitions.ModuleGenerationConfig.build(
+            imported_model=imported_model_b, compile_config=compile_config_b
+        )
+        model_rule_map = {
+            model_a.id: model_rule_generator.ModelRule(
                 target_name=f"model-x",
                 file_path=pathlib.PurePath("x.tflite"),
-                cmake_rules=["abc"]),
-        model_b.id:
-            model_rule_generator.ModelRule(
+                cmake_rules=["abc"],
+            ),
+            model_b.id: model_rule_generator.ModelRule(
                 target_name=f"model-y",
                 file_path=pathlib.PurePath("root/model_5678_stablehlo_m.mlir"),
-                cmake_rules=["efg"]),
-    }
+                cmake_rules=["efg"],
+            ),
+        }
 
-    cmake_rules = iree_rule_generator.generate_rules(
-        package_name="${package}",
-        root_path=pathlib.PurePath("root"),
-        module_generation_configs=[gen_config_a, gen_config_b, gen_config_c],
-        model_rule_map=model_rule_map)
+        cmake_rules = iree_rule_generator.generate_rules(
+            package_name="${package}",
+            root_path=pathlib.PurePath("root"),
+            module_generation_configs=[gen_config_a, gen_config_b, gen_config_c],
+            model_rule_map=model_rule_map,
+        )
 
-    concated_cmake_rules = "\n".join(cmake_rules)
-    self.assertRegex(concated_cmake_rules,
-                     f"iree-imported-model-{imported_model_a.composite_id}")
-    self.assertRegex(concated_cmake_rules,
-                     f"iree-module-{gen_config_a.composite_id}")
-    self.assertRegex(concated_cmake_rules,
-                     f"iree-module-{gen_config_b.composite_id}")
-    self.assertRegex(concated_cmake_rules,
-                     f"iree-module-{gen_config_c.composite_id}")
+        concated_cmake_rules = "\n".join(cmake_rules)
+        self.assertRegex(
+            concated_cmake_rules, f"iree-imported-model-{imported_model_a.composite_id}"
+        )
+        self.assertRegex(
+            concated_cmake_rules, f"iree-module-{gen_config_a.composite_id}"
+        )
+        self.assertRegex(
+            concated_cmake_rules, f"iree-module-{gen_config_b.composite_id}"
+        )
+        self.assertRegex(
+            concated_cmake_rules, f"iree-module-{gen_config_c.composite_id}"
+        )
 
 
 if __name__ == "__main__":
-  unittest.main()
+    unittest.main()
diff --git a/build_tools/python/e2e_test_artifacts/cmake_generator/model_rule_generator.py b/build_tools/python/e2e_test_artifacts/cmake_generator/model_rule_generator.py
index 08e4297..aedd18c 100644
--- a/build_tools/python/e2e_test_artifacts/cmake_generator/model_rule_generator.py
+++ b/build_tools/python/e2e_test_artifacts/cmake_generator/model_rule_generator.py
@@ -17,37 +17,37 @@
 
 @dataclass
 class ModelRule(object):
-  target_name: str
-  file_path: pathlib.PurePath
-  cmake_rules: List[str]
+    target_name: str
+    file_path: pathlib.PurePath
+    cmake_rules: List[str]
 
 
 def generate_model_rule_map(
-    root_path: pathlib.PurePath,
-    models: Iterable[common_definitions.Model]) -> Dict[str, ModelRule]:
-  """Returns the model rules keyed by model id in an ordered map."""
+    root_path: pathlib.PurePath, models: Iterable[common_definitions.Model]
+) -> Dict[str, ModelRule]:
+    """Returns the model rules keyed by model id in an ordered map."""
 
-  model_rules = {}
-  for model in models:
-    # Model target: <package_name>-model-<model_id>
-    target_name = f"model-{model.id}"
-    model_path = model_artifacts.get_model_path(model=model,
-                                                root_path=root_path)
+    model_rules = {}
+    for model in models:
+        # Model target: <package_name>-model-<model_id>
+        target_name = f"model-{model.id}"
+        model_path = model_artifacts.get_model_path(model=model, root_path=root_path)
 
-    model_url = urllib.parse.urlparse(model.source_url)
-    if model_url.scheme == "https":
-      cmake_rules = [
-          cmake_builder.rules.build_iree_fetch_artifact(
-              target_name=target_name,
-              source_url=model.source_url,
-              output=str(model_path),
-              unpack=True)
-      ]
-    else:
-      raise ValueError("Unsupported model url: {model.source_url}.")
+        model_url = urllib.parse.urlparse(model.source_url)
+        if model_url.scheme == "https":
+            cmake_rules = [
+                cmake_builder.rules.build_iree_fetch_artifact(
+                    target_name=target_name,
+                    source_url=model.source_url,
+                    output=str(model_path),
+                    unpack=True,
+                )
+            ]
+        else:
+            raise ValueError("Unsupported model url: {model.source_url}.")
 
-    model_rules[model.id] = ModelRule(target_name=target_name,
-                                      file_path=model_path,
-                                      cmake_rules=cmake_rules)
+        model_rules[model.id] = ModelRule(
+            target_name=target_name, file_path=model_path, cmake_rules=cmake_rules
+        )
 
-  return model_rules
+    return model_rules
diff --git a/build_tools/python/e2e_test_artifacts/cmake_generator/model_rule_generator_test.py b/build_tools/python/e2e_test_artifacts/cmake_generator/model_rule_generator_test.py
index 71958ef..1d43a72 100644
--- a/build_tools/python/e2e_test_artifacts/cmake_generator/model_rule_generator_test.py
+++ b/build_tools/python/e2e_test_artifacts/cmake_generator/model_rule_generator_test.py
@@ -13,39 +13,43 @@
 
 
 class CommonGeneratorsTest(unittest.TestCase):
+    def test_generate_model_rule_map(self):
+        model_a = common_definitions.Model(
+            id="1234",
+            name="tflite_m",
+            tags=[],
+            source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
+            source_url="https://example.com/xyz.tflite",
+            entry_function="main",
+            input_types=["1xf32"],
+        )
+        model_b = common_definitions.Model(
+            id="5678",
+            name="tf_m",
+            tags=[],
+            source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
+            source_url="https://example.com/xyz_mlir",
+            entry_function="predict",
+            input_types=["2xf32"],
+        )
+        root_path = pathlib.PurePath("model_root")
 
-  def test_generate_model_rule_map(self):
-    model_a = common_definitions.Model(
-        id="1234",
-        name="tflite_m",
-        tags=[],
-        source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
-        source_url="https://example.com/xyz.tflite",
-        entry_function="main",
-        input_types=["1xf32"])
-    model_b = common_definitions.Model(
-        id="5678",
-        name="tf_m",
-        tags=[],
-        source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
-        source_url="https://example.com/xyz_mlir",
-        entry_function="predict",
-        input_types=["2xf32"])
-    root_path = pathlib.PurePath("model_root")
+        rule_map = model_rule_generator.generate_model_rule_map(
+            root_path=root_path, models=[model_a, model_b]
+        )
 
-    rule_map = model_rule_generator.generate_model_rule_map(
-        root_path=root_path, models=[model_a, model_b])
-
-    self.assertEqual(list(rule_map.keys()), [model_a.id, model_b.id])
-    self.assertEqual(rule_map[model_a.id].target_name, f"model-{model_a.id}")
-    self.assertEqual(
-        rule_map[model_a.id].file_path,
-        model_artifacts.get_model_path(model=model_a, root_path=root_path))
-    self.assertEqual(rule_map[model_b.id].target_name, f"model-{model_b.id}")
-    self.assertEqual(
-        rule_map[model_b.id].file_path,
-        model_artifacts.get_model_path(model=model_b, root_path=root_path))
+        self.assertEqual(list(rule_map.keys()), [model_a.id, model_b.id])
+        self.assertEqual(rule_map[model_a.id].target_name, f"model-{model_a.id}")
+        self.assertEqual(
+            rule_map[model_a.id].file_path,
+            model_artifacts.get_model_path(model=model_a, root_path=root_path),
+        )
+        self.assertEqual(rule_map[model_b.id].target_name, f"model-{model_b.id}")
+        self.assertEqual(
+            rule_map[model_b.id].file_path,
+            model_artifacts.get_model_path(model=model_b, root_path=root_path),
+        )
 
 
 if __name__ == "__main__":
-  unittest.main()
+    unittest.main()
diff --git a/build_tools/python/e2e_test_artifacts/iree_artifacts.py b/build_tools/python/e2e_test_artifacts/iree_artifacts.py
index 939c5cd..2d3cdc5 100644
--- a/build_tools/python/e2e_test_artifacts/iree_artifacts.py
+++ b/build_tools/python/e2e_test_artifacts/iree_artifacts.py
@@ -17,64 +17,65 @@
 
 
 def _get_model_prefix(imported_model: iree_definitions.ImportedModel) -> str:
-  """Returns the model prefix for IREE artifacts. The common prefix helps group
-  artifacts from the same model together for easier navigation.
-  """
-  model = imported_model.model
-  # IREE model prefix: <iree_artifact_prefix>_<model_name>
-  return f"{IREE_ARTIFACT_PREFIX}_{model.name}"
+    """Returns the model prefix for IREE artifacts. The common prefix helps group
+    artifacts from the same model together for easier navigation.
+    """
+    model = imported_model.model
+    # IREE model prefix: <iree_artifact_prefix>_<model_name>
+    return f"{IREE_ARTIFACT_PREFIX}_{model.name}"
 
 
 def get_imported_model_path(
     imported_model: iree_definitions.ImportedModel,
-    root_path: pathlib.PurePath = pathlib.PurePath()
+    root_path: pathlib.PurePath = pathlib.PurePath(),
 ) -> pathlib.PurePath:
-  """Returns the path of an IREE imported MLIR model. If the source model is
-  in MLIR format, returns the path of the source model.
-  
-  Args:
-    imported_model: IREE model importing config.
-    root_path: path of the root artifact directory, on which the returned path
-      will be based.
-  Returns:
-    Path of the imported model file.
-  """
-  model = imported_model.model
-  if model.source_type in [
-      common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
-      common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
-  ]:
-    # Uses the MLIR model directly.
-    return model_artifacts.get_model_path(model=model, root_path=root_path)
+    """Returns the path of an IREE imported MLIR model. If the source model is
+    in MLIR format, returns the path of the source model.
 
-  model_prefix = _get_model_prefix(imported_model)
-  # Imported model path: <root_path>/<model_prefix>_<imported_model_id>.mlir
-  return (root_path / f"{model_prefix}_{imported_model.composite_id}.mlir")
+    Args:
+      imported_model: IREE model importing config.
+      root_path: path of the root artifact directory, on which the returned path
+        will be based.
+    Returns:
+      Path of the imported model file.
+    """
+    model = imported_model.model
+    if model.source_type in [
+        common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
+        common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
+    ]:
+        # Uses the MLIR model directly.
+        return model_artifacts.get_model_path(model=model, root_path=root_path)
+
+    model_prefix = _get_model_prefix(imported_model)
+    # Imported model path: <root_path>/<model_prefix>_<imported_model_id>.mlir
+    return root_path / f"{model_prefix}_{imported_model.composite_id}.mlir"
 
 
 def get_module_dir_path(
     module_generation_config: iree_definitions.ModuleGenerationConfig,
-    root_path: pathlib.PurePath = pathlib.PurePath()
+    root_path: pathlib.PurePath = pathlib.PurePath(),
 ) -> pathlib.PurePath:
-  """Returns the path of an IREE module directory, which contains the compiled
-  module and related flag files.
-  
-  Args:
-    module_generation_config: IREE module generation config.
-    root_path: path of the root artifact directory, on which the returned path
-      will be based.
-  Returns:
-    Path of the module directory.
-  """
-  model_prefix = _get_model_prefix(module_generation_config.imported_model)
-  # Module dir path: <root_path>/<model_prefix>_module_<gen_config_id>
-  return (root_path /
-          f"{model_prefix}_module_{module_generation_config.composite_id}")
+    """Returns the path of an IREE module directory, which contains the compiled
+    module and related flag files.
+
+    Args:
+      module_generation_config: IREE module generation config.
+      root_path: path of the root artifact directory, on which the returned path
+        will be based.
+    Returns:
+      Path of the module directory.
+    """
+    model_prefix = _get_model_prefix(module_generation_config.imported_model)
+    # Module dir path: <root_path>/<model_prefix>_module_<gen_config_id>
+    return root_path / f"{model_prefix}_module_{module_generation_config.composite_id}"
 
 
 def get_dependent_model_map(
-    module_generation_configs: Iterable[iree_definitions.ModuleGenerationConfig]
+    module_generation_configs: Iterable[iree_definitions.ModuleGenerationConfig],
 ) -> Dict[str, common_definitions.Model]:
-  """Returns an ordered map of the dependent models keyed by model id."""
-  return dict((config.imported_model.model.id, config.imported_model.model)
-              for config in module_generation_configs)
+    """Returns an ordered map of the dependent models keyed by model id."""
+    return dict(
+        (config.imported_model.model.id, config.imported_model.model)
+        for config in module_generation_configs
+    )
diff --git a/build_tools/python/e2e_test_artifacts/iree_artifacts_test.py b/build_tools/python/e2e_test_artifacts/iree_artifacts_test.py
index 0ac6ec5..cc254ac 100644
--- a/build_tools/python/e2e_test_artifacts/iree_artifacts_test.py
+++ b/build_tools/python/e2e_test_artifacts/iree_artifacts_test.py
@@ -12,126 +12,146 @@
 
 
 class IreeArtifactsTest(unittest.TestCase):
+    def test_get_imported_model_path(self):
+        model = common_definitions.Model(
+            id="1234",
+            name="tflite_m",
+            tags=[],
+            source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
+            source_url="https://example.com/xyz.tflite",
+            entry_function="main",
+            input_types=["1xf32"],
+        )
+        imported_model = iree_definitions.ImportedModel.from_model(model)
+        root_path = pathlib.PurePath("root")
 
-  def test_get_imported_model_path(self):
-    model = common_definitions.Model(
-        id="1234",
-        name="tflite_m",
-        tags=[],
-        source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
-        source_url="https://example.com/xyz.tflite",
-        entry_function="main",
-        input_types=["1xf32"])
-    imported_model = iree_definitions.ImportedModel.from_model(model)
-    root_path = pathlib.PurePath("root")
+        path = iree_artifacts.get_imported_model_path(
+            imported_model=imported_model, root_path=root_path
+        )
 
-    path = iree_artifacts.get_imported_model_path(imported_model=imported_model,
-                                                  root_path=root_path)
+        self.assertEqual(
+            path,
+            root_path / f"{iree_artifacts.IREE_ARTIFACT_PREFIX}_{model.name}_"
+            f"{imported_model.composite_id}.mlir",
+        )
 
-    self.assertEqual(
-        path, root_path / f"{iree_artifacts.IREE_ARTIFACT_PREFIX}_{model.name}_"
-        f"{imported_model.composite_id}.mlir")
+    def test_get_imported_model_path_with_mlir_model(self):
+        model = common_definitions.Model(
+            id="9012",
+            name="linalg_m",
+            tags=[],
+            source_type=common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
+            source_url="https://example.com/xyz.mlir",
+            entry_function="main",
+            input_types=["3xf32"],
+        )
+        imported_model = iree_definitions.ImportedModel.from_model(model)
+        root_path = pathlib.PurePath("root")
 
-  def test_get_imported_model_path_with_mlir_model(self):
-    model = common_definitions.Model(
-        id="9012",
-        name="linalg_m",
-        tags=[],
-        source_type=common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
-        source_url="https://example.com/xyz.mlir",
-        entry_function="main",
-        input_types=["3xf32"])
-    imported_model = iree_definitions.ImportedModel.from_model(model)
-    root_path = pathlib.PurePath("root")
+        path = iree_artifacts.get_imported_model_path(
+            imported_model=imported_model, root_path=root_path
+        )
 
-    path = iree_artifacts.get_imported_model_path(imported_model=imported_model,
-                                                  root_path=root_path)
+        self.assertEqual(
+            path, model_artifacts.get_model_path(model=model, root_path=root_path)
+        )
 
-    self.assertEqual(
-        path, model_artifacts.get_model_path(model=model, root_path=root_path))
+    def test_get_module_dir_path(self):
+        model = common_definitions.Model(
+            id="1234",
+            name="tflite_m",
+            tags=[],
+            source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
+            source_url="https://example.com/xyz.tflite",
+            entry_function="main",
+            input_types=["1xf32"],
+        )
+        imported_model = iree_definitions.ImportedModel.from_model(model)
+        compile_config = iree_definitions.CompileConfig.build(
+            id="config_a",
+            tags=["defaults"],
+            compile_targets=[
+                iree_definitions.CompileTarget(
+                    target_architecture=common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
+                    target_backend=iree_definitions.TargetBackend.LLVM_CPU,
+                    target_abi=iree_definitions.TargetABI.LINUX_GNU,
+                )
+            ],
+        )
+        gen_config = iree_definitions.ModuleGenerationConfig.build(
+            imported_model=imported_model, compile_config=compile_config
+        )
+        root_path = pathlib.PurePath("root")
 
-  def test_get_module_dir_path(self):
-    model = common_definitions.Model(
-        id="1234",
-        name="tflite_m",
-        tags=[],
-        source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
-        source_url="https://example.com/xyz.tflite",
-        entry_function="main",
-        input_types=["1xf32"])
-    imported_model = iree_definitions.ImportedModel.from_model(model)
-    compile_config = iree_definitions.CompileConfig.build(
-        id="config_a",
-        tags=["defaults"],
-        compile_targets=[
-            iree_definitions.CompileTarget(
-                target_architecture=common_definitions.DeviceArchitecture.
-                X86_64_CASCADELAKE,
-                target_backend=iree_definitions.TargetBackend.LLVM_CPU,
-                target_abi=iree_definitions.TargetABI.LINUX_GNU)
-        ])
-    gen_config = iree_definitions.ModuleGenerationConfig.build(
-        imported_model=imported_model, compile_config=compile_config)
-    root_path = pathlib.PurePath("root")
+        path = iree_artifacts.get_module_dir_path(
+            module_generation_config=gen_config, root_path=root_path
+        )
 
-    path = iree_artifacts.get_module_dir_path(
-        module_generation_config=gen_config, root_path=root_path)
+        self.assertEqual(
+            path,
+            root_path / f"{iree_artifacts.IREE_ARTIFACT_PREFIX}_{model.name}_"
+            f"module_{gen_config.composite_id}",
+        )
 
-    self.assertEqual(
-        path, root_path / f"{iree_artifacts.IREE_ARTIFACT_PREFIX}_{model.name}_"
-        f"module_{gen_config.composite_id}")
+    def test_get_dependent_model_map(self):
+        model_a = common_definitions.Model(
+            id="1234",
+            name="tflite_m",
+            tags=[],
+            source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
+            source_url="https://example.com/xyz.tflite",
+            entry_function="main",
+            input_types=["1xf32"],
+        )
+        model_b = common_definitions.Model(
+            id="9012",
+            name="linalg_m",
+            tags=[],
+            source_type=common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
+            source_url="https://example.com/xyz.mlir",
+            entry_function="main",
+            input_types=["3xf32"],
+        )
+        imported_model_a = iree_definitions.ImportedModel.from_model(model_a)
+        imported_model_b = iree_definitions.ImportedModel.from_model(model_b)
+        compile_config_a = iree_definitions.CompileConfig.build(
+            id="config_a",
+            tags=["defaults"],
+            compile_targets=[
+                iree_definitions.CompileTarget(
+                    target_architecture=common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
+                    target_backend=iree_definitions.TargetBackend.LLVM_CPU,
+                    target_abi=iree_definitions.TargetABI.LINUX_GNU,
+                )
+            ],
+        )
+        compile_config_b = iree_definitions.CompileConfig.build(
+            id="config_b",
+            tags=["defaults"],
+            compile_targets=[
+                iree_definitions.CompileTarget(
+                    target_architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
+                    target_backend=iree_definitions.TargetBackend.LLVM_CPU,
+                    target_abi=iree_definitions.TargetABI.LINUX_GNU,
+                )
+            ],
+        )
+        gen_config_a = iree_definitions.ModuleGenerationConfig.build(
+            imported_model=imported_model_a, compile_config=compile_config_a
+        )
+        gen_config_b = iree_definitions.ModuleGenerationConfig.build(
+            imported_model=imported_model_b, compile_config=compile_config_a
+        )
+        gen_config_c = iree_definitions.ModuleGenerationConfig.build(
+            imported_model=imported_model_b, compile_config=compile_config_b
+        )
 
-  def test_get_dependent_model_map(self):
-    model_a = common_definitions.Model(
-        id="1234",
-        name="tflite_m",
-        tags=[],
-        source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
-        source_url="https://example.com/xyz.tflite",
-        entry_function="main",
-        input_types=["1xf32"])
-    model_b = common_definitions.Model(
-        id="9012",
-        name="linalg_m",
-        tags=[],
-        source_type=common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
-        source_url="https://example.com/xyz.mlir",
-        entry_function="main",
-        input_types=["3xf32"])
-    imported_model_a = iree_definitions.ImportedModel.from_model(model_a)
-    imported_model_b = iree_definitions.ImportedModel.from_model(model_b)
-    compile_config_a = iree_definitions.CompileConfig.build(
-        id="config_a",
-        tags=["defaults"],
-        compile_targets=[
-            iree_definitions.CompileTarget(
-                target_architecture=common_definitions.DeviceArchitecture.
-                X86_64_CASCADELAKE,
-                target_backend=iree_definitions.TargetBackend.LLVM_CPU,
-                target_abi=iree_definitions.TargetABI.LINUX_GNU)
-        ])
-    compile_config_b = iree_definitions.CompileConfig.build(
-        id="config_b",
-        tags=["defaults"],
-        compile_targets=[
-            iree_definitions.CompileTarget(
-                target_architecture=common_definitions.DeviceArchitecture.
-                RV64_GENERIC,
-                target_backend=iree_definitions.TargetBackend.LLVM_CPU,
-                target_abi=iree_definitions.TargetABI.LINUX_GNU)
-        ])
-    gen_config_a = iree_definitions.ModuleGenerationConfig.build(
-        imported_model=imported_model_a, compile_config=compile_config_a)
-    gen_config_b = iree_definitions.ModuleGenerationConfig.build(
-        imported_model=imported_model_b, compile_config=compile_config_a)
-    gen_config_c = iree_definitions.ModuleGenerationConfig.build(
-        imported_model=imported_model_b, compile_config=compile_config_b)
+        models = iree_artifacts.get_dependent_model_map(
+            module_generation_configs=[gen_config_a, gen_config_b, gen_config_c]
+        )
 
-    models = iree_artifacts.get_dependent_model_map(
-        module_generation_configs=[gen_config_a, gen_config_b, gen_config_c])
-
-    self.assertEqual(models, {model_a.id: model_a, model_b.id: model_b})
+        self.assertEqual(models, {model_a.id: model_a, model_b.id: model_b})
 
 
 if __name__ == "__main__":
-  unittest.main()
+    unittest.main()
diff --git a/build_tools/python/e2e_test_artifacts/model_artifacts.py b/build_tools/python/e2e_test_artifacts/model_artifacts.py
index 6866461..cbbaa4e 100644
--- a/build_tools/python/e2e_test_artifacts/model_artifacts.py
+++ b/build_tools/python/e2e_test_artifacts/model_artifacts.py
@@ -16,25 +16,23 @@
 
 
 def get_model_path(
-    model: common_definitions.Model,
-    root_path: pathlib.PurePath = pathlib.PurePath()
+    model: common_definitions.Model, root_path: pathlib.PurePath = pathlib.PurePath()
 ) -> pathlib.PurePath:
-  """Returns the path of an model artifact file or directory.
-  
-  Args:
-    model: source model.
-    root_path: path of the root artifact directory, on which the returned path
-      will be based.
-  Returns:
-    Path of the model artifact.
-  """
-  model_url = urllib.parse.urlparse(model.source_url)
-  # Drop the archive extensions.
-  file_exts = pathlib.PurePath(model_url.path).suffixes
-  while len(file_exts) > 0 and file_exts[-1] in ARCHIVE_FILE_EXTENSIONS:
-    file_exts.pop()
-  model_ext = "".join(file_exts)
+    """Returns the path of an model artifact file or directory.
 
-  # Model path: <root_path>/<model_artifact_prefix>_<model_id>_<model_name><model_ext>
-  return (root_path /
-          f"{MODEL_ARTIFACT_PREFIX}_{model.id}_{model.name}{model_ext}")
+    Args:
+      model: source model.
+      root_path: path of the root artifact directory that the returned path is
+        based on.
+    Returns:
+      Path of the model artifact.
+    """
+    model_url = urllib.parse.urlparse(model.source_url)
+    # Drop the archive extensions.
+    file_exts = pathlib.PurePath(model_url.path).suffixes
+    while len(file_exts) > 0 and file_exts[-1] in ARCHIVE_FILE_EXTENSIONS:
+        file_exts.pop()
+    model_ext = "".join(file_exts)
+
+    # Model path: <root_path>/<model_artifact_prefix>_<model_id>_<model_name><model_ext>
+    return root_path / f"{MODEL_ARTIFACT_PREFIX}_{model.id}_{model.name}{model_ext}"
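
For context, here is a standalone sketch (not part of this diff) of the suffix-dropping logic in `get_model_path` above. The values of `ARCHIVE_FILE_EXTENSIONS` and `MODEL_ARTIFACT_PREFIX` are placeholders assumed for the example; the real constants are defined elsewhere in `model_artifacts.py` and are not shown in this hunk.

```python
# Standalone sketch of the archive-extension stripping in get_model_path.
import pathlib
import urllib.parse

ARCHIVE_FILE_EXTENSIONS = [".tar", ".gz", ".tgz", ".zip"]  # assumed for the example
MODEL_ARTIFACT_PREFIX = "model"  # assumed for the example


def sketch_model_path(
    model_id: str,
    model_name: str,
    source_url: str,
    root_path: pathlib.PurePath = pathlib.PurePath(),
) -> pathlib.PurePath:
    # Drop archive extensions, keep the payload extension (e.g. ".tflite").
    file_exts = pathlib.PurePath(urllib.parse.urlparse(source_url).path).suffixes
    while file_exts and file_exts[-1] in ARCHIVE_FILE_EXTENSIONS:
        file_exts.pop()
    model_ext = "".join(file_exts)
    return root_path / f"{MODEL_ARTIFACT_PREFIX}_{model_id}_{model_name}{model_ext}"


print(sketch_model_path("1234", "tflite_m", "https://example.com/xyz.tflite.tar.gz"))
# -> model_1234_tflite_m.tflite (with the assumed constants above)
```
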
diff --git a/build_tools/python/e2e_test_artifacts/model_artifacts_test.py b/build_tools/python/e2e_test_artifacts/model_artifacts_test.py
index 796ed33..3103233 100644
--- a/build_tools/python/e2e_test_artifacts/model_artifacts_test.py
+++ b/build_tools/python/e2e_test_artifacts/model_artifacts_test.py
@@ -12,44 +12,46 @@
 
 
 class ModelArtifactsTest(unittest.TestCase):
+    def test_get_model_path_with_tflite_model(self):
+        tflite_model = common_definitions.Model(
+            id="1234",
+            name="tflite_m",
+            tags=[],
+            source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
+            source_url="https://example.com/xyz.tflite",
+            entry_function="main",
+            input_types=["1xf32"],
+        )
+        root_path = pathlib.PurePath("root")
 
-  def test_get_model_path_with_tflite_model(self):
-    tflite_model = common_definitions.Model(
-        id="1234",
-        name="tflite_m",
-        tags=[],
-        source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
-        source_url="https://example.com/xyz.tflite",
-        entry_function="main",
-        input_types=["1xf32"])
-    root_path = pathlib.PurePath("root")
+        path = model_artifacts.get_model_path(model=tflite_model, root_path=root_path)
 
-    path = model_artifacts.get_model_path(model=tflite_model,
-                                          root_path=root_path)
+        self.assertEqual(
+            path,
+            root_path
+            / f"{model_artifacts.MODEL_ARTIFACT_PREFIX}_{tflite_model.id}_{tflite_model.name}.tflite",
+        )
 
-    self.assertEqual(
-        path, root_path /
-        f"{model_artifacts.MODEL_ARTIFACT_PREFIX}_{tflite_model.id}_{tflite_model.name}.tflite"
-    )
+    def test_get_model_path_with_tf_model(self):
+        tf_model = common_definitions.Model(
+            id="5678",
+            name="tf_m",
+            tags=[],
+            source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
+            source_url="https://example.com/xyz_mlir",
+            entry_function="predict",
+            input_types=["2xf32"],
+        )
+        root_path = pathlib.PurePath("root")
 
-  def test_get_model_path_with_tf_model(self):
-    tf_model = common_definitions.Model(
-        id="5678",
-        name="tf_m",
-        tags=[],
-        source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
-        source_url="https://example.com/xyz_mlir",
-        entry_function="predict",
-        input_types=["2xf32"])
-    root_path = pathlib.PurePath("root")
+        path = model_artifacts.get_model_path(model=tf_model, root_path=root_path)
 
-    path = model_artifacts.get_model_path(model=tf_model, root_path=root_path)
-
-    self.assertEqual(
-        path, root_path /
-        f"{model_artifacts.MODEL_ARTIFACT_PREFIX}_{tf_model.id}_{tf_model.name}"
-    )
+        self.assertEqual(
+            path,
+            root_path
+            / f"{model_artifacts.MODEL_ARTIFACT_PREFIX}_{tf_model.id}_{tf_model.name}",
+        )
 
 
 if __name__ == "__main__":
-  unittest.main()
+    unittest.main()
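
The hunks above show both of Black's call layouts: `get_model_path(...)` is joined onto one line because it fits within 88 columns, while the `Model(...)` constructors are exploded to one argument per line with a trailing comma. The toy snippet below (illustrative only, with a dummy function `f`) shows the same rule, including the "magic trailing comma" behavior.

```python
# Illustrative only (not part of this diff): how Black decides between the two
# call layouts seen above.
def f(a, b=None, c=None, d=None):
    return (a, b, c, d)


# Fits within 88 columns -> Black joins it onto a single line.
short = f(1, b=2)

# Does not fit (or already ends with a trailing comma) -> one argument per line.
# Black appends/keeps the trailing comma ("magic trailing comma"), which also keeps
# the call exploded in future reformats even if it would fit again.
long = f(
    "a rather long positional argument string",
    b="another fairly long keyword argument value",
    c="and one more to push the call past the 88-column default",
    d=None,
)
print(short, long)
```
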
diff --git a/build_tools/python/e2e_test_framework/definitions/common_definitions.py b/build_tools/python/e2e_test_framework/definitions/common_definitions.py
index 87d1009..f74aef2 100644
--- a/build_tools/python/e2e_test_framework/definitions/common_definitions.py
+++ b/build_tools/python/e2e_test_framework/definitions/common_definitions.py
@@ -13,203 +13,217 @@
 
 
 class ArchitectureType(Enum):
-  """Type of architecture."""
-  CPU = "cpu"
-  GPU = "gpu"
+    """Type of architecture."""
+
+    CPU = "cpu"
+    GPU = "gpu"
 
 
 @dataclass(frozen=True)
 class _ArchitectureInfo(object):
-  """Architecture information."""
-  type: ArchitectureType
-  architecture: str
-  microarchitecture: str = ""
-  vendor: str = ""
+    """Architecture information."""
+
+    type: ArchitectureType
+    architecture: str
+    microarchitecture: str = ""
+    vendor: str = ""
 
 
 class DeviceArchitecture(_ArchitectureInfo, Enum):
-  """Predefined architecture/microarchitecture."""
+    """Predefined architecture/microarchitecture."""
 
-  # VMVX virtual machine
-  VMVX_GENERIC = (ArchitectureType.CPU, "vmvx", "generic")
+    # VMVX virtual machine
+    VMVX_GENERIC = (ArchitectureType.CPU, "vmvx", "generic")
 
-  # x86_64 CPUs
-  X86_64_CASCADELAKE = (ArchitectureType.CPU, "x86_64", "cascadelake")
+    # x86_64 CPUs
+    X86_64_CASCADELAKE = (ArchitectureType.CPU, "x86_64", "cascadelake")
 
-  # ARM CPUs
-  ARMV8_2_A_GENERIC = (ArchitectureType.CPU, "armv8.2-a", "generic")
-  ARMV9_A_GENERIC = (ArchitectureType.CPU, "armv9-a", "generic")
+    # ARM CPUs
+    ARMV8_2_A_GENERIC = (ArchitectureType.CPU, "armv8.2-a", "generic")
+    ARMV9_A_GENERIC = (ArchitectureType.CPU, "armv9-a", "generic")
 
-  # RISC-V CPUs
-  RV64_GENERIC = (ArchitectureType.CPU, "riscv_64", "generic")
-  RV32_GENERIC = (ArchitectureType.CPU, "riscv_32", "generic")
+    # RISC-V CPUs
+    RV64_GENERIC = (ArchitectureType.CPU, "riscv_64", "generic")
+    RV32_GENERIC = (ArchitectureType.CPU, "riscv_32", "generic")
 
-  # Vulkan GPUs
-  QUALCOMM_ADRENO = (ArchitectureType.GPU, "adreno", "", "qualcomm")
-  ARM_VALHALL = (ArchitectureType.GPU, "valhall", "", "arm")
-  NVIDIA_AMPERE = (ArchitectureType.GPU, "ampere", "", "nvidia")
-  NVIDIA_PASCAL = (ArchitectureType.GPU, "pascal", "", "nvidia")
+    # Vulkan GPUs
+    QUALCOMM_ADRENO = (ArchitectureType.GPU, "adreno", "", "qualcomm")
+    ARM_VALHALL = (ArchitectureType.GPU, "valhall", "", "arm")
+    NVIDIA_AMPERE = (ArchitectureType.GPU, "ampere", "", "nvidia")
+    NVIDIA_PASCAL = (ArchitectureType.GPU, "pascal", "", "nvidia")
 
-  # CUDA GPUs
-  CUDA_SM70 = (ArchitectureType.GPU, "cuda", "sm_70")
-  CUDA_SM80 = (ArchitectureType.GPU, "cuda", "sm_80")
+    # CUDA GPUs
+    CUDA_SM70 = (ArchitectureType.GPU, "cuda", "sm_70")
+    CUDA_SM80 = (ArchitectureType.GPU, "cuda", "sm_80")
 
-  # Starting from 3.11, enum members are defined before the subclasses (don't
-  # follow MRO, see https://docs.python.org/3/whatsnew/3.11.html#enum).
-  # Therefore __str__ is defined here instead of in _ArchitectureInfo to
-  # override the default one.
-  def __str__(self):
-    parts = [self.vendor, self.architecture, self.microarchitecture]
-    return "-".join(part for part in parts if part != "")
+    # Starting from 3.11, enum members are defined before the subclasses (don't
+    # follow MRO, see https://docs.python.org/3/whatsnew/3.11.html#enum).
+    # Therefore __str__ is defined here instead of in _ArchitectureInfo to
+    # override the default one.
+    def __str__(self):
+        parts = [self.vendor, self.architecture, self.microarchitecture]
+        return "-".join(part for part in parts if part != "")
 
 
 @dataclass(frozen=True)
 class _HostEnvironmentInfo(object):
-  """Environment information of a host.
+    """Environment information of a host.
 
-  The definitions and terms here matches the macros in
-  `runtime/src/iree/base/target_platform.h`.
+    The definitions and terms here match the macros in

+    `runtime/src/iree/base/target_platform.h`.
 
-  Note that this is the environment where the runtime "runs". For example:
-  ```
-  {
-    "platform": "linux",
-    "architecture": "x86_64"
-  }
-  ```
-  means the runtime will run on a Linux x86_64 host. The runtime might dispatch
-  the workloads on GPU or it can be a VM to run workloads compiled in another
-  ISA, but those are irrelevant to the information here.
-  """
-  platform: str
-  architecture: str
+    Note that this is the environment where the runtime "runs". For example:
+    ```
+    {
+      "platform": "linux",
+      "architecture": "x86_64"
+    }
+    ```
+    means the runtime will run on a Linux x86_64 host. The runtime might dispatch
+    the workloads on GPU or it can be a VM to run workloads compiled in another
+    ISA, but those are irrelevant to the information here.
+    """
+
+    platform: str
+    architecture: str
 
 
 class HostEnvironment(_HostEnvironmentInfo, Enum):
-  """Predefined host environment."""
+    """Predefined host environment."""
 
-  LINUX_X86_64 = ("linux", "x86_64")
-  ANDROID_ARMV8_2_A = ("android", "armv8.2-a")
+    LINUX_X86_64 = ("linux", "x86_64")
+    ANDROID_ARMV8_2_A = ("android", "armv8.2-a")
 
 
 class ModelSourceType(Enum):
-  """Type of model source."""
-  # Exported Linalg MLIR file.
-  EXPORTED_LINALG_MLIR = "exported_linalg_mlir"
-  # Exported Stable HLO file.
-  EXPORTED_STABLEHLO_MLIR = "exported_stablehlo_mlir"
-  # Exported TFLite model file.
-  EXPORTED_TFLITE = "exported_tflite"
+    """Type of model source."""
+
+    # Exported Linalg MLIR file.
+    EXPORTED_LINALG_MLIR = "exported_linalg_mlir"
+    # Exported Stable HLO file.
+    EXPORTED_STABLEHLO_MLIR = "exported_stablehlo_mlir"
+    # Exported TFLite model file.
+    EXPORTED_TFLITE = "exported_tflite"
 
 
 class InputDataFormat(Enum):
-  """Model input data format."""
-  ZEROS = "zeros"
-  NUMPY_NPY = "numpy_npy"
+    """Model input data format."""
+
+    ZEROS = "zeros"
+    NUMPY_NPY = "numpy_npy"
 
 
 @serialization.serializable(type_key="device_specs")
 @dataclass(frozen=True)
 class DeviceSpec(object):
-  """Benchmark device specification."""
-  id: str
+    """Benchmark device specification."""
 
-  # Unique name of the device spec.
-  name: str
+    id: str
 
-  # Device name. E.g., Pixel-6.
-  device_name: str
+    # Unique name of the device spec.
+    name: str
 
-  # Tags to describe the device spec.
-  tags: List[str]
+    # Device name. E.g., Pixel-6.
+    device_name: str
 
-  # Host environment where the IREE runtime is running. For CPU device type,
-  # this is usually the same as the device that workloads are dispatched to.
-  # With a separate device, such as a GPU, however, the runtime and dispatched
-  # workloads will run on different platforms.
-  host_environment: HostEnvironment
+    # Tags to describe the device spec.
+    tags: List[str]
 
-  # Architecture of the target device.
-  architecture: DeviceArchitecture
+    # Host environment where the IREE runtime is running. For CPU device type,
+    # this is usually the same as the device that workloads are dispatched to.
+    # With a separate device, such as a GPU, however, the runtime and dispatched
+    # workloads will run on different platforms.
+    host_environment: HostEnvironment
 
-  # Device-specific parameters. E.g., 2-big-cores, 4-little-cores.
-  # This is for modeling the spec of a heterogeneous processor. Depending on
-  # which cores you run, the device has a different spec. Benchmark machines use
-  # these parameters to set up the devices. E.g. set CPU mask.
-  device_parameters: List[str] = dataclasses.field(default_factory=list)
+    # Architecture of the target device.
+    architecture: DeviceArchitecture
 
-  def __str__(self):
-    return self.name
+    # Device-specific parameters. E.g., 2-big-cores, 4-little-cores.
+    # This is for modeling the spec of a heterogeneous processor. Depending on
+    # which cores you run, the device has a different spec. Benchmark machines use
+    # these parameters to set up the devices. E.g. set CPU mask.
+    device_parameters: List[str] = dataclasses.field(default_factory=list)
 
-  @classmethod
-  def build(cls,
-            id: str,
-            device_name: str,
-            tags: Sequence[str],
-            host_environment: HostEnvironment,
-            architecture: DeviceArchitecture,
-            device_parameters: Optional[Sequence[str]] = None):
-    tag_part = ",".join(tags)
-    # Format: <device_name>[<tag>,...]
-    name = f"{device_name}[{tag_part}]"
-    device_parameters = device_parameters or []
-    return cls(id=id,
-               name=name,
-               tags=list(tags),
-               device_name=device_name,
-               host_environment=host_environment,
-               architecture=architecture,
-               device_parameters=list(device_parameters))
+    def __str__(self):
+        return self.name
+
+    @classmethod
+    def build(
+        cls,
+        id: str,
+        device_name: str,
+        tags: Sequence[str],
+        host_environment: HostEnvironment,
+        architecture: DeviceArchitecture,
+        device_parameters: Optional[Sequence[str]] = None,
+    ):
+        tag_part = ",".join(tags)
+        # Format: <device_name>[<tag>,...]
+        name = f"{device_name}[{tag_part}]"
+        device_parameters = device_parameters or []
+        return cls(
+            id=id,
+            name=name,
+            tags=list(tags),
+            device_name=device_name,
+            host_environment=host_environment,
+            architecture=architecture,
+            device_parameters=list(device_parameters),
+        )
 
 
 @serialization.serializable(type_key="models")
 @dataclass(frozen=True)
 class Model(object):
-  """Model to be benchmarked."""
-  id: str
-  # Friendly unique name.
-  name: str
-  # Tags that describe the model characteristics.
-  tags: List[str]
-  source_type: ModelSourceType
-  source_url: str
-  entry_function: str
-  # Input types. E.g., ["100x100xf32", "200x200x5xf32"].
-  input_types: List[str]
+    """Model to be benchmarked."""
 
-  def __str__(self):
-    return self.name
+    id: str
+    # Friendly unique name.
+    name: str
+    # Tags that describe the model characteristics.
+    tags: List[str]
+    source_type: ModelSourceType
+    source_url: str
+    entry_function: str
+    # Input types. E.g., ["100x100xf32", "200x200x5xf32"].
+    input_types: List[str]
+
+    def __str__(self):
+        return self.name
 
 
 @serialization.serializable(type_key="model_input_data")
 @dataclass(frozen=True)
 class ModelInputData(object):
-  """Input data to benchmark the model."""
-  id: str
-  # Associated model.
-  model_id: str
-  # Friendly name.
-  name: str
-  # Tags that describe the data characteristics.
-  tags: List[str]
-  data_format: InputDataFormat
-  source_url: str
+    """Input data to benchmark the model."""
 
-  def __str__(self):
-    return self.name
+    id: str
+    # Associated model.
+    model_id: str
+    # Friendly name.
+    name: str
+    # Tags that describe the data characteristics.
+    tags: List[str]
+    data_format: InputDataFormat
+    source_url: str
+
+    def __str__(self):
+        return self.name
 
 
 # All-zeros dummy input data. Runners will generate the zeros input with proper
 # shapes.
-ZEROS_MODEL_INPUT_DATA = ModelInputData(id=unique_ids.MODEL_INPUT_DATA_ZEROS,
-                                        model_id="",
-                                        name="zeros",
-                                        tags=[],
-                                        data_format=InputDataFormat.ZEROS,
-                                        source_url="")
+ZEROS_MODEL_INPUT_DATA = ModelInputData(
+    id=unique_ids.MODEL_INPUT_DATA_ZEROS,
+    model_id="",
+    name="zeros",
+    tags=[],
+    data_format=InputDataFormat.ZEROS,
+    source_url="",
+)
 
 
 @dataclass(frozen=True)
 class CpuBenchmarkConfig(object):
-  model: Model
-  threads: List[int]
+    model: Model
+    threads: List[int]
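
A quick standalone sketch (not part of this diff) of the frozen-dataclass + `Enum` pattern used by `DeviceArchitecture` above and of its `__str__` composition; the names `_Info` and `Arch` are made up for the example.

```python
# Standalone sketch of the dataclass-mixin Enum pattern and its name composition.
from dataclasses import dataclass
from enum import Enum


@dataclass(frozen=True)
class _Info:
    architecture: str
    microarchitecture: str = ""
    vendor: str = ""


class Arch(_Info, Enum):
    X86_64_CASCADELAKE = ("x86_64", "cascadelake")
    NVIDIA_AMPERE = ("ampere", "", "nvidia")

    def __str__(self):
        parts = [self.vendor, self.architecture, self.microarchitecture]
        return "-".join(part for part in parts if part != "")


print(Arch.X86_64_CASCADELAKE)  # x86_64-cascadelake
print(Arch.NVIDIA_AMPERE)  # nvidia-ampere
```
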
diff --git a/build_tools/python/e2e_test_framework/definitions/iree_definitions.py b/build_tools/python/e2e_test_framework/definitions/iree_definitions.py
index a10ffa4..5230164 100644
--- a/build_tools/python/e2e_test_framework/definitions/iree_definitions.py
+++ b/build_tools/python/e2e_test_framework/definitions/iree_definitions.py
@@ -16,211 +16,229 @@
 
 
 class TargetBackend(Enum):
-  """IREE target backend."""
-  LLVM_CPU = "llvm-cpu"
-  CUDA = "cuda"
-  ROCM = "rocm"
-  VMVX = "vmvx"
-  METAL_SPIRV = "metal-spirv"
-  VULKAN_SPIRV = "vulkan-spirv"
+    """IREE target backend."""
+
+    LLVM_CPU = "llvm-cpu"
+    CUDA = "cuda"
+    ROCM = "rocm"
+    VMVX = "vmvx"
+    METAL_SPIRV = "metal-spirv"
+    VULKAN_SPIRV = "vulkan-spirv"
 
 
 class TargetABI(Enum):
-  VMVX = "vmvx"
-  LINUX_GNU = "linux-gnu"
-  LINUX_ANDROID29 = "linux-android29"
-  # IREE defined OS name for vulkan target. See:
-  # compiler/src/iree/compiler/Dialect/Vulkan/IR/VulkanBase.td
-  VULKAN_ANDROID30 = "android30"
-  VULKAN_ANDROID31 = "android31"
-  VULKAN_LINUX = "linux"
+    VMVX = "vmvx"
+    LINUX_GNU = "linux-gnu"
+    LINUX_ANDROID29 = "linux-android29"
+    # IREE defined OS name for vulkan target. See:
+    # compiler/src/iree/compiler/Dialect/Vulkan/IR/VulkanBase.td
+    VULKAN_ANDROID30 = "android30"
+    VULKAN_ANDROID31 = "android31"
+    VULKAN_LINUX = "linux"
 
 
 class RuntimeLoader(Enum):
-  """IREE runtime loader."""
-  # For target that doesn't support loader configuration.
-  NONE = "none"
-  EMBEDDED_ELF = "embedded-elf"
-  VMVX_MODULE = "vmvx-module"
-  SYSTEM_LIBRARY = "system-library"
+    """IREE runtime loader."""
+
+    # For target that doesn't support loader configuration.
+    NONE = "none"
+    EMBEDDED_ELF = "embedded-elf"
+    VMVX_MODULE = "vmvx-module"
+    SYSTEM_LIBRARY = "system-library"
 
 
 class RuntimeDriver(Enum):
-  """IREE runtime driver."""
-  LOCAL_SYNC = "local-sync"
-  LOCAL_TASK = "local-task"
-  CUDA = "cuda"
-  VULKAN = "vulkan"
+    """IREE runtime driver."""
+
+    LOCAL_SYNC = "local-sync"
+    LOCAL_TASK = "local-task"
+    CUDA = "cuda"
+    VULKAN = "vulkan"
 
 
 @serialization.serializable
 @dataclass(frozen=True)
 class CompileTarget(object):
-  """Describes a target device to build for."""
-  target_backend: TargetBackend
-  target_architecture: common_definitions.DeviceArchitecture
-  target_abi: TargetABI
+    """Describes a target device to build for."""
 
-  def __str__(self):
-    return (f"{self.target_architecture}-"
+    target_backend: TargetBackend
+    target_architecture: common_definitions.DeviceArchitecture
+    target_abi: TargetABI
+
+    def __str__(self):
+        return (
+            f"{self.target_architecture}-"
             f"{self.target_abi.name}-"
-            f"{self.target_backend.name}").lower()
+            f"{self.target_backend.name}"
+        ).lower()
 
 
 @serialization.serializable(type_key="iree_compile_configs")
 @dataclass(frozen=True)
 class CompileConfig(object):
-  """Describes the options to build a module."""
-  id: str
-  name: str
-  tags: List[str]
-  compile_targets: List[CompileTarget]
-  extra_flags: List[str] = dataclasses.field(default_factory=list)
+    """Describes the options to build a module."""
 
-  def __str__(self):
-    return self.name
+    id: str
+    name: str
+    tags: List[str]
+    compile_targets: List[CompileTarget]
+    extra_flags: List[str] = dataclasses.field(default_factory=list)
 
-  @classmethod
-  def build(cls,
-            id: str,
-            tags: Sequence[str],
-            compile_targets: Sequence[CompileTarget],
-            extra_flags: Optional[Sequence[str]] = None):
-    target_part = ",".join(str(target) for target in compile_targets)
-    tag_part = ",".join(tags)
-    # Format: [<target_name>,...][<tag>,...]
-    name = f"[{target_part}][{tag_part}]"
-    extra_flags = extra_flags or []
-    return cls(id=id,
-               name=name,
-               tags=list(tags),
-               compile_targets=list(compile_targets),
-               extra_flags=list(extra_flags))
+    def __str__(self):
+        return self.name
+
+    @classmethod
+    def build(
+        cls,
+        id: str,
+        tags: Sequence[str],
+        compile_targets: Sequence[CompileTarget],
+        extra_flags: Optional[Sequence[str]] = None,
+    ):
+        target_part = ",".join(str(target) for target in compile_targets)
+        tag_part = ",".join(tags)
+        # Format: [<target_name>,...][<tag>,...]
+        name = f"[{target_part}][{tag_part}]"
+        extra_flags = extra_flags or []
+        return cls(
+            id=id,
+            name=name,
+            tags=list(tags),
+            compile_targets=list(compile_targets),
+            extra_flags=list(extra_flags),
+        )
 
 
 @serialization.serializable(type_key="iree_module_execution_configs")
 @dataclass(frozen=True)
 class ModuleExecutionConfig(object):
-  """Describes the options to run a module."""
-  id: str
-  name: str
-  tags: List[str]
-  loader: RuntimeLoader
-  driver: RuntimeDriver
-  extra_flags: List[str] = dataclasses.field(default_factory=list)
+    """Describes the options to run a module."""
 
-  def __str__(self):
-    return self.name
+    id: str
+    name: str
+    tags: List[str]
+    loader: RuntimeLoader
+    driver: RuntimeDriver
+    extra_flags: List[str] = dataclasses.field(default_factory=list)
 
-  @classmethod
-  def build(cls,
-            id: str,
-            tags: Sequence[str],
-            loader: RuntimeLoader,
-            driver: RuntimeDriver,
-            extra_flags: Optional[Sequence[str]] = None):
-    runtime_part = f"{driver.name}({loader.name})".lower()
-    tag_part = ",".join(tags)
-    # Format: <driver>(<loader>)[<tag>,...]
-    name = f"{runtime_part}[{tag_part}]"
-    extra_flags = extra_flags or []
-    return cls(id=id,
-               name=name,
-               tags=list(tags),
-               loader=loader,
-               driver=driver,
-               extra_flags=list(extra_flags))
+    def __str__(self):
+        return self.name
+
+    @classmethod
+    def build(
+        cls,
+        id: str,
+        tags: Sequence[str],
+        loader: RuntimeLoader,
+        driver: RuntimeDriver,
+        extra_flags: Optional[Sequence[str]] = None,
+    ):
+        runtime_part = f"{driver.name}({loader.name})".lower()
+        tag_part = ",".join(tags)
+        # Format: <driver>(<loader>)[<tag>,...]
+        name = f"{runtime_part}[{tag_part}]"
+        extra_flags = extra_flags or []
+        return cls(
+            id=id,
+            name=name,
+            tags=list(tags),
+            loader=loader,
+            driver=driver,
+            extra_flags=list(extra_flags),
+        )
 
 
 class ImportTool(Enum):
-  """Iree model import tool."""
-  NONE = "none"
-  TF_IMPORTER = "iree-import-tf"
-  TFLITE_IMPORTER = "iree-import-tflite"
+    """Iree model import tool."""
+
+    NONE = "none"
+    TF_IMPORTER = "iree-import-tf"
+    TFLITE_IMPORTER = "iree-import-tflite"
 
 
 # Value should be the name of an IREE supported input type (--iree-input-type).
 class MLIRDialectType(Enum):
-  """Imported MLIR dialect type."""
-  NONE = "none"
-  TOSA = "tosa"
-  STABLEHLO = "stablehlo"
+    """Imported MLIR dialect type."""
+
+    NONE = "none"
+    TOSA = "tosa"
+    STABLEHLO = "stablehlo"
 
 
 @serialization.serializable(type_key="iree_import_configs")
 @dataclass(frozen=True)
 class ImportConfig(object):
-  """Config to import the model."""
-  id: str
-  name: str
-  tool: ImportTool
-  dialect_type: MLIRDialectType
-  import_flags: List[str] = dataclasses.field(default_factory=list)
+    """Config to import the model."""
 
-  def __str__(self):
-    return self.name
+    id: str
+    name: str
+    tool: ImportTool
+    dialect_type: MLIRDialectType
+    import_flags: List[str] = dataclasses.field(default_factory=list)
 
-  def materialize_import_flags(self,
-                               model: common_definitions.Model) -> List[str]:
-    """Materialize flags with dependent values."""
-    return utils.substitute_flag_vars(flags=self.import_flags,
-                                      ENTRY_FUNCTION=model.entry_function)
+    def __str__(self):
+        return self.name
+
+    def materialize_import_flags(self, model: common_definitions.Model) -> List[str]:
+        """Materialize flags with dependent values."""
+        return utils.substitute_flag_vars(
+            flags=self.import_flags, ENTRY_FUNCTION=model.entry_function
+        )
 
 
 DEFAULT_TFLITE_IMPORT_CONFIG = ImportConfig(
     id=unique_ids.IREE_MODEL_IMPORT_TFLITE_DEFAULT,
     name="tflite",
     tool=ImportTool.TFLITE_IMPORTER,
-    dialect_type=MLIRDialectType.TOSA)
+    dialect_type=MLIRDialectType.TOSA,
+)
 
 DEFAULT_LINALG_MLIR_IMPORT_CONFIG = ImportConfig(
     id=unique_ids.IREE_MODEL_IMPORT_LINALG_MLIR_DEFAULT,
     name="linalg",
     tool=ImportTool.NONE,
-    dialect_type=MLIRDialectType.NONE)
+    dialect_type=MLIRDialectType.NONE,
+)
 
 DEFAULT_STABLEHLO_MLIR_IMPORT_CONFIG = ImportConfig(
     id=unique_ids.IREE_MODEL_IMPORT_STABLEHLO_MLIR_DEFAULT,
     name="stablehlo",
     tool=ImportTool.NONE,
-    dialect_type=MLIRDialectType.STABLEHLO)
+    dialect_type=MLIRDialectType.STABLEHLO,
+)
 
 MODEL_SOURCE_TO_DEFAULT_IMPORT_CONFIG_MAP = {
-    common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR:
-        DEFAULT_LINALG_MLIR_IMPORT_CONFIG,
-    common_definitions.ModelSourceType.EXPORTED_TFLITE:
-        DEFAULT_TFLITE_IMPORT_CONFIG,
-    common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR:
-        DEFAULT_STABLEHLO_MLIR_IMPORT_CONFIG,
+    common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR: DEFAULT_LINALG_MLIR_IMPORT_CONFIG,
+    common_definitions.ModelSourceType.EXPORTED_TFLITE: DEFAULT_TFLITE_IMPORT_CONFIG,
+    common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR: DEFAULT_STABLEHLO_MLIR_IMPORT_CONFIG,
 }
 
 
-@serialization.serializable(type_key="iree_imported_models",
-                            id_field="composite_id")
+@serialization.serializable(type_key="iree_imported_models", id_field="composite_id")
 @dataclass(frozen=True)
 class ImportedModel(object):
-  """Describes an imported MLIR model."""
-  composite_id: str
-  name: str
-  model: common_definitions.Model
-  import_config: ImportConfig
+    """Describes an imported MLIR model."""
 
-  def __str__(self):
-    return self.name
+    composite_id: str
+    name: str
+    model: common_definitions.Model
+    import_config: ImportConfig
 
-  @classmethod
-  def from_model(cls, model: common_definitions.Model):
-    config = MODEL_SOURCE_TO_DEFAULT_IMPORT_CONFIG_MAP.get(model.source_type)
-    if config is None:
-      raise ValueError(f"Unsupported model source type: {model.source_type}.")
+    def __str__(self):
+        return self.name
 
-    composite_id = unique_ids.hash_composite_id([model.id, config.id])
-    # Format: <model_name>(<import_config_name>)
-    name = f"{model}({config})"
-    return cls(composite_id=composite_id,
-               name=name,
-               model=model,
-               import_config=config)
+    @classmethod
+    def from_model(cls, model: common_definitions.Model):
+        config = MODEL_SOURCE_TO_DEFAULT_IMPORT_CONFIG_MAP.get(model.source_type)
+        if config is None:
+            raise ValueError(f"Unsupported model source type: {model.source_type}.")
+
+        composite_id = unique_ids.hash_composite_id([model.id, config.id])
+        # Format: <model_name>(<import_config_name>)
+        name = f"{model}({config})"
+        return cls(
+            composite_id=composite_id, name=name, model=model, import_config=config
+        )
 
 
 # Variable in flags to be replaced with module dir path. The whole path should
@@ -228,221 +246,252 @@
 MODULE_DIR_VARIABLE = r"${MODULE_DIR}"
 
 
-@serialization.serializable(type_key="iree_module_generation_configs",
-                            id_field="composite_id")
+@serialization.serializable(
+    type_key="iree_module_generation_configs", id_field="composite_id"
+)
 @dataclass(frozen=True)
 class ModuleGenerationConfig(object):
-  """Describes a compile target to generate the module."""
-  composite_id: str
-  name: str
-  tags: List[str]
-  imported_model: ImportedModel
-  compile_config: CompileConfig
-  # Full list of flags to compile with, derived from sub-components, with
-  # unmaterialized placeholders. Allows the compile flags to be persisted and
-  # decouple from the generation code. Also serves as useful information in the
-  # serialized JSON.
-  compile_flags: List[str]
+    """Describes a compile target to generate the module."""
 
-  def __str__(self):
-    return self.name
+    composite_id: str
+    name: str
+    tags: List[str]
+    imported_model: ImportedModel
+    compile_config: CompileConfig
+    # Full list of flags to compile with, derived from sub-components, with
+    # unmaterialized placeholders. Allows the compile flags to be persisted and
+    # decouple from the generation code. Also serves as useful information in the
+    # serialized JSON.
+    compile_flags: List[str]
 
-  def materialize_compile_flags(self, module_dir_path: pathlib.PurePath):
-    """Materialize flags with dependent values."""
+    def __str__(self):
+        return self.name
 
-    def _replace_module_dir_placeholder(value: str) -> str:
-      """Replaces ${MODULE_DIR} in a POSIX path and returns the
-      platform-dependent path string.
-      """
-      parts = pathlib.PurePosixPath(value).parts
-      if MODULE_DIR_VARIABLE not in parts:
-        return value
-      if parts[0] != MODULE_DIR_VARIABLE:
-        raise ValueError(
-            f"'{MODULE_DIR_VARIABLE}' needs to be the head of flag value"
-            f" if present, but got '{value}'.")
-      # Properly construct the platform-dependent path.
-      return str(module_dir_path.joinpath(*parts[1:]))
+    def materialize_compile_flags(self, module_dir_path: pathlib.PurePath):
+        """Materialize flags with dependent values."""
 
-    return utils.transform_flags(flags=self.compile_flags,
-                                 map_funcs=[_replace_module_dir_placeholder])
+        def _replace_module_dir_placeholder(value: str) -> str:
+            """Replaces ${MODULE_DIR} in a POSIX path and returns the
+            platform-dependent path string.
+            """
+            parts = pathlib.PurePosixPath(value).parts
+            if MODULE_DIR_VARIABLE not in parts:
+                return value
+            if parts[0] != MODULE_DIR_VARIABLE:
+                raise ValueError(
+                    f"'{MODULE_DIR_VARIABLE}' needs to be the head of flag value"
+                    f" if present, but got '{value}'."
+                )
+            # Properly construct the platform-dependent path.
+            return str(module_dir_path.joinpath(*parts[1:]))
 
-  @classmethod
-  def build(cls,
-            imported_model: ImportedModel,
-            compile_config: CompileConfig,
-            tags: Sequence[str] = ()):
-    composite_id = unique_ids.hash_composite_id(
-        [imported_model.composite_id, compile_config.id])
-    # Format: <imported_model_name> <compile_config_name>
-    name = f"{imported_model} {compile_config}"
-    compile_flags = _generate_compile_flags(
-        compile_config, imported_model.import_config.dialect_type)
-    return cls(composite_id=composite_id,
-               name=name,
-               tags=list(tags),
-               imported_model=imported_model,
-               compile_config=compile_config,
-               compile_flags=compile_flags)
+        return utils.transform_flags(
+            flags=self.compile_flags, map_funcs=[_replace_module_dir_placeholder]
+        )
+
+    @classmethod
+    def build(
+        cls,
+        imported_model: ImportedModel,
+        compile_config: CompileConfig,
+        tags: Sequence[str] = (),
+    ):
+        composite_id = unique_ids.hash_composite_id(
+            [imported_model.composite_id, compile_config.id]
+        )
+        # Format: <imported_model_name> <compile_config_name>
+        name = f"{imported_model} {compile_config}"
+        compile_flags = _generate_compile_flags(
+            compile_config, imported_model.import_config.dialect_type
+        )
+        return cls(
+            composite_id=composite_id,
+            name=name,
+            tags=list(tags),
+            imported_model=imported_model,
+            compile_config=compile_config,
+            compile_flags=compile_flags,
+        )
 
 
 class E2EModelRunTool(Enum):
-  """Tool to run a module."""
-  IREE_BENCHMARK_MODULE = "iree-benchmark-module"
+    """Tool to run a module."""
+
+    IREE_BENCHMARK_MODULE = "iree-benchmark-module"
 
 
-@serialization.serializable(type_key="iree_e2e_model_run_configs",
-                            id_field="composite_id")
+@serialization.serializable(
+    type_key="iree_e2e_model_run_configs", id_field="composite_id"
+)
 @dataclass(frozen=True)
 class E2EModelRunConfig(object):
-  """Describes an e2e run."""
-  composite_id: str
-  name: str
-  tags: List[str]
-  module_generation_config: ModuleGenerationConfig
-  module_execution_config: ModuleExecutionConfig
-  target_device_spec: common_definitions.DeviceSpec
-  input_data: common_definitions.ModelInputData
-  # Full list of flags to run with, derived from sub-components, with
-  # unmaterialized placeholders. Allows the run flags to be persisted and
-  # decouple from the generation code. Also serves as useful information in the
-  # serialized JSON.
-  run_flags: List[str]
-  tool: E2EModelRunTool
+    """Describes an e2e run."""
 
-  def __str__(self):
-    return self.name
+    composite_id: str
+    name: str
+    tags: List[str]
+    module_generation_config: ModuleGenerationConfig
+    module_execution_config: ModuleExecutionConfig
+    target_device_spec: common_definitions.DeviceSpec
+    input_data: common_definitions.ModelInputData
+    # Full list of flags to run with, derived from sub-components, with
+    # unmaterialized placeholders. Allows the run flags to be persisted and
+    # decouple from the generation code. Also serves as useful information in the
+    # serialized JSON.
+    run_flags: List[str]
+    tool: E2EModelRunTool
 
-  def materialize_run_flags(self, gpu_id: str = "0"):
-    """Materialize flags with dependent values."""
-    return utils.substitute_flag_vars(flags=self.run_flags, GPU_ID=gpu_id)
+    def __str__(self):
+        return self.name
 
-  @classmethod
-  def build(cls,
-            module_generation_config: ModuleGenerationConfig,
-            module_execution_config: ModuleExecutionConfig,
-            target_device_spec: common_definitions.DeviceSpec,
-            input_data: common_definitions.ModelInputData,
-            tool: E2EModelRunTool,
-            tags: Optional[Sequence[str]] = None):
-    composite_id = unique_ids.hash_composite_id([
-        module_generation_config.composite_id, module_execution_config.id,
-        target_device_spec.id, input_data.id
-    ])
-    # Format: <module_generation_config_name> <module_execution_config_name> with <input_data_name> @ <target_device_spec_name>
-    name = f"{module_generation_config} {module_execution_config} with {input_data} @ {target_device_spec}"
-    run_flags = generate_run_flags(
-        imported_model=module_generation_config.imported_model,
-        input_data=input_data,
-        module_execution_config=module_execution_config,
-        gpu_id=r"${GPU_ID}")
-    tags_list = [] if tags is None else list(tags)
-    return cls(composite_id=composite_id,
-               name=name,
-               tags=tags_list,
-               module_generation_config=module_generation_config,
-               module_execution_config=module_execution_config,
-               target_device_spec=target_device_spec,
-               input_data=input_data,
-               run_flags=run_flags,
-               tool=tool)
+    def materialize_run_flags(self, gpu_id: str = "0"):
+        """Materialize flags with dependent values."""
+        return utils.substitute_flag_vars(flags=self.run_flags, GPU_ID=gpu_id)
+
+    @classmethod
+    def build(
+        cls,
+        module_generation_config: ModuleGenerationConfig,
+        module_execution_config: ModuleExecutionConfig,
+        target_device_spec: common_definitions.DeviceSpec,
+        input_data: common_definitions.ModelInputData,
+        tool: E2EModelRunTool,
+        tags: Optional[Sequence[str]] = None,
+    ):
+        composite_id = unique_ids.hash_composite_id(
+            [
+                module_generation_config.composite_id,
+                module_execution_config.id,
+                target_device_spec.id,
+                input_data.id,
+            ]
+        )
+        # Format: <module_generation_config_name> <module_execution_config_name> with <input_data_name> @ <target_device_spec_name>
+        name = f"{module_generation_config} {module_execution_config} with {input_data} @ {target_device_spec}"
+        run_flags = generate_run_flags(
+            imported_model=module_generation_config.imported_model,
+            input_data=input_data,
+            module_execution_config=module_execution_config,
+            gpu_id=r"${GPU_ID}",
+        )
+        tags_list = [] if tags is None else list(tags)
+        return cls(
+            composite_id=composite_id,
+            name=name,
+            tags=tags_list,
+            module_generation_config=module_generation_config,
+            module_execution_config=module_execution_config,
+            target_device_spec=target_device_spec,
+            input_data=input_data,
+            run_flags=run_flags,
+            tool=tool,
+        )
 
 
-def generate_run_flags(imported_model: ImportedModel,
-                       input_data: common_definitions.ModelInputData,
-                       module_execution_config: ModuleExecutionConfig,
-                       gpu_id: str = "0",
-                       with_driver: bool = True) -> List[str]:
-  """Returns the IREE run module flags of the input model and execution config.
-  Args:
-    model: source model.
-    input_data: model input data.
-    module_execution_config: execution config.
-    gpu_id: target gpu id, if runs on GPUs.
-    with_driver: populate the driver flags if true. False can be used for
-      generating flags for some CMake rules with a separate DRIVER arg.
-  Returns:
-    List of flags.
-  """
+def generate_run_flags(
+    imported_model: ImportedModel,
+    input_data: common_definitions.ModelInputData,
+    module_execution_config: ModuleExecutionConfig,
+    gpu_id: str = "0",
+    with_driver: bool = True,
+) -> List[str]:
+    """Returns the IREE run module flags of the input model and execution config.
+    Args:
+      model: source model.
+      input_data: model input data.
+      module_execution_config: execution config.
+      gpu_id: target gpu id, if runs on GPUs.
+      with_driver: populate the driver flags if true. False can be used for
+        generating flags for some CMake rules with a separate DRIVER arg.
+    Returns:
+      List of flags.
+    """
 
-  model = imported_model.model
-  run_flags = [f"--function={model.entry_function}"]
-  if input_data != common_definitions.ZEROS_MODEL_INPUT_DATA:
-    raise ValueError("Currently only support all-zeros data.")
-  run_flags += [f"--input={input_type}=0" for input_type in model.input_types]
+    model = imported_model.model
+    run_flags = [f"--function={model.entry_function}"]
+    if input_data != common_definitions.ZEROS_MODEL_INPUT_DATA:
+        raise ValueError("Currently only support all-zeros data.")
+    run_flags += [f"--input={input_type}=0" for input_type in model.input_types]
 
-  exec_config = module_execution_config
-  run_flags += exec_config.extra_flags.copy()
-  if with_driver:
-    driver = exec_config.driver
-    if driver == RuntimeDriver.CUDA:
-      run_flags.append(f"--device=cuda://{gpu_id}")
-    else:
-      run_flags.append(f"--device={driver.value}")
+    exec_config = module_execution_config
+    run_flags += exec_config.extra_flags.copy()
+    if with_driver:
+        driver = exec_config.driver
+        if driver == RuntimeDriver.CUDA:
+            run_flags.append(f"--device=cuda://{gpu_id}")
+        else:
+            run_flags.append(f"--device={driver.value}")
 
-  return run_flags
+    return run_flags
 
 
-def _generate_compile_flags(compile_config: CompileConfig,
-                            dialect_type: MLIRDialectType) -> List[str]:
-  if len(compile_config.compile_targets) != 1:
-    raise ValueError(f"Only one compile target is supported. Got:"
-                     f" {compile_config.compile_targets}")
+def _generate_compile_flags(
+    compile_config: CompileConfig, dialect_type: MLIRDialectType
+) -> List[str]:
+    if len(compile_config.compile_targets) != 1:
+        raise ValueError(
+            f"Only one compile target is supported. Got:"
+            f" {compile_config.compile_targets}"
+        )
 
-  compile_target = compile_config.compile_targets[0]
-  flags = [
-      f"--iree-hal-target-backends={compile_target.target_backend.value}",
-      f"--iree-input-type={dialect_type.value}"
-  ]
-  flags += _generate_compile_target_flags(compile_target)
-  flags += compile_config.extra_flags
-  return flags
+    compile_target = compile_config.compile_targets[0]
+    flags = [
+        f"--iree-hal-target-backends={compile_target.target_backend.value}",
+        f"--iree-input-type={dialect_type.value}",
+    ]
+    flags += _generate_compile_target_flags(compile_target)
+    flags += compile_config.extra_flags
+    return flags
 
 
 def _generate_compile_target_flags(target: CompileTarget) -> List[str]:
-  arch_info = target.target_architecture
-  if target.target_backend == TargetBackend.VULKAN_SPIRV:
-    gpu_arch = arch_info.microarchitecture if len(
-        arch_info.microarchitecture) != 0 else arch_info.architecture
-    return [
-        f"--iree-vulkan-target-triple={gpu_arch}-unknown-{target.target_abi.value}",
-    ]
+    arch_info = target.target_architecture
+    if target.target_backend == TargetBackend.VULKAN_SPIRV:
+        gpu_arch = (
+            arch_info.microarchitecture
+            if len(arch_info.microarchitecture) != 0
+            else arch_info.architecture
+        )
+        return [
+            f"--iree-vulkan-target-triple={gpu_arch}-unknown-{target.target_abi.value}",
+        ]
 
-  if arch_info.architecture == "x86_64":
-    flags = [
-        f"--iree-llvmcpu-target-triple=x86_64-unknown-{target.target_abi.value}",
-        f"--iree-llvmcpu-target-cpu={arch_info.microarchitecture.lower()}"
-    ]
-  elif arch_info.architecture == "riscv_64":
-    flags = [
-        f"--iree-llvmcpu-target-triple=riscv64-pc-{target.target_abi.value}",
-        "--iree-llvmcpu-target-cpu=generic-rv64",
-        "--iree-llvmcpu-target-abi=lp64d",
-        "--iree-llvmcpu-target-cpu-features=+m,+a,+f,+d,+zvl512b,+v",
-        "--riscv-v-fixed-length-vector-lmul-max=8"
-    ]
-  elif arch_info.architecture == "riscv_32":
-    # TODO(llvm-project/60463): Replace 'zve32f' with 'zve32x'.
-    flags = [
-        f"--iree-llvmcpu-target-triple=riscv32-pc-{target.target_abi.value}",
-        "--iree-llvmcpu-target-cpu=generic-rv32",
-        "--iree-llvmcpu-target-abi=ilp32",
-        "--iree-llvmcpu-target-cpu-features=+m,+a,+f,+zvl512b,+zve32f",
-        "--riscv-v-fixed-length-vector-lmul-max=8"
-    ]
-  elif arch_info.architecture == "armv8.2-a":
-    flags = [
-        f"--iree-llvmcpu-target-triple=aarch64-none-{target.target_abi.value}",
-    ]
-  elif arch_info.architecture == "cuda":
-    if target.target_abi != TargetABI.LINUX_GNU:
-      raise ValueError(
-          f"Unsupported target ABI for CUDA backend: `{target.target_abi}`")
-    flags = [
-        f"--iree-hal-cuda-llvm-target-arch={arch_info.microarchitecture}",
-    ]
-  elif arch_info.architecture == "vmvx":
-    flags = []
-  else:
-    raise ValueError(f"Unsupported architecture: '{arch_info.architecture}'")
-  return flags
+    if arch_info.architecture == "x86_64":
+        flags = [
+            f"--iree-llvmcpu-target-triple=x86_64-unknown-{target.target_abi.value}",
+            f"--iree-llvmcpu-target-cpu={arch_info.microarchitecture.lower()}",
+        ]
+    elif arch_info.architecture == "riscv_64":
+        flags = [
+            f"--iree-llvmcpu-target-triple=riscv64-pc-{target.target_abi.value}",
+            "--iree-llvmcpu-target-cpu=generic-rv64",
+            "--iree-llvmcpu-target-abi=lp64d",
+            "--iree-llvmcpu-target-cpu-features=+m,+a,+f,+d,+zvl512b,+v",
+            "--riscv-v-fixed-length-vector-lmul-max=8",
+        ]
+    elif arch_info.architecture == "riscv_32":
+        # TODO(llvm-project/60463): Replace 'zve32f' with 'zve32x'.
+        flags = [
+            f"--iree-llvmcpu-target-triple=riscv32-pc-{target.target_abi.value}",
+            "--iree-llvmcpu-target-cpu=generic-rv32",
+            "--iree-llvmcpu-target-abi=ilp32",
+            "--iree-llvmcpu-target-cpu-features=+m,+a,+f,+zvl512b,+zve32f",
+            "--riscv-v-fixed-length-vector-lmul-max=8",
+        ]
+    elif arch_info.architecture == "armv8.2-a":
+        flags = [
+            f"--iree-llvmcpu-target-triple=aarch64-none-{target.target_abi.value}",
+        ]
+    elif arch_info.architecture == "cuda":
+        if target.target_abi != TargetABI.LINUX_GNU:
+            raise ValueError(
+                f"Unsupported target ABI for CUDA backend: `{target.target_abi}`"
+            )
+        flags = [
+            f"--iree-hal-cuda-llvm-target-arch={arch_info.microarchitecture}",
+        ]
+    elif arch_info.architecture == "vmvx":
+        flags = []
+    else:
+        raise ValueError(f"Unsupported architecture: '{arch_info.architecture}'")
+    return flags
diff --git a/build_tools/python/e2e_test_framework/definitions/iree_definitions_test.py b/build_tools/python/e2e_test_framework/definitions/iree_definitions_test.py
index 02d1987..585c7e5 100644
--- a/build_tools/python/e2e_test_framework/definitions/iree_definitions_test.py
+++ b/build_tools/python/e2e_test_framework/definitions/iree_definitions_test.py
@@ -11,147 +11,180 @@
 
 
 class IreeDefinitionsTest(unittest.TestCase):
+    def test_generate_run_flags(self):
+        imported_model = iree_definitions.ImportedModel.from_model(
+            common_definitions.Model(
+                id="1234",
+                name="tflite_m",
+                tags=[],
+                source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
+                source_url="https://example.com/xyz.tflite",
+                entry_function="main",
+                input_types=["1xf32", "2x2xf32"],
+            )
+        )
+        execution_config = iree_definitions.ModuleExecutionConfig.build(
+            id="123",
+            tags=["test"],
+            loader=iree_definitions.RuntimeLoader.EMBEDDED_ELF,
+            driver=iree_definitions.RuntimeDriver.LOCAL_TASK,
+            extra_flags=["--task=10"],
+        )
 
-  def test_generate_run_flags(self):
-    imported_model = iree_definitions.ImportedModel.from_model(
-        common_definitions.Model(
-            id="1234",
-            name="tflite_m",
-            tags=[],
-            source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
-            source_url="https://example.com/xyz.tflite",
-            entry_function="main",
-            input_types=["1xf32", "2x2xf32"]))
-    execution_config = iree_definitions.ModuleExecutionConfig.build(
-        id="123",
-        tags=["test"],
-        loader=iree_definitions.RuntimeLoader.EMBEDDED_ELF,
-        driver=iree_definitions.RuntimeDriver.LOCAL_TASK,
-        extra_flags=["--task=10"])
+        flags = iree_definitions.generate_run_flags(
+            imported_model=imported_model,
+            input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
+            module_execution_config=execution_config,
+        )
 
-    flags = iree_definitions.generate_run_flags(
-        imported_model=imported_model,
-        input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
-        module_execution_config=execution_config)
+        self.assertEqual(
+            flags,
+            [
+                "--function=main",
+                "--input=1xf32=0",
+                "--input=2x2xf32=0",
+                "--task=10",
+                "--device=local-task",
+            ],
+        )
 
-    self.assertEqual(flags, [
-        "--function=main", "--input=1xf32=0", "--input=2x2xf32=0", "--task=10",
-        "--device=local-task"
-    ])
+    def test_generate_run_flags_with_cuda(self):
+        imported_model = iree_definitions.ImportedModel.from_model(
+            common_definitions.Model(
+                id="1234",
+                name="tflite_m",
+                tags=[],
+                source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
+                source_url="https://example.com/xyz.tflite",
+                entry_function="main",
+                input_types=["1xf32"],
+            )
+        )
+        execution_config = iree_definitions.ModuleExecutionConfig.build(
+            id="123",
+            tags=["test"],
+            loader=iree_definitions.RuntimeLoader.NONE,
+            driver=iree_definitions.RuntimeDriver.CUDA,
+            extra_flags=[],
+        )
 
-  def test_generate_run_flags_with_cuda(self):
-    imported_model = iree_definitions.ImportedModel.from_model(
-        common_definitions.Model(
-            id="1234",
-            name="tflite_m",
-            tags=[],
-            source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
-            source_url="https://example.com/xyz.tflite",
-            entry_function="main",
-            input_types=["1xf32"]))
-    execution_config = iree_definitions.ModuleExecutionConfig.build(
-        id="123",
-        tags=["test"],
-        loader=iree_definitions.RuntimeLoader.NONE,
-        driver=iree_definitions.RuntimeDriver.CUDA,
-        extra_flags=[])
+        flags = iree_definitions.generate_run_flags(
+            imported_model=imported_model,
+            input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
+            module_execution_config=execution_config,
+            gpu_id="3",
+        )
 
-    flags = iree_definitions.generate_run_flags(
-        imported_model=imported_model,
-        input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
-        module_execution_config=execution_config,
-        gpu_id="3")
+        self.assertEqual(
+            flags, ["--function=main", "--input=1xf32=0", "--device=cuda://3"]
+        )
 
-    self.assertEqual(
-        flags, ["--function=main", "--input=1xf32=0", "--device=cuda://3"])
+    def test_generate_run_flags_without_driver(self):
+        imported_model = iree_definitions.ImportedModel.from_model(
+            common_definitions.Model(
+                id="1234",
+                name="tflite_m",
+                tags=[],
+                source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
+                source_url="https://example.com/xyz.tflite",
+                entry_function="main",
+                input_types=["1xf32"],
+            )
+        )
+        execution_config = iree_definitions.ModuleExecutionConfig.build(
+            id="123",
+            tags=["test"],
+            loader=iree_definitions.RuntimeLoader.EMBEDDED_ELF,
+            driver=iree_definitions.RuntimeDriver.LOCAL_TASK,
+            extra_flags=["--task=10"],
+        )
 
-  def test_generate_run_flags_without_driver(self):
-    imported_model = iree_definitions.ImportedModel.from_model(
-        common_definitions.Model(
-            id="1234",
-            name="tflite_m",
-            tags=[],
-            source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
-            source_url="https://example.com/xyz.tflite",
-            entry_function="main",
-            input_types=["1xf32"]))
-    execution_config = iree_definitions.ModuleExecutionConfig.build(
-        id="123",
-        tags=["test"],
-        loader=iree_definitions.RuntimeLoader.EMBEDDED_ELF,
-        driver=iree_definitions.RuntimeDriver.LOCAL_TASK,
-        extra_flags=["--task=10"])
+        flags = iree_definitions.generate_run_flags(
+            imported_model=imported_model,
+            input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
+            module_execution_config=execution_config,
+            with_driver=False,
+        )
 
-    flags = iree_definitions.generate_run_flags(
-        imported_model=imported_model,
-        input_data=common_definitions.ZEROS_MODEL_INPUT_DATA,
-        module_execution_config=execution_config,
-        with_driver=False)
-
-    self.assertEqual(flags, ["--function=main", "--input=1xf32=0", "--task=10"])
+        self.assertEqual(flags, ["--function=main", "--input=1xf32=0", "--task=10"])
 
 
 class ModuleGenerationConfigTest(unittest.TestCase):
+    def test_materialize_compile_flags(self):
+        imported_model = iree_definitions.ImportedModel.from_model(
+            common_definitions.Model(
+                id="1234",
+                name="tflite_m",
+                tags=[],
+                source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
+                source_url="https://example.com/xyz.tflite",
+                entry_function="main",
+                input_types=["1xf32"],
+            )
+        )
+        compile_target = iree_definitions.CompileTarget(
+            target_backend=iree_definitions.TargetBackend.LLVM_CPU,
+            target_architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
+            target_abi=iree_definitions.TargetABI.LINUX_GNU,
+        )
+        compile_config = iree_definitions.CompileConfig(
+            id="compile_config_a",
+            name="compile_config_a",
+            tags=["test"],
+            compile_targets=[compile_target],
+            extra_flags=[r"--test=${MODULE_DIR}/test.json"],
+        )
+        gen_config = iree_definitions.ModuleGenerationConfig.build(
+            imported_model=imported_model, compile_config=compile_config
+        )
 
-  def test_materialize_compile_flags(self):
-    imported_model = iree_definitions.ImportedModel.from_model(
-        common_definitions.Model(
-            id="1234",
-            name="tflite_m",
-            tags=[],
-            source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
-            source_url="https://example.com/xyz.tflite",
-            entry_function="main",
-            input_types=["1xf32"]))
-    compile_target = iree_definitions.CompileTarget(
-        target_backend=iree_definitions.TargetBackend.LLVM_CPU,
-        target_architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
-        target_abi=iree_definitions.TargetABI.LINUX_GNU)
-    compile_config = iree_definitions.CompileConfig(
-        id="compile_config_a",
-        name="compile_config_a",
-        tags=["test"],
-        compile_targets=[compile_target],
-        extra_flags=[r"--test=${MODULE_DIR}/test.json"])
-    gen_config = iree_definitions.ModuleGenerationConfig.build(
-        imported_model=imported_model, compile_config=compile_config)
+        flags = gen_config.materialize_compile_flags(
+            module_dir_path=pathlib.Path("abc")
+        )
 
-    flags = gen_config.materialize_compile_flags(
-        module_dir_path=pathlib.Path("abc"))
+        expected_path = pathlib.Path("abc", "test.json")
+        self.assertIn(f"--test={expected_path}", flags)
 
-    expected_path = pathlib.Path("abc", "test.json")
-    self.assertIn(f"--test={expected_path}", flags)
+    def test_materialize_compile_flags_invalid_module_dir_position(self):
+        imported_model = iree_definitions.ImportedModel.from_model(
+            common_definitions.Model(
+                id="1234",
+                name="tflite_m",
+                tags=[],
+                source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
+                source_url="https://example.com/xyz.tflite",
+                entry_function="main",
+                input_types=["1xf32"],
+            )
+        )
+        compile_target = iree_definitions.CompileTarget(
+            target_backend=iree_definitions.TargetBackend.LLVM_CPU,
+            target_architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
+            target_abi=iree_definitions.TargetABI.LINUX_GNU,
+        )
+        compile_config = iree_definitions.CompileConfig(
+            id="compile_config_a",
+            name="compile_config_a",
+            tags=["test"],
+            compile_targets=[compile_target],
+            extra_flags=[r"--test=prefix/${MODULE_DIR}/test.json"],
+        )
+        gen_config = iree_definitions.ModuleGenerationConfig.build(
+            imported_model=imported_model, compile_config=compile_config
+        )
+        expected_error = (
+            r"^'\${MODULE_DIR}' needs to be the head of flag value if present,"
+            r" but got 'prefix/\${MODULE_DIR}/test.json'.$"
+        )
 
-  def test_materialize_compile_flags_invalid_module_dir_position(self):
-    imported_model = iree_definitions.ImportedModel.from_model(
-        common_definitions.Model(
-            id="1234",
-            name="tflite_m",
-            tags=[],
-            source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
-            source_url="https://example.com/xyz.tflite",
-            entry_function="main",
-            input_types=["1xf32"]))
-    compile_target = iree_definitions.CompileTarget(
-        target_backend=iree_definitions.TargetBackend.LLVM_CPU,
-        target_architecture=common_definitions.DeviceArchitecture.RV64_GENERIC,
-        target_abi=iree_definitions.TargetABI.LINUX_GNU)
-    compile_config = iree_definitions.CompileConfig(
-        id="compile_config_a",
-        name="compile_config_a",
-        tags=["test"],
-        compile_targets=[compile_target],
-        extra_flags=[r"--test=prefix/${MODULE_DIR}/test.json"])
-    gen_config = iree_definitions.ModuleGenerationConfig.build(
-        imported_model=imported_model, compile_config=compile_config)
-    expected_error = (
-        r"^'\${MODULE_DIR}' needs to be the head of flag value if present,"
-        r" but got 'prefix/\${MODULE_DIR}/test.json'.$")
-
-    self.assertRaisesRegex(
-        ValueError, expected_error, lambda: gen_config.
-        materialize_compile_flags(module_dir_path=pathlib.Path("abc")))
+        self.assertRaisesRegex(
+            ValueError,
+            expected_error,
+            lambda: gen_config.materialize_compile_flags(
+                module_dir_path=pathlib.Path("abc")
+            ),
+        )
 
 
 if __name__ == "__main__":
-  unittest.main()
+    unittest.main()
diff --git a/build_tools/python/e2e_test_framework/definitions/utils.py b/build_tools/python/e2e_test_framework/definitions/utils.py
index a37e561..5079cc6 100644
--- a/build_tools/python/e2e_test_framework/definitions/utils.py
+++ b/build_tools/python/e2e_test_framework/definitions/utils.py
@@ -11,41 +11,42 @@
 MAX_SUBSTITUTION_ITERATIONS = 10
 
 
-def transform_flags(flags: Sequence[str],
-                    map_funcs: Sequence[Callable[[str], str]]) -> List[str]:
-  """Call map functions to transform flag values, e.g., replace placeholders
-  that were unknown when the flag was constructed.
+def transform_flags(
+    flags: Sequence[str], map_funcs: Sequence[Callable[[str], str]]
+) -> List[str]:
+    """Call map functions to transform flag values, e.g., replace placeholders
+    that were unknown when the flag was constructed.
 
-  It parses and extracts the flag values from both keyword and positional flags,
-  transforms them, and returns the updated flags with transformed values.
+    It parses and extracts the flag values from both keyword and positional flags,
+    transforms them, and returns the updated flags with transformed values.
 
-  Each flag value is transformed only once by each map function in order.
-  
-  Args:
-    flags: list of flags.
-    map_funcs: list of map functions to map flag value.
-  Returns:
-    List of transformed flags.
-  """
+    Each flag value is transformed only once by each map function in order.
 
-  transformed_flags = []
-  for flag in flags:
-    keyword, separator, value = ("", "", flag)
-    if flag.startswith("-"):
-      keyword, separator, value = flag.partition("=")
+    Args:
+      flags: list of flags.
+      map_funcs: list of map functions to map flag value.
+    Returns:
+      List of transformed flags.
+    """
 
-    if value:
-      for map_func in map_funcs:
-        value = map_func(value)
+    transformed_flags = []
+    for flag in flags:
+        keyword, separator, value = ("", "", flag)
+        if flag.startswith("-"):
+            keyword, separator, value = flag.partition("=")
 
-    transformed_flags.append(f"{keyword}{separator}{value}")
+        if value:
+            for map_func in map_funcs:
+                value = map_func(value)
 
-  return transformed_flags
+        transformed_flags.append(f"{keyword}{separator}{value}")
+
+    return transformed_flags
 
 
 def substitute_flag_vars(flags: Sequence[str], **mapping: Any) -> List[str]:
-  """Sugar of transform_flags to substitute variables in string.Template format.
-  """
-  return transform_flags(
-      flags=flags,
-      map_funcs=[lambda value: string.Template(value).substitute(mapping)])
+    """Sugar of transform_flags to substitute variables in string.Template format."""
+    return transform_flags(
+        flags=flags,
+        map_funcs=[lambda value: string.Template(value).substitute(mapping)],
+    )
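
For readers skimming the reformatted helper above, a minimal sketch of the behavior its docstring describes: keyword flags keep their `--key=` prefix, positional flags are transformed whole, and valueless flags pass through untouched. This assumes `build_tools/python` is on `PYTHONPATH` so the framework package resolves; the `${DIR}` placeholder and file paths are invented purely for illustration.

```python
from e2e_test_framework.definitions import utils

# Keyword flags keep their "--key=" prefix, positional flags are transformed
# whole, and flags without a value ("--no-value-flag") pass through unchanged.
# "${DIR}" and the paths below are made-up example values.
flags = utils.transform_flags(
    flags=["--input=${DIR}/a.mlir", "${DIR}/b.mlir", "--no-value-flag"],
    map_funcs=[lambda value: value.replace("${DIR}", "/tmp/run")],
)
print(flags)  # ['--input=/tmp/run/a.mlir', '/tmp/run/b.mlir', '--no-value-flag']
```
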
diff --git a/build_tools/python/e2e_test_framework/definitions/utils_test.py b/build_tools/python/e2e_test_framework/definitions/utils_test.py
index 700687e..7beae7e 100644
--- a/build_tools/python/e2e_test_framework/definitions/utils_test.py
+++ b/build_tools/python/e2e_test_framework/definitions/utils_test.py
@@ -10,44 +10,44 @@
 
 
 class UtilsTest(unittest.TestCase):
+    def test_transform_flags(self):
+        flags = utils.transform_flags(
+            flags=[
+                r"${HOLDER_A} ${HOLDER_B}",
+                r"--key=${HOLDER_A}",
+                "--no-value-key",
+                r"--filter=x=${HOLDER_A}",
+            ],
+            map_funcs=[
+                lambda value: value.replace(r"${HOLDER_A}", "val_a"),
+                lambda value: value.replace(r"${HOLDER_B}", "val_b"),
+            ],
+        )
 
-  def test_transform_flags(self):
-    flags = utils.transform_flags(
-        flags=[
-            r"${HOLDER_A} ${HOLDER_B}", r"--key=${HOLDER_A}", "--no-value-key",
-            r"--filter=x=${HOLDER_A}"
-        ],
-        map_funcs=[
-            lambda value: value.replace(r"${HOLDER_A}", "val_a"),
-            lambda value: value.replace(r"${HOLDER_B}", "val_b")
-        ])
+        self.assertEquals(
+            flags, ["val_a val_b", "--key=val_a", "--no-value-key", "--filter=x=val_a"]
+        )
 
-    self.assertEquals(
-        flags,
-        ["val_a val_b", "--key=val_a", "--no-value-key", "--filter=x=val_a"])
+    def test_substitute_flag_vars(self):
+        raw_flags = [
+            r"${HOLDER_A}",
+            r"--key=${HOLDER_B}",
+        ]
 
-  def test_substitute_flag_vars(self):
-    raw_flags = [
-        r"${HOLDER_A}",
-        r"--key=${HOLDER_B}",
-    ]
+        flags = utils.substitute_flag_vars(flags=raw_flags, HOLDER_A=1, HOLDER_B="b")
 
-    flags = utils.substitute_flag_vars(flags=raw_flags,
-                                       HOLDER_A=1,
-                                       HOLDER_B="b")
+        self.assertEquals(flags, ["1", "--key=b"])
 
-    self.assertEquals(flags, ["1", "--key=b"])
+    def test_substitute_flag_vars_missing_variable(self):
+        raw_flags = [
+            r"${HOLDER_A}",
+            r"--key=${HOLDER_B}",
+        ]
 
-  def test_substitute_flag_vars_missing_variable(self):
-    raw_flags = [
-        r"${HOLDER_A}",
-        r"--key=${HOLDER_B}",
-    ]
-
-    self.assertRaises(
-        KeyError,
-        lambda: utils.substitute_flag_vars(flags=raw_flags, HOLDER_A=1))
+        self.assertRaises(
+            KeyError, lambda: utils.substitute_flag_vars(flags=raw_flags, HOLDER_A=1)
+        )
 
 
 if __name__ == "__main__":
-  unittest.main()
+    unittest.main()
diff --git a/build_tools/python/e2e_test_framework/device_specs/device_collections.py b/build_tools/python/e2e_test_framework/device_specs/device_collections.py
index 9ed4629..250884b 100644
--- a/build_tools/python/e2e_test_framework/device_specs/device_collections.py
+++ b/build_tools/python/e2e_test_framework/device_specs/device_collections.py
@@ -7,42 +7,47 @@
 
 from typing import List, Sequence, Set
 from e2e_test_framework.definitions import common_definitions
-from e2e_test_framework.device_specs import gcp_specs, pixel_4_specs, pixel_6_pro_specs, moto_edge_x30_specs
+from e2e_test_framework.device_specs import (
+    gcp_specs,
+    pixel_4_specs,
+    pixel_6_pro_specs,
+    moto_edge_x30_specs,
+)
 
 
 class DeviceCollection(object):
-  """Class to collect and query device specs."""
+    """Class to collect and query device specs."""
 
-  def __init__(self, device_specs: Sequence[common_definitions.DeviceSpec]):
-    self.device_specs = device_specs
+    def __init__(self, device_specs: Sequence[common_definitions.DeviceSpec]):
+        self.device_specs = device_specs
 
-  def query_device_specs(
-      self,
-      architecture: common_definitions.DeviceArchitecture,
-      host_environment: common_definitions.HostEnvironment,
-      device_parameters: Set[str] = set()
-  ) -> List[common_definitions.DeviceSpec]:
-    """Query the device specs.
+    def query_device_specs(
+        self,
+        architecture: common_definitions.DeviceArchitecture,
+        host_environment: common_definitions.HostEnvironment,
+        device_parameters: Set[str] = set(),
+    ) -> List[common_definitions.DeviceSpec]:
+        """Query the device specs.
 
-    Args:
-      architecture: device architecture to match.
-      platform: device platform to match.
-      device_parameters: parameters that devices need to have.
-    Returns:
-      List of matched device specs.
-    """
+        Args:
+          architecture: device architecture to match.
+          host_environment: host environment to match.
+          device_parameters: parameters that devices need to have.
+        Returns:
+          List of matched device specs.
+        """
 
-    matched_device_specs = []
-    for device_spec in self.device_specs:
-      if device_spec.architecture != architecture:
-        continue
-      if device_spec.host_environment != host_environment:
-        continue
-      if not device_parameters.issubset(device_spec.device_parameters):
-        continue
-      matched_device_specs.append(device_spec)
+        matched_device_specs = []
+        for device_spec in self.device_specs:
+            if device_spec.architecture != architecture:
+                continue
+            if device_spec.host_environment != host_environment:
+                continue
+            if not device_parameters.issubset(device_spec.device_parameters):
+                continue
+            matched_device_specs.append(device_spec)
 
-    return matched_device_specs
+        return matched_device_specs
 
 
 ALL_DEVICE_SPECS = [
diff --git a/build_tools/python/e2e_test_framework/device_specs/device_collections_test.py b/build_tools/python/e2e_test_framework/device_specs/device_collections_test.py
index f441a32..f7154be 100644
--- a/build_tools/python/e2e_test_framework/device_specs/device_collections_test.py
+++ b/build_tools/python/e2e_test_framework/device_specs/device_collections_test.py
@@ -10,68 +10,82 @@
 
 
 class DeviceCollectionTest(unittest.TestCase):
+    def test_query_device_specs(self):
+        linux_x86_device_spec = common_definitions.DeviceSpec.build(
+            id="linux_x86",
+            device_name="a",
+            architecture=common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
+            host_environment=common_definitions.HostEnvironment.LINUX_X86_64,
+            tags=[],
+        )
+        android_x86_device_spec = common_definitions.DeviceSpec.build(
+            id="android_x86",
+            device_name="b",
+            architecture=common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
+            host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
+            tags=[],
+        )
+        little_cores_device_spec = common_definitions.DeviceSpec.build(
+            id="android_little",
+            device_name="c",
+            architecture=common_definitions.DeviceArchitecture.ARMV9_A_GENERIC,
+            host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
+            device_parameters=["little-cores"],
+            tags=[],
+        )
+        big_cores_device_spec = common_definitions.DeviceSpec.build(
+            id="android_big",
+            device_name="d",
+            architecture=common_definitions.DeviceArchitecture.ARMV9_A_GENERIC,
+            host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
+            device_parameters=["big-cores"],
+            tags=[],
+        )
+        devices = device_collections.DeviceCollection(
+            device_specs=[
+                linux_x86_device_spec,
+                android_x86_device_spec,
+                little_cores_device_spec,
+                big_cores_device_spec,
+            ]
+        )
 
-  def test_query_device_specs(self):
-    linux_x86_device_spec = common_definitions.DeviceSpec.build(
-        id="linux_x86",
-        device_name="a",
-        architecture=common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
-        host_environment=common_definitions.HostEnvironment.LINUX_X86_64,
-        tags=[])
-    android_x86_device_spec = common_definitions.DeviceSpec.build(
-        id="android_x86",
-        device_name="b",
-        architecture=common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
-        host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
-        tags=[])
-    little_cores_device_spec = common_definitions.DeviceSpec.build(
-        id="android_little",
-        device_name="c",
-        architecture=common_definitions.DeviceArchitecture.ARMV9_A_GENERIC,
-        host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
-        device_parameters=["little-cores"],
-        tags=[])
-    big_cores_device_spec = common_definitions.DeviceSpec.build(
-        id="android_big",
-        device_name="d",
-        architecture=common_definitions.DeviceArchitecture.ARMV9_A_GENERIC,
-        host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
-        device_parameters=["big-cores"],
-        tags=[])
-    devices = device_collections.DeviceCollection(device_specs=[
-        linux_x86_device_spec, android_x86_device_spec,
-        little_cores_device_spec, big_cores_device_spec
-    ])
+        linux_x86_devices = devices.query_device_specs(
+            architecture=common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
+            host_environment=common_definitions.HostEnvironment.LINUX_X86_64,
+        )
+        android_x86_devices = devices.query_device_specs(
+            architecture=common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
+            host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
+        )
+        little_cores_devices = devices.query_device_specs(
+            architecture=common_definitions.DeviceArchitecture.ARMV9_A_GENERIC,
+            host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
+            device_parameters={"little-cores"},
+        )
+        big_cores_devices = devices.query_device_specs(
+            architecture=common_definitions.DeviceArchitecture.ARMV9_A_GENERIC,
+            host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
+            device_parameters={"big-cores"},
+        )
+        all_arm_devices = devices.query_device_specs(
+            architecture=common_definitions.DeviceArchitecture.ARMV9_A_GENERIC,
+            host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
+        )
+        no_matched_device = devices.query_device_specs(
+            architecture=common_definitions.DeviceArchitecture.ARMV9_A_GENERIC,
+            host_environment=common_definitions.HostEnvironment.LINUX_X86_64,
+        )
 
-    linux_x86_devices = devices.query_device_specs(
-        architecture=common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
-        host_environment=common_definitions.HostEnvironment.LINUX_X86_64)
-    android_x86_devices = devices.query_device_specs(
-        architecture=common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
-        host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A)
-    little_cores_devices = devices.query_device_specs(
-        architecture=common_definitions.DeviceArchitecture.ARMV9_A_GENERIC,
-        host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
-        device_parameters={"little-cores"})
-    big_cores_devices = devices.query_device_specs(
-        architecture=common_definitions.DeviceArchitecture.ARMV9_A_GENERIC,
-        host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
-        device_parameters={"big-cores"})
-    all_arm_devices = devices.query_device_specs(
-        architecture=common_definitions.DeviceArchitecture.ARMV9_A_GENERIC,
-        host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A)
-    no_matched_device = devices.query_device_specs(
-        architecture=common_definitions.DeviceArchitecture.ARMV9_A_GENERIC,
-        host_environment=common_definitions.HostEnvironment.LINUX_X86_64)
-
-    self.assertEqual(linux_x86_devices, [linux_x86_device_spec])
-    self.assertEqual(android_x86_devices, [android_x86_device_spec])
-    self.assertEqual(little_cores_devices, [little_cores_device_spec])
-    self.assertEqual(big_cores_devices, [big_cores_device_spec])
-    self.assertEqual(all_arm_devices,
-                     [little_cores_device_spec, big_cores_device_spec])
-    self.assertEqual(no_matched_device, [])
+        self.assertEqual(linux_x86_devices, [linux_x86_device_spec])
+        self.assertEqual(android_x86_devices, [android_x86_device_spec])
+        self.assertEqual(little_cores_devices, [little_cores_device_spec])
+        self.assertEqual(big_cores_devices, [big_cores_device_spec])
+        self.assertEqual(
+            all_arm_devices, [little_cores_device_spec, big_cores_device_spec]
+        )
+        self.assertEqual(no_matched_device, [])
 
 
 if __name__ == "__main__":
-  unittest.main()
+    unittest.main()
diff --git a/build_tools/python/e2e_test_framework/device_specs/gcp_specs.py b/build_tools/python/e2e_test_framework/device_specs/gcp_specs.py
index 82b48ec..9ca92f8 100644
--- a/build_tools/python/e2e_test_framework/device_specs/gcp_specs.py
+++ b/build_tools/python/e2e_test_framework/device_specs/gcp_specs.py
@@ -15,11 +15,13 @@
     host_environment=common_definitions.HostEnvironment.LINUX_X86_64,
     architecture=common_definitions.DeviceArchitecture.X86_64_CASCADELAKE,
     device_parameters=[device_parameters.ALL_CORES],
-    tags=["cpu"])
+    tags=["cpu"],
+)
 
 GCP_A2_HIGHGPU_1G = common_definitions.DeviceSpec.build(
     id=unique_ids.DEVICE_SPEC_GCP_A2_HIGHGPU_1G,
     device_name="a2-highgpu-1g",
     host_environment=common_definitions.HostEnvironment.LINUX_X86_64,
     architecture=common_definitions.DeviceArchitecture.NVIDIA_AMPERE,
-    tags=["gpu"])
+    tags=["gpu"],
+)
diff --git a/build_tools/python/e2e_test_framework/device_specs/moto_edge_x30_specs.py b/build_tools/python/e2e_test_framework/device_specs/moto_edge_x30_specs.py
index 68337f9..4a1f2d1 100644
--- a/build_tools/python/e2e_test_framework/device_specs/moto_edge_x30_specs.py
+++ b/build_tools/python/e2e_test_framework/device_specs/moto_edge_x30_specs.py
@@ -15,4 +15,5 @@
     device_name=DEVICE_NAME,
     architecture=common_definitions.DeviceArchitecture.QUALCOMM_ADRENO,
     host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
-    tags=["gpu"])
+    tags=["gpu"],
+)
diff --git a/build_tools/python/e2e_test_framework/device_specs/pixel_4_specs.py b/build_tools/python/e2e_test_framework/device_specs/pixel_4_specs.py
index c9e6dc6..9cac9d2 100644
--- a/build_tools/python/e2e_test_framework/device_specs/pixel_4_specs.py
+++ b/build_tools/python/e2e_test_framework/device_specs/pixel_4_specs.py
@@ -17,11 +17,13 @@
     architecture=common_definitions.DeviceArchitecture.ARMV8_2_A_GENERIC,
     host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
     device_parameters=[device_parameters.ARM_BIG_CORES],
-    tags=["big-core"])
+    tags=["big-core"],
+)
 LITTLE_CORES = common_definitions.DeviceSpec.build(
     id=unique_ids.DEVICE_SPEC_MOBILE_PIXEL_4 + "-little-core",
     device_name=DEVICE_NAME,
     architecture=common_definitions.DeviceArchitecture.ARMV8_2_A_GENERIC,
     host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
     device_parameters=[device_parameters.ARM_LITTLE_CORES],
-    tags=["little-core"])
+    tags=["little-core"],
+)
diff --git a/build_tools/python/e2e_test_framework/device_specs/pixel_6_pro_specs.py b/build_tools/python/e2e_test_framework/device_specs/pixel_6_pro_specs.py
index bd1f176..26cba51 100644
--- a/build_tools/python/e2e_test_framework/device_specs/pixel_6_pro_specs.py
+++ b/build_tools/python/e2e_test_framework/device_specs/pixel_6_pro_specs.py
@@ -17,17 +17,20 @@
     architecture=common_definitions.DeviceArchitecture.ARMV8_2_A_GENERIC,
     host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
     device_parameters=[device_parameters.ARM_BIG_CORES],
-    tags=["big-core"])
+    tags=["big-core"],
+)
 LITTLE_CORES = common_definitions.DeviceSpec.build(
     id=unique_ids.DEVICE_SPEC_MOBILE_PIXEL_6_PRO + "-little-core",
     device_name=DEVICE_NAME,
     architecture=common_definitions.DeviceArchitecture.ARMV8_2_A_GENERIC,
     host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
     device_parameters=[device_parameters.ARM_LITTLE_CORES],
-    tags=["little-core"])
+    tags=["little-core"],
+)
 GPU = common_definitions.DeviceSpec.build(
     id=unique_ids.DEVICE_SPEC_MOBILE_PIXEL_6_PRO + "-gpu",
     device_name=DEVICE_NAME,
     architecture=common_definitions.DeviceArchitecture.ARM_VALHALL,
     host_environment=common_definitions.HostEnvironment.ANDROID_ARMV8_2_A,
-    tags=["gpu"])
+    tags=["gpu"],
+)
diff --git a/build_tools/python/e2e_test_framework/models/jax_models.py b/build_tools/python/e2e_test_framework/models/jax_models.py
index 84b5982..af861f6 100644
--- a/build_tools/python/e2e_test_framework/models/jax_models.py
+++ b/build_tools/python/e2e_test_framework/models/jax_models.py
@@ -16,59 +16,71 @@
 ID_FORMAT = string.Template("${model_id}-batch${batch_size}")
 NAME_FORMAT = string.Template("${name}_BATCH${batch_size}")
 SOURCE_URL_FORMAT = string.Template(
-    GCS_ARTIFACT_ROOT_DIR +
-    "/${directory}/batch_${batch_size}/stablehlo.mlirbc")
+    GCS_ARTIFACT_ROOT_DIR + "/${directory}/batch_${batch_size}/stablehlo.mlirbc"
+)
 
 # Derived from https://huggingface.co/docs/transformers/model_doc/resnet#transformers.FlaxResNetModel.
 RESNET50_TAGS = ["fp32", "cnn", "resnet"]
 
 RESNET50_FP32_JAX_3X224X224XF32_BATCHES = model_utils.generate_batch_models(
     id_template=model_utils.partial_template_substitute(
-        ID_FORMAT, model_id=unique_ids.MODEL_RESNET50_FP32_JAX_3X224X224XF32),
+        ID_FORMAT, model_id=unique_ids.MODEL_RESNET50_FP32_JAX_3X224X224XF32
+    ),
     name_template=model_utils.partial_template_substitute(
-        NAME_FORMAT, name="RESNET50_FP32_JAX_3X224X224XF32"),
+        NAME_FORMAT, name="RESNET50_FP32_JAX_3X224X224XF32"
+    ),
     tags=RESNET50_TAGS,
     source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
     source_url_template=model_utils.partial_template_substitute(
-        SOURCE_URL_FORMAT, directory="RESNET50"),
+        SOURCE_URL_FORMAT, directory="RESNET50"
+    ),
     entry_function="main",
     input_type_templates=[string.Template("${batch_size}x3x224x224xf32")],
-    batch_sizes=[1, 8, 64, 128, 256, 2048])
+    batch_sizes=[1, 8, 64, 128, 256, 2048],
+)
 
 # Derived from https://huggingface.co/docs/transformers/model_doc/bert#transformers.FlaxBertModel.
 BERT_LARGE_TAGS = ["fp32", "seqlen384", "jax", "bert-variant"]
 
 BERT_LARGE_FP32_JAX_384XI32_BATCHES = model_utils.generate_batch_models(
     id_template=model_utils.partial_template_substitute(
-        ID_FORMAT, model_id=unique_ids.MODEL_BERT_LARGE_FP32_JAX_384XI32),
+        ID_FORMAT, model_id=unique_ids.MODEL_BERT_LARGE_FP32_JAX_384XI32
+    ),
     name_template=model_utils.partial_template_substitute(
-        NAME_FORMAT, name="BERT_LARGE_JAX_384XI32"),
+        NAME_FORMAT, name="BERT_LARGE_JAX_384XI32"
+    ),
     tags=BERT_LARGE_TAGS,
     source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
     source_url_template=model_utils.partial_template_substitute(
-        SOURCE_URL_FORMAT, directory="BERT_LARGE"),
+        SOURCE_URL_FORMAT, directory="BERT_LARGE"
+    ),
     entry_function="main",
     input_type_templates=[
         string.Template("${batch_size}x384xi32"),
-        string.Template("${batch_size}x384xi32")
+        string.Template("${batch_size}x384xi32"),
     ],
-    batch_sizes=[1, 16, 24, 32, 48, 64, 512, 1024, 1280])
+    batch_sizes=[1, 16, 24, 32, 48, 64, 512, 1024, 1280],
+)
 
 # Derived from https://huggingface.co/docs/transformers/model_doc/t5#transformers.FlaxT5Model
 T5_TAGS = ["fp32", "transformer-encoder", "transformer-decoder", "t5"]
 
 T5_LARGE_FP32_JAX_512XI32_BATCHES = model_utils.generate_batch_models(
     id_template=model_utils.partial_template_substitute(
-        ID_FORMAT, model_id=unique_ids.MODEL_T5_LARGE_FP32_JAX_512XI32),
+        ID_FORMAT, model_id=unique_ids.MODEL_T5_LARGE_FP32_JAX_512XI32
+    ),
     name_template=model_utils.partial_template_substitute(
-        NAME_FORMAT, name="T5_LARGE_FP32_JAX_512XI32"),
+        NAME_FORMAT, name="T5_LARGE_FP32_JAX_512XI32"
+    ),
     tags=T5_TAGS,
     source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
     source_url_template=model_utils.partial_template_substitute(
-        SOURCE_URL_FORMAT, directory="T5_LARGE"),
+        SOURCE_URL_FORMAT, directory="T5_LARGE"
+    ),
     entry_function="main",
     input_type_templates=[
         string.Template("${batch_size}x512xi32"),
-        string.Template("${batch_size}x512xi32")
+        string.Template("${batch_size}x512xi32"),
     ],
-    batch_sizes=[1, 16, 24, 32, 48, 64, 512])
+    batch_sizes=[1, 16, 24, 32, 48, 64, 512],
+)
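
As a quick sanity check of the batch templates above, a minimal sketch of how the `${model_id}`, `${name}`, and `${batch_size}` fields resolve using plain `string.Template`. The concrete model id below is invented for illustration, and `model_utils.partial_template_substitute` / `generate_batch_models` are assumed to perform essentially this substitution once per listed batch size.

```python
import string

# Same template strings as defined above in jax_models.py.
ID_FORMAT = string.Template("${model_id}-batch${batch_size}")
NAME_FORMAT = string.Template("${name}_BATCH${batch_size}")

# "deadbeef" is a made-up model id; batch size 8 is one of the listed sizes.
print(ID_FORMAT.substitute(model_id="deadbeef", batch_size=8))
# -> deadbeef-batch8
print(NAME_FORMAT.substitute(name="RESNET50_FP32_JAX_3X224X224XF32", batch_size=8))
# -> RESNET50_FP32_JAX_3X224X224XF32_BATCH8
```
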
diff --git a/build_tools/python/e2e_test_framework/models/matmul.py b/build_tools/python/e2e_test_framework/models/matmul.py
index 16a635f..d751528 100644
--- a/build_tools/python/e2e_test_framework/models/matmul.py
+++ b/build_tools/python/e2e_test_framework/models/matmul.py
@@ -13,97 +13,97 @@
     name="matmul_3456x1024x2048_f16t_tile_config_default",
     tags=["fp16", "ubench", "matmul"],
     source_type=common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/microbenchmarks/matmul/20230410_1681181224/matmul_3456x1024x2048_f16t_f16t_f16t_tile_config_default.mlirbc",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/microbenchmarks/matmul/20230410_1681181224/matmul_3456x1024x2048_f16t_f16t_f16t_tile_config_default.mlirbc",
     entry_function="matmul_3456x1024x2048_f16t_f16t_f16t_tile_config_default",
-    input_types=["3456x2048xf16", "2048x1024xf16"])
+    input_types=["3456x2048xf16", "2048x1024xf16"],
+)
 
 MATMUL_3456X1024X2048_FP32_MLIR = common_definitions.Model(
     id=unique_ids.MICRO_MATMUL_3456X1024X2048_FP32_MLIR,
     name="matmul_3456x1024x2048_f32t_tile_config_default",
     tags=["fp32", "ubench", "matmul"],
     source_type=common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/microbenchmarks/matmul/20230410_1681181224/matmul_3456x1024x2048_f32t_f32t_f32t_tile_config_default.mlirbc",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/microbenchmarks/matmul/20230410_1681181224/matmul_3456x1024x2048_f32t_f32t_f32t_tile_config_default.mlirbc",
     entry_function="matmul_3456x1024x2048_f32t_f32t_f32t_tile_config_default",
-    input_types=["3456x2048xf32", "2048x1024xf32"])
+    input_types=["3456x2048xf32", "2048x1024xf32"],
+)
 
 MATMUL_2560X2560X2560_FP16_MLIR = common_definitions.Model(
     id=unique_ids.MICRO_MATMUL_2560X2560X2560_FP16_MLIR,
     name="matmul_2560x2560x2560_f16t_tile_config_default",
     tags=["fp16", "ubench", "matmul"],
     source_type=common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/microbenchmarks/matmul/20230410_1681181224/matmul_2560x2560x2560_f16t_f16t_f16t_tile_config_default.mlirbc",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/microbenchmarks/matmul/20230410_1681181224/matmul_2560x2560x2560_f16t_f16t_f16t_tile_config_default.mlirbc",
     entry_function="matmul_2560x2560x2560_f16t_f16t_f16t_tile_config_default",
-    input_types=["2560x2560xf16", "2560x2560xf16"])
+    input_types=["2560x2560xf16", "2560x2560xf16"],
+)
 
 MATMUL_2560X2560X2560_FP32_MLIR = common_definitions.Model(
     id=unique_ids.MICRO_MATMUL_2560X2560X2560_FP32_MLIR,
     name="matmul_2560x2560x2560_f32t_tile_config_default",
     tags=["fp32", "ubench", "matmul"],
     source_type=common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/microbenchmarks/matmul/20230410_1681181224/matmul_2560x2560x2560_f32t_f32t_f32t_tile_config_default.mlirbc",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/microbenchmarks/matmul/20230410_1681181224/matmul_2560x2560x2560_f32t_f32t_f32t_tile_config_default.mlirbc",
     entry_function="matmul_2560x2560x2560_f32t_f32t_f32t_tile_config_default",
-    input_types=["2560x2560xf32", "2560x2560xf32"])
+    input_types=["2560x2560xf32", "2560x2560xf32"],
+)
 
 MATMUL_128X256X8192_FP16_MLIR = common_definitions.Model(
     id=unique_ids.MICRO_MATMUL_128X256X8192_FP16_MLIR,
     name="matmul_128x256x8192_f16t_tile_config_default",
     tags=["fp16", "ubench", "matmul", "splitk"],
     source_type=common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/microbenchmarks/matmul/20230410_1681181224/matmul_128x256x8192_f16t_f16t_f16t_tile_config_default.mlirbc",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/microbenchmarks/matmul/20230410_1681181224/matmul_128x256x8192_f16t_f16t_f16t_tile_config_default.mlirbc",
     entry_function="matmul_128x256x8192_f16t_f16t_f16t_tile_config_default",
-    input_types=["128x8192xf16", "8192x256xf16"])
+    input_types=["128x8192xf16", "8192x256xf16"],
+)
 
 MATMUL_128X256X8192_FP32_MLIR = common_definitions.Model(
     id=unique_ids.MICRO_MATMUL_128X256X8192_FP32_MLIR,
     name="matmul_128x256x8192_f32t_tile_config_default",
     tags=["fp32", "ubench", "matmul", "splitk"],
     source_type=common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/microbenchmarks/matmul/20230410_1681181224/matmul_128x256x8192_f32t_f32t_f32t_tile_config_default.mlirbc",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/microbenchmarks/matmul/20230410_1681181224/matmul_128x256x8192_f32t_f32t_f32t_tile_config_default.mlirbc",
     entry_function="matmul_128x256x8192_f32t_f32t_f32t_tile_config_default",
-    input_types=["128x8192xf32", "8192x256xf32"])
+    input_types=["128x8192xf32", "8192x256xf32"],
+)
 
 MATMUL_2564x2564x2564_FP32_MLIR = common_definitions.Model(
     id=unique_ids.MICRO_MATMUL_2564x2564x2564_FP32_MLIR,
     name="matmul_2564x2564x2564_f32t_f32t_f32t_tile_config_default",
     tags=["fp32", "ubench", "matmul"],
     source_type=common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/microbenchmarks/matmul/20230525_1685058259/matmul_2564x2564x2564_f32t_f32t_f32t_tile_config_default.mlirbc",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/microbenchmarks/matmul/20230525_1685058259/matmul_2564x2564x2564_f32t_f32t_f32t_tile_config_default.mlirbc",
     entry_function="matmul_2564x2564x2564_f32t_f32t_f32t_tile_config_default",
-    input_types=["2564x2564xf32", "2564x2564xf32", "2564x2564xf32"])
+    input_types=["2564x2564xf32", "2564x2564xf32", "2564x2564xf32"],
+)
 
 MATMUL_2562x2564x2562_FP32_MLIR = common_definitions.Model(
     id=unique_ids.MICRO_MATMUL_2562x2564x2562_FP32_MLIR,
     name="matmul_2562x2564x2562_f32t_f32t_f32t_tile_config_default",
     tags=["fp32", "ubench", "matmul"],
     source_type=common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/microbenchmarks/matmul/20230525_1685058259/matmul_2562x2564x2562_f32t_f32t_f32t_tile_config_default.mlirbc",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/microbenchmarks/matmul/20230525_1685058259/matmul_2562x2564x2562_f32t_f32t_f32t_tile_config_default.mlirbc",
     entry_function="matmul_2562x2564x2562_f32t_f32t_f32t_tile_config_default",
-    input_types=["2562x2562xf32", "2562x2564xf32", "2562x2564xf32"])
+    input_types=["2562x2562xf32", "2562x2564xf32", "2562x2564xf32"],
+)
 
 MATMUL_2562x2561x2561_FP32_MLIR = common_definitions.Model(
     id=unique_ids.MICRO_MATMUL_2562x2561x2561_FP32_MLIR,
     name="matmul_2562x2561x2561_f32t_f32t_f32t_tile_config_default",
     tags=["fp32", "ubench", "matmul"],
     source_type=common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/microbenchmarks/matmul/20230525_1685058259/matmul_2562x2561x2561_f32t_f32t_f32t_tile_config_default.mlirbc",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/microbenchmarks/matmul/20230525_1685058259/matmul_2562x2561x2561_f32t_f32t_f32t_tile_config_default.mlirbc",
     entry_function="matmul_2562x2561x2561_f32t_f32t_f32t_tile_config_default",
-    input_types=["2562x2561xf32", "2561x2561xf32", "2562x2561xf32"])
+    input_types=["2562x2561xf32", "2561x2561xf32", "2562x2561xf32"],
+)
 
 MATMUL_123x2561x2561_FP32_MLIR = common_definitions.Model(
     id=unique_ids.MICRO_MATMUL_123x2561x2561_FP32_MLIR,
     name="matmul_123x2561x2561_f32t_f32t_f32t_tile_config_default",
     tags=["fp32", "ubench", "matmul"],
     source_type=common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/microbenchmarks/matmul/20230612_1686563210/matmul_123x2561x2561_f32t_f32t_f32t_tile_config_default.mlirbc",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/microbenchmarks/matmul/20230612_1686563210/matmul_123x2561x2561_f32t_f32t_f32t_tile_config_default.mlirbc",
     entry_function="matmul_123x2561x2561_f32t_f32t_f32t_tile_config_default",
-    input_types=["123x2561xf32", "2561x2561xf32", "123x2561xf32"])
+    input_types=["123x2561xf32", "2561x2561xf32", "123x2561xf32"],
+)
diff --git a/build_tools/python/e2e_test_framework/models/model_groups.py b/build_tools/python/e2e_test_framework/models/model_groups.py
index d3a0b2a..197bea6 100644
--- a/build_tools/python/e2e_test_framework/models/model_groups.py
+++ b/build_tools/python/e2e_test_framework/models/model_groups.py
@@ -6,7 +6,13 @@
 """Defines the groups of models."""
 
 from e2e_test_framework.definitions import common_definitions
-from e2e_test_framework.models import matmul, tflite_models, torch_models, tf_models, jax_models
+from e2e_test_framework.models import (
+    matmul,
+    tflite_models,
+    torch_models,
+    tf_models,
+    jax_models,
+)
 
 # x86 models, single batch.
 
@@ -15,106 +21,145 @@
 X86_64_BENCHMARK_CONFIG = [
     # Tiny models.
     common_definitions.CpuBenchmarkConfig(
-        model=tflite_models.PERSON_DETECT_INT8, threads=[0, 1]),
-    common_definitions.CpuBenchmarkConfig(model=tflite_models.MOBILENET_V3SMALL,
-                                          threads=[0, 1]),
+        model=tflite_models.PERSON_DETECT_INT8, threads=[0, 1]
+    ),
+    common_definitions.CpuBenchmarkConfig(
+        model=tflite_models.MOBILENET_V3SMALL, threads=[0, 1]
+    ),
     # Small models.
-    common_definitions.CpuBenchmarkConfig(model=tflite_models.DEEPLABV3_FP32,
-                                          threads=[1, 8]),
-    common_definitions.CpuBenchmarkConfig(model=tflite_models.EFFICIENTNET_INT8,
-                                          threads=[1, 8]),
-    common_definitions.CpuBenchmarkConfig(model=tflite_models.MOBILENET_V1,
-                                          threads=[1, 8]),
-    common_definitions.CpuBenchmarkConfig(model=tflite_models.MOBILENET_V2,
-                                          threads=[1, 8]),
-    common_definitions.CpuBenchmarkConfig(model=tflite_models.MOBILENET_V2_INT8,
-                                          threads=[1, 8]),
-    common_definitions.CpuBenchmarkConfig(model=tflite_models.MOBILESSD_FP32,
-                                          threads=[1, 8]),
-    common_definitions.CpuBenchmarkConfig(model=tflite_models.POSENET_FP32,
-                                          threads=[1, 8]),
+    common_definitions.CpuBenchmarkConfig(
+        model=tflite_models.DEEPLABV3_FP32, threads=[1, 8]
+    ),
+    common_definitions.CpuBenchmarkConfig(
+        model=tflite_models.EFFICIENTNET_INT8, threads=[1, 8]
+    ),
+    common_definitions.CpuBenchmarkConfig(
+        model=tflite_models.MOBILENET_V1, threads=[1, 8]
+    ),
+    common_definitions.CpuBenchmarkConfig(
+        model=tflite_models.MOBILENET_V2, threads=[1, 8]
+    ),
+    common_definitions.CpuBenchmarkConfig(
+        model=tflite_models.MOBILENET_V2_INT8, threads=[1, 8]
+    ),
+    common_definitions.CpuBenchmarkConfig(
+        model=tflite_models.MOBILESSD_FP32, threads=[1, 8]
+    ),
+    common_definitions.CpuBenchmarkConfig(
+        model=tflite_models.POSENET_FP32, threads=[1, 8]
+    ),
     # Medium models.
     # TODO: Add 13 threads once we move to new hardware.
-    common_definitions.CpuBenchmarkConfig(model=tflite_models.MOBILEBERT_FP16,
-                                          threads=[1, 8]),
-    common_definitions.CpuBenchmarkConfig(model=tflite_models.MOBILEBERT_FP32,
-                                          threads=[1, 8]),
-    common_definitions.CpuBenchmarkConfig(model=tflite_models.MOBILEBERT_INT8,
-                                          threads=[1, 8]),
     common_definitions.CpuBenchmarkConfig(
-        model=tf_models.EFFICIENTNET_V2_S_FP32, threads=[1, 8]),
+        model=tflite_models.MOBILEBERT_FP16, threads=[1, 8]
+    ),
     common_definitions.CpuBenchmarkConfig(
-        model=tf_models.MINILM_L12_H384_UNCASED_INT32_SEQLEN128, threads=[1,
-                                                                          8]),
+        model=tflite_models.MOBILEBERT_FP32, threads=[1, 8]
+    ),
     common_definitions.CpuBenchmarkConfig(
-        model=torch_models.EFFICIENTNET_V2_S_FP32_TORCH, threads=[1, 8]),
+        model=tflite_models.MOBILEBERT_INT8, threads=[1, 8]
+    ),
+    common_definitions.CpuBenchmarkConfig(
+        model=tf_models.EFFICIENTNET_V2_S_FP32, threads=[1, 8]
+    ),
+    common_definitions.CpuBenchmarkConfig(
+        model=tf_models.MINILM_L12_H384_UNCASED_INT32_SEQLEN128, threads=[1, 8]
+    ),
+    common_definitions.CpuBenchmarkConfig(
+        model=torch_models.EFFICIENTNET_V2_S_FP32_TORCH, threads=[1, 8]
+    ),
     # Large models.
     # TODO: These models should be running at 8, 13, 28 threads but we use 8 for now until new hardware becomes available.
     common_definitions.CpuBenchmarkConfig(
-        model=tf_models.BERT_FOR_MASKED_LM_FP32_SEQLEN512, threads=[8]),
+        model=tf_models.BERT_FOR_MASKED_LM_FP32_SEQLEN512, threads=[8]
+    ),
     common_definitions.CpuBenchmarkConfig(
-        model=tf_models.BERT_LARGE_TF_FP32_SEQLEN384, threads=[8]),
+        model=tf_models.BERT_LARGE_TF_FP32_SEQLEN384, threads=[8]
+    ),
     common_definitions.CpuBenchmarkConfig(
-        model=torch_models.EFFICIENTNET_B7_FP32_TORCH, threads=[8]),
+        model=torch_models.EFFICIENTNET_B7_FP32_TORCH, threads=[8]
+    ),
 ]
 
 # A subset of `x86_64_MODELS_AND_THREADS`.
 X86_64_BENCHMARK_CONFIG_EXPERIMENTAL = [
     # Tiny models.
     common_definitions.CpuBenchmarkConfig(
-        model=tflite_models.PERSON_DETECT_INT8, threads=[1]),
-    common_definitions.CpuBenchmarkConfig(model=tflite_models.MOBILENET_V3SMALL,
-                                          threads=[1]),
+        model=tflite_models.PERSON_DETECT_INT8, threads=[1]
+    ),
+    common_definitions.CpuBenchmarkConfig(
+        model=tflite_models.MOBILENET_V3SMALL, threads=[1]
+    ),
     # Small models.
-    common_definitions.CpuBenchmarkConfig(model=tflite_models.DEEPLABV3_FP32,
-                                          threads=[8]),
-    common_definitions.CpuBenchmarkConfig(model=tflite_models.EFFICIENTNET_INT8,
-                                          threads=[8]),
-    common_definitions.CpuBenchmarkConfig(model=tflite_models.MOBILENET_V2,
-                                          threads=[8]),
-    common_definitions.CpuBenchmarkConfig(model=tflite_models.MOBILENET_V2_INT8,
-                                          threads=[8]),
-    common_definitions.CpuBenchmarkConfig(model=tflite_models.MOBILESSD_FP32,
-                                          threads=[8]),
-    common_definitions.CpuBenchmarkConfig(model=tflite_models.POSENET_FP32,
-                                          threads=[8]),
+    common_definitions.CpuBenchmarkConfig(
+        model=tflite_models.DEEPLABV3_FP32, threads=[8]
+    ),
+    common_definitions.CpuBenchmarkConfig(
+        model=tflite_models.EFFICIENTNET_INT8, threads=[8]
+    ),
+    common_definitions.CpuBenchmarkConfig(
+        model=tflite_models.MOBILENET_V2, threads=[8]
+    ),
+    common_definitions.CpuBenchmarkConfig(
+        model=tflite_models.MOBILENET_V2_INT8, threads=[8]
+    ),
+    common_definitions.CpuBenchmarkConfig(
+        model=tflite_models.MOBILESSD_FP32, threads=[8]
+    ),
+    common_definitions.CpuBenchmarkConfig(
+        model=tflite_models.POSENET_FP32, threads=[8]
+    ),
     # Medium models.
-    common_definitions.CpuBenchmarkConfig(model=tflite_models.MOBILEBERT_FP32,
-                                          threads=[8]),
-    common_definitions.CpuBenchmarkConfig(model=tflite_models.MOBILEBERT_INT8,
-                                          threads=[8]),
     common_definitions.CpuBenchmarkConfig(
-        model=tf_models.EFFICIENTNET_V2_S_FP32, threads=[8]),
+        model=tflite_models.MOBILEBERT_FP32, threads=[8]
+    ),
     common_definitions.CpuBenchmarkConfig(
-        model=tf_models.MINILM_L12_H384_UNCASED_INT32_SEQLEN128, threads=[8]),
+        model=tflite_models.MOBILEBERT_INT8, threads=[8]
+    ),
+    common_definitions.CpuBenchmarkConfig(
+        model=tf_models.EFFICIENTNET_V2_S_FP32, threads=[8]
+    ),
+    common_definitions.CpuBenchmarkConfig(
+        model=tf_models.MINILM_L12_H384_UNCASED_INT32_SEQLEN128, threads=[8]
+    ),
     # Disabled due to https://github.com/openxla/iree/issues/12772.
     # common_definitions.CpuBenchmarkConfig(model=torch_models.EFFICIENTNET_V2_S_FP32_TORCH, threads=[8]),
     # Large models.
     common_definitions.CpuBenchmarkConfig(
-        model=tf_models.BERT_LARGE_TF_FP32_SEQLEN384, threads=[8]),
+        model=tf_models.BERT_LARGE_TF_FP32_SEQLEN384, threads=[8]
+    ),
     # Disabled due to https://github.com/openxla/iree/issues/12772.
     # common_definitions.CpuBenchmarkConfig(model=torch_models.EFFICIENTNET_B7_FP32_TORCH, threads=[8]),
 ]
 
 X86_64_BENCHMARK_CONFIG_LONG = [
     common_definitions.CpuBenchmarkConfig(
-        model=tf_models.BERT_LARGE_384_FP32_TF_BATCHES[1], threads=[8]),
+        model=tf_models.BERT_LARGE_384_FP32_TF_BATCHES[1], threads=[8]
+    ),
     common_definitions.CpuBenchmarkConfig(
-        model=tf_models.BERT_LARGE_384_FP32_TF_BATCHES[32], threads=[8]),
+        model=tf_models.BERT_LARGE_384_FP32_TF_BATCHES[32], threads=[8]
+    ),
     common_definitions.CpuBenchmarkConfig(
-        model=tf_models.BERT_LARGE_384_FP32_TF_BATCHES[64], threads=[8]),
+        model=tf_models.BERT_LARGE_384_FP32_TF_BATCHES[64], threads=[8]
+    ),
     common_definitions.CpuBenchmarkConfig(
-        model=tf_models.RESNET50_3X224X224_FP32_TF_BATCHES[1], threads=[8]),
+        model=tf_models.RESNET50_3X224X224_FP32_TF_BATCHES[1], threads=[8]
+    ),
     common_definitions.CpuBenchmarkConfig(
-        model=tf_models.RESNET50_3X224X224_FP32_TF_BATCHES[64], threads=[8]),
+        model=tf_models.RESNET50_3X224X224_FP32_TF_BATCHES[64], threads=[8]
+    ),
     common_definitions.CpuBenchmarkConfig(
-        model=tf_models.RESNET50_3X224X224_FP32_TF_BATCHES[128], threads=[8]),
+        model=tf_models.RESNET50_3X224X224_FP32_TF_BATCHES[128], threads=[8]
+    ),
     common_definitions.CpuBenchmarkConfig(
-        model=tf_models.T5_LARGE_512_FP32_TF_BATCHES[1], threads=[8]),
+        model=tf_models.T5_LARGE_512_FP32_TF_BATCHES[1], threads=[8]
+    ),
     common_definitions.CpuBenchmarkConfig(
-        model=tf_models.T5_LARGE_512_FP32_TF_BATCHES[16], threads=[8]),
+        model=tf_models.T5_LARGE_512_FP32_TF_BATCHES[16], threads=[8]
+    ),
     common_definitions.CpuBenchmarkConfig(
-        model=tf_models.T5_LARGE_512_FP32_TF_BATCHES[32], threads=[8]),
+        model=tf_models.T5_LARGE_512_FP32_TF_BATCHES[32], threads=[8]
+    ),
 ]
 
 # Microkernels.
@@ -137,28 +182,28 @@
 
 # Batched Torch models.
 
-BERT_LARGE_TORCH_BATCHES = list(
-    torch_models.BERT_LARGE_384_FP32_TORCH_BATCHES.values())
+BERT_LARGE_TORCH_BATCHES = list(torch_models.BERT_LARGE_384_FP32_TORCH_BATCHES.values())
 
 BERT_LARGE_FP16_TORCH_BATCHES = [
-    model for batch_size, model in
-    torch_models.BERT_LARGE_384_FP16_TORCH_BATCHES.items()
+    model
+    for batch_size, model in torch_models.BERT_LARGE_384_FP16_TORCH_BATCHES.items()
     # Batch size 1 is included separately in CUDA_MODELS
     if batch_size != 1
 ]
 
 RESNET50_TORCH_BATCHES = list(
-    torch_models.RESNET50_3X224X224_FP32_TORCH_BATCHES.values())
+    torch_models.RESNET50_3X224X224_FP32_TORCH_BATCHES.values()
+)
 
 RESNET50_FP16_TORCH_BATCHES = list(
-    torch_models.RESNET50_3X224X224_FP16_TORCH_BATCHES.values())
+    torch_models.RESNET50_3X224X224_FP16_TORCH_BATCHES.values()
+)
 
 # Batched Tensorflow models.
 
 BERT_LARGE_TF_BATCHES = list(tf_models.BERT_LARGE_384_FP32_TF_BATCHES.values())
 
-RESNET50_TF_BATCHES = list(
-    tf_models.RESNET50_3X224X224_FP32_TF_BATCHES.values())
+RESNET50_TF_BATCHES = list(tf_models.RESNET50_3X224X224_FP32_TF_BATCHES.values())
 
 T5_LARGE_TF_BATCHES = [
     model
@@ -169,14 +214,11 @@
 
 # Batched JAX models.
 
-RESNET50_JAX_BATCHES = list(
-    jax_models.RESNET50_FP32_JAX_3X224X224XF32_BATCHES.values())
+RESNET50_JAX_BATCHES = list(jax_models.RESNET50_FP32_JAX_3X224X224XF32_BATCHES.values())
 
-BERT_LARGE_JAX_BATCHES = list(
-    jax_models.BERT_LARGE_FP32_JAX_384XI32_BATCHES.values())
+BERT_LARGE_JAX_BATCHES = list(jax_models.BERT_LARGE_FP32_JAX_384XI32_BATCHES.values())
 
-T5_LARGE_JAX_BATCHES = list(
-    jax_models.T5_LARGE_FP32_JAX_512XI32_BATCHES.values())
+T5_LARGE_JAX_BATCHES = list(jax_models.T5_LARGE_FP32_JAX_512XI32_BATCHES.values())
 
 # GPU model groups.
 
@@ -192,11 +234,18 @@
     torch_models.EFFICIENTNET_V2_S_FP16_TORCH,
 ]
 
-CUDA_MODELS_LONG = (RESNET50_TF_BATCHES + BERT_LARGE_TF_BATCHES +
-                    T5_LARGE_TF_BATCHES + BERT_LARGE_TORCH_BATCHES +
-                    RESNET50_TORCH_BATCHES + RESNET50_FP16_TORCH_BATCHES +
-                    BERT_LARGE_FP16_TORCH_BATCHES + BERT_LARGE_JAX_BATCHES +
-                    RESNET50_JAX_BATCHES + T5_LARGE_JAX_BATCHES)
+CUDA_MODELS_LONG = (
+    RESNET50_TF_BATCHES
+    + BERT_LARGE_TF_BATCHES
+    + T5_LARGE_TF_BATCHES
+    + BERT_LARGE_TORCH_BATCHES
+    + RESNET50_TORCH_BATCHES
+    + RESNET50_FP16_TORCH_BATCHES
+    + BERT_LARGE_FP16_TORCH_BATCHES
+    + BERT_LARGE_JAX_BATCHES
+    + RESNET50_JAX_BATCHES
+    + T5_LARGE_JAX_BATCHES
+)
 
 VULKAN_MODELS = [
     torch_models.MODEL_CLIP_TEXT_SEQLEN64_FP32_TORCH,
diff --git a/build_tools/python/e2e_test_framework/models/tf_models.py b/build_tools/python/e2e_test_framework/models/tf_models.py
index 58ac9cc..3cf7f2c 100644
--- a/build_tools/python/e2e_test_framework/models/tf_models.py
+++ b/build_tools/python/e2e_test_framework/models/tf_models.py
@@ -11,7 +11,9 @@
 from e2e_test_framework.definitions import common_definitions
 import e2e_test_framework.models.utils as model_utils
 
-TF_MODELS_MANUAL_ROOT_DIR = "https://storage.googleapis.com/iree-model-artifacts/tensorflow/manual"
+TF_MODELS_MANUAL_ROOT_DIR = (
+    "https://storage.googleapis.com/iree-model-artifacts/tensorflow/manual"
+)
 
 MINILM_L12_H384_UNCASED_INT32_SEQLEN128 = common_definitions.Model(
     id=unique_ids.MODEL_MINILM_L12_H384_UNCASED_INT32_SEQLEN128,
@@ -19,10 +21,10 @@
     tags=["int32", "seqlen128"],
     source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
     # Converted from https://huggingface.co/microsoft/MiniLM-L12-H384-uncased/commit/44acabbec0ef496f6dbc93adadea57f376b7c0ec
-    source_url=
-    f"{TF_MODELS_MANUAL_ROOT_DIR}/MiniLML12H384Uncased_2023-05-07.timestamp_1683504734.mlirbc",
+    source_url=f"{TF_MODELS_MANUAL_ROOT_DIR}/MiniLML12H384Uncased_2023-05-07.timestamp_1683504734.mlirbc",
     entry_function="predict",
-    input_types=["1x128xi32", "1x128xi32", "1x128xi32"])
+    input_types=["1x128xi32", "1x128xi32", "1x128xi32"],
+)
 
 BERT_FOR_MASKED_LM_FP32_SEQLEN512 = common_definitions.Model(
     id=unique_ids.MODEL_BERT_FOR_MASKED_LM_FP32_SEQLEN512_TF,
@@ -30,10 +32,10 @@
     tags=["fp32", "seqlen512", "tensorflow"],
     source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
     # Converted from https://huggingface.co/transformers/v3.0.2/model_doc/bert.html#tfbertformaskedlm
-    source_url=
-    f"{TF_MODELS_MANUAL_ROOT_DIR}/BertForMaskedLMTF_2023-05-07.timestamp_1683504734.mlirbc",
+    source_url=f"{TF_MODELS_MANUAL_ROOT_DIR}/BertForMaskedLMTF_2023-05-07.timestamp_1683504734.mlirbc",
     entry_function="forward",
-    input_types=["1x512xi32", "1x512xi32"])
+    input_types=["1x512xi32", "1x512xi32"],
+)
 
 EFFICIENTNET_V2_S_FP32 = common_definitions.Model(
     id=unique_ids.MODEL_EFFICIENTNET_V2_S_FP32_TF,
@@ -41,10 +43,10 @@
     tags=["fp32", "cnn", "tensorflow"],
     source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
     # Converted from https://github.com/keras-team/keras/blob/v2.10.0/keras/applications/efficientnet_v2.py
-    source_url=
-    f"{TF_MODELS_MANUAL_ROOT_DIR}/EfficientNetV2STF_2023-05-07.timestamp_1683504734.mlirbc",
+    source_url=f"{TF_MODELS_MANUAL_ROOT_DIR}/EfficientNetV2STF_2023-05-07.timestamp_1683504734.mlirbc",
     entry_function="forward",
-    input_types=["1x384x384x3xf32"])
+    input_types=["1x384x384x3xf32"],
+)
 
 # This is the model used in the MLPerf Inference Suite.
 BERT_LARGE_TF_FP32_SEQLEN384 = common_definitions.Model(
@@ -54,62 +56,75 @@
     source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
     # Derived from https://github.com/mlcommons/inference/tree/master/language/bert
     # Instructions on how to regenerate the model: https://gist.github.com/mariecwhite/e61ccebd979d98d097946ac7725bcc29
-    source_url=
-    f"{TF_MODELS_MANUAL_ROOT_DIR}/BertLargeTF_2023-05-07.timestamp_1683504734.mlirbc",
+    source_url=f"{TF_MODELS_MANUAL_ROOT_DIR}/BertLargeTF_2023-05-07.timestamp_1683504734.mlirbc",
     entry_function="serving_default",
-    input_types=["1x384xi32", "1x384xi32", "1x384xi32"])
+    input_types=["1x384xi32", "1x384xi32", "1x384xi32"],
+)
 
 TF_MODELS_ROOT_DIR = "https://storage.googleapis.com/iree-model-artifacts/tensorflow/tf_models_2.12.0_1683544084"
 
 ID_FORMAT = string.Template("${model_id}-batch-${batch_size}")
 NAME_FORMAT = string.Template("${name}Batch${batch_size}")
 SOURCE_URL_FORMAT = string.Template(
-    TF_MODELS_ROOT_DIR + "/${directory}/batch_${batch_size}/hlo.mlirbc")
+    TF_MODELS_ROOT_DIR + "/${directory}/batch_${batch_size}/hlo.mlirbc"
+)
 
 # Derived from https://huggingface.co/docs/transformers/model_doc/bert#transformers.TFBertModel.
 BERT_LARGE_384_FP32_TF_BATCHES = model_utils.generate_batch_models(
     id_template=model_utils.partial_template_substitute(
-        ID_FORMAT, model_id=unique_ids.MODEL_BERT_LARGE_384_FP32_TF),
-    name_template=model_utils.partial_template_substitute(NAME_FORMAT,
-                                                          name="BertLargeTF"),
+        ID_FORMAT, model_id=unique_ids.MODEL_BERT_LARGE_384_FP32_TF
+    ),
+    name_template=model_utils.partial_template_substitute(
+        NAME_FORMAT, name="BertLargeTF"
+    ),
     tags=["fp32", "seqlen384", "tensorflow", "bert-variant"],
     source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
     source_url_template=model_utils.partial_template_substitute(
-        SOURCE_URL_FORMAT, directory="BERT_LARGE"),
+        SOURCE_URL_FORMAT, directory="BERT_LARGE"
+    ),
     entry_function="forward",
     input_type_templates=[
         string.Template("${batch_size}x384xi32"),
-        string.Template("${batch_size}x384xi32")
+        string.Template("${batch_size}x384xi32"),
     ],
-    batch_sizes=[1, 16, 24, 32, 48, 64, 512, 1024, 1280])
+    batch_sizes=[1, 16, 24, 32, 48, 64, 512, 1024, 1280],
+)
 
 # Converted from https://www.tensorflow.org/api_docs/python/tf/keras/applications/resnet50/ResNet50
 RESNET50_3X224X224_FP32_TF_BATCHES = model_utils.generate_batch_models(
     id_template=model_utils.partial_template_substitute(
-        ID_FORMAT, model_id=unique_ids.MODEL_RESNET50_3X224X224_FP32_TF),
-    name_template=model_utils.partial_template_substitute(NAME_FORMAT,
-                                                          name="Resnet50TF"),
+        ID_FORMAT, model_id=unique_ids.MODEL_RESNET50_3X224X224_FP32_TF
+    ),
+    name_template=model_utils.partial_template_substitute(
+        NAME_FORMAT, name="Resnet50TF"
+    ),
     tags=["fp32", "cnn"],
     source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
     source_url_template=model_utils.partial_template_substitute(
-        SOURCE_URL_FORMAT, directory="RESNET50"),
+        SOURCE_URL_FORMAT, directory="RESNET50"
+    ),
     entry_function="forward",
     input_type_templates=[string.Template("${batch_size}x224x224x3xf32")],
-    batch_sizes=[1, 8, 64, 128, 256, 2048])
+    batch_sizes=[1, 8, 64, 128, 256, 2048],
+)
 
 # Derived from https://huggingface.co/transformers/v3.0.2/model_doc/t5.html#tft5model.
 T5_LARGE_512_FP32_TF_BATCHES = model_utils.generate_batch_models(
     id_template=model_utils.partial_template_substitute(
-        ID_FORMAT, model_id=unique_ids.MODEL_T5_LARGE_512_FP32_TF),
-    name_template=model_utils.partial_template_substitute(NAME_FORMAT,
-                                                          name="T5LargeTF"),
+        ID_FORMAT, model_id=unique_ids.MODEL_T5_LARGE_512_FP32_TF
+    ),
+    name_template=model_utils.partial_template_substitute(
+        NAME_FORMAT, name="T5LargeTF"
+    ),
     tags=["fp32", "seqlen512", "tensorflow"],
     source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
     source_url_template=model_utils.partial_template_substitute(
-        SOURCE_URL_FORMAT, directory="T5_LARGE"),
+        SOURCE_URL_FORMAT, directory="T5_LARGE"
+    ),
     entry_function="forward",
     input_type_templates=[
         string.Template("${batch_size}x512xi32"),
-        string.Template("${batch_size}x512xi32")
+        string.Template("${batch_size}x512xi32"),
     ],
-    batch_sizes=[1, 16, 24, 32, 48, 64, 512])
+    batch_sizes=[1, 16, 24, 32, 48, 64, 512],
+)
diff --git a/build_tools/python/e2e_test_framework/models/tflite_models.py b/build_tools/python/e2e_test_framework/models/tflite_models.py
index 4909c3b..6fdcd79 100644
--- a/build_tools/python/e2e_test_framework/models/tflite_models.py
+++ b/build_tools/python/e2e_test_framework/models/tflite_models.py
@@ -14,10 +14,10 @@
     tags=["fp32"],
     source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
     # Mirror of https://tfhub.dev/tensorflow/lite-model/deeplabv3/1/default/1
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/deeplabv3.tflite",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/deeplabv3.tflite",
     entry_function="main",
-    input_types=["1x257x257x3xf32"])
+    input_types=["1x257x257x3xf32"],
+)
 
 MOBILESSD_FP32 = common_definitions.Model(
     id=unique_ids.MODEL_MOBILESSD_FP32,
@@ -25,10 +25,10 @@
     tags=["fp32"],
     source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
     # Mirror of https://storage.googleapis.com/download.tensorflow.org/models/tflite/gpu/mobile_ssd_v2_float_coco.tflite
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/mobile_ssd_v2_float_coco.tflite",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/mobile_ssd_v2_float_coco.tflite",
     entry_function="main",
-    input_types=["1x320x320x3xf32"])
+    input_types=["1x320x320x3xf32"],
+)
 
 POSENET_FP32 = common_definitions.Model(
     id=unique_ids.MODEL_POSENET_FP32,
@@ -36,10 +36,10 @@
     tags=["fp32"],
     source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
     # Mirror of https://tfhub.dev/tensorflow/lite-model/posenet/mobilenet/float/075/1/default/1
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/posenet.tflite",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/posenet.tflite",
     entry_function="main",
-    input_types=["1x353x257x3xf32"])
+    input_types=["1x353x257x3xf32"],
+)
 
 MOBILEBERT_FP32 = common_definitions.Model(
     id=unique_ids.MODEL_MOBILEBERT_FP32,
@@ -47,10 +47,10 @@
     tags=["fp32"],
     source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
     # Mirror of https://tfhub.dev/iree/lite-model/mobilebert/fp32/1
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/mobilebert-baseline-tf2-float.tflite",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/mobilebert-baseline-tf2-float.tflite",
     entry_function="main",
-    input_types=["1x384xi32", "1x384xi32", "1x384xi32"])
+    input_types=["1x384xi32", "1x384xi32", "1x384xi32"],
+)
 
 MOBILEBERT_INT8 = common_definitions.Model(
     id=unique_ids.MODEL_MOBILEBERT_INT8,
@@ -58,10 +58,10 @@
     tags=["int8"],
     source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
     # Mirror of https://tfhub.dev/iree/lite-model/mobilebert/int8/1
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/mobilebert-baseline-tf2-quant.tflite",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/mobilebert-baseline-tf2-quant.tflite",
     entry_function="main",
-    input_types=["1x384xi32", "1x384xi32", "1x384xi32"])
+    input_types=["1x384xi32", "1x384xi32", "1x384xi32"],
+)
 
 MOBILEBERT_FP16 = common_definitions.Model(
     id=unique_ids.MODEL_MOBILEBERT_FP16,
@@ -69,10 +69,10 @@
     tags=["fp16"],
     source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
     # Mirror of https://tfhub.dev/tensorflow/lite-model/mobilebert/1/default/1
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/mobilebertsquad.tflite",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/mobilebertsquad.tflite",
     entry_function="main",
-    input_types=["1x384xi32", "1x384xi32", "1x384xi32"])
+    input_types=["1x384xi32", "1x384xi32", "1x384xi32"],
+)
 
 MOBILENET_V1 = common_definitions.Model(
     id=unique_ids.MODEL_MOBILENET_V1,
@@ -80,10 +80,10 @@
     tags=["fp32", "imagenet"],
     source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
     # Mirror of https://tfhub.dev/iree/lite-model/mobilenet_v1_100_224/fp32/1
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/mobilenet_v1_224_1.0_float.tflite",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/mobilenet_v1_224_1.0_float.tflite",
     entry_function="main",
-    input_types=["1x224x224x3xf32"])
+    input_types=["1x224x224x3xf32"],
+)
 
 MOBILENET_V2 = common_definitions.Model(
     id=unique_ids.MODEL_MOBILENET_V2,
@@ -91,10 +91,10 @@
     tags=["fp32", "imagenet"],
     source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
     # Mirror of https://github.com/tensorflow/tflite-support/blob/master/tensorflow_lite_support/metadata/python/tests/testdata/image_classifier/mobilenet_v2_1.0_224.tflite
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/mobilenet_v2_1.0_224.tflite",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/mobilenet_v2_1.0_224.tflite",
     entry_function="main",
-    input_types=["1x224x224x3xf32"])
+    input_types=["1x224x224x3xf32"],
+)
 
 MOBILENET_V3SMALL = common_definitions.Model(
     id=unique_ids.MODEL_MOBILENET_V3SMALL,
@@ -103,10 +103,10 @@
     source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
     # https://tfhub.dev/google/imagenet/mobilenet_v3_small_100_224/classification/5
     # Manually exported to tflite with static batch dimension
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/MobileNetV3SmallStaticBatch.tflite",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/MobileNetV3SmallStaticBatch.tflite",
     entry_function="main",
-    input_types=["1x224x224x3xf32"])
+    input_types=["1x224x224x3xf32"],
+)
 
 PERSON_DETECT_INT8 = common_definitions.Model(
     id=unique_ids.MODEL_PERSON_DETECT_INT8,
@@ -114,10 +114,10 @@
     tags=["int8"],
     source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
     # Mirror of https://github.com/tensorflow/tflite-micro/raw/aeac6f39e5c7475cea20c54e86d41e3a38312546/tensorflow/lite/micro/models/person_detect.tflite
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/person_detect.tflite",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/person_detect.tflite",
     entry_function="main",
-    input_types=["1x96x96x1xi8"])
+    input_types=["1x96x96x1xi8"],
+)
 
 EFFICIENTNET_INT8 = common_definitions.Model(
     id=unique_ids.MODEL_EFFICIENTNET_INT8,
@@ -125,10 +125,10 @@
     tags=["int8"],
     source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
     # Mirror of https://tfhub.dev/tensorflow/lite-model/efficientnet/lite0/int8/2
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/efficientnet_lite0_int8_2.tflite",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/efficientnet_lite0_int8_2.tflite",
     entry_function="main",
-    input_types=["1x224x224x3xui8"])
+    input_types=["1x224x224x3xui8"],
+)
 
 MOBILENET_V2_INT8 = common_definitions.Model(
     name="MobileNetV2_int8",
@@ -136,7 +136,7 @@
     tags=["int8", "imagenet"],
     source_type=common_definitions.ModelSourceType.EXPORTED_TFLITE,
     # Mirror of https://tfhub.dev/tensorflow/lite-model/mobilenet_v2_1.0_224_quantized/1/default/1
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/mobilenet_v2_1.0_224_quantized.tflite",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/mobilenet_v2_1.0_224_quantized.tflite",
     entry_function="main",
-    input_types=["1x224x224x3xui8"])
+    input_types=["1x224x224x3xui8"],
+)
diff --git a/build_tools/python/e2e_test_framework/models/torch_models.py b/build_tools/python/e2e_test_framework/models/torch_models.py
index ce17438..aae2f6d 100644
--- a/build_tools/python/e2e_test_framework/models/torch_models.py
+++ b/build_tools/python/e2e_test_framework/models/torch_models.py
@@ -28,10 +28,10 @@
     name="ClipTextSeqLen64PT",
     tags=["fp32", "seqlen64"],
     source_type=common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/pytorch/torch_models_20230307.103_1678163233/SD_CLIP_TEXT_MODEL_SEQLEN64/linalg.mlir",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/pytorch/torch_models_20230307.103_1678163233/SD_CLIP_TEXT_MODEL_SEQLEN64/linalg.mlir",
     entry_function="forward",
-    input_types=["1x77xi64", "1x77xi64"])
+    input_types=["1x77xi64", "1x77xi64"],
+)
 
 # `Unet2d` consists of `ResNet` encoder and decoder blocks with cross-attention layers.
 #
@@ -51,10 +51,10 @@
     name="Unet2dPT",
     tags=["fp32"],
     source_type=common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/pytorch/torch_models_20230307.103_1678163233/SD_UNET_MODEL/linalg.mlir",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/pytorch/torch_models_20230307.103_1678163233/SD_UNET_MODEL/linalg.mlir",
     entry_function="forward",
-    input_types=["1x4x64x64xf32", "1x77x768xf32"])
+    input_types=["1x4x64x64xf32", "1x77x768xf32"],
+)
 
 # Converted from https://pytorch.org/vision/stable/models/generated/torchvision.models.efficientnet_v2_s.html#torchvision.models.efficientnet_v2_s
 EFFICIENTNET_V2_S_FP32_TORCH = common_definitions.Model(
@@ -62,10 +62,10 @@
     name="EfficientNetV2SPT",
     tags=["fp32", "cnn", "depthwise-conv"],
     source_type=common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/pytorch/torch_models_20230321.784_1679461251/EFFICIENTNET_V2_S/batch_1/linalg.mlir",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/pytorch/torch_models_20230321.784_1679461251/EFFICIENTNET_V2_S/batch_1/linalg.mlir",
     entry_function="forward",
-    input_types=["1x3x384x384xf32"])
+    input_types=["1x3x384x384xf32"],
+)
 
 # FP16
 EFFICIENTNET_V2_S_FP16_TORCH = common_definitions.Model(
@@ -73,10 +73,10 @@
     name="EfficientNetV2Sfp16PT",
     tags=["fp16", "cnn", "depthwise-conv"],
     source_type=common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/pytorch/torch_models_20230522.846_1684831160/EFFICIENTNET_V2_S_FP16/batch_1/linalg.mlir",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/pytorch/torch_models_20230522.846_1684831160/EFFICIENTNET_V2_S_FP16/batch_1/linalg.mlir",
     entry_function="forward",
-    input_types=["1x3x384x384xf16"])
+    input_types=["1x3x384x384xf16"],
+)
 
 # Converted from https://pytorch.org/vision/stable/models/generated/torchvision.models.efficientnet_b7.html#torchvision.models.efficientnet_b7
 EFFICIENTNET_B7_FP32_TORCH = common_definitions.Model(
@@ -84,10 +84,10 @@
     name="EfficientNetB7PT",
     tags=["fp32", "cnn", "depthwise-conv"],
     source_type=common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
-    source_url=
-    "https://storage.googleapis.com/iree-model-artifacts/pytorch/torch_models_20230321.784_1679461251/EFFICIENTNET_B7/batch_1/linalg.mlir",
+    source_url="https://storage.googleapis.com/iree-model-artifacts/pytorch/torch_models_20230321.784_1679461251/EFFICIENTNET_B7/batch_1/linalg.mlir",
     entry_function="forward",
-    input_types=["1x3x600x600xf32"])
+    input_types=["1x3x600x600xf32"],
+)
 
 ID_FORMAT = string.Template("${model_id}-batch-${batch_size}")
 NAME_FORMAT = string.Template("${name}Batch${batch_size}")
@@ -107,57 +107,69 @@
 # Converted from https://huggingface.co/docs/transformers/v4.27.2/en/model_doc/bert#transformers.BertModel
 BERT_LARGE_384_FP32_TORCH_BATCHES = model_utils.generate_batch_models(
     id_template=model_utils.partial_template_substitute(
-        ID_FORMAT, model_id=unique_ids.MODEL_BERT_LARGE_384_FP32_TORCH),
-    name_template=model_utils.partial_template_substitute(NAME_FORMAT,
-                                                          name="BertLargePT"),
+        ID_FORMAT, model_id=unique_ids.MODEL_BERT_LARGE_384_FP32_TORCH
+    ),
+    name_template=model_utils.partial_template_substitute(
+        NAME_FORMAT, name="BertLargePT"
+    ),
     tags=["fp32", "transformer", "seqlen384"],
     source_type=common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
     source_url_template=BERT_LARGE_FP32_URL,
     entry_function="forward",
     input_type_templates=[
         string.Template("${batch_size}x384xi64"),
-        string.Template("${batch_size}x384xi64")
+        string.Template("${batch_size}x384xi64"),
     ],
-    batch_sizes=[1, 16, 24, 32, 48, 64, 512, 1024, 1280])
+    batch_sizes=[1, 16, 24, 32, 48, 64, 512, 1024, 1280],
+)
 
 # FP16 Versions
 BERT_LARGE_384_FP16_TORCH_BATCHES = model_utils.generate_batch_models(
     id_template=model_utils.partial_template_substitute(
-        ID_FORMAT, model_id=unique_ids.MODEL_BERT_LARGE_384_FP16_TORCH),
+        ID_FORMAT, model_id=unique_ids.MODEL_BERT_LARGE_384_FP16_TORCH
+    ),
     name_template=model_utils.partial_template_substitute(
-        NAME_FORMAT, name="BertLargefp16PT"),
+        NAME_FORMAT, name="BertLargefp16PT"
+    ),
     tags=["fp16", "transformer", "seqlen384"],
     source_type=common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
     source_url_template=BERT_LARGE_FP16_URL,
     entry_function="forward",
     input_type_templates=[
         string.Template("${batch_size}x384xi64"),
-        string.Template("${batch_size}x384xi64")
+        string.Template("${batch_size}x384xi64"),
     ],
-    batch_sizes=[1, 16, 24, 32, 48, 64, 512, 1024, 1280])
+    batch_sizes=[1, 16, 24, 32, 48, 64, 512, 1024, 1280],
+)
 
 # Converted from https://pytorch.org/vision/main/models/generated/torchvision.models.resnet50.html
 RESNET50_3X224X224_FP32_TORCH_BATCHES = model_utils.generate_batch_models(
     id_template=model_utils.partial_template_substitute(
-        ID_FORMAT, model_id=unique_ids.MODEL_RESNET50_3X224X224_FP32_TORCH),
-    name_template=model_utils.partial_template_substitute(NAME_FORMAT,
-                                                          name="Resnet50PT"),
+        ID_FORMAT, model_id=unique_ids.MODEL_RESNET50_3X224X224_FP32_TORCH
+    ),
+    name_template=model_utils.partial_template_substitute(
+        NAME_FORMAT, name="Resnet50PT"
+    ),
     tags=["fp32", "cnn"],
     source_type=common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
     source_url_template=RESNET50_FP32_URL,
     entry_function="forward",
     input_type_templates=[string.Template("${batch_size}x3x224x224xf32")],
-    batch_sizes=[1, 8, 64, 128, 256, 2048])
+    batch_sizes=[1, 8, 64, 128, 256, 2048],
+)
 
 # FP16 Versions
 RESNET50_3X224X224_FP16_TORCH_BATCHES = model_utils.generate_batch_models(
     id_template=model_utils.partial_template_substitute(
-        ID_FORMAT, model_id=unique_ids.MODEL_RESNET50_3X224X224_FP16_TORCH),
+        ID_FORMAT, model_id=unique_ids.MODEL_RESNET50_3X224X224_FP16_TORCH
+    ),
     name_template=model_utils.partial_template_substitute(
-        NAME_FORMAT, name="Resnet50fp16PT"),
+        NAME_FORMAT, name="Resnet50fp16PT"
+    ),
     tags=["fp32", "cnn"],
     source_type=common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR,
     source_url_template=RESNET50_FP16_URL,
     entry_function="forward",
     input_type_templates=[string.Template("${batch_size}x3x224x224xf16")],
-    batch_sizes=[1, 8, 64, 128, 256, 2048])
+    batch_sizes=[1, 8, 64, 128, 256, 2048],
+)
diff --git a/build_tools/python/e2e_test_framework/models/utils.py b/build_tools/python/e2e_test_framework/models/utils.py
index 654b210..53f96b3 100644
--- a/build_tools/python/e2e_test_framework/models/utils.py
+++ b/build_tools/python/e2e_test_framework/models/utils.py
@@ -11,10 +11,11 @@
 from e2e_test_framework.definitions import common_definitions
 
 
-def partial_template_substitute(template: string.Template,
-                                **substitutions) -> string.Template:
-  """Partially substitutes keywords in the template and returns a template."""
-  return string.Template(template.safe_substitute(**substitutions))
+def partial_template_substitute(
+    template: string.Template, **substitutions
+) -> string.Template:
+    """Partially substitutes keywords in the template and returns a template."""
+    return string.Template(template.safe_substitute(**substitutions))
 
 
 def generate_batch_models(
@@ -27,28 +28,29 @@
     input_type_templates: Sequence[string.Template],
     batch_sizes: Sequence[int],
 ) -> Dict[int, common_definitions.Model]:
-  """Generate model definitions for different batch sizes by substituting
-  ${batch_size}` in the template strings.
+    """Generate model definitions for different batch sizes by substituting
+    `${batch_size}` in the template strings.
 
-  Only `*_template` parameters will be treated as templates and substituted. A
-  `batch-<batch size>` tag will be appended to the tags in each returned model.
+    Only `*_template` parameters will be treated as templates and substituted. A
+    `batch-<batch size>` tag will be appended to the tags in each returned model.
 
-  Returns:
-    Map of batch size to model.
-  """
-  model_map = {}
-  for batch_size in batch_sizes:
-    substituted_input_types = [
-        input_type.substitute(batch_size=batch_size)
-        for input_type in input_type_templates
-    ]
-    model_map[batch_size] = common_definitions.Model(
-        id=id_template.substitute(batch_size=batch_size),
-        name=name_template.substitute(batch_size=batch_size),
-        tags=list(tags) + [f"batch-{batch_size}"],
-        source_type=source_type,
-        source_url=source_url_template.substitute(batch_size=batch_size),
-        entry_function=entry_function,
-        input_types=substituted_input_types)
+    Returns:
+      Map of batch size to model.
+    """
+    model_map = {}
+    for batch_size in batch_sizes:
+        substituted_input_types = [
+            input_type.substitute(batch_size=batch_size)
+            for input_type in input_type_templates
+        ]
+        model_map[batch_size] = common_definitions.Model(
+            id=id_template.substitute(batch_size=batch_size),
+            name=name_template.substitute(batch_size=batch_size),
+            tags=list(tags) + [f"batch-{batch_size}"],
+            source_type=source_type,
+            source_url=source_url_template.substitute(batch_size=batch_size),
+            entry_function=entry_function,
+            input_types=substituted_input_types,
+        )
 
-  return model_map
+    return model_map
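As a quick sketch of how these two helpers compose (illustrative only; the template values are made up and the import paths are assumed from the surrounding files in this diff):

```python
import string

from e2e_test_framework.definitions import common_definitions
from e2e_test_framework.models import utils as model_utils

# Partially fill the id template, leaving ${batch_size} for generate_batch_models.
id_template = model_utils.partial_template_substitute(
    string.Template("${model_id}-batch-${batch_size}"), model_id="example-0000"
)

# Returns a Dict[int, common_definitions.Model], one entry per batch size,
# with a "batch-<N>" tag appended to each model's tags.
models = model_utils.generate_batch_models(
    id_template=id_template,
    name_template=string.Template("ExampleModelBatch${batch_size}"),
    tags=["fp32"],
    source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
    source_url_template=string.Template("https://example.com/${batch_size}.mlir"),
    entry_function="forward",
    input_type_templates=[string.Template("${batch_size}x384xi32")],
    batch_sizes=[1, 8],
)
assert models[8].input_types == ["8x384xi32"]
```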
diff --git a/build_tools/python/e2e_test_framework/models/utils_test.py b/build_tools/python/e2e_test_framework/models/utils_test.py
index bf4011d..6d6034d 100644
--- a/build_tools/python/e2e_test_framework/models/utils_test.py
+++ b/build_tools/python/e2e_test_framework/models/utils_test.py
@@ -12,71 +12,75 @@
 
 
 class UtilsTest(unittest.TestCase):
+    def test_partial_template_substitute(self):
+        template = string.Template("${name}-${batch_size}")
 
-  def test_partial_template_substitute(self):
-    template = string.Template("${name}-${batch_size}")
+        result = model_utils.partial_template_substitute(template, name="xyz")
 
-    result = model_utils.partial_template_substitute(template, name="xyz")
+        self.assertEqual(result.substitute(batch_size=10), "xyz-10")
 
-    self.assertEqual(result.substitute(batch_size=10), "xyz-10")
+    def test_generate_batch_models(self):
+        models = model_utils.generate_batch_models(
+            id_template=string.Template("1234-${batch_size}"),
+            name_template=string.Template("model-batch-${batch_size}"),
+            tags=["abc"],
+            source_url_template=string.Template(
+                "https://example.com/x/${batch_size}.mlir"
+            ),
+            source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
+            entry_function="forward",
+            input_type_templates=[
+                string.Template("${batch_size}x128"),
+                string.Template("${batch_size}x256"),
+            ],
+            batch_sizes=[1, 4],
+        )
 
-  def test_generate_batch_models(self):
-    models = model_utils.generate_batch_models(
-        id_template=string.Template("1234-${batch_size}"),
-        name_template=string.Template("model-batch-${batch_size}"),
-        tags=["abc"],
-        source_url_template=string.Template(
-            "https://example.com/x/${batch_size}.mlir"),
-        source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
-        entry_function="forward",
-        input_type_templates=[
-            string.Template("${batch_size}x128"),
-            string.Template("${batch_size}x256")
-        ],
-        batch_sizes=[1, 4])
-
-    self.assertEqual(
-        models, {
-            1:
-                common_definitions.Model(
+        self.assertEqual(
+            models,
+            {
+                1: common_definitions.Model(
                     id="1234-1",
                     name="model-batch-1",
                     tags=["abc", "batch-1"],
                     source_url="https://example.com/x/1.mlir",
-                    source_type=common_definitions.ModelSourceType.
-                    EXPORTED_STABLEHLO_MLIR,
+                    source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
                     entry_function="forward",
-                    input_types=["1x128", "1x256"]),
-            4:
-                common_definitions.Model(
+                    input_types=["1x128", "1x256"],
+                ),
+                4: common_definitions.Model(
                     id="1234-4",
                     name="model-batch-4",
                     tags=["abc", "batch-4"],
                     source_url="https://example.com/x/4.mlir",
-                    source_type=common_definitions.ModelSourceType.
-                    EXPORTED_STABLEHLO_MLIR,
+                    source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
                     entry_function="forward",
-                    input_types=["4x128", "4x256"])
-        })
+                    input_types=["4x128", "4x256"],
+                ),
+            },
+        )
 
-  def test_generate_batch_models_missing_substitution(self):
-    id_template_with_unknown = string.Template("1234-${unknown}-${batch_size}")
+    def test_generate_batch_models_missing_substitution(self):
+        id_template_with_unknown = string.Template("1234-${unknown}-${batch_size}")
 
-    self.assertRaises(
-        KeyError, lambda: model_utils.generate_batch_models(
-            id_template=id_template_with_unknown,
-            name_template=string.Template("model-batch-${batch_size}"),
-            tags=["abc"],
-            source_url_template=string.Template(
-                "https://example.com/x/${batch_size}.mlir"),
-            source_type=common_definitions.ModelSourceType.
-            EXPORTED_STABLEHLO_MLIR,
-            entry_function="forward",
-            input_type_templates=[
-                string.Template("${batch_size}x128"),
-            ],
-            batch_sizes=[1, 4]))
+        self.assertRaises(
+            KeyError,
+            lambda: model_utils.generate_batch_models(
+                id_template=id_template_with_unknown,
+                name_template=string.Template("model-batch-${batch_size}"),
+                tags=["abc"],
+                source_url_template=string.Template(
+                    "https://example.com/x/${batch_size}.mlir"
+                ),
+                source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,
+                entry_function="forward",
+                input_type_templates=[
+                    string.Template("${batch_size}x128"),
+                ],
+                batch_sizes=[1, 4],
+            ),
+        )
 
 
 if __name__ == "__main__":
-  unittest.main()
+    unittest.main()
diff --git a/build_tools/python/e2e_test_framework/serialization.py b/build_tools/python/e2e_test_framework/serialization.py
index bc430c7..5cd3d35 100644
--- a/build_tools/python/e2e_test_framework/serialization.py
+++ b/build_tools/python/e2e_test_framework/serialization.py
@@ -18,227 +18,234 @@
 SUPPORTED_PRIMITIVE_TYPES = {str, int, float, bool, NONE_TYPE}
 
 
-def serialize_and_pack(obj,
-                       root_obj_field_name="root_obj",
-                       keyed_obj_map_field_name="keyed_obj_map"):
-  """Converts and packs the object into a serializable object.
-  
-  Args:
-    obj: object to be serialized.
-    root_obj_field_name: field name of the top-level object in the return dict.
-    keyed_obj_map_field_name: field name of the keyed object map in the return
-      dict.
-  Returns
-    A serializable dict.
-  """
+def serialize_and_pack(
+    obj, root_obj_field_name="root_obj", keyed_obj_map_field_name="keyed_obj_map"
+):
+    """Converts and packs the object into a serializable object.
 
-  if root_obj_field_name == keyed_obj_map_field_name:
-    raise ValueError(
-        f"root_obj and keyed_obj_map can't have the same field name.")
+    Args:
+      obj: object to be serialized.
+      root_obj_field_name: field name of the top-level object in the return dict.
+      keyed_obj_map_field_name: field name of the keyed object map in the return
+        dict.
+    Returns:
+      A serializable dict.
+    """
 
-  keyed_obj_map = {}
-  root_obj = _serialize(obj=obj, keyed_obj_map=keyed_obj_map)
-  return {
-      root_obj_field_name: root_obj,
-      keyed_obj_map_field_name: keyed_obj_map
-  }
+    if root_obj_field_name == keyed_obj_map_field_name:
+        raise ValueError(f"root_obj and keyed_obj_map can't have the same field name.")
+
+    keyed_obj_map = {}
+    root_obj = _serialize(obj=obj, keyed_obj_map=keyed_obj_map)
+    return {root_obj_field_name: root_obj, keyed_obj_map_field_name: keyed_obj_map}
 
 
-T = TypeVar('T')
+T = TypeVar("T")
 
 
-def unpack_and_deserialize(data,
-                           root_type: Type[T],
-                           root_obj_field_name="root_obj",
-                           keyed_obj_map_field_name="keyed_obj_map") -> T:
-  """Unpacks and deserializes the data back to the typed object.
+def unpack_and_deserialize(
+    data,
+    root_type: Type[T],
+    root_obj_field_name="root_obj",
+    keyed_obj_map_field_name="keyed_obj_map",
+) -> T:
+    """Unpacks and deserializes the data back to the typed object.
 
-  Args:
-    data: serialized data dict.
-    root_type: top-level object type of the data.
-    root_obj_field_name: field name of the top-level object in the dict.
-    keyed_obj_map_field_name: field name of the keyed object map in the dict.
-  Returns:
-    A deserialized object.
-  """
-  obj = _deserialize(data=data[root_obj_field_name],
-                     obj_type=root_type,
-                     keyed_obj_map=data[keyed_obj_map_field_name])
-  return typing.cast(root_type, obj)
+    Args:
+      data: serialized data dict.
+      root_type: top-level object type of the data.
+      root_obj_field_name: field name of the top-level object in the dict.
+      keyed_obj_map_field_name: field name of the keyed object map in the dict.
+    Returns:
+      A deserialized object.
+    """
+    obj = _deserialize(
+        data=data[root_obj_field_name],
+        obj_type=root_type,
+        keyed_obj_map=data[keyed_obj_map_field_name],
+    )
+    return typing.cast(root_type, obj)
 
 
 def _serialize(obj, keyed_obj_map: Dict[str, Any]):
-  """Converts the object into a serializable object.
-  
-  Args:
-    obj: object to be serialized.
-    keyed_obj_map: mutable container to store the keyed serializable object.
-  Returns
-    A serializable object.
-  """
+    """Converts the object into a serializable object.
 
-  serialize_func = getattr(obj, SERIALIZE_FUNC_NAME, None)
-  if serialize_func is not None:
-    return serialize_func(keyed_obj_map)
+    Args:
+      obj: object to be serialized.
+      keyed_obj_map: mutable container to store the keyed serializable object.
+    Returns:
+      A serializable object.
+    """
 
-  elif isinstance(obj, list):
-    return [_serialize(value, keyed_obj_map) for value in obj]
+    serialize_func = getattr(obj, SERIALIZE_FUNC_NAME, None)
+    if serialize_func is not None:
+        return serialize_func(keyed_obj_map)
 
-  elif isinstance(obj, Enum):
-    return obj.name
+    elif isinstance(obj, list):
+        return [_serialize(value, keyed_obj_map) for value in obj]
 
-  elif isinstance(obj, dict):
-    result_dict = {}
-    for key, value in obj.items():
-      if type(key) not in SUPPORTED_DICT_KEY_TYPES:
-        raise ValueError(f"Unsupported key {key} in the dict {obj}.")
-      result_dict[key] = _serialize(value, keyed_obj_map)
-    return result_dict
+    elif isinstance(obj, Enum):
+        return obj.name
 
-  elif type(obj) in SUPPORTED_PRIMITIVE_TYPES:
-    return obj
+    elif isinstance(obj, dict):
+        result_dict = {}
+        for key, value in obj.items():
+            if type(key) not in SUPPORTED_DICT_KEY_TYPES:
+                raise ValueError(f"Unsupported key {key} in the dict {obj}.")
+            result_dict[key] = _serialize(value, keyed_obj_map)
+        return result_dict
 
-  raise ValueError(f"Unsupported object: {obj}.")
+    elif type(obj) in SUPPORTED_PRIMITIVE_TYPES:
+        return obj
+
+    raise ValueError(f"Unsupported object: {obj}.")
 
 
-def _deserialize(data,
-                 obj_type: Type,
-                 keyed_obj_map: Dict[str, Any],
-                 obj_cache: Dict[str, Any] = {}):
-  """Deserializes the data back to the typed object.
+def _deserialize(
+    data, obj_type: Type, keyed_obj_map: Dict[str, Any], obj_cache: Dict[str, Any] = {}
+):
+    """Deserializes the data back to the typed object.
 
-  Args:
-    data: serialized data.
-    obj_type: type of the data.
-    keyed_obj_map: container of the keyed serializable object.
-  Returns:
-    A deserialized object.
-  """
+    Args:
+      data: serialized data.
+      obj_type: type of the data.
+      keyed_obj_map: container of the keyed serializable object.
+    Returns:
+      A deserialized object.
+    """
 
-  deserialize_func = getattr(obj_type, DESERIALIZE_FUNC_NAME, None)
-  if deserialize_func is not None:
-    return deserialize_func(data, keyed_obj_map, obj_cache)
+    deserialize_func = getattr(obj_type, DESERIALIZE_FUNC_NAME, None)
+    if deserialize_func is not None:
+        return deserialize_func(data, keyed_obj_map, obj_cache)
 
-  elif typing.get_origin(obj_type) == list:
-    subtype, = typing.get_args(obj_type)
-    return [
-        _deserialize(item, subtype, keyed_obj_map, obj_cache) for item in data
-    ]
+    elif typing.get_origin(obj_type) == list:
+        (subtype,) = typing.get_args(obj_type)
+        return [_deserialize(item, subtype, keyed_obj_map, obj_cache) for item in data]
 
-  elif typing.get_origin(obj_type) == dict:
-    _, value_type = typing.get_args(obj_type)
-    return dict((key, _deserialize(value, value_type, keyed_obj_map, obj_cache))
-                for key, value in data.items())
+    elif typing.get_origin(obj_type) == dict:
+        _, value_type = typing.get_args(obj_type)
+        return dict(
+            (key, _deserialize(value, value_type, keyed_obj_map, obj_cache))
+            for key, value in data.items()
+        )
 
-  elif typing.get_origin(obj_type) == Union:
-    subtypes = typing.get_args(obj_type)
-    if len(subtypes) != 2 or NONE_TYPE not in subtypes:
-      raise ValueError(f"Unsupported union type: {obj_type}.")
-    subtype = subtypes[0] if subtypes[1] == NONE_TYPE else subtypes[1]
-    return _deserialize(data, subtype, keyed_obj_map, obj_cache)
+    elif typing.get_origin(obj_type) == Union:
+        subtypes = typing.get_args(obj_type)
+        if len(subtypes) != 2 or NONE_TYPE not in subtypes:
+            raise ValueError(f"Unsupported union type: {obj_type}.")
+        subtype = subtypes[0] if subtypes[1] == NONE_TYPE else subtypes[1]
+        return _deserialize(data, subtype, keyed_obj_map, obj_cache)
 
-  elif issubclass(obj_type, Enum):
-    for member in obj_type:
-      if data == member.name:
-        return member
-    raise ValueError(f"Member {data} not found in the enum {obj_type}.")
+    elif issubclass(obj_type, Enum):
+        for member in obj_type:
+            if data == member.name:
+                return member
+        raise ValueError(f"Member {data} not found in the enum {obj_type}.")
 
-  return data
+    return data
 
 
-def serializable(cls=None,
-                 type_key: Optional[str] = None,
-                 id_field: str = "id"):
-  """Decorator to make a dataclass serializable.
-  
-  Args:
-    type_key: string defines the object type and indeicates that the class is a
-      keyed object, which is unique per id and will only have one copy in the
-      serialization per id.
-    id_field: field name of the id field of a keyed object.
+def serializable(cls=None, type_key: Optional[str] = None, id_field: str = "id"):
+    """Decorator to make a dataclass serializable.
 
-  Example:
-    @serializable
-    @dataclass
-    class A(object):
-      ...
+    Args:
+      type_key: string that defines the object type and indicates that the class is a
+        keyed object, which is unique per id and will only have one copy in the
+        serialization per id.
+      id_field: field name of the id field of a keyed object.
 
-    @serialzable(type_key="obj_b")
-    @dataclass
-    class B(object):
-      id: str
-  """
+    Example:
+      @serializable
+      @dataclass
+      class A(object):
+        ...
 
-  if type_key is not None and ":" in type_key:
-    raise ValueError("':' is the reserved character in type_key.")
+      @serializable(type_key="obj_b")
+      @dataclass
+      class B(object):
+        id: str
+    """
 
-  def wrap(cls):
-    if not dataclasses.is_dataclass(cls):
-      raise ValueError(f"{cls} is not a dataclass.")
+    if type_key is not None and ":" in type_key:
+        raise ValueError("':' is the reserved character in type_key.")
 
-    fields = dataclasses.fields(cls)
-    if type_key is not None and all(field.name != id_field for field in fields):
-      raise ValueError(f'Id field "{id_field}" not found in the class {cls}.')
+    def wrap(cls):
+        if not dataclasses.is_dataclass(cls):
+            raise ValueError(f"{cls} is not a dataclass.")
 
-    def serialize(self, keyed_obj_map: Dict[str, Any]):
-      if type_key is None:
-        return _fields_to_dict(self, fields, keyed_obj_map)
+        fields = dataclasses.fields(cls)
+        if type_key is not None and all(field.name != id_field for field in fields):
+            raise ValueError(f'Id field "{id_field}" not found in the class {cls}.')
 
-      obj_id = getattr(self, id_field)
-      obj_key = f"{type_key}:{obj_id}"
-      if obj_key in keyed_obj_map:
-        # If the value in the map is None, it means we have visited this object
-        # before but not yet finished serializing it. This will only happen if
-        # there is a circular reference.
-        if keyed_obj_map[obj_key] is None:
-          raise ValueError(f"Circular reference is not supported: {obj_key}.")
-        return obj_id
+        def serialize(self, keyed_obj_map: Dict[str, Any]):
+            if type_key is None:
+                return _fields_to_dict(self, fields, keyed_obj_map)
 
-      # Populate the keyed_obj_map with None first to detect circular reference.
-      keyed_obj_map[obj_key] = None
-      obj_dict = _fields_to_dict(self, fields, keyed_obj_map)
-      keyed_obj_map[obj_key] = obj_dict
-      return obj_id
+            obj_id = getattr(self, id_field)
+            obj_key = f"{type_key}:{obj_id}"
+            if obj_key in keyed_obj_map:
+                # If the value in the map is None, it means we have visited this object
+                # before but not yet finished serializing it. This will only happen if
+                # there is a circular reference.
+                if keyed_obj_map[obj_key] is None:
+                    raise ValueError(f"Circular reference is not supported: {obj_key}.")
+                return obj_id
 
-    def deserialize(data, keyed_obj_map: Dict[str, Any], obj_cache: Dict[str,
-                                                                         Any]):
-      if type_key is None:
-        field_value_map = _dict_to_fields(data, fields, keyed_obj_map,
-                                          obj_cache)
-        return cls(**field_value_map)
+            # Populate the keyed_obj_map with None first to detect circular reference.
+            keyed_obj_map[obj_key] = None
+            obj_dict = _fields_to_dict(self, fields, keyed_obj_map)
+            keyed_obj_map[obj_key] = obj_dict
+            return obj_id
 
-      obj_id = data
-      obj_key = f"{type_key}:{obj_id}"
-      if obj_key in obj_cache:
-        return obj_cache[obj_key]
+        def deserialize(data, keyed_obj_map: Dict[str, Any], obj_cache: Dict[str, Any]):
+            if type_key is None:
+                field_value_map = _dict_to_fields(
+                    data, fields, keyed_obj_map, obj_cache
+                )
+                return cls(**field_value_map)
 
-      field_value_map = _dict_to_fields(keyed_obj_map[obj_key], fields,
-                                        keyed_obj_map, obj_cache)
-      derialized_obj = cls(**field_value_map)
-      obj_cache[obj_key] = derialized_obj
-      return derialized_obj
+            obj_id = data
+            obj_key = f"{type_key}:{obj_id}"
+            if obj_key in obj_cache:
+                return obj_cache[obj_key]
 
-    setattr(cls, SERIALIZE_FUNC_NAME, serialize)
-    setattr(cls, DESERIALIZE_FUNC_NAME, deserialize)
-    return cls
+            field_value_map = _dict_to_fields(
+                keyed_obj_map[obj_key], fields, keyed_obj_map, obj_cache
+            )
+            deserialized_obj = cls(**field_value_map)
+            obj_cache[obj_key] = deserialized_obj
+            return deserialized_obj
 
-  # Trick to allow the decoration with `@serializable(...)`. In that case,
-  # `serializable` is called without cls and should return a decorator.
-  if cls is None:
-    return wrap
-  return wrap(cls)
+        setattr(cls, SERIALIZE_FUNC_NAME, serialize)
+        setattr(cls, DESERIALIZE_FUNC_NAME, deserialize)
+        return cls
+
+    # Trick to allow the decoration with `@serializable(...)`. In that case,
+    # `serializable` is called without cls and should return a decorator.
+    if cls is None:
+        return wrap
+    return wrap(cls)
 
 
-def _fields_to_dict(obj, fields: Sequence[dataclasses.Field],
-                    keyed_obj_map: Dict[str, Any]) -> Dict[str, Any]:
-  return dict((field.name, _serialize(getattr(obj, field.name), keyed_obj_map))
-              for field in fields)
+def _fields_to_dict(
+    obj, fields: Sequence[dataclasses.Field], keyed_obj_map: Dict[str, Any]
+) -> Dict[str, Any]:
+    return dict(
+        (field.name, _serialize(getattr(obj, field.name), keyed_obj_map))
+        for field in fields
+    )
 
 
-def _dict_to_fields(obj_dict, fields: Sequence[dataclasses.Field],
-                    keyed_obj_map: Dict[str, Any],
-                    obj_cache: Dict[str, Any]) -> Dict[str, Any]:
-  return dict(
-      (field.name,
-       _deserialize(obj_dict[field.name], field.type, keyed_obj_map, obj_cache))
-      for field in fields)
+def _dict_to_fields(
+    obj_dict,
+    fields: Sequence[dataclasses.Field],
+    keyed_obj_map: Dict[str, Any],
+    obj_cache: Dict[str, Any],
+) -> Dict[str, Any]:
+    return dict(
+        (
+            field.name,
+            _deserialize(obj_dict[field.name], field.type, keyed_obj_map, obj_cache),
+        )
+        for field in fields
+    )
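To make the decorator's contract concrete, here is a minimal round-trip sketch (the `Person`/`Team` classes are hypothetical; the pattern mirrors the tests below):

```python
import json
from dataclasses import dataclass
from typing import List

from e2e_test_framework import serialization


# A keyed object: serialized once per id and referenced by id elsewhere.
@serialization.serializable(type_key="person")
@dataclass
class Person(object):
    id: str
    age: int


# A plain serializable dataclass that references keyed objects.
@serialization.serializable
@dataclass
class Team(object):
    members: List[Person]


team = Team(members=[Person(id="alice", age=30), Person(id="bob", age=40)])

# The packed dict contains a root object plus a keyed object map, so it is JSON-safe.
packed = serialization.serialize_and_pack(team)
restored = serialization.unpack_and_deserialize(json.loads(json.dumps(packed)), Team)
assert restored == team
```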
diff --git a/build_tools/python/e2e_test_framework/serialization_test.py b/build_tools/python/e2e_test_framework/serialization_test.py
index 2d10775..e4fcca4 100644
--- a/build_tools/python/e2e_test_framework/serialization_test.py
+++ b/build_tools/python/e2e_test_framework/serialization_test.py
@@ -16,141 +16,162 @@
 
 
 class EnumX(enum.Enum):
-  OPTION_A = "a"
-  OPTION_B = "b"
-  OPTION_C = "c"
+    OPTION_A = "a"
+    OPTION_B = "b"
+    OPTION_C = "c"
 
 
 @serialization.serializable
 @dataclass
 class TestC(object):
-  float_val: float
+    float_val: float
 
 
 @serialization.serializable(type_key="test_b", id_field="key")
 @dataclass
 class TestB(object):
-  key: str
-  int_val: int
+    key: str
+    int_val: int
 
 
 @serialization.serializable
 @dataclass
 class TestA(object):
-  b_list: List[TestB]
-  c_obj: TestC
-  str_val: Optional[str]
-  enum_val: EnumX
+    b_list: List[TestB]
+    c_obj: TestC
+    str_val: Optional[str]
+    enum_val: EnumX
 
 
 @serialization.serializable
 @dataclass
 class TestUnsupported(object):
-  path: pathlib.PurePath
+    path: pathlib.PurePath
 
 
 @serialization.serializable(type_key="test_circular")
 @dataclass
 class TestCircularReference(object):
-  id: str
-  child: Optional["TestCircularReference"]
+    id: str
+    child: Optional["TestCircularReference"]
 
 
 class SerializationTest(unittest.TestCase):
+    def test_serialize_and_pack(self):
+        b_obj_a = TestB(key="id_a", int_val=10)
+        b_obj_b = TestB(key="id_b", int_val=20)
+        test_objs = [
+            TestA(
+                b_list=[b_obj_a, b_obj_b],
+                c_obj=TestC(float_val=0.1),
+                str_val="test1",
+                enum_val=EnumX.OPTION_B,
+            ),
+            TestA(
+                b_list=[b_obj_a],
+                c_obj=TestC(float_val=0.2),
+                str_val=None,
+                enum_val=EnumX.OPTION_C,
+            ),
+        ]
 
-  def test_serialize_and_pack(self):
-    b_obj_a = TestB(key="id_a", int_val=10)
-    b_obj_b = TestB(key="id_b", int_val=20)
-    test_objs = [
-        TestA(b_list=[b_obj_a, b_obj_b],
-              c_obj=TestC(float_val=0.1),
-              str_val="test1",
-              enum_val=EnumX.OPTION_B),
-        TestA(b_list=[b_obj_a],
-              c_obj=TestC(float_val=0.2),
-              str_val=None,
-              enum_val=EnumX.OPTION_C)
-    ]
+        results = serialization.serialize_and_pack(
+            test_objs,
+            root_obj_field_name="main_obj",
+            keyed_obj_map_field_name="obj_map",
+        )
 
-    results = serialization.serialize_and_pack(
-        test_objs,
-        root_obj_field_name="main_obj",
-        keyed_obj_map_field_name="obj_map")
+        self.maxDiff = None
+        self.assertEqual(
+            results,
+            {
+                "main_obj": [
+                    dict(
+                        b_list=["id_a", "id_b"],
+                        c_obj=dict(float_val=0.1),
+                        str_val="test1",
+                        enum_val="OPTION_B",
+                    ),
+                    dict(
+                        b_list=["id_a"],
+                        c_obj=dict(float_val=0.2),
+                        str_val=None,
+                        enum_val="OPTION_C",
+                    ),
+                ],
+                "obj_map": {
+                    "test_b:id_a": dict(key="id_a", int_val=10),
+                    "test_b:id_b": dict(key="id_b", int_val=20),
+                },
+            },
+        )
 
-    self.maxDiff = None
-    self.assertEqual(
-        results, {
-            "main_obj": [
-                dict(b_list=["id_a", "id_b"],
-                     c_obj=dict(float_val=0.1),
-                     str_val="test1",
-                     enum_val="OPTION_B"),
-                dict(b_list=["id_a"],
-                     c_obj=dict(float_val=0.2),
-                     str_val=None,
-                     enum_val="OPTION_C")
-            ],
-            "obj_map": {
-                "test_b:id_a": dict(key="id_a", int_val=10),
-                "test_b:id_b": dict(key="id_b", int_val=20)
-            }
-        })
+    def test_serialize_and_pack_with_unsupported_type(self):
+        self.assertRaises(
+            ValueError,
+            lambda: serialization.serialize_and_pack(
+                TestUnsupported(path=pathlib.PurePath("abc"))
+            ),
+        )
 
-  def test_serialize_and_pack_with_unsupported_type(self):
-    self.assertRaises(
-        ValueError, lambda: serialization.serialize_and_pack(
-            TestUnsupported(path=pathlib.PurePath("abc"))))
+    def test_serialize_and_pack_with_unsupported_dict_key(self):
+        self.assertRaises(
+            ValueError, lambda: serialization.serialize_and_pack({(0, 0): "test"})
+        )
 
-  def test_serialize_and_pack_with_unsupported_dict_key(self):
-    self.assertRaises(
-        ValueError, lambda: serialization.serialize_and_pack({(0, 0): "test"}))
+    def test_serialize_and_pack_with_circular_reference(self):
+        obj_a = TestCircularReference(id="0", child=None)
+        obj_b = TestCircularReference(id="1", child=obj_a)
+        obj_a.child = obj_b
 
-  def test_serialize_and_pack_with_circular_reference(self):
-    obj_a = TestCircularReference(id="0", child=None)
-    obj_b = TestCircularReference(id="1", child=obj_a)
-    obj_a.child = obj_b
+        self.assertRaises(ValueError, lambda: serialization.serialize_and_pack(obj_a))
 
-    self.assertRaises(ValueError,
-                      lambda: serialization.serialize_and_pack(obj_a))
+    def test_roundtrip(self):
+        b_obj_a = TestB(key="id_a", int_val=10)
+        b_obj_b = TestB(key="id_b", int_val=20)
+        test_objs = [
+            TestA(
+                b_list=[b_obj_a, b_obj_b],
+                c_obj=TestC(float_val=0.1),
+                str_val="test1",
+                enum_val=EnumX.OPTION_B,
+            ),
+            TestA(
+                b_list=[b_obj_a],
+                c_obj=TestC(float_val=0.2),
+                str_val=None,
+                enum_val=EnumX.OPTION_C,
+            ),
+            TestA(
+                b_list=[b_obj_b],
+                c_obj=TestC(float_val=0.3),
+                str_val="test3",
+                enum_val=EnumX.OPTION_A,
+            ),
+        ]
 
-  def test_roundtrip(self):
-    b_obj_a = TestB(key="id_a", int_val=10)
-    b_obj_b = TestB(key="id_b", int_val=20)
-    test_objs = [
-        TestA(b_list=[b_obj_a, b_obj_b],
-              c_obj=TestC(float_val=0.1),
-              str_val="test1",
-              enum_val=EnumX.OPTION_B),
-        TestA(b_list=[b_obj_a],
-              c_obj=TestC(float_val=0.2),
-              str_val=None,
-              enum_val=EnumX.OPTION_C),
-        TestA(b_list=[b_obj_b],
-              c_obj=TestC(float_val=0.3),
-              str_val="test3",
-              enum_val=EnumX.OPTION_A),
-    ]
+        results = serialization.unpack_and_deserialize(
+            serialization.serialize_and_pack(test_objs), typing.List[TestA]
+        )
 
-    results = serialization.unpack_and_deserialize(
-        serialization.serialize_and_pack(test_objs), typing.List[TestA])
+        self.assertEqual(results, test_objs)
 
-    self.assertEqual(results, test_objs)
+    def test_roundtrip_with_json(self):
+        b_obj_a = TestB(key="id_a", int_val=10)
+        b_obj_b = TestB(key="id_b", int_val=20)
 
-  def test_roundtrip_with_json(self):
-    b_obj_a = TestB(key="id_a", int_val=10)
-    b_obj_b = TestB(key="id_b", int_val=20)
+        objs = {
+            "x": b_obj_a,
+            "y": b_obj_b,
+        }
 
-    objs = {
-        "x": b_obj_a,
-        "y": b_obj_b,
-    }
+        json_str = json.dumps(serialization.serialize_and_pack(objs))
+        results = serialization.unpack_and_deserialize(
+            json.loads(json_str), typing.Dict[str, TestB]
+        )
 
-    json_str = json.dumps(serialization.serialize_and_pack(objs))
-    results = serialization.unpack_and_deserialize(json.loads(json_str),
-                                                   typing.Dict[str, TestB])
-
-    self.assertEqual(results, objs)
+        self.assertEqual(results, objs)
 
 
 if __name__ == "__main__":
-  unittest.main()
+    unittest.main()
diff --git a/build_tools/python/e2e_test_framework/unique_ids.py b/build_tools/python/e2e_test_framework/unique_ids.py
index 7589f4f..ef37787 100644
--- a/build_tools/python/e2e_test_framework/unique_ids.py
+++ b/build_tools/python/e2e_test_framework/unique_ids.py
@@ -24,40 +24,38 @@
 
 
 def hash_composite_id(keys: Sequence[str]) -> str:
-  """Computes the composite hash id from string keys.
+    """Computes the composite hash id from string keys.
 
-  String keys are the component ids that compose this composite object. We hash
-  the composite id since the id isn't designed to be inspected and insufficient
-  to reconstruct the original composite object.
+    String keys are the component ids that compose this composite object. We hash
+    the composite id since the id isn't designed to be inspected and is insufficient
+    to reconstruct the original composite object.
 
-  Note that the output is sensitive to the order of the keys, and any key ==
-  TRANSPARENT_ID will be skipped. When adding a new key to the keys, the new key
-  should be always appended to the end. In this way, the composite id can be
-  unchanged for the existing composite object if they use TRANSPARENT_ID on the
-  new keyed field.
+    Note that the output is sensitive to the order of the keys, and any key ==
+    TRANSPARENT_ID will be skipped. When adding a new key to the keys, the new key
+    should always be appended to the end. In this way, the composite id remains
+    unchanged for existing composite objects if they use TRANSPARENT_ID for the
+    new keyed field.
 
-  The composite id is computed in the following steps:
-  1. Index each key with its position in the list from 0.
-  2. Remove any key == TRANSPARENT_ID
-  3. Get the SHA256 hex digest of "0-key_0:1-key_1:..."
+    The composite id is computed in the following steps:
+    1. Index each key with its position in the list from 0.
+    2. Remove any key == TRANSPARENT_ID
+    3. Get the SHA256 hex digest of "0-key_0:1-key_1:..."
 
-  Step 1 is needed to avoid the ambiguity between:
-  ["key_abc", TRANSPARENT_ID] and [TRANSPARENT_ID, "key_abc"]
-  since after removing TRANSPARENT_ID, they both become ["key_abc"] without the
-  position index.
+    Step 1 is needed to avoid the ambiguity between:
+    ["key_abc", TRANSPARENT_ID] and [TRANSPARENT_ID, "key_abc"]
+    since after removing TRANSPARENT_ID, they both become ["key_abc"] without the
+    position index.
 
-  Args:
-    keys: list of string keys.
+    Args:
+      keys: list of string keys.
 
-  Returns:
-    Unique composite id.
-  """
-  trimmed_indexed_key = [
-      f"{index}-{key}" for index, key in enumerate(keys)
-      if key != TRANSPARENT_ID
-  ]
-  return hashlib.sha256(
-      ":".join(trimmed_indexed_key).encode("utf-8")).hexdigest()
+    Returns:
+      Unique composite id.
+    """
+    trimmed_indexed_key = [
+        f"{index}-{key}" for index, key in enumerate(keys) if key != TRANSPARENT_ID
+    ]
+    return hashlib.sha256(":".join(trimmed_indexed_key).encode("utf-8")).hexdigest()
 
 
 # To generate an id, run `uuid.uuid4()`.
@@ -141,33 +139,69 @@
 # IREE benchmarks
 IREE_COMPILE_CONFIG_VMVX_GENERIC_EXPERIMENTAL = "75336abd-8108-462c-9ce3-15443e3f32f4"
 IREE_COMPILE_CONFIG_LINUX_CASCADELAKE = "e7e18b0f-c72d-4f1c-89b1-5afee70df6e9"
-IREE_COMPILE_CONFIG_LINUX_CASCADELAKE_FUSE_PADDING = "6d0d5716-5525-44ad-b71d-8075ee1583a6"
+IREE_COMPILE_CONFIG_LINUX_CASCADELAKE_FUSE_PADDING = (
+    "6d0d5716-5525-44ad-b71d-8075ee1583a6"
+)
 IREE_COMPILE_CONFIG_LINUX_RV64_GENERIC_DEFAULTS = "cdf579a9-5446-403b-a991-802a6c702e65"
 IREE_COMPILE_CONFIG_LINUX_RV32_GENERIC_DEFAULTS = "6d9ce240-ec14-4d8f-a8e4-1b20aa17b4e4"
 IREE_COMPILE_CONFIG_LINUX_CUDA_SM80_DEFAULTS = "09cb5300-7f73-45cf-9f68-e114c77ca030"
-IREE_COMPILE_CONFIG_LINUX_CUDA_SM80_MATMUL_UBENCH = "3f66ba98-5716-4d30-9a87-50bc78e5f714"
-IREE_COMPILE_CONFIG_LINUX_CUDA_SM80_MATMUL_SPLITK_UBENCH = "54cf2ec3-d073-4281-9561-b6c1280bd0eb"
+IREE_COMPILE_CONFIG_LINUX_CUDA_SM80_MATMUL_UBENCH = (
+    "3f66ba98-5716-4d30-9a87-50bc78e5f714"
+)
+IREE_COMPILE_CONFIG_LINUX_CUDA_SM80_MATMUL_SPLITK_UBENCH = (
+    "54cf2ec3-d073-4281-9561-b6c1280bd0eb"
+)
 IREE_COMPILE_CONFIG_LINUX_VULKAN_SD_SIMT = "da0ea6e6-719b-43ee-bfec-72eb3b1173bf"
 IREE_COMPILE_CONFIG_LINUX_VULKAN_SD_TENSORCORE = "97790694-4f0f-4d83-bc52-d74e019c1df9"
-IREE_COMPILE_CONFIG_ANDROID_ARM_VALHALL_DEFAULTS = "8da35f2b-a042-4b7d-9dcf-5ebbc1728765"
-IREE_COMPILE_CONFIG_ANDROID_ARM_VALHALL_EXPERIMENTAL = "32a56c8d-cc6c-41b8-8620-1f8eda0b8223"
-IREE_COMPILE_CONFIG_ANDROID_ARM_VALHALL_EXPERIMENTAL_REPEATED_KERNEL = "6b601a8d-4824-42e0-bcc6-500c0c3fa346"
-IREE_COMPILE_CONFIG_ANDROID_ARMV8_2_A_GENERIC_DEFAULTS = "1f2adf49-282e-4aff-9d4f-e63b1621f1e8"
-IREE_COMPILE_CONFIG_ANDROID_ARMV8_2_A_GENERIC_MMT4D = "d463322c-24e6-4685-85ca-d541b41a405f"
-IREE_COMPILE_CONFIG_ANDROID_ARMV8_2_A_GENERIC_MMT4D_DOTPROD = "f672a6b9-99fc-47ce-8b1b-8e5f44a541a1"
-IREE_COMPILE_CONFIG_ANDROID_QUALCOMM_ADRENO_DEFAULTS = "c7eea358-d8d2-4199-9d75-bb741c399b1b"
-IREE_COMPILE_CONFIG_ANDROID_QUALCOMM_ADRENO_FUSE_PADDING = "d3038b95-c889-456a-bff6-5cbabd10f1ad"
-IREE_COMPILE_CONFIG_ANDROID_QUALCOMM_ADRENO_FUSE_PADDING_REPEATED_KERNEL = "70b823ca-2807-4531-8c00-e02af7d70466"
+IREE_COMPILE_CONFIG_ANDROID_ARM_VALHALL_DEFAULTS = (
+    "8da35f2b-a042-4b7d-9dcf-5ebbc1728765"
+)
+IREE_COMPILE_CONFIG_ANDROID_ARM_VALHALL_EXPERIMENTAL = (
+    "32a56c8d-cc6c-41b8-8620-1f8eda0b8223"
+)
+IREE_COMPILE_CONFIG_ANDROID_ARM_VALHALL_EXPERIMENTAL_REPEATED_KERNEL = (
+    "6b601a8d-4824-42e0-bcc6-500c0c3fa346"
+)
+IREE_COMPILE_CONFIG_ANDROID_ARMV8_2_A_GENERIC_DEFAULTS = (
+    "1f2adf49-282e-4aff-9d4f-e63b1621f1e8"
+)
+IREE_COMPILE_CONFIG_ANDROID_ARMV8_2_A_GENERIC_MMT4D = (
+    "d463322c-24e6-4685-85ca-d541b41a405f"
+)
+IREE_COMPILE_CONFIG_ANDROID_ARMV8_2_A_GENERIC_MMT4D_DOTPROD = (
+    "f672a6b9-99fc-47ce-8b1b-8e5f44a541a1"
+)
+IREE_COMPILE_CONFIG_ANDROID_QUALCOMM_ADRENO_DEFAULTS = (
+    "c7eea358-d8d2-4199-9d75-bb741c399b1b"
+)
+IREE_COMPILE_CONFIG_ANDROID_QUALCOMM_ADRENO_FUSE_PADDING = (
+    "d3038b95-c889-456a-bff6-5cbabd10f1ad"
+)
+IREE_COMPILE_CONFIG_ANDROID_QUALCOMM_ADRENO_FUSE_PADDING_REPEATED_KERNEL = (
+    "70b823ca-2807-4531-8c00-e02af7d70466"
+)
 IREE_MODULE_EXECUTION_CONFIG_LOCAL_SYNC = "13fc65a9-e5dc-4cbb-9c09-25b0b08f4c03"
 IREE_MODULE_EXECUTION_CONFIG_LOCAL_TASK_BASE = "c7c4a15e-b20c-4898-bb4a-864f34ff34b2"
-IREE_MODULE_EXECUTION_CONFIG_SYS_SCHED_LOCAL_TASK_BASE = "0dfb6b03-bd15-45a9-b82a-345c03f1fea6"
+IREE_MODULE_EXECUTION_CONFIG_SYS_SCHED_LOCAL_TASK_BASE = (
+    "0dfb6b03-bd15-45a9-b82a-345c03f1fea6"
+)
 IREE_MODULE_EXECUTION_CONFIG_CUDA = "f7c0ec98-f028-436a-b05a-7d35cf18ce2d"
-IREE_MODULE_EXECUTION_CONFIG_CUDA_BATCH_SIZE_100 = "ce15c338-b1d1-4ee3-b876-22d3cc5a831d"
+IREE_MODULE_EXECUTION_CONFIG_CUDA_BATCH_SIZE_100 = (
+    "ce15c338-b1d1-4ee3-b876-22d3cc5a831d"
+)
 IREE_MODULE_EXECUTION_CONFIG_VULKAN = "34ae13f0-d6d9-43f7-befb-15d024e88e89"
-IREE_MODULE_EXECUTION_CONFIG_VULKAN_BATCH_SIZE_16 = "b10737a8-5da4-4052-9b7a-5b07f21e02d0"
-IREE_MODULE_EXECUTION_CONFIG_VULKAN_BATCH_SIZE_32 = "c59f6ed8-ef78-4ddd-93ea-f173c5e4d6b8"
-IREE_MODULE_EXECUTION_CONFIG_VMVX_LOCAL_TASK_BASE = "953183e2-1e84-4a51-a43c-9b869bdc2218"
-IREE_MODULE_EXECUTION_CONFIG_VMVX_SYS_SCHED_LOCAL_TASK_BASE = "a1a9795e-2fc5-4d95-abc0-b0fb41b07557"
+IREE_MODULE_EXECUTION_CONFIG_VULKAN_BATCH_SIZE_16 = (
+    "b10737a8-5da4-4052-9b7a-5b07f21e02d0"
+)
+IREE_MODULE_EXECUTION_CONFIG_VULKAN_BATCH_SIZE_32 = (
+    "c59f6ed8-ef78-4ddd-93ea-f173c5e4d6b8"
+)
+IREE_MODULE_EXECUTION_CONFIG_VMVX_LOCAL_TASK_BASE = (
+    "953183e2-1e84-4a51-a43c-9b869bdc2218"
+)
+IREE_MODULE_EXECUTION_CONFIG_VMVX_SYS_SCHED_LOCAL_TASK_BASE = (
+    "a1a9795e-2fc5-4d95-abc0-b0fb41b07557"
+)
 IREE_MODEL_IMPORT_STABLEHLO_MLIR_DEFAULT = "8b2df698-f3ba-4207-8696-6c909776eac4"
 IREE_MODEL_IMPORT_TFLITE_DEFAULT = "16280d67-7ce0-4807-ab4b-0cb3c771d206"
 IREE_MODEL_IMPORT_LINALG_MLIR_DEFAULT = "8afc4561-e84d-4a91-af55-2b1917465fcc"
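A small worked example of the composite-id scheme described above (the keys are made up; the module path follows the file shown here):

```python
import hashlib

from e2e_test_framework import unique_ids

keys = ["model-id", unique_ids.TRANSPARENT_ID, "device-id"]

# TRANSPARENT_ID entries are dropped, but surviving keys keep their original
# positional index, so the digest input is "0-model-id:2-device-id".
composite_id = unique_ids.hash_composite_id(keys)
expected = hashlib.sha256("0-model-id:2-device-id".encode("utf-8")).hexdigest()
assert composite_id == expected
```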
diff --git a/build_tools/python/e2e_test_framework/unique_ids_test.py b/build_tools/python/e2e_test_framework/unique_ids_test.py
index 2fdf8fc..85c9f56 100644
--- a/build_tools/python/e2e_test_framework/unique_ids_test.py
+++ b/build_tools/python/e2e_test_framework/unique_ids_test.py
@@ -11,42 +11,42 @@
 
 
 class UniqueIdsTest(unittest.TestCase):
+    def test_hash_composite_id(self):
+        output = unique_ids.hash_composite_id(["abc", "123"])
 
-  def test_hash_composite_id(self):
-    output = unique_ids.hash_composite_id(["abc", "123"])
+        self.assertEquals(
+            output, hashlib.sha256(f"0-abc:1-123".encode("utf-8")).hexdigest()
+        )
 
-    self.assertEquals(
-        output,
-        hashlib.sha256(f"0-abc:1-123".encode("utf-8")).hexdigest())
+    def test_hash_composite_id_diff_keys(self):
+        ids = [
+            unique_ids.hash_composite_id([]),
+            unique_ids.hash_composite_id(["abc", "123"]),
+            unique_ids.hash_composite_id(["123", "abc"]),
+            unique_ids.hash_composite_id(["123", unique_ids.TRANSPARENT_ID]),
+            unique_ids.hash_composite_id(["123", "abc", "xyz"]),
+            unique_ids.hash_composite_id(["123", unique_ids.TRANSPARENT_ID, "xyz"]),
+        ]
 
-  def test_hash_composite_id_diff_keys(self):
-    ids = [
-        unique_ids.hash_composite_id([]),
-        unique_ids.hash_composite_id(["abc", "123"]),
-        unique_ids.hash_composite_id(["123", "abc"]),
-        unique_ids.hash_composite_id(["123", unique_ids.TRANSPARENT_ID]),
-        unique_ids.hash_composite_id(["123", "abc", "xyz"]),
-        unique_ids.hash_composite_id(["123", unique_ids.TRANSPARENT_ID, "xyz"])
-    ]
+        # Check if they are all distinct.
+        self.assertCountEqual(set(ids), ids)
 
-    # Check if they are all distinct.
-    self.assertCountEqual(set(ids), ids)
+    def test_hash_composite_id_unchanged_with_transparent_id(self):
+        existing_id = unique_ids.hash_composite_id(["abc"])
+        new_id_a = unique_ids.hash_composite_id(["abc", unique_ids.TRANSPARENT_ID])
+        new_id_b = unique_ids.hash_composite_id(
+            ["abc", unique_ids.TRANSPARENT_ID, unique_ids.TRANSPARENT_ID]
+        )
 
-  def test_hash_composite_id_unchanged_with_transparent_id(self):
-    existing_id = unique_ids.hash_composite_id(["abc"])
-    new_id_a = unique_ids.hash_composite_id(["abc", unique_ids.TRANSPARENT_ID])
-    new_id_b = unique_ids.hash_composite_id(
-        ["abc", unique_ids.TRANSPARENT_ID, unique_ids.TRANSPARENT_ID])
+        self.assertEquals(existing_id, new_id_a)
+        self.assertEquals(existing_id, new_id_b)
 
-    self.assertEquals(existing_id, new_id_a)
-    self.assertEquals(existing_id, new_id_b)
+    def test_hash_composite_id_with_transparent_ids_in_diff_pos(self):
+        id_a = unique_ids.hash_composite_id([unique_ids.TRANSPARENT_ID, "abc"])
+        id_b = unique_ids.hash_composite_id(["abc", unique_ids.TRANSPARENT_ID])
 
-  def test_hash_composite_id_with_transparent_ids_in_diff_pos(self):
-    id_a = unique_ids.hash_composite_id([unique_ids.TRANSPARENT_ID, "abc"])
-    id_b = unique_ids.hash_composite_id(["abc", unique_ids.TRANSPARENT_ID])
-
-    self.assertNotEquals(id_a, id_b)
+        self.assertNotEquals(id_a, id_b)
 
 
 if __name__ == "__main__":
-  unittest.main()
+    unittest.main()
diff --git a/build_tools/python/reporting/benchmark_comment.py b/build_tools/python/reporting/benchmark_comment.py
index 7f67e22..ce6135d 100644
--- a/build_tools/python/reporting/benchmark_comment.py
+++ b/build_tools/python/reporting/benchmark_comment.py
@@ -12,12 +12,13 @@
 
 @dataclass(frozen=True)
 class CommentData(object):
-  """Benchmark comment data."""
-  # Unique id to identify the same kind of comment.
-  type_id: str
-  # Abbreviated markdown to post as a comment.
-  abbr_md: str
-  # Abbreviated markdown to post on gist.
-  full_md: str
-  # Unverified PR number.
-  unverified_pr_number: int
+    """Benchmark comment data."""
+
+    # Unique id to identify the same kind of comment.
+    type_id: str
+    # Abbreviated markdown to post as a comment.
+    abbr_md: str
+    # Full markdown to post on gist.
+    full_md: str
+    # Unverified PR number.
+    unverified_pr_number: int
diff --git a/build_tools/python/reporting/common/html_utils.py b/build_tools/python/reporting/common/html_utils.py
index 96b970b..871953b 100644
--- a/build_tools/python/reporting/common/html_utils.py
+++ b/build_tools/python/reporting/common/html_utils.py
@@ -16,138 +16,158 @@
 
 
 def get_table_css():
-  styles = [
-      dict(selector="tr:hover", props=[("background", "#f4f4f4")]),
-      dict(selector="tbody tr", props=[("background-color", "#ffffff")]),
-      dict(selector="tbody td", props=[("border", "1px solid #dddfe1")]),
-      dict(selector="th",
-           props=[("background-color", "#54585d"), ("color", "#ffffff"),
-                  ("font-weight", "bold"), ("border", "1px solid #54585d"),
-                  ("padding", "10px")]),
-      dict(selector="td", props=[("padding", "10px")]),
-      dict(selector="",
-           props=[("border-collapse", "collapse"),
-                  ("font-family", "Tahoma, Geneva, sans-serif")]),
-      dict(selector="caption",
-           props=[("text-align", "center"), ("padding", "10px"),
-                  ("font-weight", "bold"), ("font-size", "1.2em"),
-                  ("color", "#636363")]),
-  ]
-  return styles
+    styles = [
+        dict(selector="tr:hover", props=[("background", "#f4f4f4")]),
+        dict(selector="tbody tr", props=[("background-color", "#ffffff")]),
+        dict(selector="tbody td", props=[("border", "1px solid #dddfe1")]),
+        dict(
+            selector="th",
+            props=[
+                ("background-color", "#54585d"),
+                ("color", "#ffffff"),
+                ("font-weight", "bold"),
+                ("border", "1px solid #54585d"),
+                ("padding", "10px"),
+            ],
+        ),
+        dict(selector="td", props=[("padding", "10px")]),
+        dict(
+            selector="",
+            props=[
+                ("border-collapse", "collapse"),
+                ("font-family", "Tahoma, Geneva, sans-serif"),
+            ],
+        ),
+        dict(
+            selector="caption",
+            props=[
+                ("text-align", "center"),
+                ("padding", "10px"),
+                ("font-weight", "bold"),
+                ("font-size", "1.2em"),
+                ("color", "#636363"),
+            ],
+        ),
+    ]
+    return styles
 
 
 def style_legend(v):
-  if _LEGEND_0 in v:
-    props = "background-color: #0277BD;"
-  elif _LEGEND_1 in v:
-    props = "background-color: #2E7D32;"
-  elif _LEGEND_2 in v:
-    props = "background-color: #66BB6A;"
-  elif _LEGEND_3 in v:
-    props = "background-color: #FBC02D;"
-  elif _LEGEND_4 in v:
-    props = "background-color: #E57373;"
-  elif _LEGEND_5 in v:
-    props = "background-color: #C62828;"
-  else:
-    props = "background-color: #880E4F"
-  return props
+    if _LEGEND_0 in v:
+        props = "background-color: #0277BD;"
+    elif _LEGEND_1 in v:
+        props = "background-color: #2E7D32;"
+    elif _LEGEND_2 in v:
+        props = "background-color: #66BB6A;"
+    elif _LEGEND_3 in v:
+        props = "background-color: #FBC02D;"
+    elif _LEGEND_4 in v:
+        props = "background-color: #E57373;"
+    elif _LEGEND_5 in v:
+        props = "background-color: #C62828;"
+    else:
+        props = "background-color: #880E4F"
+    return props
 
 
 def generate_header_and_legend(version_html):
-  html = "<style type='text/css'>:root { font-family: Tahoma, Geneva, sans-serif; color: #636363; } h3 {text-align: center; }</style>"
-  html = html + version_html
+    html = "<style type='text/css'>:root { font-family: Tahoma, Geneva, sans-serif; color: #636363; } h3 {text-align: center; }</style>"
+    html = html + version_html
 
-  legend = pd.DataFrame(columns=[""])
-  legend.loc[len(legend)] = [_LEGEND_0]
-  legend.loc[len(legend)] = [_LEGEND_1]
-  legend.loc[len(legend)] = [_LEGEND_2]
-  legend.loc[len(legend)] = [_LEGEND_3]
-  legend.loc[len(legend)] = [_LEGEND_4]
-  legend.loc[len(legend)] = [_LEGEND_5]
-  legend.loc[len(legend)] = [_LEGEND_6]
+    legend = pd.DataFrame(columns=[""])
+    legend.loc[len(legend)] = [_LEGEND_0]
+    legend.loc[len(legend)] = [_LEGEND_1]
+    legend.loc[len(legend)] = [_LEGEND_2]
+    legend.loc[len(legend)] = [_LEGEND_3]
+    legend.loc[len(legend)] = [_LEGEND_4]
+    legend.loc[len(legend)] = [_LEGEND_5]
+    legend.loc[len(legend)] = [_LEGEND_6]
 
-  styled_legend = legend.style.set_table_styles(get_table_css())
-  styled_legend.set_caption("Legend")
-  styled_legend = styled_legend.set_properties(**{"color": "#ffffff"})
-  styled_legend = styled_legend.set_properties(**{"width": "200px"})
-  styled_legend = styled_legend.applymap(style_legend)
-  styled_legend = styled_legend.hide(axis="index")
-  styled_legend = styled_legend.hide(axis="columns")
-  html = html + styled_legend.to_html() + "<br/>"
-  return html
+    styled_legend = legend.style.set_table_styles(get_table_css())
+    styled_legend.set_caption("Legend")
+    styled_legend = styled_legend.set_properties(**{"color": "#ffffff"})
+    styled_legend = styled_legend.set_properties(**{"width": "200px"})
+    styled_legend = styled_legend.applymap(style_legend)
+    styled_legend = styled_legend.hide(axis="index")
+    styled_legend = styled_legend.hide(axis="columns")
+    html = html + styled_legend.to_html() + "<br/>"
+    return html
 
 
 def style_speedup(v):
-  if v > 10.0:
-    props = "background-color: #0277BD;"
-  elif v > 2.0:
-    props = "background-color: #2E7D32;"
-  elif v >= 1.0:
-    props = "background-color: #66BB6A;"
-  else:
-    props = "background-color: #FBC02D;"
-  return props
+    if v > 10.0:
+        props = "background-color: #0277BD;"
+    elif v > 2.0:
+        props = "background-color: #2E7D32;"
+    elif v >= 1.0:
+        props = "background-color: #66BB6A;"
+    else:
+        props = "background-color: #FBC02D;"
+    return props
 
 
 def style_slowdown(v):
-  if v >= 10.0:
-    props = "background-color: #880E4F"
-  elif v >= 2.0:
-    props = "background-color: #C62828;"
-  elif v > 1.15:
-    props = "background-color: #E57373;"
-  else:
-    props = "background-color: #FBC02D;"
-  return props
+    if v >= 10.0:
+        props = "background-color: #880E4F"
+    elif v >= 2.0:
+        props = "background-color: #C62828;"
+    elif v > 1.15:
+        props = "background-color: #E57373;"
+    else:
+        props = "background-color: #FBC02D;"
+    return props
 
 
 def style_performance(v):
-  if "faster" in v:
-    return style_speedup(float(v.split("x")[0]))
-  else:
-    return style_slowdown(float(v.split("x")[0]))
+    if "faster" in v:
+        return style_speedup(float(v.split("x")[0]))
+    else:
+        return style_slowdown(float(v.split("x")[0]))
 
 
 def style_latency(v):
-  if v == "nan":
-    return "color: #636363"
-  if "faster" in v:
-    return style_speedup(float(v.split("x")[0]))
-  else:
-    return style_slowdown(float(v.split("x")[0]))
+    if v == "nan":
+        return "color: #636363"
+    if "faster" in v:
+        return style_speedup(float(v.split("x")[0]))
+    else:
+        return style_slowdown(float(v.split("x")[0]))
 
 
 def style_memory(v):
-  if v == "nan":
-    return "color: #636363"
-  if "smaller" in v:
-    return style_speedup(float(v.split("x")[0]))
-  else:
-    return style_slowdown(float(v.split("x")[0]))
+    if v == "nan":
+        return "color: #636363"
+    if "smaller" in v:
+        return style_speedup(float(v.split("x")[0]))
+    else:
+        return style_slowdown(float(v.split("x")[0]))
 
 
 def format_latency_comparison(iree_latency, baseline_latency):
-  if iree_latency == 0 or baseline_latency == 0:
-    return "nan"
+    if iree_latency == 0 or baseline_latency == 0:
+        return "nan"
 
-  speedup = baseline_latency / iree_latency
-  slowdown = iree_latency / baseline_latency
-  faster_label = "{:.2f}x faster"
-  slower_label = "{:.2f}x slower"
-  latency = faster_label.format(
-      speedup) if speedup >= 1.0 else slower_label.format(slowdown)
-  return latency
+    speedup = baseline_latency / iree_latency
+    slowdown = iree_latency / baseline_latency
+    faster_label = "{:.2f}x faster"
+    slower_label = "{:.2f}x slower"
+    latency = (
+        faster_label.format(speedup)
+        if speedup >= 1.0
+        else slower_label.format(slowdown)
+    )
+    return latency
 
 
 def format_memory_comparison(iree_memory, baseline_memory):
-  if iree_memory == 0 or baseline_memory == 0:
-    return "nan"
+    if iree_memory == 0 or baseline_memory == 0:
+        return "nan"
 
-  smaller = baseline_memory / iree_memory
-  larger = iree_memory / baseline_memory
-  smaller_label = "{:.2f}x smaller"
-  larger_label = "{:0.2f}x larger"
-  memory = smaller_label.format(
-      smaller) if smaller >= 1.0 else larger_label.format(larger)
-  return memory
+    smaller = baseline_memory / iree_memory
+    larger = iree_memory / baseline_memory
+    smaller_label = "{:.2f}x smaller"
+    larger_label = "{:0.2f}x larger"
+    memory = (
+        smaller_label.format(smaller) if smaller >= 1.0 else larger_label.format(larger)
+    )
+    return memory
diff --git a/build_tools/scripts/add_license_header.py b/build_tools/scripts/add_license_header.py
index eebc17e..305e3c5 100755
--- a/build_tools/scripts/add_license_header.py
+++ b/build_tools/scripts/add_license_header.py
@@ -27,163 +27,191 @@
 
 """
 
-class CommentSyntax(object):
 
-  def __init__(self, start_comment, middle_comment=None, end_comment=""):
-    self.start_comment = start_comment
-    self.middle_comment = middle_comment if middle_comment else start_comment
-    self.end_comment = end_comment
+class CommentSyntax(object):
+    def __init__(self, start_comment, middle_comment=None, end_comment=""):
+        self.start_comment = start_comment
+        self.middle_comment = middle_comment if middle_comment else start_comment
+        self.end_comment = end_comment
 
 
 def comment_arg_parser(v):
-  """Can be used to parse a comment syntax triple."""
-  if v is None:
-    return None
-  if not isinstance(v, str):
-    raise argparse.ArgumentTypeError("String expected")
-  return CommentSyntax(*v.split(","))
+    """Can be used to parse a comment syntax triple."""
+    if v is None:
+        return None
+    if not isinstance(v, str):
+        raise argparse.ArgumentTypeError("String expected")
+    return CommentSyntax(*v.split(","))
 
 
 def create_multikey(d):
-  # pylint: disable=g-complex-comprehension
-  return {k: v for keys, v in d.items() for k in keys}
+    # pylint: disable=g-complex-comprehension
+    return {k: v for keys, v in d.items() for k in keys}
 
 
-filename_to_comment = create_multikey({
-    ("BUILD", "CMakeLists.txt"): CommentSyntax("#"),
-})
+filename_to_comment = create_multikey(
+    {
+        ("BUILD", "CMakeLists.txt"): CommentSyntax("#"),
+    }
+)
 
-ext_to_comment = create_multikey({
-    (".bzl", ".cfg", ".cmake", ".overlay", ".py", ".sh", ".yml"):
-        CommentSyntax("#"),
-    (".cc", ".cpp", ".comp", ".fbs", ".h", ".hpp", ".inc", ".td"):
-        CommentSyntax("//"),
-    (".def",):
-        CommentSyntax(";;"),
-})
+ext_to_comment = create_multikey(
+    {
+        (".bzl", ".cfg", ".cmake", ".overlay", ".py", ".sh", ".yml"): CommentSyntax(
+            "#"
+        ),
+        (".cc", ".cpp", ".comp", ".fbs", ".h", ".hpp", ".inc", ".td"): CommentSyntax(
+            "//"
+        ),
+        (".def",): CommentSyntax(";;"),
+    }
+)
 
 
 def get_comment_syntax(args):
-  """Deterime the comment syntax to use."""
-  if args.comment:
-    return args.comment
-  basename = os.path.basename(args.filename)
-  from_filename = filename_to_comment.get(basename)
-  if from_filename:
-    return from_filename
-  _, ext = os.path.splitext(args.filename)
-  return ext_to_comment.get(ext, args.default_comment)
+    """Deterime the comment syntax to use."""
+    if args.comment:
+        return args.comment
+    basename = os.path.basename(args.filename)
+    from_filename = filename_to_comment.get(basename)
+    if from_filename:
+        return from_filename
+    _, ext = os.path.splitext(args.filename)
+    return ext_to_comment.get(ext, args.default_comment)
 
 
 def parse_arguments():
-  """Parses command line arguments."""
-  current_year = datetime.date.today().year
-  parser = argparse.ArgumentParser()
-  input_group = parser.add_mutually_exclusive_group()
-  input_group.add_argument("infile",
-                           nargs="?",
-                           type=argparse.FileType("r", encoding="UTF-8"),
-                           help="Input file to format. Default: stdin",
-                           default=sys.stdin)
-  parser.add_argument(
-      "--filename",
-      "--assume-filename",
-      type=str,
-      default=None,
-      help=(
-          "Filename to use for determining comment syntax. Default: actual name"
-          "of input file."))
-  parser.add_argument(
-      "--year",
-      "-y",
-      help="Year to add copyright. Default: the current year ({})".format(
-          current_year),
-      default=current_year)
-  parser.add_argument("--holder",
-                      help="Copyright holder. Default: The IREE Authors",
-                      default="The IREE Authors")
-  parser.add_argument(
-      "--quiet",
-      help=("Don't raise a runtime error on encountering an unhandled filetype."
-            "Useful for running across many files at once. Default: False"),
-      action="store_true",
-      default=False)
-  output_group = parser.add_mutually_exclusive_group()
-  output_group.add_argument("-o",
-                            "--outfile",
-                            "--output",
-                            help="File to send output. Default: stdout",
-                            type=argparse.FileType("w", encoding="UTF-8"),
-                            default=sys.stdout)
-  output_group.add_argument("--in_place",
-                            "-i",
-                            action="store_true",
-                            help="Run formatting in place. Default: False",
-                            default=False)
-  comment_group = parser.add_mutually_exclusive_group()
-  comment_group.add_argument("--comment",
-                             "-c",
-                             type=comment_arg_parser,
-                             help="Override comment syntax.",
-                             default=None)
-  comment_group.add_argument(
-      "--default_comment",
-      type=comment_arg_parser,
-      help="Fallback comment syntax if filename is unknown. Default: None",
-      default=None)
-  args = parser.parse_args()
+    """Parses command line arguments."""
+    current_year = datetime.date.today().year
+    parser = argparse.ArgumentParser()
+    input_group = parser.add_mutually_exclusive_group()
+    input_group.add_argument(
+        "infile",
+        nargs="?",
+        type=argparse.FileType("r", encoding="UTF-8"),
+        help="Input file to format. Default: stdin",
+        default=sys.stdin,
+    )
+    parser.add_argument(
+        "--filename",
+        "--assume-filename",
+        type=str,
+        default=None,
+        help=(
+            "Filename to use for determining comment syntax. Default: actual name"
+            "of input file."
+        ),
+    )
+    parser.add_argument(
+        "--year",
+        "-y",
+        help="Year to add copyright. Default: the current year ({})".format(
+            current_year
+        ),
+        default=current_year,
+    )
+    parser.add_argument(
+        "--holder",
+        help="Copyright holder. Default: The IREE Authors",
+        default="The IREE Authors",
+    )
+    parser.add_argument(
+        "--quiet",
+        help=(
+            "Don't raise a runtime error on encountering an unhandled filetype."
+            "Useful for running across many files at once. Default: False"
+        ),
+        action="store_true",
+        default=False,
+    )
+    output_group = parser.add_mutually_exclusive_group()
+    output_group.add_argument(
+        "-o",
+        "--outfile",
+        "--output",
+        help="File to send output. Default: stdout",
+        type=argparse.FileType("w", encoding="UTF-8"),
+        default=sys.stdout,
+    )
+    output_group.add_argument(
+        "--in_place",
+        "-i",
+        action="store_true",
+        help="Run formatting in place. Default: False",
+        default=False,
+    )
+    comment_group = parser.add_mutually_exclusive_group()
+    comment_group.add_argument(
+        "--comment",
+        "-c",
+        type=comment_arg_parser,
+        help="Override comment syntax.",
+        default=None,
+    )
+    comment_group.add_argument(
+        "--default_comment",
+        type=comment_arg_parser,
+        help="Fallback comment syntax if filename is unknown. Default: None",
+        default=None,
+    )
+    args = parser.parse_args()
 
-  if args.in_place and args.infile == sys.stdin:
-    raise parser.error("Cannot format stdin in place")
+    if args.in_place and args.infile == sys.stdin:
+        raise parser.error("Cannot format stdin in place")
 
-  if not args.filename and args.infile != sys.stdin:
-    args.filename = args.infile.name
+    if not args.filename and args.infile != sys.stdin:
+        args.filename = args.infile.name
 
-  return args
+    return args
 
 
 def main(args):
-  first_line = args.infile.readline()
-  already_has_license = False
-  shebang = ""
-  content_lines = []
-  if first_line.startswith("#!"):
-    shebang = first_line
-  else:
-    content_lines = [first_line]
-  content_lines.extend(args.infile.readlines())
-  for line in content_lines:
-    if COPYRIGHT_PATTERN.search(line):
-      already_has_license = True
-      break
-  if already_has_license:
-    header = shebang
-  else:
-    comment_syntax = get_comment_syntax(args)
-    if not comment_syntax:
-      if args.quiet:
-        header = shebang
-      else:
-        raise ValueError("Could not determine comment syntax for " +
-                         args.filename)
+    first_line = args.infile.readline()
+    already_has_license = False
+    shebang = ""
+    content_lines = []
+    if first_line.startswith("#!"):
+        shebang = first_line
     else:
-      header = LICENSE_HEADER_FORMATTER.format(
-          # Add a blank line between shebang and license.
-          shebang=(shebang + "\n" if shebang else ""),
-          start_comment=comment_syntax.start_comment,
-          middle_comment=comment_syntax.middle_comment,
-          # Add a blank line before the end comment.
-          end_comment=("\n" + comment_syntax.end_comment
-                       if comment_syntax.end_comment else ""),
-          year=args.year,
-          holder=args.holder)
+        content_lines = [first_line]
+    content_lines.extend(args.infile.readlines())
+    for line in content_lines:
+        if COPYRIGHT_PATTERN.search(line):
+            already_has_license = True
+            break
+    if already_has_license:
+        header = shebang
+    else:
+        comment_syntax = get_comment_syntax(args)
+        if not comment_syntax:
+            if args.quiet:
+                header = shebang
+            else:
+                raise ValueError(
+                    "Could not determine comment syntax for " + args.filename
+                )
+        else:
+            header = LICENSE_HEADER_FORMATTER.format(
+                # Add a blank line between shebang and license.
+                shebang=(shebang + "\n" if shebang else ""),
+                start_comment=comment_syntax.start_comment,
+                middle_comment=comment_syntax.middle_comment,
+                # Add a blank line before the end comment.
+                end_comment=(
+                    "\n" + comment_syntax.end_comment
+                    if comment_syntax.end_comment
+                    else ""
+                ),
+                year=args.year,
+                holder=args.holder,
+            )
 
-  # Have to open for write after we're done reading.
-  if args.in_place:
-    args.outfile = open(args.filename, "w", encoding="UTF-8")
-  args.outfile.write(header)
-  args.outfile.writelines(content_lines)
+    # Have to open for write after we're done reading.
+    if args.in_place:
+        args.outfile = open(args.filename, "w", encoding="UTF-8")
+    args.outfile.write(header)
+    args.outfile.writelines(content_lines)
 
 
 if __name__ == "__main__":
-  main(parse_arguments())
+    main(parse_arguments())
diff --git a/build_tools/scripts/check_path_lengths.py b/build_tools/scripts/check_path_lengths.py
index 645ba7d..42d95c2 100755
--- a/build_tools/scripts/check_path_lengths.py
+++ b/build_tools/scripts/check_path_lengths.py
@@ -30,70 +30,71 @@
 
 
 def parse_arguments():
-  parser = argparse.ArgumentParser(description="Path length checker")
-  # The default limit was selected based on repository state when this script
-  # was added. If the max path length decreases, consider lowering this too.
-  parser.add_argument("--limit",
-                      help="Path length limit (inclusive)",
-                      type=int,
-                      default=75)
-  parser.add_argument(
-      "--include_tests",
-      help=
-      "Includes /test directories. False by default as these don't usually generate problematic files during the build",
-      action="store_true",
-      default=False)
-  parser.add_argument("--verbose",
-                      help="Outputs detailed information about path lengths",
-                      action="store_true",
-                      default=False)
-  args = parser.parse_args()
-  return args
+    parser = argparse.ArgumentParser(description="Path length checker")
+    # The default limit was selected based on repository state when this script
+    # was added. If the max path length decreases, consider lowering this too.
+    parser.add_argument(
+        "--limit", help="Path length limit (inclusive)", type=int, default=75
+    )
+    parser.add_argument(
+        "--include_tests",
+        help="Includes /test directories. False by default as these don't usually generate problematic files during the build",
+        action="store_true",
+        default=False,
+    )
+    parser.add_argument(
+        "--verbose",
+        help="Outputs detailed information about path lengths",
+        action="store_true",
+        default=False,
+    )
+    args = parser.parse_args()
+    return args
 
 
 def main(args):
-  repo_root = pathlib.Path(__file__).parent.parent.parent
+    repo_root = pathlib.Path(__file__).parent.parent.parent
 
-  # Just look at the compiler directory for now, since it has historically had
-  # by far the longest paths.
-  walk_root = os.path.join(repo_root, "compiler")
+    # Just look at the compiler directory for now, since it has historically had
+    # by far the longest paths.
+    walk_root = os.path.join(repo_root, "compiler")
 
-  longest_path_length = -1
-  long_paths = []
-  short_paths = []
-  for dirpath, dirnames, _ in os.walk(walk_root):
-    # Don't descend into test directories, since they typically don't generate
-    # object files or binaries that could trip up the build system.
-    if not args.include_tests and "test" in dirnames:
-      dirnames.remove("test")
+    longest_path_length = -1
+    long_paths = []
+    short_paths = []
+    for dirpath, dirnames, _ in os.walk(walk_root):
+        # Don't descend into test directories, since they typically don't generate
+        # object files or binaries that could trip up the build system.
+        if not args.include_tests and "test" in dirnames:
+            dirnames.remove("test")
 
-    path = pathlib.Path(dirpath).relative_to(repo_root).as_posix()
-    if len(path) > args.limit:
-      long_paths.append(path)
+        path = pathlib.Path(dirpath).relative_to(repo_root).as_posix()
+        if len(path) > args.limit:
+            long_paths.append(path)
+        else:
+            short_paths.append(path)
+        longest_path_length = max(longest_path_length, len(path))
+    long_paths.sort(key=len)
+    short_paths.sort(key=len)
+
+    if args.verbose and short_paths:
+        print(f"These paths are shorter than the limit of {args.limit} characters:")
+        for path in short_paths:
+            print("{:3d}, {}".format(len(path), path))
+
+    if long_paths:
+        print(f"These paths are longer than the limit of {args.limit} characters:")
+        for path in long_paths:
+            print("{:3d}, {}".format(len(path), path))
+        print(
+            f"Error: {len(long_paths)} source paths are longer than {args.limit} characters."
+        )
+        print("  Long paths can be problematic when building on Windows.")
+        print("  Please look at the output above and trim the paths.")
+        sys.exit(1)
     else:
-      short_paths.append(path)
-    longest_path_length = max(longest_path_length, len(path))
-  long_paths.sort(key=len)
-  short_paths.sort(key=len)
-
-  if args.verbose and short_paths:
-    print(f"These paths are shorter than the limit of {args.limit} characters:")
-    for path in short_paths:
-      print("{:3d}, {}".format(len(path), path))
-
-  if long_paths:
-    print(f"These paths are longer than the limit of {args.limit} characters:")
-    for path in long_paths:
-      print("{:3d}, {}".format(len(path), path))
-    print(
-        f"Error: {len(long_paths)} source paths are longer than {args.limit} characters."
-    )
-    print("  Long paths can be problematic when building on Windows.")
-    print("  Please look at the output above and trim the paths.")
-    sys.exit(1)
-  else:
-    print(f"All path lengths are under the limit of {args.limit} characters.")
+        print(f"All path lengths are under the limit of {args.limit} characters.")
 
 
 if __name__ == "__main__":
-  main(parse_arguments())
+    main(parse_arguments())
diff --git a/build_tools/scripts/download_file.py b/build_tools/scripts/download_file.py
index da1a1d3..ffa4220 100755
--- a/build_tools/scripts/download_file.py
+++ b/build_tools/scripts/download_file.py
@@ -25,84 +25,91 @@
 
 
 def parse_arguments():
-  """Parses command line arguments."""
-  parser = argparse.ArgumentParser(
-      description="Downloads a file from the web "
-      "and decompresses it if necessary. NEVER Use this tool to download from "
-      "untrusted sources, it doesn't unpack the file safely.")
-  parser.add_argument("source_url",
-                      type=str,
-                      metavar="<source-url>",
-                      help="Source URL to download")
-  parser.add_argument("-o",
-                      "--output",
-                      type=str,
-                      required=True,
-                      metavar="<output-file>",
-                      help="Output file path")
-  parser.add_argument("--unpack",
-                      action='store_true',
-                      default=False,
-                      help="Unpack the downloaded file if it's an archive")
-  parser.add_argument("--max-tries",
-                      metavar="<max-tries>",
-                      type=int,
-                      default=DEFAULT_MAX_TRIES,
-                      help="Number of tries before giving up")
-  return parser.parse_args()
+    """Parses command line arguments."""
+    parser = argparse.ArgumentParser(
+        description="Downloads a file from the web "
+        "and decompresses it if necessary. NEVER Use this tool to download from "
+        "untrusted sources, it doesn't unpack the file safely."
+    )
+    parser.add_argument(
+        "source_url", type=str, metavar="<source-url>", help="Source URL to download"
+    )
+    parser.add_argument(
+        "-o",
+        "--output",
+        type=str,
+        required=True,
+        metavar="<output-file>",
+        help="Output file path",
+    )
+    parser.add_argument(
+        "--unpack",
+        action="store_true",
+        default=False,
+        help="Unpack the downloaded file if it's an archive",
+    )
+    parser.add_argument(
+        "--max-tries",
+        metavar="<max-tries>",
+        type=int,
+        default=DEFAULT_MAX_TRIES,
+        help="Number of tries before giving up",
+    )
+    return parser.parse_args()
 
 
 def download_and_extract(source_url: str, output: str, unpack: bool):
-  # Open the URL and get the file-like streaming object.
-  with urllib.request.urlopen(source_url) as response:
-    if response.status != 200:
-      raise RuntimeError(
-          f"Failed to download file with status {response.status} {response.msg}"
-      )
+    # Open the URL and get the file-like streaming object.
+    with urllib.request.urlopen(source_url) as response:
+        if response.status != 200:
+            raise RuntimeError(
+                f"Failed to download file with status {response.status} {response.msg}"
+            )
 
-    if unpack:
-      if source_url.endswith(".tar.gz"):
-        # Open tar.gz in the streaming mode.
-        with tarfile.open(fileobj=response, mode="r|*") as tar_file:
-          if os.path.exists(output):
-            shutil.rmtree(output)
-          os.makedirs(output)
-          tar_file.extractall(output)
-        return
-      elif source_url.endswith(".gz"):
-        # Open gzip from a file-like object, which will be in the streaming mode.
-        with gzip.open(filename=response, mode="rb") as input_file:
-          with open(output, "wb") as output_file:
-            shutil.copyfileobj(input_file, output_file)
-        return
+        if unpack:
+            if source_url.endswith(".tar.gz"):
+                # Open tar.gz in the streaming mode.
+                with tarfile.open(fileobj=response, mode="r|*") as tar_file:
+                    if os.path.exists(output):
+                        shutil.rmtree(output)
+                    os.makedirs(output)
+                    tar_file.extractall(output)
+                return
+            elif source_url.endswith(".gz"):
+                # Open gzip from a file-like object, which will be in the streaming mode.
+                with gzip.open(filename=response, mode="rb") as input_file:
+                    with open(output, "wb") as output_file:
+                        shutil.copyfileobj(input_file, output_file)
+                return
 
-    # Fallback to download the file only.
-    with open(output, "wb") as output_file:
-      # Streaming copy.
-      shutil.copyfileobj(response, output_file)
+        # Fallback to download the file only.
+        with open(output, "wb") as output_file:
+            # Streaming copy.
+            shutil.copyfileobj(response, output_file)
 
 
 def main(args):
-  output_dir = os.path.dirname(args.output)
+    output_dir = os.path.dirname(args.output)
 
-  if not os.path.isdir(output_dir):
-    os.makedirs(output_dir)
+    if not os.path.isdir(output_dir):
+        os.makedirs(output_dir)
 
-  remaining_tries = args.max_tries
-  while remaining_tries > 0:
-    try:
-      download_and_extract(args.source_url, args.output, args.unpack)
-      break
-    except (ConnectionResetError, ConnectionRefusedError,
-            urllib.error.URLError):
-      remaining_tries -= 1
-      if remaining_tries == 0:
-        raise
-      else:
-        logging.warning(f"Connection error, remaining {remaining_tries} tries",
-                        exc_info=True)
-        time.sleep(RETRY_COOLDOWN_TIME)
+    remaining_tries = args.max_tries
+    while remaining_tries > 0:
+        try:
+            download_and_extract(args.source_url, args.output, args.unpack)
+            break
+        except (ConnectionResetError, ConnectionRefusedError, urllib.error.URLError):
+            remaining_tries -= 1
+            if remaining_tries == 0:
+                raise
+            else:
+                logging.warning(
+                    f"Connection error, remaining {remaining_tries} tries",
+                    exc_info=True,
+                )
+                time.sleep(RETRY_COOLDOWN_TIME)
 
 
 if __name__ == "__main__":
-  main(parse_arguments())
+    main(parse_arguments())
diff --git a/build_tools/scripts/generate_compilation_flagfile.py b/build_tools/scripts/generate_compilation_flagfile.py
index cf0cb13..adda56e 100755
--- a/build_tools/scripts/generate_compilation_flagfile.py
+++ b/build_tools/scripts/generate_compilation_flagfile.py
@@ -16,23 +16,24 @@
 
 
 def parse_arguments():
-  """Parses command line arguments."""
-  parser = argparse.ArgumentParser()
-  parser.add_argument("--output",
-                      type=str,
-                      required=True,
-                      help="output file to write to")
-  parser.add_argument("compilation_flags",
-                      metavar="<compilation-flags>",
-                      nargs="*",
-                      help="list of compilation flags")
-  return parser.parse_args()
+    """Parses command line arguments."""
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--output", type=str, required=True, help="output file to write to"
+    )
+    parser.add_argument(
+        "compilation_flags",
+        metavar="<compilation-flags>",
+        nargs="*",
+        help="list of compilation flags",
+    )
+    return parser.parse_args()
 
 
 def main(args):
-  with open(args.output, "w") as f:
-    f.write("\n".join(args.compilation_flags) + "\n")
+    with open(args.output, "w") as f:
+        f.write("\n".join(args.compilation_flags) + "\n")
 
 
 if __name__ == "__main__":
-  main(parse_arguments())
+    main(parse_arguments())
diff --git a/build_tools/scripts/generate_flagfile.py b/build_tools/scripts/generate_flagfile.py
index f0330e0..fb1effd 100755
--- a/build_tools/scripts/generate_flagfile.py
+++ b/build_tools/scripts/generate_flagfile.py
@@ -12,54 +12,67 @@
 
 
 def parse_arguments():
-  """Parses command line arguments."""
-  parser = argparse.ArgumentParser()
-  parser.add_argument("--module",
-                      type=str,
-                      required=True,
-                      metavar="<module>",
-                      help="The name of the module file")
-  parser.add_argument("--device",
-                      type=str,
-                      required=True,
-                      metavar="<device>",
-                      help="The name of the HAL device")
-  parser.add_argument("--function",
-                      type=str,
-                      required=True,
-                      metavar="<function>",
-                      help="The name of the entry function")
-  parser.add_argument("--inputs",
-                      type=str,
-                      required=True,
-                      metavar="<inputs>",
-                      help="A list of comma-separated function inputs")
-  parser.add_argument("--additional_args",
-                      type=str,
-                      required=True,
-                      metavar="<additional-cl-args>",
-                      help="Additional command-line arguments")
-  parser.add_argument("-o",
-                      "--output",
-                      type=str,
-                      required=True,
-                      metavar="<output-file>",
-                      help="Output file to write to")
-  return parser.parse_args()
+    """Parses command line arguments."""
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--module",
+        type=str,
+        required=True,
+        metavar="<module>",
+        help="The name of the module file",
+    )
+    parser.add_argument(
+        "--device",
+        type=str,
+        required=True,
+        metavar="<device>",
+        help="The name of the HAL device",
+    )
+    parser.add_argument(
+        "--function",
+        type=str,
+        required=True,
+        metavar="<function>",
+        help="The name of the entry function",
+    )
+    parser.add_argument(
+        "--inputs",
+        type=str,
+        required=True,
+        metavar="<inputs>",
+        help="A list of comma-separated function inputs",
+    )
+    parser.add_argument(
+        "--additional_args",
+        type=str,
+        required=True,
+        metavar="<additional-cl-args>",
+        help="Additional command-line arguments",
+    )
+    parser.add_argument(
+        "-o",
+        "--output",
+        type=str,
+        required=True,
+        metavar="<output-file>",
+        help="Output file to write to",
+    )
+    return parser.parse_args()
 
 
 def main(args):
-  lines = [
-      f"--device={args.device}", f"--module={args.module}",
-      f"--function={args.function}"
-  ]
-  lines.extend([("--input=" + e) for e in args.inputs.split(",")])
-  lines.extend(args.additional_args.split(";"))
-  content = "\n".join(lines) + "\n"
+    lines = [
+        f"--device={args.device}",
+        f"--module={args.module}",
+        f"--function={args.function}",
+    ]
+    lines.extend([("--input=" + e) for e in args.inputs.split(",")])
+    lines.extend(args.additional_args.split(";"))
+    content = "\n".join(lines) + "\n"
 
-  with open(args.output, "w") as f:
-    f.writelines(content)
+    with open(args.output, "w") as f:
+        f.writelines(content)
 
 
 if __name__ == "__main__":
-  main(parse_arguments())
+    main(parse_arguments())
diff --git a/build_tools/scripts/generate_release_index.py b/build_tools/scripts/generate_release_index.py
index 0e7ea94..70a4eeb 100755
--- a/build_tools/scripts/generate_release_index.py
+++ b/build_tools/scripts/generate_release_index.py
@@ -19,63 +19,74 @@
 
 
 def parse_arguments():
-  parser = argparse.ArgumentParser()
-  parser.add_argument("--repo",
-                      "--repository",
-                      default="openxla/iree",
-                      help="The GitHub repository to fetch releases from.")
-  parser.add_argument(
-      "--output",
-      default="-",
-      help="The file to write the HTML to or '-' for stdout (the default)")
-  return parser.parse_args()
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--repo",
+        "--repository",
+        default="openxla/iree",
+        help="The GitHub repository to fetch releases from.",
+    )
+    parser.add_argument(
+        "--output",
+        default="-",
+        help="The file to write the HTML to or '-' for stdout (the default)",
+    )
+    return parser.parse_args()
 
 
 class ReleaseFetcher:
+    def __init__(self, repo, per_page=100):
+        self._session = requests.Session()
+        self._repo = repo
+        self._per_page = per_page
 
-  def __init__(self, repo, per_page=100):
-    self._session = requests.Session()
-    self._repo = repo
-    self._per_page = per_page
+    def get_all(self):
+        url = f"https://api.github.com/repos/{self._repo}/releases"
+        page = 1
 
-  def get_all(self):
-    url = f"https://api.github.com/repos/{self._repo}/releases"
-    page = 1
-
-    while True:
-      response = self._session.get(url,
-                                   params={
-                                       "page": page,
-                                       "per_page": self._per_page,
-                                   })
-      for release in response.json():
-        yield release
-      if "next" not in response.links:
-        break
-      page += 1
+        while True:
+            response = self._session.get(
+                url,
+                params={
+                    "page": page,
+                    "per_page": self._per_page,
+                },
+            )
+            for release in response.json():
+                yield release
+            if "next" not in response.links:
+                break
+            page += 1
 
 
 def main(args):
-  fetcher = ReleaseFetcher(repo=args.repo)
-  with (sys.stdout if args.output == "-" else open(args.output, "w")) as f:
-    f.write(
-        textwrap.dedent("""\
+    fetcher = ReleaseFetcher(repo=args.repo)
+    with sys.stdout if args.output == "-" else open(args.output, "w") as f:
+        f.write(
+            textwrap.dedent(
+                """\
             <!DOCTYPE html>
             <html>
               <body>
-            """))
-    for release in fetcher.get_all():
-      if release["draft"]:
-        continue
-      for asset in release["assets"]:
-        url = html.escape(asset['browser_download_url'])
-        name = html.escape(asset['name'])
-        f.write(f"    <a href={url}>{name}</a><br />\n")
-    f.write(textwrap.dedent("""\
+            """
+            )
+        )
+        for release in fetcher.get_all():
+            if release["draft"]:
+                continue
+            for asset in release["assets"]:
+                url = html.escape(asset["browser_download_url"])
+                name = html.escape(asset["name"])
+                f.write(f"    <a href={url}>{name}</a><br />\n")
+        f.write(
+            textwrap.dedent(
+                """\
       </body>
     </html>
-    """))
+    """
+            )
+        )
 
 
 if __name__ == "__main__":
-  main(parse_arguments())
+    main(parse_arguments())
diff --git a/build_tools/scripts/get_e2e_artifacts.py b/build_tools/scripts/get_e2e_artifacts.py
index 634ee31..8875438 100755
--- a/build_tools/scripts/get_e2e_artifacts.py
+++ b/build_tools/scripts/get_e2e_artifacts.py
@@ -29,153 +29,156 @@
 from absl import flags
 
 SUITE_NAME_TO_TARGET = {
-    'e2e_tests':
-        '//integrations/tensorflow/e2e:e2e_tests',
-    'mobile_bert_squad_tests':
-        '//integrations/tensorflow/e2e:mobile_bert_squad_tests',
-    'layers_tests':
-        '//integrations/tensorflow/e2e/keras/layers:layers_tests',
-    'layers_dynamic_batch_tests':
-        '//integrations/tensorflow/e2e/keras/layers:layers_dynamic_batch_tests',
-    'layers_training_tests':
-        '//integrations/tensorflow/e2e/keras/layers:layers_training_tests',
-    'keyword_spotting_tests':
-        '//integrations/tensorflow/e2e/keras:keyword_spotting_tests',
-    'keyword_spotting_internal_streaming_tests':
-        '//integrations/tensorflow/e2e/keras:keyword_spotting_internal_streaming_tests',
-    'imagenet_non_hermetic_tests':
-        '//integrations/tensorflow/e2e/keras/applications:imagenet_non_hermetic_tests',
-    'slim_vision_tests':
-        '//integrations/tensorflow/e2e/slim_vision_models:slim_vision_tests',
+    "e2e_tests": "//integrations/tensorflow/e2e:e2e_tests",
+    "mobile_bert_squad_tests": "//integrations/tensorflow/e2e:mobile_bert_squad_tests",
+    "layers_tests": "//integrations/tensorflow/e2e/keras/layers:layers_tests",
+    "layers_dynamic_batch_tests": "//integrations/tensorflow/e2e/keras/layers:layers_dynamic_batch_tests",
+    "layers_training_tests": "//integrations/tensorflow/e2e/keras/layers:layers_training_tests",
+    "keyword_spotting_tests": "//integrations/tensorflow/e2e/keras:keyword_spotting_tests",
+    "keyword_spotting_internal_streaming_tests": "//integrations/tensorflow/e2e/keras:keyword_spotting_internal_streaming_tests",
+    "imagenet_non_hermetic_tests": "//integrations/tensorflow/e2e/keras/applications:imagenet_non_hermetic_tests",
+    "slim_vision_tests": "//integrations/tensorflow/e2e/slim_vision_models:slim_vision_tests",
 }
-SUITES_HELP = [f'`{name}`' for name in SUITE_NAME_TO_TARGET]
+SUITES_HELP = [f"`{name}`" for name in SUITE_NAME_TO_TARGET]
 SUITES_HELP = f'{", ".join(SUITES_HELP[:-1])} and {SUITES_HELP[-1]}'
 
 FLAGS = flags.FLAGS
 
 flags.DEFINE_bool(
-    'dry_run', False,
-    'Run without extracting files. Useful for quickly checking for artifact '
-    'collisions.')
+    "dry_run",
+    False,
+    "Run without extracting files. Useful for quickly checking for artifact "
+    "collisions.",
+)
 flags.DEFINE_string(
-    'artifacts_dir', os.path.join(tempfile.gettempdir(), 'iree', 'modules'),
-    'Directory to transfer the benchmarking artifacts to. Defaults to '
-    '/tmp/iree/modules/')
-flags.DEFINE_bool('run_test_suites', True, 'Run any specified test suites.')
-flags.DEFINE_list('test_suites', list(SUITE_NAME_TO_TARGET.keys()),
-                  f'Any combination of {SUITES_HELP}.')
+    "artifacts_dir",
+    os.path.join(tempfile.gettempdir(), "iree", "modules"),
+    "Directory to transfer the benchmarking artifacts to. Defaults to "
+    "/tmp/iree/modules/",
+)
+flags.DEFINE_bool("run_test_suites", True, "Run any specified test suites.")
+flags.DEFINE_list(
+    "test_suites",
+    list(SUITE_NAME_TO_TARGET.keys()),
+    f"Any combination of {SUITES_HELP}.",
+)
 
-EXPECTED_COLLISIONS = [
-    '/tf_ref/', 'tf_input.mlir', 'iree_input.mlir', '/saved_model/'
-]
+EXPECTED_COLLISIONS = ["/tf_ref/", "tf_input.mlir", "iree_input.mlir", "/saved_model/"]
 
 
 def _target_to_testlogs_path(target: str) -> str:
-  """Convert target into the path where Bazel stores the artifacts we want."""
-  return os.path.join('bazel-testlogs',
-                      target.replace('//', '').replace(':', os.sep))
+    """Convert target into the path where Bazel stores the artifacts we want."""
+    return os.path.join("bazel-testlogs", target.replace("//", "").replace(":", os.sep))
 
 
 def _target_to_test_name(target: str, test_suite_path: str) -> str:
-  """Get test_name from `suite_name_test_name__tf__backend_name`."""
-  return target.split('__')[0].replace(f'{test_suite_path}_', '')
+    """Get test_name from `suite_name_test_name__tf__backend_name`."""
+    return target.split("__")[0].replace(f"{test_suite_path}_", "")
 
 
 def get_test_paths_and_names(test_suite_path: str):
-  """Get the paths Bazel stores test outputs in and the matching test names."""
-  targets = utils.get_test_targets(test_suite_path)
-  test_paths = [_target_to_testlogs_path(target) for target in targets]
-  test_names = [
-      _target_to_test_name(target, test_suite_path) for target in targets
-  ]
-  return test_paths, test_names
+    """Get the paths Bazel stores test outputs in and the matching test names."""
+    targets = utils.get_test_targets(test_suite_path)
+    test_paths = [_target_to_testlogs_path(target) for target in targets]
+    test_names = [_target_to_test_name(target, test_suite_path) for target in targets]
+    return test_paths, test_names
 
 
-def check_collision(filename: str, test_name: str, written_paths: Set[str],
-                    paths_to_tests: Dict[str, str]):
-  """Check that we aren't overwriting files unless we expect to."""
-  # Note: We can't use a check that the files have identical contents because
-  # tf_input.mlir can have random numbers appended to its function names.
-  # See https://github.com/openxla/iree/issues/3375
+def check_collision(
+    filename: str,
+    test_name: str,
+    written_paths: Set[str],
+    paths_to_tests: Dict[str, str],
+):
+    """Check that we aren't overwriting files unless we expect to."""
+    # Note: We can't use a check that the files have identical contents because
+    # tf_input.mlir can have random numbers appended to its function names.
+    # See https://github.com/openxla/iree/issues/3375
 
-  expected_collision = any([name in filename for name in EXPECTED_COLLISIONS])
-  if filename in written_paths and not expected_collision:
-    raise ValueError(f'Collision found on {filename} between {test_name}.py '
-                     f'and {paths_to_tests[filename]}.py')
-  else:
-    written_paths.add(filename)
-    paths_to_tests[filename] = test_name
+    expected_collision = any([name in filename for name in EXPECTED_COLLISIONS])
+    if filename in written_paths and not expected_collision:
+        raise ValueError(
+            f"Collision found on {filename} between {test_name}.py "
+            f"and {paths_to_tests[filename]}.py"
+        )
+    else:
+        written_paths.add(filename)
+        paths_to_tests[filename] = test_name
 
 
 def update_path(archive_path: str):
-  """Update the --module flag with the new location of the compiled.vmfb"""
-  backend_path = archive_path.split('traces')[0]  # 'ModuleName/backend_name'.
-  compiled_path = os.path.join(FLAGS.artifacts_dir, backend_path,
-                               'compiled.vmfb')
-  flagfile_path = os.path.join(FLAGS.artifacts_dir, archive_path)
-  for line in fileinput.input(files=[flagfile_path], inplace=True):
-    if line.strip().startswith('--module'):
-      print(f'--module={compiled_path}\n', end='')
-    else:
-      print(line, end='')
+    """Update the --module flag with the new location of the compiled.vmfb"""
+    backend_path = archive_path.split("traces")[0]  # 'ModuleName/backend_name'.
+    compiled_path = os.path.join(FLAGS.artifacts_dir, backend_path, "compiled.vmfb")
+    flagfile_path = os.path.join(FLAGS.artifacts_dir, archive_path)
+    for line in fileinput.input(files=[flagfile_path], inplace=True):
+        if line.strip().startswith("--module"):
+            print(f"--module={compiled_path}\n", end="")
+        else:
+            print(line, end="")
 
 
-def extract_artifacts(test_path: str, test_name: str, written_paths: Set[str],
-                      paths_to_tests: Dict[str, str]):
-  """Unzips all of the benchmarking artifacts for a given test and backend."""
-  outputs = os.path.join(test_path, 'test.outputs', 'outputs.zip')
-  if FLAGS.dry_run and not os.path.exists(outputs):
-    # The artifacts may or may not be present on disk during a dry run. If they
-    # are then we want to collision check them, but if they aren't that's fine.
-    return
+def extract_artifacts(
+    test_path: str,
+    test_name: str,
+    written_paths: Set[str],
+    paths_to_tests: Dict[str, str],
+):
+    """Unzips all of the benchmarking artifacts for a given test and backend."""
+    outputs = os.path.join(test_path, "test.outputs", "outputs.zip")
+    if FLAGS.dry_run and not os.path.exists(outputs):
+        # The artifacts may or may not be present on disk during a dry run. If they
+        # are then we want to collision check them, but if they aren't that's fine.
+        return
 
-  archive = zipfile.ZipFile(outputs)
-  # Filter out directory names.
-  filenames = [name for name in archive.namelist() if name[-1] != os.sep]
+    archive = zipfile.ZipFile(outputs)
+    # Filter out directory names.
+    filenames = [name for name in archive.namelist() if name[-1] != os.sep]
 
-  for filename in filenames:
-    # Check for collisions.
-    check_collision(filename, test_name, written_paths, paths_to_tests)
+    for filename in filenames:
+        # Check for collisions.
+        check_collision(filename, test_name, written_paths, paths_to_tests)
 
-    # Extract and update flagfile path.
-    if not FLAGS.dry_run:
-      archive.extract(filename, FLAGS.artifacts_dir)
-      if filename.endswith('flagfile'):
-        update_path(filename)
+        # Extract and update flagfile path.
+        if not FLAGS.dry_run:
+            archive.extract(filename, FLAGS.artifacts_dir)
+            if filename.endswith("flagfile"):
+                update_path(filename)
 
 
 def main(argv):
-  del argv  # Unused.
+    del argv  # Unused.
 
-  print(
-      "The bazel integrations build and tests are deprecated. This script "
-      "may be reworked in the future. For the time being refer to "
-      "https://github.com/openxla/iree/blob/main/docs/developers/developing_iree/e2e_benchmarking.md "
-      "for information on how to run TensorFlow benchmarks.")
-  exit(1)
+    print(
+        "The bazel integrations build and tests are deprecated. This script "
+        "may be reworked in the future. For the time being refer to "
+        "https://github.com/openxla/iree/blob/main/docs/developers/developing_iree/e2e_benchmarking.md "
+        "for information on how to run TensorFlow benchmarks."
+    )
+    exit(1)
 
-  # Convert test suite shorthands to full test suite targets.
-  test_suites = [SUITE_NAME_TO_TARGET[suite] for suite in FLAGS.test_suites]
+    # Convert test suite shorthands to full test suite targets.
+    test_suites = [SUITE_NAME_TO_TARGET[suite] for suite in FLAGS.test_suites]
 
-  if FLAGS.run_test_suites:
-    # Use bazel test to execute all of the test suites in parallel.
-    command = ['bazel', 'test', *test_suites, '--color=yes']
-    print(f'Running: `{" ".join(command)}`')
-    if not FLAGS.dry_run:
-      subprocess.run(command, check=True)
-    print()
+    if FLAGS.run_test_suites:
+        # Use bazel test to execute all of the test suites in parallel.
+        command = ["bazel", "test", *test_suites, "--color=yes"]
+        print(f'Running: `{" ".join(command)}`')
+        if not FLAGS.dry_run:
+            subprocess.run(command, check=True)
+        print()
 
-  written_paths = set()
-  paths_to_tests = dict()
+    written_paths = set()
+    paths_to_tests = dict()
 
-  for test_suite in test_suites:
-    # Extract all of the artifacts for this test suite.
-    test_paths, test_names = get_test_paths_and_names(test_suite)
-    for i, (test_path, test_name) in enumerate(zip(test_paths, test_names)):
-      print(f'\rTransfering {test_suite} {i + 1}/{len(test_paths)}', end='')
-      extract_artifacts(test_path, test_name, written_paths, paths_to_tests)
-    print('\n')
+    for test_suite in test_suites:
+        # Extract all of the artifacts for this test suite.
+        test_paths, test_names = get_test_paths_and_names(test_suite)
+        for i, (test_path, test_name) in enumerate(zip(test_paths, test_names)):
+            print(f"\rTransfering {test_suite} {i + 1}/{len(test_paths)}", end="")
+            extract_artifacts(test_path, test_name, written_paths, paths_to_tests)
+        print("\n")
 
 
-if __name__ == '__main__':
-  app.run(main)
+if __name__ == "__main__":
+    app.run(main)
diff --git a/build_tools/scripts/git/check_submodule_init.py b/build_tools/scripts/git/check_submodule_init.py
index 611c32f..b878ef3 100644
--- a/build_tools/scripts/git/check_submodule_init.py
+++ b/build_tools/scripts/git/check_submodule_init.py
@@ -12,37 +12,47 @@
 
 
 def run():
-  parser = argparse.ArgumentParser()
-  parser.add_argument(
-      "--runtime_only",
-      help=("Only check the initialization of the submodules for the"
-            "runtime-dependent submodules. Default: False"),
-      action="store_true",
-      default=False)
-  args = parser.parse_args()
-  # No-op if we're not in a git repository.
-  try:
-    subprocess.check_call(['git', 'rev-parse', '--is-inside-work-tree'],
-                          stdout=subprocess.DEVNULL,
-                          stderr=subprocess.DEVNULL)
-  except:
-    return
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--runtime_only",
+        help=(
+            "Only check the initialization of the submodules for the"
+            "runtime-dependent submodules. Default: False"
+        ),
+        action="store_true",
+        default=False,
+    )
+    args = parser.parse_args()
+    # No-op if we're not in a git repository.
+    try:
+        subprocess.check_call(
+            ["git", "rev-parse", "--is-inside-work-tree"],
+            stdout=subprocess.DEVNULL,
+            stderr=subprocess.DEVNULL,
+        )
+    except:
+        return
 
-  output = os.popen("git submodule status")
-  submodules = output.readlines()
+    output = os.popen("git submodule status")
+    submodules = output.readlines()
 
-  runtime_submodules = pathlib.Path(__file__).with_name(
-      "runtime_submodules.txt").read_text().split("\n")
+    runtime_submodules = (
+        pathlib.Path(__file__)
+        .with_name("runtime_submodules.txt")
+        .read_text()
+        .split("\n")
+    )
 
-  for submodule in submodules:
-    prefix = submodule.strip()[0]
-    name = submodule.split()[1]
-    if prefix == "-" and (not args.runtime_only or name in runtime_submodules):
-      print(
-          "The git submodule '%s' is not initialized. Please run `git submodule update --init`"
-          % (name))
-      sys.exit(1)
+    for submodule in submodules:
+        prefix = submodule.strip()[0]
+        name = submodule.split()[1]
+        if prefix == "-" and (not args.runtime_only or name in runtime_submodules):
+            print(
+                "The git submodule '%s' is not initialized. Please run `git submodule update --init`"
+                % (name)
+            )
+            sys.exit(1)
 
 
 if __name__ == "__main__":
-  run()
+    run()
diff --git a/build_tools/scripts/integrate/bump_llvm.py b/build_tools/scripts/integrate/bump_llvm.py
index 7dd770b..459da10 100755
--- a/build_tools/scripts/integrate/bump_llvm.py
+++ b/build_tools/scripts/integrate/bump_llvm.py
@@ -44,92 +44,103 @@
 
 
 def main(args):
-  if not args.disable_setup_remote:
-    iree_utils.git_setup_remote(args.upstream_remote, args.upstream_repository)
+    if not args.disable_setup_remote:
+        iree_utils.git_setup_remote(args.upstream_remote, args.upstream_repository)
 
-  iree_utils.git_check_porcelain()
-  print(f"Fetching remote repository: {args.upstream_remote}")
-  iree_utils.git_fetch(repository=args.upstream_remote)
+    iree_utils.git_check_porcelain()
+    print(f"Fetching remote repository: {args.upstream_remote}")
+    iree_utils.git_fetch(repository=args.upstream_remote)
 
-  # If re-using a branch, make sure we are not on that branch.
-  if args.reuse_branch:
-    iree_utils.git_checkout("main")
+    # If re-using a branch, make sure we are not on that branch.
+    if args.reuse_branch:
+        iree_utils.git_checkout("main")
 
-  # Create branch.
-  branch_name = args.branch_name
-  if not branch_name:
-    branch_name = f"bump-llvm-{date.today().strftime('%Y%m%d')}"
-  print(f"Creating branch {branch_name} (override with --branch-name=)")
-  iree_utils.git_create_branch(branch_name,
-                               checkout=True,
-                               ref=f"{args.upstream_remote}/main",
-                               force=args.reuse_branch)
+    # Create branch.
+    branch_name = args.branch_name
+    if not branch_name:
+        branch_name = f"bump-llvm-{date.today().strftime('%Y%m%d')}"
+    print(f"Creating branch {branch_name} (override with --branch-name=)")
+    iree_utils.git_create_branch(
+        branch_name,
+        checkout=True,
+        ref=f"{args.upstream_remote}/main",
+        force=args.reuse_branch,
+    )
 
-  # Reset the llvm-project submodule to track upstream.
-  # This will discard any cherrypicks that may have been committed locally,
-  # but the assumption is that if doing a main llvm version bump, the
-  # cherrypicks will be incorporated at the new commit. If not, well, ymmv
-  # and you will find out.
-  iree_utils.git_submodule_set_origin(
-      "third_party/llvm-project",
-      url="https://github.com/iree-org/iree-llvm-fork.git",
-      branch="--default")
+    # Reset the llvm-project submodule to track upstream.
+    # This will discard any cherrypicks that may have been committed locally,
+    # but the assumption is that if doing a main llvm version bump, the
+    # cherrypicks will be incorporated at the new commit. If not, well, ymmv
+    # and you will find out.
+    iree_utils.git_submodule_set_origin(
+        "third_party/llvm-project",
+        url="https://github.com/iree-org/iree-llvm-fork.git",
+        branch="--default",
+    )
 
-  # Remove the branch pin file, reverting us to pure upstream.
-  branch_pin_file = os.path.join(
-      iree_utils.get_repo_root(),
-      iree_modules.MODULE_INFOS["llvm-project"].branch_pin_file)
-  if os.path.exists(branch_pin_file):
-    os.remove(branch_pin_file)
+    # Remove the branch pin file, reverting us to pure upstream.
+    branch_pin_file = os.path.join(
+        iree_utils.get_repo_root(),
+        iree_modules.MODULE_INFOS["llvm-project"].branch_pin_file,
+    )
+    if os.path.exists(branch_pin_file):
+        os.remove(branch_pin_file)
 
-  # Update the LLVM submodule.
-  llvm_commit = args.llvm_commit
-  print(f"Updating LLVM submodule to {llvm_commit}")
-  llvm_root = iree_utils.get_submodule_root("llvm-project")
-  iree_utils.git_fetch(repository="origin",
-                       ref="refs/heads/main",
-                       repo_dir=llvm_root)
-  if llvm_commit == "HEAD":
-    llvm_commit = "origin/main"
-  iree_utils.git_reset(llvm_commit, repo_dir=llvm_root)
-  llvm_commit, llvm_summary = iree_utils.git_current_commit(repo_dir=llvm_root)
-  print(f"LLVM submodule reset to:\n  {llvm_summary}\n")
+    # Update the LLVM submodule.
+    llvm_commit = args.llvm_commit
+    print(f"Updating LLVM submodule to {llvm_commit}")
+    llvm_root = iree_utils.get_submodule_root("llvm-project")
+    iree_utils.git_fetch(repository="origin", ref="refs/heads/main", repo_dir=llvm_root)
+    if llvm_commit == "HEAD":
+        llvm_commit = "origin/main"
+    iree_utils.git_reset(llvm_commit, repo_dir=llvm_root)
+    llvm_commit, llvm_summary = iree_utils.git_current_commit(repo_dir=llvm_root)
+    print(f"LLVM submodule reset to:\n  {llvm_summary}\n")
 
-  # Create a commit.
-  print("Create commit...")
-  iree_utils.git_create_commit(
-      message=(f"Integrate llvm-project at {llvm_commit}\n\n"
-               f"* Reset third_party/llvm-project: {llvm_summary}"),
-      add_all=True)
+    # Create a commit.
+    print("Create commit...")
+    iree_utils.git_create_commit(
+        message=(
+            f"Integrate llvm-project at {llvm_commit}\n\n"
+            f"* Reset third_party/llvm-project: {llvm_summary}"
+        ),
+        add_all=True,
+    )
 
-  # Push.
-  print("Pushing...")
-  iree_utils.git_push_branch(args.upstream_remote, branch_name)
+    # Push.
+    print("Pushing...")
+    iree_utils.git_push_branch(args.upstream_remote, branch_name)
 
 
 def parse_arguments(argv):
-  parser = argparse.ArgumentParser(description="IREE LLVM-bump-inator")
-  parser.add_argument("--upstream-remote",
-                      help="Upstream remote",
-                      default="UPSTREAM_AUTOMATION")
-  parser.add_argument("--upstream-repository",
-                      help="Upstream repository URL",
-                      default="git@github.com:openxla/iree.git")
-  parser.add_argument("--disable-setup-remote",
-                      help="Disable remote setup",
-                      action="store_true",
-                      default=False)
-  parser.add_argument("--llvm-commit", help="LLVM commit sha", default="HEAD")
-  parser.add_argument("--branch-name",
-                      help="Integrate branch to create",
-                      default=None)
-  parser.add_argument("--reuse-branch",
-                      help="Allow re-use of an existing branch",
-                      action="store_true",
-                      default=False)
-  args = parser.parse_args(argv)
-  return args
+    parser = argparse.ArgumentParser(description="IREE LLVM-bump-inator")
+    parser.add_argument(
+        "--upstream-remote", help="Upstream remote", default="UPSTREAM_AUTOMATION"
+    )
+    parser.add_argument(
+        "--upstream-repository",
+        help="Upstream repository URL",
+        default="git@github.com:openxla/iree.git",
+    )
+    parser.add_argument(
+        "--disable-setup-remote",
+        help="Disable remote setup",
+        action="store_true",
+        default=False,
+    )
+    parser.add_argument("--llvm-commit", help="LLVM commit sha", default="HEAD")
+    parser.add_argument(
+        "--branch-name", help="Integrate branch to create", default=None
+    )
+    parser.add_argument(
+        "--reuse-branch",
+        help="Allow re-use of an existing branch",
+        action="store_true",
+        default=False,
+    )
+    args = parser.parse_args(argv)
+    return args
 
 
 if __name__ == "__main__":
-  main(parse_arguments(sys.argv[1:]))
+    main(parse_arguments(sys.argv[1:]))
diff --git a/build_tools/scripts/integrate/iree_modules.py b/build_tools/scripts/integrate/iree_modules.py
index fec8ff3..5587333 100644
--- a/build_tools/scripts/integrate/iree_modules.py
+++ b/build_tools/scripts/integrate/iree_modules.py
@@ -6,40 +6,43 @@
 
 
 class ModuleInfo:
-
-  def __init__(self, *, name: str, path: str, branch_pin_file: str,
-               default_repository_url: str, fork_repository_push: str,
-               fork_repository_pull: str, branch_prefix: str):
-    self.name = name
-    self.path = path
-    self.branch_pin_file = branch_pin_file
-    self.default_repository_url = default_repository_url
-    self.fork_repository_push = fork_repository_push
-    self.fork_repository_pull = fork_repository_pull
-    self.branch_prefix = branch_prefix
+    def __init__(
+        self,
+        *,
+        name: str,
+        path: str,
+        branch_pin_file: str,
+        default_repository_url: str,
+        fork_repository_push: str,
+        fork_repository_pull: str,
+        branch_prefix: str
+    ):
+        self.name = name
+        self.path = path
+        self.branch_pin_file = branch_pin_file
+        self.default_repository_url = default_repository_url
+        self.fork_repository_push = fork_repository_push
+        self.fork_repository_pull = fork_repository_pull
+        self.branch_prefix = branch_prefix
 
 
 MODULE_INFOS = {
-    "llvm-project":
-        ModuleInfo(
-            name="llvm-project",
-            path="third_party/llvm-project",
-            branch_pin_file="third_party/llvm-project.branch-pin",
-            default_repository_url=
-            "https://github.com/iree-org/iree-llvm-fork.git",
-            fork_repository_push="git@github.com:iree-org/iree-llvm-fork.git",
-            fork_repository_pull=
-            "https://github.com/iree-org/iree-llvm-fork.git",
-            branch_prefix="patched-llvm-project-",
-        ),
-    "stablehlo":
-        ModuleInfo(
-            name="stablehlo",
-            path="third_party/stablehlo",
-            branch_pin_file="third_party/stablehlo.branch-pin",
-            default_repository_url="https://github.com/iree-org/stablehlo.git",
-            fork_repository_push="git@github.com:iree-org/stablehlo.git",
-            fork_repository_pull="https://github.com/iree-org/stablehlo.git",
-            branch_prefix="patched-stablehlo-",
-        )
+    "llvm-project": ModuleInfo(
+        name="llvm-project",
+        path="third_party/llvm-project",
+        branch_pin_file="third_party/llvm-project.branch-pin",
+        default_repository_url="https://github.com/iree-org/iree-llvm-fork.git",
+        fork_repository_push="git@github.com:iree-org/iree-llvm-fork.git",
+        fork_repository_pull="https://github.com/iree-org/iree-llvm-fork.git",
+        branch_prefix="patched-llvm-project-",
+    ),
+    "stablehlo": ModuleInfo(
+        name="stablehlo",
+        path="third_party/stablehlo",
+        branch_pin_file="third_party/stablehlo.branch-pin",
+        default_repository_url="https://github.com/iree-org/stablehlo.git",
+        fork_repository_push="git@github.com:iree-org/stablehlo.git",
+        fork_repository_pull="https://github.com/iree-org/stablehlo.git",
+        branch_prefix="patched-stablehlo-",
+    ),
 }
diff --git a/build_tools/scripts/integrate/iree_utils.py b/build_tools/scripts/integrate/iree_utils.py
index 3a81ba8..21e5454 100644
--- a/build_tools/scripts/integrate/iree_utils.py
+++ b/build_tools/scripts/integrate/iree_utils.py
@@ -15,186 +15,200 @@
 
 
 def get_repo_root() -> str:
-  global _repo_root
-  if _repo_root is None:
-    _repo_root = os.getcwd()
-    _validate_repo_root()
-  return _repo_root
+    global _repo_root
+    if _repo_root is None:
+        _repo_root = os.getcwd()
+        _validate_repo_root()
+    return _repo_root
 
 
 def get_submodule_root(submodule) -> str:
-  path = os.path.join(get_repo_root(), "third_party", submodule)
-  if not os.path.isdir(path):
-    raise SystemExit(f"Could not find submodule: {path}")
-  return path
+    path = os.path.join(get_repo_root(), "third_party", submodule)
+    if not os.path.isdir(path):
+        raise SystemExit(f"Could not find submodule: {path}")
+    return path
 
 
 def _validate_repo_root():
-  # Look for something we know is there.
-  known_dir = os.path.join(_repo_root, "compiler")
-  if not os.path.isdir(known_dir):
-    raise SystemExit(f"ERROR: Must run from the iree repository root. "
-                     f"Actually in: {_repo_root}")
+    # Look for something we know is there.
+    known_dir = os.path.join(_repo_root, "compiler")
+    if not os.path.isdir(known_dir):
+        raise SystemExit(
+            f"ERROR: Must run from the iree repository root. "
+            f"Actually in: {_repo_root}"
+        )
 
 
 def git_setup_remote(remote_alias, url, *, repo_dir=None):
-  needs_create = False
-  try:
-    existing_url = git_exec(["remote", "get-url", remote_alias],
-                            capture_output=True,
-                            repo_dir=repo_dir,
-                            quiet=True)
-    existing_url = existing_url.strip()
-    if existing_url == url:
-      return
-  except subprocess.CalledProcessError:
-    # Does not exist.
-    needs_create = True
+    needs_create = False
+    try:
+        existing_url = git_exec(
+            ["remote", "get-url", remote_alias],
+            capture_output=True,
+            repo_dir=repo_dir,
+            quiet=True,
+        )
+        existing_url = existing_url.strip()
+        if existing_url == url:
+            return
+    except subprocess.CalledProcessError:
+        # Does not exist.
+        needs_create = True
 
-  if needs_create:
-    git_exec(["remote", "add", "--no-tags", remote_alias, url],
-             repo_dir=repo_dir)
-  else:
-    git_exec(["remote", "set-url", remote_alias, url], repo_dir=repo_dir)
+    if needs_create:
+        git_exec(["remote", "add", "--no-tags", remote_alias, url], repo_dir=repo_dir)
+    else:
+        git_exec(["remote", "set-url", remote_alias, url], repo_dir=repo_dir)
 
 
 def git_is_porcelain(*, repo_dir=None):
-  output = git_exec(["status", "--porcelain", "--untracked-files=no"],
-                    capture_output=True,
-                    quiet=True,
-                    repo_dir=repo_dir).strip()
-  return not bool(output)
+    output = git_exec(
+        ["status", "--porcelain", "--untracked-files=no"],
+        capture_output=True,
+        quiet=True,
+        repo_dir=repo_dir,
+    ).strip()
+    return not bool(output)
 
 
 def git_check_porcelain(*, repo_dir=None):
-  output = git_exec(["status", "--porcelain", "--untracked-files=no"],
-                    capture_output=True,
-                    quiet=True,
-                    repo_dir=repo_dir).strip()
-  if output:
-    actual_repo_dir = get_repo_root() if repo_dir is None else repo_dir
-    raise SystemExit(f"ERROR: git directory {actual_repo_dir} is not clean. "
-                     f"Please stash changes:\n{output}")
+    output = git_exec(
+        ["status", "--porcelain", "--untracked-files=no"],
+        capture_output=True,
+        quiet=True,
+        repo_dir=repo_dir,
+    ).strip()
+    if output:
+        actual_repo_dir = get_repo_root() if repo_dir is None else repo_dir
+        raise SystemExit(
+            f"ERROR: git directory {actual_repo_dir} is not clean. "
+            f"Please stash changes:\n{output}"
+        )
 
 
 def git_fetch(*, repository=None, ref=None, repo_dir=None):
-  args = ["fetch"]
-  if repository:
-    args.append(repository)
-  if ref is not None:
-    args.append(ref)
-  git_exec(args, repo_dir=repo_dir)
+    args = ["fetch"]
+    if repository:
+        args.append(repository)
+    if ref is not None:
+        args.append(ref)
+    git_exec(args, repo_dir=repo_dir)
 
 
 def git_checkout(ref, *, repo_dir=None):
-  git_exec(["checkout", ref], repo_dir=repo_dir)
+    git_exec(["checkout", ref], repo_dir=repo_dir)
 
 
-def git_create_branch(branch_name,
-                      *,
-                      checkout=True,
-                      ref=None,
-                      force=False,
-                      repo_dir=None):
-  branch_args = ["branch"]
-  if force:
-    branch_args.append("-f")
-  branch_args.append(branch_name)
-  if ref is not None:
-    branch_args.append(ref)
-  git_exec(branch_args, repo_dir=repo_dir)
+def git_create_branch(
+    branch_name, *, checkout=True, ref=None, force=False, repo_dir=None
+):
+    branch_args = ["branch"]
+    if force:
+        branch_args.append("-f")
+    branch_args.append(branch_name)
+    if ref is not None:
+        branch_args.append(ref)
+    git_exec(branch_args, repo_dir=repo_dir)
 
-  if checkout:
-    git_exec(["checkout", branch_name], repo_dir=repo_dir)
+    if checkout:
+        git_exec(["checkout", branch_name], repo_dir=repo_dir)
 
 
 def git_push_branch(repository, branch_name, *, force=False, repo_dir=None):
-  push_args = ["push", "--set-upstream"]
-  if force:
-    push_args.append("-f")
-  push_args.append(repository)
-  push_args.append(f"{branch_name}:{branch_name}")
-  git_exec(push_args, repo_dir=repo_dir)
+    push_args = ["push", "--set-upstream"]
+    if force:
+        push_args.append("-f")
+    push_args.append(repository)
+    push_args.append(f"{branch_name}:{branch_name}")
+    git_exec(push_args, repo_dir=repo_dir)
 
 
 def git_branch_exists(branch_name, *, repo_dir=None):
-  output = git_exec(["branch", "-l", branch_name],
-                    repo_dir=repo_dir,
-                    quiet=True,
-                    capture_output=True).strip()
-  return bool(output)
+    output = git_exec(
+        ["branch", "-l", branch_name],
+        repo_dir=repo_dir,
+        quiet=True,
+        capture_output=True,
+    ).strip()
+    return bool(output)
 
 
 def git_submodule_set_origin(path, *, url=None, branch=None, repo_dir=None):
-  if url is not None:
-    git_exec(["submodule", "set-url", "--", path, url], repo_dir=repo_dir)
+    if url is not None:
+        git_exec(["submodule", "set-url", "--", path, url], repo_dir=repo_dir)
 
-  if branch is not None:
-    try:
-      if branch == "--default":
-        git_exec(["submodule", "set-branch", "--default", "--", path],
-                 repo_dir=repo_dir)
-      else:
-        git_exec(["submodule", "set-branch", "--branch", branch, "--", path],
-                 repo_dir=repo_dir)
-    except subprocess.CalledProcessError:
-      # The set-branch command returns 0 on change and !0 on no change.
-      # This is a bit unfortunate.
-      ...
+    if branch is not None:
+        try:
+            if branch == "--default":
+                git_exec(
+                    ["submodule", "set-branch", "--default", "--", path],
+                    repo_dir=repo_dir,
+                )
+            else:
+                git_exec(
+                    ["submodule", "set-branch", "--branch", branch, "--", path],
+                    repo_dir=repo_dir,
+                )
+        except subprocess.CalledProcessError:
+            # The set-branch command returns 0 on change and !0 on no change.
+            # This is a bit unfortunate.
+            ...
 
 
 def git_reset(ref, *, hard=True, repo_dir=None):
-  args = ["reset"]
-  if hard:
-    args.append("--hard")
-  args.append(ref)
-  git_exec(args, repo_dir=repo_dir)
+    args = ["reset"]
+    if hard:
+        args.append("--hard")
+    args.append(ref)
+    git_exec(args, repo_dir=repo_dir)
 
 
 def git_current_commit(*, repo_dir=None) -> Tuple[str, str]:
-  output = git_exec(["log", "-n", "1", "--pretty=format:%H (%ci): %s"],
-                    capture_output=True,
-                    repo_dir=repo_dir,
-                    quiet=True)
-  output = output.strip()
-  parts = output.split(" ")
-  # Return commit, full_summary
-  return parts[0], output
+    output = git_exec(
+        ["log", "-n", "1", "--pretty=format:%H (%ci): %s"],
+        capture_output=True,
+        repo_dir=repo_dir,
+        quiet=True,
+    )
+    output = output.strip()
+    parts = output.split(" ")
+    # Return commit, full_summary
+    return parts[0], output
 
 
 def git_create_commit(*, message, add_all=False, repo_dir=None):
-  if add_all:
-    git_exec(["add", "-A"], repo_dir=repo_dir)
-  git_exec(["commit", "-m", message])
+    if add_all:
+        git_exec(["add", "-A"], repo_dir=repo_dir)
+    git_exec(["commit", "-m", message])
 
 
 def git_ls_remote_branches(repository_url, *, filter=None, repo_dir=None):
-  args = ["ls-remote", "-h", repository_url]
-  if filter:
-    args.extend(filter)
-  output = git_exec(args, quiet=True, capture_output=True)
-  lines = output.strip().splitlines(keepends=False)
+    args = ["ls-remote", "-h", repository_url]
+    if filter:
+        args.extend(filter)
+    output = git_exec(args, quiet=True, capture_output=True)
+    lines = output.strip().splitlines(keepends=False)
 
-  # Format is <commit> refs/heads/branch_name
-  def extract_branch(line):
-    parts = re.split("\\s+", line)
-    ref = parts[1]
-    prefix = "refs/heads/"
-    if ref.startswith(prefix):
-      ref = ref[len(prefix):]
-    return ref
+    # Format is <commit> refs/heads/branch_name
+    def extract_branch(line):
+        parts = re.split("\\s+", line)
+        ref = parts[1]
+        prefix = "refs/heads/"
+        if ref.startswith(prefix):
+            ref = ref[len(prefix) :]
+        return ref
 
-  return [extract_branch(l) for l in lines]
+    return [extract_branch(l) for l in lines]
 
 
 def git_exec(args, *, repo_dir=None, quiet=False, capture_output=False):
-  full_args = ["git"] + args
-  full_args_quoted = [shlex.quote(a) for a in full_args]
-  if not repo_dir:
-    repo_dir = get_repo_root()
-  if not quiet:
-    print(f"  ++ EXEC: (cd {repo_dir} && {' '.join(full_args_quoted)})")
-  if capture_output:
-    return subprocess.check_output(full_args, cwd=repo_dir).decode("utf-8")
-  else:
-    subprocess.check_call(full_args, cwd=repo_dir)
+    full_args = ["git"] + args
+    full_args_quoted = [shlex.quote(a) for a in full_args]
+    if not repo_dir:
+        repo_dir = get_repo_root()
+    if not quiet:
+        print(f"  ++ EXEC: (cd {repo_dir} && {' '.join(full_args_quoted)})")
+    if capture_output:
+        return subprocess.check_output(full_args, cwd=repo_dir).decode("utf-8")
+    else:
+        subprocess.check_call(full_args, cwd=repo_dir)
diff --git a/build_tools/scripts/integrate/patch_module.py b/build_tools/scripts/integrate/patch_module.py
index 2184bfe..fbe2230 100755
--- a/build_tools/scripts/integrate/patch_module.py
+++ b/build_tools/scripts/integrate/patch_module.py
@@ -32,78 +32,77 @@
 
 
 def main(args):
-  module_info = iree_modules.MODULE_INFOS.get(args.module)
-  if not module_info:
-    raise SystemExit(f"ERROR: Bad value for --module. Must be one of: "
-                     f"{', '.join(iree_modules.MODULE_INFOS.keys())}")
+    module_info = iree_modules.MODULE_INFOS.get(args.module)
+    if not module_info:
+        raise SystemExit(
+            f"ERROR: Bad value for --module. Must be one of: "
+            f"{', '.join(iree_modules.MODULE_INFOS.keys())}"
+        )
 
-  if args.command == "patch":
-    main_patch(args, module_info)
-  else:
-    raise SystemExit(
-        f"ERROR: Unrecognized --command. Must be one of: patch, unpatch")
+    if args.command == "patch":
+        main_patch(args, module_info)
+    else:
+        raise SystemExit(
+            f"ERROR: Unrecognized --command. Must be one of: patch, unpatch"
+        )
 
 
 def main_patch(args, module_info: iree_modules.ModuleInfo):
-  module_root = os.path.join(iree_utils.get_repo_root(), module_info.path)
-  setup_module_remotes(module_root, module_info)
+    module_root = os.path.join(iree_utils.get_repo_root(), module_info.path)
+    setup_module_remotes(module_root, module_info)
 
-  branch_name = find_unused_branch_name(module_info)
-  print(f"Allocated branch: {branch_name}")
-  current_commit, summary = iree_utils.git_current_commit(repo_dir=module_root)
-  print(f"Module is currently at: {summary}")
-  print(
-      f"*** Pushing branch {branch_name} to {module_info.fork_repository_push} ***"
-  )
-  print(f"(Please ignore any messages below about creating a PR)\n")
-  iree_utils.git_exec([
-      "push", PATCH_REMOTE_ALIAS, f"{current_commit}:refs/heads/{branch_name}"
-  ],
-                      repo_dir=module_root)
-  print(f"*** Branch {branch_name} pushed ***")
+    branch_name = find_unused_branch_name(module_info)
+    print(f"Allocated branch: {branch_name}")
+    current_commit, summary = iree_utils.git_current_commit(repo_dir=module_root)
+    print(f"Module is currently at: {summary}")
+    print(f"*** Pushing branch {branch_name} to {module_info.fork_repository_push} ***")
+    print(f"(Please ignore any messages below about creating a PR)\n")
+    iree_utils.git_exec(
+        ["push", PATCH_REMOTE_ALIAS, f"{current_commit}:refs/heads/{branch_name}"],
+        repo_dir=module_root,
+    )
+    print(f"*** Branch {branch_name} pushed ***")
 
-  print(f"******* Congratulations *******")
-  print(
-      f"You have pushed your commits to {branch_name} on {module_info.fork_repository_push}."
-  )
-  print(
-      f"Your main repository should now show that the submodule has been edited."
-  )
-  print(f"Make a commit, referencing the above branch cherry-picks and ")
-  print(f"land the resulting PR.")
-  print(f"You can push more commits to this module's patch branch via:")
-  print(
-      f"  (cd {module_info.path} && git push {PATCH_REMOTE_ALIAS} HEAD:{branch_name})"
-  )
+    print(f"******* Congratulations *******")
+    print(
+        f"You have pushed your commits to {branch_name} on {module_info.fork_repository_push}."
+    )
+    print(f"Your main repository should now show that the submodule has been edited.")
+    print(f"Make a commit, referencing the above branch cherry-picks and ")
+    print(f"land the resulting PR.")
+    print(f"You can push more commits to this module's patch branch via:")
+    print(
+        f"  (cd {module_info.path} && git push {PATCH_REMOTE_ALIAS} HEAD:{branch_name})"
+    )
 
 
-def setup_module_remotes(module_root: str,
-                         module_info: iree_modules.ModuleInfo):
-  iree_utils.git_setup_remote(PATCH_REMOTE_ALIAS,
-                              url=module_info.fork_repository_push,
-                              repo_dir=module_root)
+def setup_module_remotes(module_root: str, module_info: iree_modules.ModuleInfo):
+    iree_utils.git_setup_remote(
+        PATCH_REMOTE_ALIAS, url=module_info.fork_repository_push, repo_dir=module_root
+    )
 
 
 def find_unused_branch_name(module_info: iree_modules.ModuleInfo):
-  branch_base = f"{module_info.branch_prefix}{date.today().strftime('%Y%m%d')}"
-  branch_name = branch_base
-  existing_branches = iree_utils.git_ls_remote_branches(
-      module_info.fork_repository_pull,
-      filter=[f"refs/heads/{module_info.branch_prefix}*"])
-  i = 1
-  while branch_name in existing_branches:
-    branch_name = f"{branch_base}.{i}"
-    i += 1
-  return branch_name
+    branch_base = f"{module_info.branch_prefix}{date.today().strftime('%Y%m%d')}"
+    branch_name = branch_base
+    existing_branches = iree_utils.git_ls_remote_branches(
+        module_info.fork_repository_pull,
+        filter=[f"refs/heads/{module_info.branch_prefix}*"],
+    )
+    i = 1
+    while branch_name in existing_branches:
+        branch_name = f"{branch_base}.{i}"
+        i += 1
+    return branch_name
 
 
 def parse_arguments(argv):
-  parser = argparse.ArgumentParser(description="IREE Submodule Patcher")
-  parser.add_argument("--module", help="Submodule to operate on", default=None)
-  parser.add_argument("--command", help="Command to execute", default="patch")
-  args = parser.parse_args(argv)
-  return args
+    parser = argparse.ArgumentParser(description="IREE Submodule Patcher")
+    parser.add_argument("--module", help="Submodule to operate on", default=None)
+    parser.add_argument("--command", help="Command to execute", default="patch")
+    args = parser.parse_args(argv)
+    return args
 
 
 if __name__ == "__main__":
-  main(parse_arguments(sys.argv[1:]))
+    main(parse_arguments(sys.argv[1:]))
diff --git a/build_tools/scripts/ir_to_markdown.py b/build_tools/scripts/ir_to_markdown.py
index 2642f42..476dff3 100644
--- a/build_tools/scripts/ir_to_markdown.py
+++ b/build_tools/scripts/ir_to_markdown.py
@@ -34,71 +34,74 @@
 
 
 def parse_arguments():
-  """Parses command line arguments."""
+    """Parses command line arguments."""
 
-  parser = argparse.ArgumentParser()
-  parser.add_argument(
-      'input_file_path',
-      type=str,
-      nargs='?',
-      metavar="<input_file_path>",
-      help='Input IR dump (.mlir from -mlir-print-ir-after-all)')
-  parser.add_argument('-o,',
-                      '--output',
-                      type=str,
-                      required=True,
-                      metavar="<output>",
-                      help='Output file path (e.g. translation_ir.md)')
-  # TODO(scotttodd): flags for original IR path and compilation command line
-  #                  .md could then show original IR + flags -> output
-  # TODO(scotttodd): flag for markdown flavor (mkdocs, github, etc.)
-  # TODO(scotttodd): flag for diff view (correlate IR before and IR after)?
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "input_file_path",
+        type=str,
+        nargs="?",
+        metavar="<input_file_path>",
+        help="Input IR dump (.mlir from -mlir-print-ir-after-all)",
+    )
+    parser.add_argument(
+        "-o,",
+        "--output",
+        type=str,
+        required=True,
+        metavar="<output>",
+        help="Output file path (e.g. translation_ir.md)",
+    )
+    # TODO(scotttodd): flags for original IR path and compilation command line
+    #                  .md could then show original IR + flags -> output
+    # TODO(scotttodd): flag for markdown flavor (mkdocs, github, etc.)
+    # TODO(scotttodd): flag for diff view (correlate IR before and IR after)?
 
-  return parser.parse_args()
+    return parser.parse_args()
 
 
 def main(args):
-  input_file_path = args.input_file_path
-  output_file_path = args.output
-  print("Converting input file '%s'" % (input_file_path))
-  print("     into output file '%s'" % (output_file_path))
+    input_file_path = args.input_file_path
+    output_file_path = args.output
+    print("Converting input file '%s'" % (input_file_path))
+    print("     into output file '%s'" % (output_file_path))
 
-  with open(input_file_path, "r") as input_file:
-    with open(output_file_path, "w") as output_file:
+    with open(input_file_path, "r") as input_file:
+        with open(output_file_path, "w") as output_file:
+            # Iterate line by line through the input file, collecting text into
+            # blocks and writing them into the output file with markdown formatting
+            # as we go.
+            #
+            # Note: we could parse through and find/replace within the file using
+            # regex (or sed), but iterating this way is easier to understand and
+            # uses a predictable amount of memory.
 
-      # Iterate line by line through the input file, collecting text into
-      # blocks and writing them into the output file with markdown formatting
-      # as we go.
-      #
-      # Note: we could parse through and find/replace within the file using
-      # regex (or sed), but iterating this way is easier to understand and
-      # uses a predictable amount of memory.
+            current_block_lines = []
+            dump_after_regex = re.compile(
+                MLIR_START_SEQUENCE + "\s(.*)\s" + MLIR_END_SEQUENCE
+            )
 
-      current_block_lines = []
-      dump_after_regex = re.compile(MLIR_START_SEQUENCE + "\s(.*)\s" +
-                                    MLIR_END_SEQUENCE)
+            def finish_block():
+                nonlocal current_block_lines
+                if len(current_block_lines) != 0:
+                    current_block_lines.append("```\n\n")
+                    output_file.writelines(current_block_lines)
+                    current_block_lines = []
 
-      def finish_block():
-        nonlocal current_block_lines
-        if len(current_block_lines) != 0:
-          current_block_lines.append("```\n\n")
-          output_file.writelines(current_block_lines)
-          current_block_lines = []
+            for input_line in input_file:
+                if input_line == "\n":
+                    continue
 
-      for input_line in input_file:
-        if input_line == "\n":
-          continue
+                if input_line.startswith(MLIR_START_SEQUENCE):
+                    finish_block()
+                    header_text = dump_after_regex.match(input_line).group(1)
+                    current_block_lines.append("### " + header_text + "\n\n")
+                    current_block_lines.append("```mlir\n")
+                else:
+                    current_block_lines.append(input_line)
 
-        if input_line.startswith(MLIR_START_SEQUENCE):
-          finish_block()
-          header_text = dump_after_regex.match(input_line).group(1)
-          current_block_lines.append("### " + header_text + "\n\n")
-          current_block_lines.append("```mlir\n")
-        else:
-          current_block_lines.append(input_line)
-
-      finish_block()
+            finish_block()
 
 
-if __name__ == '__main__':
-  main(parse_arguments())
+if __name__ == "__main__":
+    main(parse_arguments())
diff --git a/build_tools/scripts/local_web_server.py b/build_tools/scripts/local_web_server.py
index 835a760..a073273 100644
--- a/build_tools/scripts/local_web_server.py
+++ b/build_tools/scripts/local_web_server.py
@@ -20,47 +20,53 @@
 
 
 class CORSHTTPRequestHandler(server.SimpleHTTPRequestHandler):
+    def __init__(self, *args, **kwargs):
+        # Include MIME types for files we expect to be serving.
+        # https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Common_types
+        self.extensions_map.update(
+            {
+                ".js": "application/javascript",
+                ".wasm": "application/wasm",
+            }
+        )
+        super().__init__(*args, **kwargs)
 
-  def __init__(self, *args, **kwargs):
-    # Include MIME types for files we expect to be serving.
-    # https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Common_types
-    self.extensions_map.update({
-        ".js": "application/javascript",
-        ".wasm": "application/wasm",
-    })
-    super().__init__(*args, **kwargs)
+    # Inspiration for this hack: https://stackoverflow.com/a/13354482
+    def end_headers(self):
+        self.send_cors_headers()
 
-  # Inspiration for this hack: https://stackoverflow.com/a/13354482
-  def end_headers(self):
-    self.send_cors_headers()
+        server.SimpleHTTPRequestHandler.end_headers(self)
 
-    server.SimpleHTTPRequestHandler.end_headers(self)
-
-  def send_cors_headers(self):
-    # Emscripten uses SharedArrayBuffer for its multithreading, which requires
-    # Cross Origin Opener Policy and Cross Origin Embedder Policy headers:
-    #   * https://emscripten.org/docs/porting/pthreads.html
-    #   * https://developer.chrome.com/blog/enabling-shared-array-buffer/
-    self.send_header("Cross-Origin-Embedder-Policy", "require-corp")
-    self.send_header("Cross-Origin-Opener-Policy", "same-origin")
+    def send_cors_headers(self):
+        # Emscripten uses SharedArrayBuffer for its multithreading, which requires
+        # Cross Origin Opener Policy and Cross Origin Embedder Policy headers:
+        #   * https://emscripten.org/docs/porting/pthreads.html
+        #   * https://developer.chrome.com/blog/enabling-shared-array-buffer/
+        self.send_header("Cross-Origin-Embedder-Policy", "require-corp")
+        self.send_header("Cross-Origin-Opener-Policy", "same-origin")
 
 
-if __name__ == '__main__':
-  import argparse
-  parser = argparse.ArgumentParser()
-  parser.add_argument('--directory',
-                      '-d',
-                      default=os.getcwd(),
-                      help='Specify alternative directory '
-                      '[default:current directory]')
-  parser.add_argument('port',
-                      action='store',
-                      default=8000,
-                      type=int,
-                      nargs='?',
-                      help='Specify alternate port [default: 8000]')
-  args = parser.parse_args()
+if __name__ == "__main__":
+    import argparse
 
-  server.test(HandlerClass=partial(CORSHTTPRequestHandler,
-                                   directory=args.directory),
-              port=args.port)
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--directory",
+        "-d",
+        default=os.getcwd(),
+        help="Specify alternative directory " "[default:current directory]",
+    )
+    parser.add_argument(
+        "port",
+        action="store",
+        default=8000,
+        type=int,
+        nargs="?",
+        help="Specify alternate port [default: 8000]",
+    )
+    args = parser.parse_args()
+
+    server.test(
+        HandlerClass=partial(CORSHTTPRequestHandler, directory=args.directory),
+        port=args.port,
+    )
diff --git a/build_tools/scripts/update_tflite_models.py b/build_tools/scripts/update_tflite_models.py
index e2ea887..2af25fb 100644
--- a/build_tools/scripts/update_tflite_models.py
+++ b/build_tools/scripts/update_tflite_models.py
@@ -26,45 +26,44 @@
 import urllib
 
 FLAGS = flags.FLAGS
-flags.DEFINE_string('file', '', 'file to update')
+flags.DEFINE_string("file", "", "file to update")
 
-file_dict = dict({
-    "mobilenet_v1.tflite":
-        "https://tfhub.dev/tensorflow/lite-model/mobilenet_v1_1.0_160/1/default/1?lite-format=tflite",
-    "posenet_i8.tflite":
-        "https://tfhub.dev/google/lite-model/movenet/singlepose/lightning/tflite/int8/4?lite-format=tflite",
-    "posenet_i8_input.jpg":
-        "https://github.com/tensorflow/examples/raw/master/lite/examples/pose_estimation/raspberry_pi/test_data/image3.jpeg"
-})
+file_dict = dict(
+    {
+        "mobilenet_v1.tflite": "https://tfhub.dev/tensorflow/lite-model/mobilenet_v1_1.0_160/1/default/1?lite-format=tflite",
+        "posenet_i8.tflite": "https://tfhub.dev/google/lite-model/movenet/singlepose/lightning/tflite/int8/4?lite-format=tflite",
+        "posenet_i8_input.jpg": "https://github.com/tensorflow/examples/raw/master/lite/examples/pose_estimation/raspberry_pi/test_data/image3.jpeg",
+    }
+)
 
 BUCKET_NAME = "iree-model-artifacts"
 FOLDER_NAME = "tflite-integration-tests"
 
 
 def upload_model(source, destination, tmpfile):
-  """Uploads a file to the bucket."""
-  urllib.request.urlretrieve(source, tmpfile)
+    """Uploads a file to the bucket."""
+    urllib.request.urlretrieve(source, tmpfile)
 
-  storage_client = storage.Client()
-  bucket = storage_client.get_bucket(BUCKET_NAME)
-  blob = bucket.blob("/".join([FOLDER_NAME, destination]))
-  blob.upload_from_filename(tmpfile)
+    storage_client = storage.Client()
+    bucket = storage_client.get_bucket(BUCKET_NAME)
+    blob = bucket.blob("/".join([FOLDER_NAME, destination]))
+    blob.upload_from_filename(tmpfile)
 
 
 def main(argv):
-  tf = tempfile.NamedTemporaryFile()
+    tf = tempfile.NamedTemporaryFile()
 
-  items = file_dict.items()
+    items = file_dict.items()
 
-  if FLAGS.file in file_dict:
-    items = [(FLAGS.file, file_dict[FLAGS.file])]
-  elif FLAGS.file != "all":
-    print('Unknown file to upload: ', "\"" + FLAGS.file + "\"")
-    exit()
+    if FLAGS.file in file_dict:
+        items = [(FLAGS.file, file_dict[FLAGS.file])]
+    elif FLAGS.file != "all":
+        print("Unknown file to upload: ", '"' + FLAGS.file + '"')
+        exit()
 
-  for dst, src in items:
-    upload_model(src, dst, tf.name)
+    for dst, src in items:
+        upload_model(src, dst, tf.name)
 
 
-if __name__ == '__main__':
-  app.run(main)
+if __name__ == "__main__":
+    app.run(main)
diff --git a/build_tools/scripts/utils.py b/build_tools/scripts/utils.py
index 7713f5d..36b31ae 100644
--- a/build_tools/scripts/utils.py
+++ b/build_tools/scripts/utils.py
@@ -14,38 +14,46 @@
 
 
 def create_markdown_table(rows: Sequence[Sequence[str]]):
-  """Converts a 2D array to a Markdown table."""
-  return '\n'.join([' | '.join(row) for row in rows])
+    """Converts a 2D array to a Markdown table."""
+    return "\n".join([" | ".join(row) for row in rows])
 
 
-def check_and_get_output_lines(command: Sequence[str],
-                               dry_run: bool = False,
-                               log_stderr: bool = True):
-  print(f'Running: `{" ".join(command)}`')
-  if dry_run:
-    return None, None
-  return subprocess.run(command, stdout=subprocess.PIPE, text=true,
-                        check=True).stdout.splitlines()
+def check_and_get_output_lines(
+    command: Sequence[str], dry_run: bool = False, log_stderr: bool = True
+):
+    print(f'Running: `{" ".join(command)}`')
+    if dry_run:
+        return None, None
+    return subprocess.run(
+        command, stdout=subprocess.PIPE, text=True, check=True
+    ).stdout.splitlines()
 
 
 def get_test_targets(test_suite_path: str):
-  """Returns a list of test targets for the given test suite."""
-  # Check if the suite exists (which may not be true for failing suites).
-  # We use two queries here because the return code for a failed query is
-  # unfortunately the same as the return code for a bazel configuration error.
-  target_dir = test_suite_path.split(':')[0]
-  query = [
-      'bazel', 'query', '--ui_event_filters=-DEBUG',
-      '--noshow_loading_progress', '--noshow_progress', f'{target_dir}/...'
-  ]
-  targets = check_and_get_output_lines(query)
-  if test_suite_path not in targets:
-    return []
+    """Returns a list of test targets for the given test suite."""
+    # Check if the suite exists (which may not be true for failing suites).
+    # We use two queries here because the return code for a failed query is
+    # unfortunately the same as the return code for a bazel configuration error.
+    target_dir = test_suite_path.split(":")[0]
+    query = [
+        "bazel",
+        "query",
+        "--ui_event_filters=-DEBUG",
+        "--noshow_loading_progress",
+        "--noshow_progress",
+        f"{target_dir}/...",
+    ]
+    targets = check_and_get_output_lines(query)
+    if test_suite_path not in targets:
+        return []
 
-  query = [
-      'bazel', 'query', '--ui_event_filters=-DEBUG',
-      '--noshow_loading_progress', '--noshow_progress',
-      f'tests({test_suite_path})'
-  ]
-  tests = check_and_get_output_lines(query)
-  return tests
+    query = [
+        "bazel",
+        "query",
+        "--ui_event_filters=-DEBUG",
+        "--noshow_loading_progress",
+        "--noshow_progress",
+        f"tests({test_suite_path})",
+    ]
+    tests = check_and_get_output_lines(query)
+    return tests
diff --git a/build_tools/testing/gen_test_matrix.py b/build_tools/testing/gen_test_matrix.py
index 6b9001e..f91548b 100755
--- a/build_tools/testing/gen_test_matrix.py
+++ b/build_tools/testing/gen_test_matrix.py
@@ -86,11 +86,11 @@
 import shutil
 
 try:
-  import yaml
+    import yaml
 except ModuleNotFoundError as e:
-  raise RuntimeError(
-      f"PyYAML is not installed. Typically: 'python -m pip install PyYAML"
-  ) from e
+    raise RuntimeError(
+        f"PyYAML is not installed. Typically: 'python -m pip install PyYAML"
+    ) from e
 
 ################################################################################
 # Base classes and types
@@ -98,74 +98,75 @@
 
 
 class Environment:
-  """Runtime environment for processing a directory."""
+    """Runtime environment for processing a directory."""
 
-  def __init__(self, args, root_dir: str, output_dir: str):
-    self.args = args
-    self.root_dir = root_dir
-    self.output_dir = output_dir
-    # Set of directories containing purely generated files.
-    self.gen_dirs = set()  # type: Set[str]
-    # Set of (gen_dir, file_name) for all files in a given directory that have
-    # been generated.
-    self.gen_files = set()  # type: Set[Tuple[str, str]]
+    def __init__(self, args, root_dir: str, output_dir: str):
+        self.args = args
+        self.root_dir = root_dir
+        self.output_dir = output_dir
+        # Set of directories containing purely generated files.
+        self.gen_dirs = set()  # type: Set[str]
+        # Set of (gen_dir, file_name) for all files in a given directory that have
+        # been generated.
+        self.gen_files = set()  # type: Set[Tuple[str, str]]
 
-  def remember_gen_file(self, gen_file_path: str):
-    gen_dir = os.path.dirname(gen_file_path)
-    gen_file = os.path.basename(gen_file_path)
-    self.gen_dirs.add(gen_dir)
-    self.gen_files.add((gen_dir, gen_file))
+    def remember_gen_file(self, gen_file_path: str):
+        gen_dir = os.path.dirname(gen_file_path)
+        gen_file = os.path.basename(gen_file_path)
+        self.gen_dirs.add(gen_dir)
+        self.gen_files.add((gen_dir, gen_file))
 
-  def prune_gen_files(self):
-    found_gen_files = set()
-    for gen_dir in self.gen_dirs:
-      dir_listing = os.listdir(gen_dir)
-      for fname in dir_listing:
-        found_gen_files.add((gen_dir, fname))
-    obsolete_gen_files = found_gen_files - self.gen_files
-    if obsolete_gen_files:
-      for gen_dir, fname in obsolete_gen_files:
-        obsolete_path = os.path.join(gen_dir, fname)
-        log(f"Removing obsolete file {obsolete_path}")
-        if os.path.isdir(obsolete_path):
-          shutil.rmtree(obsolete_path)
-        else:
-          os.remove(obsolete_path)
+    def prune_gen_files(self):
+        found_gen_files = set()
+        for gen_dir in self.gen_dirs:
+            dir_listing = os.listdir(gen_dir)
+            for fname in dir_listing:
+                found_gen_files.add((gen_dir, fname))
+        obsolete_gen_files = found_gen_files - self.gen_files
+        if obsolete_gen_files:
+            for gen_dir, fname in obsolete_gen_files:
+                obsolete_path = os.path.join(gen_dir, fname)
+                log(f"Removing obsolete file {obsolete_path}")
+                if os.path.isdir(obsolete_path):
+                    shutil.rmtree(obsolete_path)
+                else:
+                    os.remove(obsolete_path)
 
 
 class Runner:
-  """Base class for a runner."""
-  RUNNER_IDENT = None
+    """Base class for a runner."""
 
-  def __init__(self, env: Environment, test_id: str):
-    self.env = env
-    self.test_id = test_id
-    self.gen_dir = os.path.join(self.env.output_dir, "generated")
-    self.xfail = False
+    RUNNER_IDENT = None
 
-  @property
-  def runner_ident(self) -> str:
-    assert self.RUNNER_IDENT, "Must define RUNNER_IDENT"
-    return self.RUNNER_IDENT
+    def __init__(self, env: Environment, test_id: str):
+        self.env = env
+        self.test_id = test_id
+        self.gen_dir = os.path.join(self.env.output_dir, "generated")
+        self.xfail = False
 
-  def create_gen_file(self, file_name: str, mode: str = "wt"):
-    os.makedirs(self.gen_dir, exist_ok=True)
-    full_path = os.path.join(self.gen_dir, file_name)
-    handle = open(full_path, mode)
-    self.env.remember_gen_file(full_path)
-    return handle
+    @property
+    def runner_ident(self) -> str:
+        assert self.RUNNER_IDENT, "Must define RUNNER_IDENT"
+        return self.RUNNER_IDENT
 
-  def link_file(self, from_path: str, to_path: str):
-    if from_path == to_path:
-      return
-    from_path = os.path.realpath(from_path)
-    os.makedirs(os.path.dirname(to_path), exist_ok=True)
-    if os.path.exists(to_path):
-      os.remove(to_path)
-    os.symlink(from_path, to_path)
+    def create_gen_file(self, file_name: str, mode: str = "wt"):
+        os.makedirs(self.gen_dir, exist_ok=True)
+        full_path = os.path.join(self.gen_dir, file_name)
+        handle = open(full_path, mode)
+        self.env.remember_gen_file(full_path)
+        return handle
 
-  def generate(self):
-    raise NotImplementedError(f"Generate not implemented for {self.__class__}")
+    def link_file(self, from_path: str, to_path: str):
+        if from_path == to_path:
+            return
+        from_path = os.path.realpath(from_path)
+        os.makedirs(os.path.dirname(to_path), exist_ok=True)
+        if os.path.exists(to_path):
+            os.remove(to_path)
+        os.symlink(from_path, to_path)
+
+    def generate(self):
+        raise NotImplementedError(f"Generate not implemented for {self.__class__}")
 
 
 ################################################################################
@@ -174,105 +175,103 @@
 
 
 def parse_arguments():
-  parser = argparse.ArgumentParser(description="Test matrix generator")
-  parser.add_argument("--dir",
-                      required=True,
-                      type=str,
-                      help="Directory to process")
-  parser.add_argument("--output_dir",
-                      required=True,
-                      type=str,
-                      help="Output directory")
-  args = parser.parse_args()
-  return args
+    parser = argparse.ArgumentParser(description="Test matrix generator")
+    parser.add_argument("--dir", required=True, type=str, help="Directory to process")
+    parser.add_argument(
+        "--output_dir", required=True, type=str, help="Output directory"
+    )
+    args = parser.parse_args()
+    return args
 
 
 def main(args):
-  env = Environment(args, args.dir, args.output_dir)
-  process_directory(env)
+    env = Environment(args, args.dir, args.output_dir)
+    process_directory(env)
 
 
 def process_directory(env: Environment):
-  dir = os.path.realpath(env.root_dir)
-  try:
-    config_sections = read_directory_config(dir)
-  except Exception as e:
-    raise RuntimeError(f"Could not read configuration from {dir}") from e
-  for section in config_sections:
-    require_mapping(section)
-    for config_key, config_value in section.items():
-      if config_key == "lists":
-        # Ignore: a place to stash anchors and references.
-        pass
-      elif config_key == "test_groups":
-        require_list(config_value)
-        for test_group in config_value:
-          require_mapping(test_group)
-          process_test_group(env, test_group)
-      else:
-        raise ValueError(f"Unexpected top-level section {config_key}")
+    dir = os.path.realpath(env.root_dir)
+    try:
+        config_sections = read_directory_config(dir)
+    except Exception as e:
+        raise RuntimeError(f"Could not read configuration from {dir}") from e
+    for section in config_sections:
+        require_mapping(section)
+        for config_key, config_value in section.items():
+            if config_key == "lists":
+                # Ignore: a place to stash anchors and references.
+                pass
+            elif config_key == "test_groups":
+                require_list(config_value)
+                for test_group in config_value:
+                    require_mapping(test_group)
+                    process_test_group(env, test_group)
+            else:
+                raise ValueError(f"Unexpected top-level section {config_key}")
 
-  env.prune_gen_files()
+    env.prune_gen_files()
 
 
 def process_test_group(env: Environment, test_group):
-  group_id = get_mapping_key(test_group, "id", require_str)
-  matrix = generate_matrix(
-      get_mapping_key(test_group, "matrix", require_mapping))
-  matrix_id_map = {group_id.format(**m): m for m in matrix}
-  for runner_map in get_mapping_key(test_group, "runner", require_list):
-    for matrix_id, matrix_map in matrix_id_map.items():
-      runner = create_runner(env, matrix_id, runner_map, matrix_map)
-      runner.xfail = (evaluate_xfail(test_group, matrix_map) and
-                      not evaluate_xpass(test_group, matrix_map))
-      runner.generate()
+    group_id = get_mapping_key(test_group, "id", require_str)
+    matrix = generate_matrix(get_mapping_key(test_group, "matrix", require_mapping))
+    matrix_id_map = {group_id.format(**m): m for m in matrix}
+    for runner_map in get_mapping_key(test_group, "runner", require_list):
+        for matrix_id, matrix_map in matrix_id_map.items():
+            runner = create_runner(env, matrix_id, runner_map, matrix_map)
+            runner.xfail = evaluate_xfail(
+                test_group, matrix_map
+            ) and not evaluate_xpass(test_group, matrix_map)
+            runner.generate()
 
 
 def evaluate_xfail(test_group, matrix_map) -> bool:
-  try:
-    xfail_list = flatten_lists(require_list(test_group["xfail"]))
-  except KeyError:
+    try:
+        xfail_list = flatten_lists(require_list(test_group["xfail"]))
+    except KeyError:
+        return False
+    for xfail_group in xfail_list:
+        if evaluate_matrix_map_predicate(matrix_map, xfail_group):
+            return True
     return False
-  for xfail_group in xfail_list:
-    if evaluate_matrix_map_predicate(matrix_map, xfail_group):
-      return True
-  return False
 
 
 def evaluate_xpass(test_group, matrix_map) -> bool:
-  try:
-    xpass_list = flatten_lists(require_list(test_group["xpass"]))
-  except KeyError:
+    try:
+        xpass_list = flatten_lists(require_list(test_group["xpass"]))
+    except KeyError:
+        return False
+    for xpass_group in xpass_list:
+        if evaluate_matrix_map_predicate(matrix_map, xpass_group):
+            return True
     return False
-  for xpass_group in xpass_list:
-    if evaluate_matrix_map_predicate(matrix_map, xpass_group):
-      return True
-  return False
 
 
 def evaluate_matrix_map_predicate(matrix_map, predicate_group) -> bool:
-  # Each key is something like 'matrix.<key>' which are and'ed
-  # together. Each value is either a literal or a list that is
-  # or'd together.
-  for pred_key, pred_value in predicate_group.items():
-    match_value = None
-    if pred_key.startswith("matrix."):
-      try:
-        match_value = matrix_map[pred_key[len("matrix."):]]
-      except KeyError:
-        raise ValueError(
-            f"Could not match matrix predicate to matrix value: {pred_key}")
-    else:
-      raise ValueError(
-          f"Expected a matrix predicate (i.e. matrix.) but got {pred_key}")
-    # Match list (OR) or literal (==)
-    if isinstance(pred_value, list):
-      if match_value not in flatten_lists(pred_value):
-        return False
-    else:
-      if pred_value != match_value:
-        return False
-  return True
+    # Each key is something like 'matrix.<key>' which are and'ed
+    # together. Each value is either a literal or a list that is
+    # or'd together.
+    for pred_key, pred_value in predicate_group.items():
+        match_value = None
+        if pred_key.startswith("matrix."):
+            try:
+                match_value = matrix_map[pred_key[len("matrix.") :]]
+            except KeyError:
+                raise ValueError(
+                    f"Could not match matrix predicate to matrix value: {pred_key}"
+                )
+        else:
+            raise ValueError(
+                f"Expected a matrix predicate (i.e. matrix.) but got {pred_key}"
+            )
+        # Match list (OR) or literal (==)
+        if isinstance(pred_value, list):
+            if match_value not in flatten_lists(pred_value):
+                return False
+        else:
+            if pred_value != match_value:
+                return False
+    return True
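The predicate semantics above (keys AND'ed together, list values OR'ed) can be illustrated with a small, self-contained sketch; the backend/model names below are hypothetical, and nested lists are assumed to have been flattened first, as `flatten_lists` does:

```python
# Hypothetical matrix entry and xfail predicate group; keys are AND'ed,
# a list value is OR'ed across its entries, a scalar must compare equal.
matrix_map = {"backend": "llvmcpu", "model": "resnet50"}
xfail_group = {
    "matrix.backend": ["llvmcpu", "vulkan"],  # OR: either backend matches
    "matrix.model": "resnet50",               # literal: must be equal
}

matches = all(
    matrix_map[key[len("matrix."):]]
    in (value if isinstance(value, list) else [value])
    for key, value in xfail_group.items()
)
print(matches)  # True -> this matrix entry would be marked XFAIL
```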
 
 
 ################################################################################
@@ -281,84 +280,84 @@
 
 
 def generate_matrix(matrix_map):
-  # List of (key, [value, value, ...])
-  matrix_entries = [(k, flatten_lists(v)) for k, v in matrix_map.items()]
-  # Permute.
-  permuted = []
+    # List of (key, [value, value, ...])
+    matrix_entries = [(k, flatten_lists(v)) for k, v in matrix_map.items()]
+    # Permute.
+    permuted = []
 
-  def accumulate(prior: dict, i: int):
-    if i == len(matrix_entries):
-      permuted.append(prior)
-      return
-    next_key, next_values = matrix_entries[i]
-    for next_value in next_values:
-      current = dict(prior)
-      current[next_key] = next_value
-      accumulate(current, i + 1)
+    def accumulate(prior: dict, i: int):
+        if i == len(matrix_entries):
+            permuted.append(prior)
+            return
+        next_key, next_values = matrix_entries[i]
+        for next_value in next_values:
+            current = dict(prior)
+            current[next_key] = next_value
+            accumulate(current, i + 1)
 
-  accumulate({}, 0)
-  return permuted
+    accumulate({}, 0)
+    return permuted
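The recursive `accumulate` helper is equivalent to taking the cartesian product of the (flattened) matrix value lists; a minimal sketch with hypothetical keys:

```python
import itertools

# Hypothetical matrix: every key/value combination becomes one test variant.
matrix = {"backend": ["llvmcpu", "vulkan"], "dtype": ["f32", "f16"]}
permuted = [
    dict(zip(matrix.keys(), values))
    for values in itertools.product(*matrix.values())
]
# [{'backend': 'llvmcpu', 'dtype': 'f32'}, {'backend': 'llvmcpu', 'dtype': 'f16'},
#  {'backend': 'vulkan', 'dtype': 'f32'}, {'backend': 'vulkan', 'dtype': 'f16'}]
```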
 
 
 def read_directory_config(dir: str) -> list:
-  sections = []
-  matrix_path = os.path.join(dir, "test_matrix.yaml")
-  with open(matrix_path, "r") as stream:
-    for section in yaml.safe_load_all(stream):
-      sections.append(section)
-  return sections
+    sections = []
+    matrix_path = os.path.join(dir, "test_matrix.yaml")
+    with open(matrix_path, "r") as stream:
+        for section in yaml.safe_load_all(stream):
+            sections.append(section)
+    return sections
 
 
 INDENT = 0
 
 
 def log(msg: str):
-  print("  " * INDENT + msg)
+    print("  " * INDENT + msg)
 
 
 @contextmanager
 def indent():
-  global INDENT
-  INDENT += 1
-  yield
-  INDENT -= 1
+    global INDENT
+    INDENT += 1
+    yield
+    INDENT -= 1
 
 
 def flatten_lists(l):
-  results = list()
-  for item in l:
-    if isinstance(item, list):
-      results.extend(flatten_lists(item))
-    else:
-      results.append(item)
-  return results
+    results = list()
+    for item in l:
+        if isinstance(item, list):
+            results.extend(flatten_lists(item))
+        else:
+            results.append(item)
+    return results
 
 
 def require_mapping(v):
-  if isinstance(v, dict):
-    return v
-  raise ValueError(f"Expected a YAML mapping for {v}")
+    if isinstance(v, dict):
+        return v
+    raise ValueError(f"Expected a YAML mapping for {v}")
 
 
 def require_list(v):
-  if isinstance(v, list):
-    return v
-  raise ValueError(f"Expected YAML list for {v}")
+    if isinstance(v, list):
+        return v
+    raise ValueError(f"Expected YAML list for {v}")
 
 
 def require_str(v):
-  if isinstance(v, str):
-    return v
-  raise ValueError(f"Expected str for {v}")
+    if isinstance(v, str):
+        return v
+    raise ValueError(f"Expected str for {v}")
 
 
 def get_mapping_key(mapping, key: str, checker=None):
-  if key not in mapping:
-    raise ValueError(f"Expected key '{key}' in {mapping}")
-  value = mapping[key]
-  if checker:
-    checker(value)
-  return value
+    if key not in mapping:
+        raise ValueError(f"Expected key '{key}' in {mapping}")
+    value = mapping[key]
+    if checker:
+        checker(value)
+    return value
 
 
 ################################################################################
@@ -417,42 +416,45 @@
 
 
 class TfHostRunner(Runner):
-  """Runner for tf e2e host tests."""
-  RUNNER_IDENT = "tfhost"
+    """Runner for tf e2e host tests."""
 
-  def __init__(self, env: Environment, test_id: str, runner_map: dict,
-               matrix_map: dict):
-    super().__init__(env=env, test_id=test_id)
-    self.main_file = get_mapping_key(runner_map, "main", require_str)
-    raw_arg_list = get_mapping_key(runner_map, "args", require_list)
-    self.args = [
-        require_str(raw_arg).format(**matrix_map) for raw_arg in raw_arg_list
-    ]
+    RUNNER_IDENT = "tfhost"
 
-  def generate(self):
-    # Generate the runner script.
-    file_name = (
-        f"{'XFAIL_' if self.xfail else ''}{self.test_id}_{self.runner_ident}.py"
-    )
-    with self.create_gen_file(file_name) as f:
-      parts = [
-          "import os",
-          "import sys",
-          "REQUIRE_IMPORTS = ['iree.tf.support.tf_utils', 'iree.tf.support.tf_test_utils']",
-          f"ARGS = {repr(self.args)}",
-          f"MAIN = os.path.join(os.path.dirname(__file__), '..', {repr(self.main_file)})",
-          f"XFAIL = {self.xfail}",
-          PYRUNNER_STUB,
-      ]
-      f.write("\n".join(parts))
+    def __init__(
+        self, env: Environment, test_id: str, runner_map: dict, matrix_map: dict
+    ):
+        super().__init__(env=env, test_id=test_id)
+        self.main_file = get_mapping_key(runner_map, "main", require_str)
+        raw_arg_list = get_mapping_key(runner_map, "args", require_list)
+        self.args = [
+            require_str(raw_arg).format(**matrix_map) for raw_arg in raw_arg_list
+        ]
 
-    # Copy/link the main file.
-    main_file_src_path = os.path.join(self.env.root_dir, self.main_file)
-    main_file_dst_path = os.path.join(self.env.output_dir, self.main_file)
-    if not os.path.exists(main_file_src_path):
-      raise RuntimeError(
-          f"Referenced main file '{main_file_src_path}' does not exist")
-    self.link_file(main_file_src_path, main_file_dst_path)
+    def generate(self):
+        # Generate the runner script.
+        file_name = (
+            f"{'XFAIL_' if self.xfail else ''}{self.test_id}_{self.runner_ident}.py"
+        )
+        with self.create_gen_file(file_name) as f:
+            parts = [
+                "import os",
+                "import sys",
+                "REQUIRE_IMPORTS = ['iree.tf.support.tf_utils', 'iree.tf.support.tf_test_utils']",
+                f"ARGS = {repr(self.args)}",
+                f"MAIN = os.path.join(os.path.dirname(__file__), '..', {repr(self.main_file)})",
+                f"XFAIL = {self.xfail}",
+                PYRUNNER_STUB,
+            ]
+            f.write("\n".join(parts))
+
+        # Copy/link the main file.
+        main_file_src_path = os.path.join(self.env.root_dir, self.main_file)
+        main_file_dst_path = os.path.join(self.env.output_dir, self.main_file)
+        if not os.path.exists(main_file_src_path):
+            raise RuntimeError(
+                f"Referenced main file '{main_file_src_path}' does not exist"
+            )
+        self.link_file(main_file_src_path, main_file_dst_path)
 
 
 RUNNER_CLASSES = {
@@ -460,18 +462,16 @@
 }
 
 
-def create_runner(env: Environment, test_id: str, runner_map: dict,
-                  matrix_map: dict):
-  runner_type = get_mapping_key(runner_map, "type", require_str)
-  try:
-    runner_class = RUNNER_CLASSES[runner_type]
-  except KeyError:
-    raise ValueError(f"Unknown runner type '{runner_type}'")
-  return runner_class(env=env,
-                      test_id=test_id,
-                      runner_map=runner_map,
-                      matrix_map=matrix_map)
+def create_runner(env: Environment, test_id: str, runner_map: dict, matrix_map: dict):
+    runner_type = get_mapping_key(runner_map, "type", require_str)
+    try:
+        runner_class = RUNNER_CLASSES[runner_type]
+    except KeyError:
+        raise ValueError(f"Unknown runner type '{runner_type}'")
+    return runner_class(
+        env=env, test_id=test_id, runner_map=runner_map, matrix_map=matrix_map
+    )
 
 
 if __name__ == "__main__":
-  main(parse_arguments())
+    main(parse_arguments())
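Putting the pieces of this generator together: the `id` template of a test group is expanded once per matrix permutation, and each expansion names one generated runner. A sketch with made-up values:

```python
# Hypothetical test-group id and matrix permutations (see process_test_group).
group_id = "mobilenet_{backend}_{dtype}"
matrix = [
    {"backend": "llvmcpu", "dtype": "f32"},
    {"backend": "vulkan", "dtype": "f32"},
]
matrix_id_map = {group_id.format(**m): m for m in matrix}
# {'mobilenet_llvmcpu_f32': {...}, 'mobilenet_vulkan_f32': {...}}
# A runner whose xfail predicate matches (and is not overridden by xpass) gets
# an "XFAIL_" prefix on its generated file name, e.g.
# "XFAIL_mobilenet_vulkan_f32_tfhost.py".
```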
diff --git a/build_tools/testing/generate_cmake_e2e_model_tests.py b/build_tools/testing/generate_cmake_e2e_model_tests.py
index 44f3a1a..1cdafe7 100755
--- a/build_tools/testing/generate_cmake_e2e_model_tests.py
+++ b/build_tools/testing/generate_cmake_e2e_model_tests.py
@@ -19,30 +19,32 @@
 
 TEMPLATE_DIR = pathlib.Path(__file__).parent
 GENERATED_E2E_MODEL_TESTS_CMAKE_TEMPLATE = string.Template(
-    (TEMPLATE_DIR / "generated_e2e_model_tests_template.cmake").read_text())
+    (TEMPLATE_DIR / "generated_e2e_model_tests_template.cmake").read_text()
+)
 
 
 def parse_arguments():
-  """Parses command-line options."""
+    """Parses command-line options."""
 
-  parser = argparse.ArgumentParser()
-  parser.add_argument("--output",
-                      required=True,
-                      help="Path to write the generated cmake file.")
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--output", required=True, help="Path to write the generated cmake file."
+    )
 
-  return parser.parse_args()
+    return parser.parse_args()
 
 
 def main(args: argparse.Namespace):
-  (gen_configs,
-   _) = benchmark_suites.iree.benchmark_collections.generate_benchmarks()
-  cmake_rules = e2e_model_tests.cmake_generator.generate_rules(
-      module_generation_configs=gen_configs)
-  output = GENERATED_E2E_MODEL_TESTS_CMAKE_TEMPLATE.substitute(
-      __TEST_RULES="\n".join(cmake_rules))
-  with open(args.output, "w") as output_file:
-    output_file.write(output)
+    (gen_configs, _) = benchmark_suites.iree.benchmark_collections.generate_benchmarks()
+    cmake_rules = e2e_model_tests.cmake_generator.generate_rules(
+        module_generation_configs=gen_configs
+    )
+    output = GENERATED_E2E_MODEL_TESTS_CMAKE_TEMPLATE.substitute(
+        __TEST_RULES="\n".join(cmake_rules)
+    )
+    with open(args.output, "w") as output_file:
+        output_file.write(output)
 
 
 if __name__ == "__main__":
-  main(parse_arguments())
+    main(parse_arguments())
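The generated CMake file is produced with a plain `string.Template`; a minimal sketch of the substitution, using a made-up test rule (the real template is read from `generated_e2e_model_tests_template.cmake`):

```python
import string

# Stand-in for the real template file; "$__TEST_RULES" is the placeholder
# filled with the newline-joined CMake rules.
template = string.Template("# Autogenerated e2e model tests\n$__TEST_RULES\n")
cmake_rules = ['iree_model_test(NAME "hypothetical_test")']  # made-up rule
print(template.substitute(__TEST_RULES="\n".join(cmake_rules)))
```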
diff --git a/build_tools/testing/generate_cmake_e2e_test_artifacts_suite.py b/build_tools/testing/generate_cmake_e2e_test_artifacts_suite.py
index 7b90648..85ab921 100755
--- a/build_tools/testing/generate_cmake_e2e_test_artifacts_suite.py
+++ b/build_tools/testing/generate_cmake_e2e_test_artifacts_suite.py
@@ -23,49 +23,64 @@
 PACKAGE_NAME_CMAKE_VARIABLE = "PACKAGE_NAME"
 ROOT_ARTIFACTS_DIR_CMAKE_VARIABLE = "ROOT_ARTIFACTS_DIR"
 
-GENERATED_E2E_TEST_FETCH_MODELS_CMAKE_FILENAMAE = "generated_e2e_test_fetch_models.cmake"
-GENERATED_E2E_TEST_IREE_ARTIFACTS_CMAKE_FILENAME = "generated_e2e_test_iree_artifacts.cmake"
+GENERATED_E2E_TEST_FETCH_MODELS_CMAKE_FILENAMAE = (
+    "generated_e2e_test_fetch_models.cmake"
+)
+GENERATED_E2E_TEST_IREE_ARTIFACTS_CMAKE_FILENAME = (
+    "generated_e2e_test_iree_artifacts.cmake"
+)
 
 
 def parse_arguments():
-  """Parses command-line options."""
+    """Parses command-line options."""
 
-  parser = argparse.ArgumentParser()
-  parser.add_argument("--output_dir",
-                      required=True,
-                      help="Dir path to write the generated cmake files.")
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--output_dir",
+        required=True,
+        help="Dir path to write the generated cmake files.",
+    )
 
-  return parser.parse_args()
+    return parser.parse_args()
 
 
 def main(args: argparse.Namespace):
-  # Currently benchmark is the only source of module generation configs.
-  (iree_module_generation_configs,
-   _) = benchmark_suites.iree.benchmark_collections.generate_benchmarks()
+    # Currently benchmark is the only source of module generation configs.
+    (
+        iree_module_generation_configs,
+        _,
+    ) = benchmark_suites.iree.benchmark_collections.generate_benchmarks()
 
-  dependent_model_map = iree_artifacts.get_dependent_model_map(
-      iree_module_generation_configs)
+    dependent_model_map = iree_artifacts.get_dependent_model_map(
+        iree_module_generation_configs
+    )
 
-  root_path = pathlib.PurePath("${%s}" % ROOT_ARTIFACTS_DIR_CMAKE_VARIABLE)
-  model_rule_map = model_rule_generator.generate_model_rule_map(
-      root_path=root_path, models=dependent_model_map.values())
+    root_path = pathlib.PurePath("${%s}" % ROOT_ARTIFACTS_DIR_CMAKE_VARIABLE)
+    model_rule_map = model_rule_generator.generate_model_rule_map(
+        root_path=root_path, models=dependent_model_map.values()
+    )
 
-  output_dir = pathlib.Path(args.output_dir)
-  fetch_models_cmake_file = output_dir / GENERATED_E2E_TEST_FETCH_MODELS_CMAKE_FILENAMAE
-  model_cmake_rules = itertools.chain.from_iterable(
-      rule.cmake_rules for rule in model_rule_map.values())
-  fetch_models_cmake_file.write_text("\n".join(model_cmake_rules))
+    output_dir = pathlib.Path(args.output_dir)
+    fetch_models_cmake_file = (
+        output_dir / GENERATED_E2E_TEST_FETCH_MODELS_CMAKE_FILENAMAE
+    )
+    model_cmake_rules = itertools.chain.from_iterable(
+        rule.cmake_rules for rule in model_rule_map.values()
+    )
+    fetch_models_cmake_file.write_text("\n".join(model_cmake_rules))
 
-  package_name = "${%s}" % PACKAGE_NAME_CMAKE_VARIABLE
-  iree_cmake_rules = iree_rule_generator.generate_rules(
-      package_name=package_name,
-      root_path=root_path,
-      module_generation_configs=iree_module_generation_configs,
-      model_rule_map=model_rule_map)
+    package_name = "${%s}" % PACKAGE_NAME_CMAKE_VARIABLE
+    iree_cmake_rules = iree_rule_generator.generate_rules(
+        package_name=package_name,
+        root_path=root_path,
+        module_generation_configs=iree_module_generation_configs,
+        model_rule_map=model_rule_map,
+    )
 
-  (output_dir / GENERATED_E2E_TEST_IREE_ARTIFACTS_CMAKE_FILENAME).write_text(
-      "\n".join(iree_cmake_rules))
+    (output_dir / GENERATED_E2E_TEST_IREE_ARTIFACTS_CMAKE_FILENAME).write_text(
+        "\n".join(iree_cmake_rules)
+    )
 
 
 if __name__ == "__main__":
-  main(parse_arguments())
+    main(parse_arguments())
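Note the path-building pattern in this script: artifact paths are rooted at a CMake variable placeholder, so the emitted rules reference paths that CMake resolves at configure time. A small sketch (the file name is hypothetical; POSIX-style output shown):

```python
import pathlib

root_path = pathlib.PurePath("${ROOT_ARTIFACTS_DIR}")
model_path = root_path / "models" / "some_model.tflite"  # hypothetical name
print(model_path)  # ${ROOT_ARTIFACTS_DIR}/models/some_model.tflite (on POSIX)
```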