Initiate the benchmark script for Linux (#8649)

* Add utils to collect Linux device information.
* Move out the common arguments.
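
For context, a minimal usage sketch (not part of this change) of how the
pieces introduced here are expected to compose in a Linux benchmark driver,
mirroring run_benchmarks_on_linux.py below; the parse_args() input is
illustrative only:

    from common.common_arguments import build_common_argument_parser
    from common.linux_device_utils import get_linux_device_info

    parser = build_common_argument_parser()
    # --device_model is added by run_benchmarks_on_linux.py, not by the common parser.
    parser.add_argument("--device_model", default="Unknown", help="Device model")
    # <build-dir> must be an existing directory; "." here is purely illustrative.
    args = parser.parse_args(["--verbose", "."])

    # Collects CPU architecture/model/features via lscpu and wraps them in a
    # DeviceInfo tagged with PlatformType.LINUX.
    device_info = get_linux_device_info(args.device_model, args.verbose)
    print(device_info)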

Co-authored-by: Geoffrey Martin-Noble <gcmn@google.com>
diff --git a/CMakeLists.txt b/CMakeLists.txt
index a362156..6ed11ac 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -651,3 +651,9 @@
     )
   endif()
 endif()
+
+#-------------------------------------------------------------------------------
+# IREE benchmark tools
+#-------------------------------------------------------------------------------
+
+add_subdirectory(build_tools/benchmarks)
diff --git a/build_tools/benchmarks/CMakeLists.txt b/build_tools/benchmarks/CMakeLists.txt
new file mode 100644
index 0000000..a64ba0e
--- /dev/null
+++ b/build_tools/benchmarks/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2022 The IREE Authors
+#
+# Licensed under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+set(BENCHMARKS_TOOL_PYTHON_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
+
+add_subdirectory(common)
diff --git a/build_tools/benchmarks/common/CMakeLists.txt b/build_tools/benchmarks/common/CMakeLists.txt
new file mode 100644
index 0000000..4395cf8
--- /dev/null
+++ b/build_tools/benchmarks/common/CMakeLists.txt
@@ -0,0 +1,31 @@
+# Copyright 2022 The IREE Authors
+#
+# Licensed under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+################################################################################
+# Tests
+################################################################################
+
+iree_py_test(
+  NAME
+    linux_device_utils_test
+  SRCS
+    "linux_device_utils_test.py"
+)
+
+iree_py_test(
+  NAME
+    common_arguments_test
+  SRCS
+    "common_arguments_test.py"
+)
+
+if(IREE_BUILD_TESTS)
+  # TODO(#8708): Temporary solution to fix python path for tests.
+  set_property(TEST "build_tools/benchmarks/common/linux_device_utils_test"
+    APPEND PROPERTY ENVIRONMENT "PYTHONPATH=${BENCHMARKS_TOOL_PYTHON_DIR}:$ENV{PYTHONPATH}")
+  set_property(TEST "build_tools/benchmarks/common/common_arguments_test"
+    APPEND PROPERTY ENVIRONMENT "PYTHONPATH=${BENCHMARKS_TOOL_PYTHON_DIR}:$ENV{PYTHONPATH}")
+endif()
diff --git a/build_tools/benchmarks/common/benchmark_definition.py b/build_tools/benchmarks/common/benchmark_definition.py
index 7bb85a3..85b2e8b 100644
--- a/build_tools/benchmarks/common/benchmark_definition.py
+++ b/build_tools/benchmarks/common/benchmark_definition.py
@@ -86,6 +86,7 @@
 
 class PlatformType(Enum):
   ANDROID = "Android"
+  LINUX = "Linux"
 
 
 @dataclass
diff --git a/build_tools/benchmarks/common/common_arguments.py b/build_tools/benchmarks/common/common_arguments.py
new file mode 100644
index 0000000..84028e2
--- /dev/null
+++ b/build_tools/benchmarks/common/common_arguments.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python3
+# Copyright 2022 The IREE Authors
+#
+# Licensed under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+import os
+import argparse
+
+
+def build_common_argument_parser():
+  """Returns an argument parser with common options."""
+
+  def check_dir_path(path):
+    if os.path.isdir(path):
+      return path
+    else:
+      raise argparse.ArgumentTypeError(f"'{path}' is not a directory")
+
+  def check_exe_path(path):
+    if os.access(path, os.X_OK):
+      return path
+    else:
+      raise argparse.ArgumentTypeError(f"'{path}' is not an executable")
+
+  parser = argparse.ArgumentParser()
+  parser.add_argument(
+      "build_dir",
+      metavar="<build-dir>",
+      type=check_dir_path,
+      help="Path to the build directory containing benchmark suites")
+  parser.add_argument(
+      "--normal_benchmark_tool_dir",
+      "--normal-benchmark-tool-dir",
+      type=check_exe_path,
+      default=None,
+      help="Path to the normal (non-tracing) iree tool directory")
+  parser.add_argument("--traced_benchmark_tool_dir",
+                      "--traced-benchmark-tool-dir",
+                      type=check_exe_path,
+                      default=None,
+                      help="Path to the tracing-enabled iree tool directory")
+  parser.add_argument("--trace_capture_tool",
+                      "--trace-capture-tool",
+                      type=check_exe_path,
+                      default=None,
+                      help="Path to the tool for collecting captured traces")
+  parser.add_argument(
+      "--driver-filter-regex",
+      "--driver_filter_regex",
+      type=str,
+      default=None,
+      help="Only run benchmarks matching the given driver regex")
+  parser.add_argument(
+      "--model-name-regex",
+      "--model_name_regex",
+      type=str,
+      default=None,
+      help="Only run benchmarks matching the given model name regex")
+  parser.add_argument(
+      "--mode-regex",
+      "--mode_regex",
+      type=str,
+      default=None,
+      help="Only run benchmarks matching the given benchmarking mode regex")
+  parser.add_argument("--output",
+                      "-o",
+                      default=None,
+                      help="Path to the output file")
+  parser.add_argument("--capture_tarball",
+                      "--capture-tarball",
+                      default=None,
+                      help="Path to the tarball for captures")
+  parser.add_argument("--no-clean",
+                      action="store_true",
+                      help="Do not clean up the temporary directory used for "
+                      "benchmarking on the Android device")
+  parser.add_argument("--verbose",
+                      action="store_true",
+                      help="Print internal information during execution")
+  parser.add_argument(
+      "--pin-cpu-freq",
+      "--pin_cpu_freq",
+      action="store_true",
+      help="Pin CPU frequency for all cores to the maximum. Requires root")
+  parser.add_argument("--pin-gpu-freq",
+                      "--pin_gpu_freq",
+                      action="store_true",
+                      help="Pin GPU frequency to the maximum. Requires root")
+  parser.add_argument(
+      "--keep_going",
+      "--keep-going",
+      action="store_true",
+      help="Continue running after a failed benchmark. The overall exit status"
+      " will still indicate failure and all errors will be reported at the end."
+  )
+  parser.add_argument(
+      "--tmp_dir",
+      "--tmp-dir",
+      "--tmpdir",
+      default="/tmp/iree-benchmarks",
+      help="Base directory in which to store temporary files. A subdirectory"
+      " with a name matching the git commit hash will be created.")
+  parser.add_argument(
+      "--continue_from_directory",
+      "--continue-from-directory",
+      default=None,
+      help="Path to directory with previous benchmark temporary files. This"
+      " should be for the specific commit (not the general tmp-dir). Previous"
+      " benchmark and capture results from here will not be rerun and will be"
+      " combined with the new runs.")
+  parser.add_argument(
+      "--benchmark_min_time",
+      "--benchmark-min-time",
+      default=0,
+      type=float,
+      help="If specified, this will be passed as --benchmark_min_time to the"
+      "iree-benchmark-module (minimum number of seconds to repeat running "
+      "for). In that case, no --benchmark_repetitions flag will be passed."
+      " If not specified, a --benchmark_repetitions will be passed "
+      "instead.")
+
+  return parser
diff --git a/build_tools/benchmarks/common/common_arguments_test.py b/build_tools/benchmarks/common/common_arguments_test.py
new file mode 100644
index 0000000..8660c84
--- /dev/null
+++ b/build_tools/benchmarks/common/common_arguments_test.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python3
+# Copyright 2022 The IREE Authors
+#
+# Licensed under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+import unittest
+
+from common.common_arguments import build_common_argument_parser
+
+
+class CommonArgumentsTest(unittest.TestCase):
+
+  def test_build_common_argument_parser(self):
+    arg_parser = build_common_argument_parser()
+    arg_parser.parse_args([
+        "--normal_benchmark_tool_dir=/tmp", "--traced_benchmark_tool_dir=/tmp",
+        "--trace_capture_tool=/bin/ls", "."
+    ])
+
+  def test_build_common_argument_parser_check_build_dir(self):
+    arg_parser = build_common_argument_parser()
+    with self.assertRaises(SystemExit):
+      arg_parser.parse_args(["nonexistent"])
+
+  def test_build_common_argument_parser_check_normal_benchmark_tool(self):
+    arg_parser = build_common_argument_parser()
+    with self.assertRaises(SystemExit):
+      arg_parser.parse_args(["--normal_benchmark_tool_dir=nonexistent", "."])
+
+  def test_build_common_argument_parser_check_traced_benchmark_tool(self):
+    arg_parser = build_common_argument_parser()
+    with self.assertRaises(SystemExit):
+      arg_parser.parse_args(["--traced_benchmark_tool_dir=nonexistent", "."])
+
+  def test_build_common_argument_parser_check_trace_capture_tool(self):
+    arg_parser = build_common_argument_parser()
+    with self.assertRaises(SystemExit):
+      arg_parser.parse_args(["--trace_capture_tool=nonexistent", "."])
+
+
+if __name__ == "__main__":
+  unittest.main()
diff --git a/build_tools/benchmarks/common/linux_device_utils.py b/build_tools/benchmarks/common/linux_device_utils.py
new file mode 100644
index 0000000..d1f469f
--- /dev/null
+++ b/build_tools/benchmarks/common/linux_device_utils.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python3
+# Copyright 2022 The IREE Authors
+#
+# Licensed under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+"""Utils for accessing Linux device information."""
+
+import re
+from typing import Sequence
+
+from .benchmark_definition import (execute_cmd_and_get_output, DeviceInfo,
+                                   PlatformType)
+
+
+def _get_lscpu_field(field_name: str, verbose: bool = False) -> str:
+  output = execute_cmd_and_get_output(["lscpu"], verbose)
+  (value,) = re.findall(rf"^{field_name}:\s*(.+)", output, re.MULTILINE)
+  return value
+
+
+def get_linux_cpu_arch(verbose: bool = False) -> str:
+  """Returns CPU Architecture, e.g., 'x86_64'."""
+  return _get_lscpu_field("Architecture", verbose)
+
+
+def get_linux_cpu_features(verbose: bool = False) -> Sequence[str]:
+  """Returns CPU feature lists, e.g., ['mmx', 'fxsr', 'sse', 'sse2']."""
+  return _get_lscpu_field("Flags", verbose).split(" ")
+
+
+def get_linux_cpu_model(verbose: bool = False) -> str:
+  """Returns CPU model, e.g., 'AMD EPYC 7B12'."""
+  return _get_lscpu_field("Model name", verbose)
+
+
+def get_linux_device_info(device_model: str = "Unknown",
+                          verbose: bool = False) -> DeviceInfo:
+  """Returns device info for the Linux device.
+
+  Args:
+    device_model: the device model name, e.g., 'ThinkStation P520'.
+  """
+  return DeviceInfo(
+      PlatformType.LINUX,
+      # Include the CPU model, as it is the key factor in device performance.
+      model=f"{device_model}({get_linux_cpu_model(verbose)})",
+      # Currently we only have x86, so CPU ABI = CPU arch.
+      cpu_abi=get_linux_cpu_arch(verbose),
+      cpu_features=get_linux_cpu_features(verbose),
+      # We don't yet support GPU benchmarks on Linux devices.
+      gpu_name="Unknown")
diff --git a/build_tools/benchmarks/common/linux_device_utils_test.py b/build_tools/benchmarks/common/linux_device_utils_test.py
new file mode 100644
index 0000000..2552496
--- /dev/null
+++ b/build_tools/benchmarks/common/linux_device_utils_test.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python3
+# Copyright 2022 The IREE Authors
+#
+# Licensed under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+import unittest
+
+from unittest import mock
+
+from common.benchmark_definition import DeviceInfo, PlatformType
+from common.linux_device_utils import get_linux_cpu_arch, get_linux_cpu_features, get_linux_cpu_model, get_linux_device_info
+
+
+class LinuxDeviceUtilsTest(unittest.TestCase):
+
+  def setUp(self):
+    self.execute_cmd_patch = mock.patch(
+        "common.linux_device_utils.execute_cmd_and_get_output")
+    self.execute_cmd_mock = self.execute_cmd_patch.start()
+    self.execute_cmd_mock.return_value = (
+        "Architecture:                    x86_64\n"
+        "Vendor ID:                       AuthenticAMD\n"
+        "Model name:                      AMD EPYC 7B12\n"
+        "Flags:                           fpu vme de pse tsc\n")
+
+  def tearDown(self):
+    self.execute_cmd_patch.stop()
+
+  def test_get_linux_cpu_arch(self):
+    self.assertEqual(get_linux_cpu_arch(), "x86_64")
+
+  def test_get_linux_cpu_features(self):
+    self.assertEqual(get_linux_cpu_features(),
+                     ["fpu", "vme", "de", "pse", "tsc"])
+
+  def test_get_linux_cpu_model(self):
+    self.assertEqual(get_linux_cpu_model(), "AMD EPYC 7B12")
+
+  def test_get_linux_device_info(self):
+    self.assertEqual(
+        get_linux_device_info("Dummy"),
+        DeviceInfo(platform_type=PlatformType.LINUX,
+                   model="Dummy(AMD EPYC 7B12)",
+                   cpu_abi="x86_64",
+                   cpu_features=["fpu", "vme", "de", "pse", "tsc"],
+                   gpu_name="Unknown"))
+
+
+if __name__ == "__main__":
+  unittest.main()
diff --git a/build_tools/benchmarks/run_benchmarks_on_android.py b/build_tools/benchmarks/run_benchmarks_on_android.py
index 632e140..affb7b9 100755
--- a/build_tools/benchmarks/run_benchmarks_on_android.py
+++ b/build_tools/benchmarks/run_benchmarks_on_android.py
@@ -29,7 +29,6 @@
     /path/to/host/build/dir
 """
 
-import argparse
 import atexit
 import json
 import os
@@ -52,6 +51,7 @@
 from common.android_device_utils import (get_android_device_model,
                                          get_android_device_info,
                                          get_android_gpu_name)
+from common.common_arguments import build_common_argument_parser
 
 # The flagfile/toolfile's filename for compiled benchmark artifacts.
 MODEL_FLAGFILE_NAME = "flagfile"
@@ -577,129 +577,12 @@
   adb_execute_as_root([android_path, policy])
 
 
-def parse_arguments():
-  """Parses command-line options."""
-
-  def check_dir_path(path):
-    if os.path.isdir(path):
-      return path
-    else:
-      raise argparse.ArgumentTypeError(path)
-
-  def check_exe_path(path):
-    if os.access(path, os.X_OK):
-      return path
-    else:
-      raise argparse.ArgumentTypeError(f"'{path}' is not an executable")
-
-  parser = argparse.ArgumentParser()
-  parser.add_argument(
-      "build_dir",
-      metavar="<build-dir>",
-      type=check_dir_path,
-      help="Path to the build directory containing benchmark suites")
-  parser.add_argument(
-      "--normal_benchmark_tool_dir",
-      "--normal-benchmark-tool-dir",
-      type=check_exe_path,
-      default=None,
-      help="Path to the normal (non-tracing) iree tool directory")
-  parser.add_argument("--traced_benchmark_tool_dir",
-                      "--traced-benchmark-tool-dir",
-                      type=check_exe_path,
-                      default=None,
-                      help="Path to the tracing-enabled iree tool directory")
-  parser.add_argument("--trace_capture_tool",
-                      "--trace-capture-tool",
-                      type=check_exe_path,
-                      default=None,
-                      help="Path to the tool for collecting captured traces")
-  parser.add_argument(
-      "--driver-filter-regex",
-      "--driver_filter_regex",
-      type=str,
-      default=None,
-      help="Only run benchmarks matching the given driver regex")
-  parser.add_argument(
-      "--model-name-regex",
-      "--model_name_regex",
-      type=str,
-      default=None,
-      help="Only run benchmarks matching the given model name regex")
-  parser.add_argument(
-      "--mode-regex",
-      "--mode_regex",
-      type=str,
-      default=None,
-      help="Only run benchmarks matching the given benchmarking mode regex")
-  parser.add_argument("--output",
-                      "-o",
-                      default=None,
-                      help="Path to the output file")
-  parser.add_argument("--capture_tarball",
-                      "--capture-tarball",
-                      default=None,
-                      help="Path to the tarball for captures")
-  parser.add_argument("--no-clean",
-                      action="store_true",
-                      help="Do not clean up the temporary directory used for "
-                      "benchmarking on the Android device")
-  parser.add_argument("--verbose",
-                      action="store_true",
-                      help="Print internal information during execution")
-  parser.add_argument(
-      "--pin-cpu-freq",
-      "--pin_cpu_freq",
-      action="store_true",
-      help="Pin CPU frequency for all cores to the maximum. Requires root")
-  parser.add_argument("--pin-gpu-freq",
-                      "--pin_gpu_freq",
-                      action="store_true",
-                      help="Pin GPU frequency to the maximum. Requires root")
-  parser.add_argument(
-      "--keep_going",
-      "--keep-going",
-      action="store_true",
-      help="Continue running after a failed benchmark. The overall exit status"
-      " will still indicate failure and all errors will be reported at the end."
-  )
-  parser.add_argument(
-      "--tmp_dir",
-      "--tmp-dir",
-      "--tmpdir",
-      default="/tmp/iree-benchmarks",
-      help="Base directory in which to store temporary files. A subdirectory"
-      " with a name matching the git commit hash will be created.")
-  parser.add_argument(
-      "--continue_from_directory",
-      "--continue-from-directory",
-      default=None,
-      help="Path to directory with previous benchmark temporary files. This"
-      " should be for the specific commit (not the general tmp-dir). Previous"
-      " benchmark and capture results from here will not be rerun and will be"
-      " combined with the new runs.")
-  parser.add_argument(
-      "--benchmark_min_time",
-      "--benchmark-min-time",
-      default=0,
-      type=float,
-      help="If specified, this will be passed as --benchmark_min_time to the"
-      "iree-benchmark-module (minimum number of seconds to repeat running "
-      "for). In that case, no --benchmark_repetitions flag will be passed."
-      " If not specified, a --benchmark_repetitions will be passed "
-      "instead.")
-
-  args = parser.parse_args()
-
-  return args
-
-
 def real_path_or_none(path: str) -> Optional[str]:
   return os.path.realpath(path) if path else None
 
 
 def main(args):
-  device_info = get_android_device_info()
+  device_info = get_android_device_info(args.verbose)
   if args.verbose:
     print(device_info)
 
@@ -836,4 +719,5 @@
 
 
 if __name__ == "__main__":
-  main(parse_arguments())
+  args = build_common_argument_parser().parse_args()
+  main(args)
diff --git a/build_tools/benchmarks/run_benchmarks_on_linux.py b/build_tools/benchmarks/run_benchmarks_on_linux.py
new file mode 100755
index 0000000..291a803
--- /dev/null
+++ b/build_tools/benchmarks/run_benchmarks_on_linux.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python3
+# Copyright 2022 The IREE Authors
+#
+# Licensed under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+"""Runs all matched benchmark suites on a Linux device."""
+
+from common.common_arguments import build_common_argument_parser
+from common.linux_device_utils import get_linux_device_info
+
+
+def main(args):
+  device_info = get_linux_device_info(args.device_model, args.verbose)
+  if args.verbose:
+    print(device_info)
+
+  raise NotImplementedError()
+
+
+def parse_arguments():
+  arg_parser = build_common_argument_parser()
+  arg_parser.add_argument("--device_model",
+                          default="Unknown",
+                          help="Device model")
+
+  return arg_parser.parse_args()
+
+
+if __name__ == "__main__":
+  main(parse_arguments())
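
Note on the lscpu parsing in linux_device_utils.py above: _get_lscpu_field()
pulls a single field out of `lscpu` output with a multi-line regex. A
self-contained sketch of that extraction, using the sample output from
linux_device_utils_test.py:

    import re

    lscpu_output = ("Architecture:                    x86_64\n"
                    "Vendor ID:                       AuthenticAMD\n"
                    "Model name:                      AMD EPYC 7B12\n"
                    "Flags:                           fpu vme de pse tsc\n")

    def get_field(field_name: str) -> str:
      # Exactly one match is expected; the tuple unpacking raises if the field
      # is missing or appears more than once.
      (value,) = re.findall(rf"^{field_name}:\s*(.+)", lscpu_output,
                            re.MULTILINE)
      return value

    assert get_field("Model name") == "AMD EPYC 7B12"
    assert get_field("Flags").split(" ") == ["fpu", "vme", "de", "pse", "tsc"]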