[dv] Tool for running regressions

The current Makefile-based regression tool offers very limited
capabilities for DV use cases. This Python-based tool is an attempt to
address that. The eventual goal is to build up the tool not only for DV
workflows, but also for elab, formal, lint and synthesis.

Features:
- HJson-based build / run infrastructure specification
- Run multiple tests and / or regressions
- Deploy builds and runs in parallel (currently only on the local machine)
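
For example, a hypothetical invocation to run the "sanity" regression
for the UART IP with VCS, using the sim cfg and flags added in this
change:

  $ util/dvsim.py --cfg hw/ip/uart/dv/sim_cfg.hjson -s vcs sanity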

Work in progress:
- Display testplan-annotated regression results
- Upload results to the OpenTitan web server
- Ctrl-C support
- Parallelize with LSF / bsub
- Parallelize with GCP
- and many more...

Signed-off-by: Srikrishna Iyer <sriyer@google.com>
diff --git a/hw/dv/data/common_modes.hjson b/hw/dv/data/common_modes.hjson
new file mode 100644
index 0000000..db8d4d0
--- /dev/null
+++ b/hw/dv/data/common_modes.hjson
@@ -0,0 +1,33 @@
+// Copyright lowRISC contributors.
+// Licensed under the Apache License, Version 2.0, see LICENSE for details.
+// SPDX-License-Identifier: Apache-2.0
+{
+  // Sim modes are collections of build_opts and run_opts.
+  // These are only enabled on the command line.
+  // They differ from regular build modes in that these collections of
+  // options are appended to the actual build_modes.
+  build_modes: [
+    {
+      name: waves
+      is_sim_mode: 1
+      en_build_modes: ["{simulator}_waves"]
+    }
+    {
+      name: cov
+      is_sim_mode: 1
+      en_build_modes: ["{simulator}_cov"]
+    }
+    {
+      name: profile
+      is_sim_mode: 1
+      en_build_modes: ["{simulator}_profile"]
+    }
+  ]
+
+  run_modes: [
+    {
+      name: uvm_trace
+      run_opts: ["+UVM_PHASE_TRACE", "+UVM_CONFIG_DB_TRACE", "+UVM_OBJECTION_TRACE"]
+    }
+  ]
+}
diff --git a/hw/dv/data/common_sim_cfg.hjson b/hw/dv/data/common_sim_cfg.hjson
new file mode 100644
index 0000000..a7f9939
--- /dev/null
+++ b/hw/dv/data/common_sim_cfg.hjson
@@ -0,0 +1,114 @@
+// Copyright lowRISC contributors.
+// Licensed under the Apache License, Version 2.0, see LICENSE for details.
+// SPDX-License-Identifier: Apache-2.0
+{
+  project:          opentitan
+  flow_makefile:    "{proj_root}/hw/dv/data/sim.mk"
+
+  import_cfgs:      ["{proj_root}/hw/dv/data/common_modes.hjson",
+                     "{proj_root}/hw/dv/data/fusesoc.hjson",
+                     "{proj_root}/hw/dv/data/gen_ral_pkg.hjson",
+                     "{proj_root}/hw/dv/data/{simulator}/{simulator}.hjson"]
+
+  // Default directory structure for the output
+  scratch_path:     "{scratch_root}/{branch}.{dut}.{flow}.{simulator}"
+  build_dir:        "{scratch_path}/{build_mode}"
+  run_dir_name:     "{index}.{test}"
+  run_dir:          "{scratch_path}/{run_dir_name}/out"
+  sw_build_dir:     "{run_dir}/sw_build"
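+  // For illustration, with these defaults a VCS build of the uart DUT on the
+  // master branch would land at (hypothetical expansion):
+  //   {scratch_root}/master.uart.sim.vcs/default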
+
+  // Pass and fail patterns
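+  // e.g. a log line "TEST PASSED UVM_CHECKS" matches the pass pattern below.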
+  pass_patterns:    ["^TEST PASSED (UVM_)?CHECKS$"]
+  fail_patterns:    ["^TEST FAILED (UVM_)?CHECKS$",
+                     "^UVM_ERROR\\s[^:].*$",
+                     "^Assert failed: ",
+                     "^\\s*Offending '.*'"]
+
+  // Default TileLink widths
+  tl_aw: 32
+  tl_dw: 32
+  tl_dbw: 4
+
+  // Default UVM verbosity settings
+  n: UVM_NONE
+  l: UVM_LOW
+  m: UVM_MEDIUM
+  h: UVM_HIGH
+  d: UVM_DEBUG
+
+  // Default waves dump settings
+  dump_file:  waves.{dump}
+
+  // Default build and run opts
+  build_opts: [// List multiple tops for the simulation
+               "-top {tb}",
+               "-top {dut}_bind",
+               // Standard UVM defines
+               "+define+UVM_NO_DEPRECATED",
+               "+define+UVM_REGEX_NO_DPI",
+               "+define+UVM_REG_ADDR_WIDTH={tl_aw}",
+               "+define+UVM_REG_DATA_WIDTH={tl_dw}",
+               "+define+UVM_REG_BYTENABLE_WIDTH={tl_dbw}"]
+
+  run_opts: ["+UVM_NO_RELNOTES",
+             "+UVM_VERBOSITY={verbosity}"]
+
+  // Default list of things to export to shell
+  exports: [
+    DUMP_FILE: {dump_file}
+    WAVES: {waves}
+    DUT_TOP: {dut}
+    TB_TOP: {tb}
+  ]
+
+  // Build modes are collections of build_opts and run_opts.
+  // A test can enable a specific build mode by setting the 'use_build_mode' key.
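+  // For illustration (hypothetical test entry), a test would select the 'foo'
+  // mode below with: { name: foo_test, use_build_mode: foo }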
+  build_modes: [
+    {
+      name: foo
+      build_opts: ["+define+bx",
+                   "+define+by",
+                   "+define+bz"]
+      run_opts: ["+rx=1",
+                 "+ry=2",
+                 "+rz=3"]
+    }
+    {
+      name: bar
+      build_opts: ["+define+bbaru1",
+                   "+define+bbaru2",
+                   "+define+bbaru3"]
+      run_opts: ["+rbar1u=1",
+                 "+rbar2u=2",
+                 "+rbar3u=3"]
+    }
+  ]
+
+  // Regressions are groups of tests that can be run in one shot.
+  // By default, two regressions are made available - "all" and "nightly". Both
+  // run all available tests for the DUT; "nightly" enables coverage as well.
+  // The 'tests' key is set to an empty list, which indicates "run everything".
+  // Regressions can enable sim modes, which are collections of build_opts and
+  // run_opts grouped together. These are appended to the build modes used by
+  // the tests.
+  regressions: [
+    {
+      name: all
+      tests: []
+    }
+    {
+      name: all_once
+      reseed: 1
+      tests: []
+    }
+    {
+      name: nightly
+      tests: []
+      // excl_tests: []
+      en_sim_modes: ["cov"]
+    }
+  ]
+
+  // Project defaults for VCS
+  vcs_cov_hier: "-cm_hier {proj_root}/hw/dv/tools/vcs/cover.cfg"
+}
diff --git a/hw/dv/data/fusesoc.hjson b/hw/dv/data/fusesoc.hjson
new file mode 100644
index 0000000..6af8fb6
--- /dev/null
+++ b/hw/dv/data/fusesoc.hjson
@@ -0,0 +1,12 @@
+// Copyright lowRISC contributors.
+// Licensed under the Apache License, Version 2.0, see LICENSE for details.
+// SPDX-License-Identifier: Apache-2.0
+{
+  sv_flist_gen_cmd:   fusesoc
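+  // {eval_cmd} presumably evaluates the rest of the string in a shell; here it
+  // converts the ':' characters in the core name to '_' so the result can be
+  // used in the generated filelist name below.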
+  fusesoc_core_:      "{eval_cmd} echo \"{fusesoc_core}\" | tr ':' '_'"
+  sv_flist_gen_opts:  ["--cores-root {proj_root} --cores-root {gen_ral_pkg_dir}",
+                       "run --target=sim --build-root={build_dir}",
+                       "--setup --no-export {fusesoc_core}"]
+  sv_flist_gen_dir:   "{build_dir}/sim-vcs"
+  sv_flist:           "{sv_flist_gen_dir}/{fusesoc_core_}.scr"
+}
diff --git a/hw/dv/data/gen_ral_pkg.hjson b/hw/dv/data/gen_ral_pkg.hjson
new file mode 100644
index 0000000..0779c5c
--- /dev/null
+++ b/hw/dv/data/gen_ral_pkg.hjson
@@ -0,0 +1,9 @@
+// Copyright lowRISC contributors.
+// Licensed under the Apache License, Version 2.0, see LICENSE for details.
+// SPDX-License-Identifier: Apache-2.0
+{
+  gen_ral_pkg:      "{name}"
+  gen_ral_pkg_dir:  "{build_dir}/gen_ral_pkg"
+  gen_ral_pkg_cmd:  "{proj_root}/hw/dv/tools/gen_ral_pkg.py"
+  gen_ral_pkg_opts: "{gen_ral_pkg} {ral_spec} -o {gen_ral_pkg_dir}"
+}
diff --git a/hw/dv/data/sim.mk b/hw/dv/data/sim.mk
new file mode 100644
index 0000000..5c6e513
--- /dev/null
+++ b/hw/dv/data/sim.mk
@@ -0,0 +1,99 @@
+####################################################################################################
+## Copyright lowRISC contributors.                                                                ##
+## Licensed under the Apache License, Version 2.0, see LICENSE for details.                       ##
+## SPDX-License-Identifier: Apache-2.0                                                            ##
+####################################################################################################
+.DEFAULT_GOAL := all
+
+all: build run
+
+########################
+## RAL target         ##
+########################
+ral:
+ifneq (${skip_gen_ral_pkg},1)
+	mkdir -p ${gen_ral_pkg_dir} && \
+	${gen_ral_pkg_cmd} ${gen_ral_pkg_opts}
+endif
+
+
+###############################
+## sim build and run targets ##
+###############################
+build: compile_result
+
+pre_compile:
+	@echo "[make]: pre_compile"
+	mkdir -p ${build_dir} && env > ${build_dir}/env_vars
+
+gen_sv_flist: pre_compile ral
+	@echo "[make]: gen_sv_flist"
+	cd ${build_dir} && ${sv_flist_gen_cmd} ${sv_flist_gen_opts}
+
+compile: gen_sv_flist
+	@echo "[make]: compile"
+	cd ${sv_flist_gen_dir} && ${build_cmd} ${build_opts}
+
+post_compile: compile
+	@echo "[make]: post_compile"
+
+compile_result: post_compile
+	@echo "[make]: compile_result"
+
+run: run_result
+
+pre_run:
+	@echo "[make]: pre_run"
+	mkdir -p ${run_dir} && env > ${run_dir}/env_vars
+
+sw_build: pre_run
+	@echo "[make]: sw_build"
+ifneq (${sw_name},)
+	$(error "sw_build target is not supported yet")
+endif
+
+simulate: sw_build
+	@echo "[make]: simulate"
+	cd ${run_dir} && ${run_cmd} ${run_opts}
+
+post_run: simulate
+	@echo "[make]: post_run"
+
+run_result: post_run
+	@echo "[make]: run_result"
+
+#######################
+## Load waves target ##
+#######################
+debug_waves:
+	${debug_waves_cmd} ${debug_waves_opts}
+
+##############################
+## coverage-related targets ##
+##############################
+cov_merge:
+	# TODO: add script to merge coverage in scratch scope
+
+# open coverage tool to review and create report or exclusion file
+cov_analyze:
+	cd ${scratch_path} && ${cov_analyze_cmd} ${cov_analyze_opts}
+
+# generate coverage report directly
+cov_report:
+	cd ${scratch_path} && ${cov_report_cmd} ${cov_report_opts}
+
+clean:
+	@echo "[make]: clean"
+	rm -rf ${scratch_root}/${dut}/*
+
+.PHONY: build \
+	run \
+	ral \
+	pre_compile \
+	compile \
+	post_compile \
+	compile_result \
+	pre_run \
+	simulate \
+	post_run \
+	run_result
diff --git a/hw/dv/data/tests/csr_tests.hjson b/hw/dv/data/tests/csr_tests.hjson
new file mode 100644
index 0000000..07172e1
--- /dev/null
+++ b/hw/dv/data/tests/csr_tests.hjson
@@ -0,0 +1,46 @@
+{
+  run_modes: [
+    {
+      name: csr_tests_mode
+      uvm_test_seq: "{name}_common_vseq"
+      run_opts: ["+en_scb=0"]
+      reseed: 20
+    }
+  ]
+
+  tests: [
+    {
+      name: "{name}_csr_hw_reset"
+      run_opts: ["+csr_hw_reset"]
+      en_run_modes: ["csr_tests_mode"]
+    }
+
+    {
+      name: "{name}_csr_rw"
+      run_opts: ["+csr_rw"]
+      en_run_modes: ["csr_tests_mode"]
+    }
+
+    {
+      name: "{name}_csr_bit_bash"
+      run_opts: ["+csr_bit_bash"]
+      en_run_modes: ["csr_tests_mode"]
+    }
+
+    {
+      name: "{name}_csr_aliasing"
+      run_opts: ["+csr_aliasing"]
+      en_run_modes: ["csr_tests_mode"]
+    }
+  ]
+
+  regressions: [
+    {
+      name: sw_access
+      tests: ["{name}_csr_hw_reset",
+              "{name}_csr_rw",
+              "{name}_csr_bit_bash",
+              "{name}_csr_aliasing"]
+    }
+  ]
+}
diff --git a/hw/dv/data/tests/intr_test.hjson b/hw/dv/data/tests/intr_test.hjson
new file mode 100644
index 0000000..4d0b217
--- /dev/null
+++ b/hw/dv/data/tests/intr_test.hjson
@@ -0,0 +1,10 @@
+{
+  tests: [
+    {
+      name: "{name}_intr_test"
+      uvm_test_seq: "{name}_common_vseq"
+      run_opts: ["+run_intr_test"]
+      reseed: 20
+    }
+  ]
+}
diff --git a/hw/dv/data/tests/mem_tests.hjson b/hw/dv/data/tests/mem_tests.hjson
new file mode 100644
index 0000000..990dd81
--- /dev/null
+++ b/hw/dv/data/tests/mem_tests.hjson
@@ -0,0 +1,17 @@
+{
+  tests: [
+    {
+      name: "{name}_mem_walk"
+      uvm_test_seq: "{name}_common_vseq"
+      run_opts: ["+csr_mem_walk", "+en_scb=0"]
+      reseed: 20
+    }
+  ]
+
+  regressions: [
+    {
+      name: sw_access
+      tests: ["{name}_mem_walk"]
+    }
+  ]
+}
diff --git a/hw/dv/data/tests/stress_tests.hjson b/hw/dv/data/tests/stress_tests.hjson
new file mode 100644
index 0000000..c412436
--- /dev/null
+++ b/hw/dv/data/tests/stress_tests.hjson
@@ -0,0 +1,17 @@
+{
+  tests: [
+    {
+      name: "{name}_stress_all"
+      uvm_test_seq: "{name}_stress_all_vseq"
+      run_opts: ["+test_timeout_ns=10_000_000_000"]
+    }
+
+    {
+      name: "{name}_stress_all_with_rand_reset"
+      uvm_test_seq: "{name}_common_vseq"
+      run_opts: ["+run_stress_all_with_rand_reset",
+                 "+test_timeout_ns=10_000_000_000",
+                 "+stress_seq={name}_stress_all_vseq"]
+    }
+  ]
+}
diff --git a/hw/dv/data/tests/tl_access_tests.hjson b/hw/dv/data/tests/tl_access_tests.hjson
new file mode 100644
index 0000000..d551045
--- /dev/null
+++ b/hw/dv/data/tests/tl_access_tests.hjson
@@ -0,0 +1,10 @@
+{
+  tests: [
+    {
+      name: "{name}_tl_errors"
+      uvm_test_seq: "{name}_common_vseq"
+      run_opts: ["+run_tl_errors"]
+      reseed: 20
+    }
+  ]
+}
diff --git a/hw/dv/data/vcs/vcs.hjson b/hw/dv/data/vcs/vcs.hjson
new file mode 100644
index 0000000..0c8b944
--- /dev/null
+++ b/hw/dv/data/vcs/vcs.hjson
@@ -0,0 +1,86 @@
+// Copyright lowRISC contributors.
+// Licensed under the Apache License, Version 2.0, see LICENSE for details.
+// SPDX-License-Identifier: Apache-2.0
+{
+  build_cmd:  "{job_prefix} vcs"
+  build_ex:   "simv"
+  run_cmd:    "{job_prefix} {build_dir}/{build_ex}"
+
+  tcl:        "{proj_root}/hw/dv/tools/vcs/vcs_fsdb.tcl"
+
+  build_opts: ["-sverilog -full64 -licqueue -kdb -ntb_opts uvm-1.2",
+               "-timescale=1ns/1ps",
+               "-Mdir={build_dir}/simv.csrc",
+               "-o {run_cmd}",
+               "-f {sv_flist}",
+               "+incdir+{build_dir}",
+               "-debug_access+pp",
+               // Turn on warnings for non-void functions called with return values ignored
+               "+warn=SV-NFIVC",
+               "+warn=noUII-L",
+               // Below option required for $error/$fatal system calls
+               "-assert svaext"]
+
+  run_opts:   ["-licqueue",
+               "-ucli -do {tcl}",
+               "+ntb_random_seed={seed}",
+               "+UVM_TESTNAME={uvm_test}",
+               "+UVM_TEST_SEQ={uvm_test_seq}"]
+
+  exports: [
+    VCS_ARCH_OVERRIDE: linux
+    VCS_LIC_EXPIRE_WARNING: 1
+  ]
+
+  // Defaults for VCS
+  cov_metrics:          "line+cond+fsm+tgl+branch+assert"
+  vcs_cov_hier:         ""
+  vcs_cov_assert_hier:  ""
+
+  build_modes: [
+    {
+      name: vcs_waves
+      is_sim_mode: 1
+      build_opts: ["-debug_access+all"]
+    }
+    {
+      name: vcs_cov
+      is_sim_mode: 1
+      build_opts: [// Enable the required cov metrics
+                   "-cm {cov_metrics}",
+                   // Set the coverage hierarchy
+                   "{vcs_cov_hier}",
+                   // Cover all continuous assignments
+                   "-cm_line contassign",
+                   // Dump toggle coverage on MDAs, arrays of structs and ports only
+                   "-cm_tgl mda+structarr+portsonly",
+                   // Ignore initial blocks for coverage
+                   "-cm_report noinitial",
+                   // Filter unreachable/statically constant blocks
+                   "-cm_noconst",
+                   // Don't count coverage that's coming from zero-time glitches
+                   "-cm_glitch 0",
+                   // Ignore warnings about not applying cm_glitch to path and FSM
+                   "+warn=noVCM-OPTIGN",
+                   // Coverage database output location
+                   "-cm_dir {build_dir}/cov.vdb"]
+
+      run_opts:   [// Enable the required cov metrics
+                   "-cm {cov_metrics}",
+                   // Same directory as build
+                   "-cm_dir {build_dir}/cov.vdb",
+                   // Don't output cm.log which can be quite large
+                   "-cm_log /dev/null",
+                   // Provide a name to the coverage collected for this test
+                   "-cm_name {index}.{test}",
+                   // Don't dump all the coverage assertion attempts at the end of simulation
+                   "-assert nopostproc"]
+    }
+    {
+      name: vcs_profile
+      is_sim_mode: 1
+      build_opts: ["--simprofile"]
+      run_opts:   ["--simprofile {profile}"]
+    }
+  ]
+}
diff --git a/hw/ip/uart/dv/sim_cfg.hjson b/hw/ip/uart/dv/sim_cfg.hjson
new file mode 100644
index 0000000..05fbf21
--- /dev/null
+++ b/hw/ip/uart/dv/sim_cfg.hjson
@@ -0,0 +1,117 @@
+// Copyright lowRISC contributors.
+// Licensed under the Apache License, Version 2.0, see LICENSE for details.
+// SPDX-License-Identifier: Apache-2.0
+{
+  // Name of the sim cfg - typically same as the IP under test.
+  name: uart
+
+  // Top level dut name (sv module).
+  dut: uart
+
+  // Top level testbench name (sv module).
+  tb: tb
+
+  // Fusesoc core file used for building the file list.
+  fusesoc_core: lowrisc:dv:uart_sim:0.1
+
+  // Testplan hjson file.
+  testplan: "{proj_root}/hw/ip/uart/data/uart_testplan.hjson"
+
+  // RAL spec - used to generate the RAL model.
+  ral_spec: "{proj_root}/hw/ip/uart/data/uart.hjson"
+
+  // Import additional common sim cfg files.
+  import_cfgs: [// Project wide common sim cfg file
+                "{proj_root}/hw/dv/data/common_sim_cfg.hjson",
+                // List of common CIP test lists that apply to UART
+                "{proj_root}/hw/dv/data/tests/csr_tests.hjson",
+                "{proj_root}/hw/dv/data/tests/intr_test.hjson",
+                "{proj_root}/hw/dv/data/tests/tl_access_tests.hjson",
+                "{proj_root}/hw/dv/data/tests/stress_tests.hjson"]
+
+  // Default iterations for all tests - each test entry can override this.
+  reseed: 50
+
+  // Default UVM test class name.
+  uvm_test: uart_base_test
+
+  // List of test specifications.
+  tests: [
+    {
+      name: uart_sanity
+      uvm_test_seq: uart_sanity_vseq
+    }
+
+    {
+      name: uart_tx_rx
+      uvm_test_seq: uart_tx_rx_vseq
+    }
+
+    {
+      name: uart_fifo_full
+      reseed: 20
+      uvm_test_seq: uart_fifo_full_vseq
+    }
+
+    {
+      name: uart_fifo_overflow
+      reseed: 100
+      uvm_test_seq: uart_fifo_overflow_vseq
+    }
+
+    {
+      name: uart_fifo_reset
+      uvm_test_seq: uart_fifo_reset_vseq
+    }
+
+    {
+      name: uart_rx_oversample
+      uvm_test_seq: uart_rx_oversample_vseq
+    }
+
+    {
+      name: uart_intr
+      uvm_test_seq: uart_intr_vseq
+      run_opts: ["+test_timeout_ns=3_000_000_000"]
+    }
+
+    {
+      name: uart_noise_filter
+      uvm_test_seq: uart_noise_filter_vseq
+    }
+
+    {
+      name: uart_rx_start_bit_filter
+      uvm_test_seq: uart_rx_start_bit_filter_vseq
+    }
+
+    {
+      name: uart_rx_parity_err
+      uvm_test_seq: uart_rx_parity_err_vseq
+    }
+
+    {
+      name: uart_tx_ovrd
+      uvm_test_seq: uart_tx_ovrd_vseq
+    }
+
+    {
+      name: uart_loopback
+      uvm_test_seq: uart_loopback_vseq
+    }
+
+    {
+      name: uart_perf
+      uvm_test_seq: uart_perf_vseq
+      run_opts: ["+zero_delays=1"]
+    }
+  ]
+
+  // List of regression specifications.
+  regressions: [
+    {
+      name: sanity
+      tests: ["uart_sanity", "uart_csr_hw_reset"]
+    }
+  ]
+}
diff --git a/util/dvsim.py b/util/dvsim.py
new file mode 100755
index 0000000..ce93a36
--- /dev/null
+++ b/util/dvsim.py
@@ -0,0 +1,385 @@
+#!/usr/bin/env python3
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+"""
+dvsim is a command line tool to deploy regressions for design verification. It uses hjson as the
+format for specifying what to build and run. It is an end-to-end regression manager that can deploy
+multiple builds (where some tests might need a different set of compile-time options, requiring a
+uniquely built sim executable) in parallel, followed by tests in parallel, using the load balancer
+of your choice. dvsim is built to be tool-agnostic so that you can easily switch between the tools
+at your disposal. dvsim uses fusesoc as the starting step to resolve all inter-package
+dependencies and provide a filelist that will be consumed by the sim tool.
+"""
+
+import argparse
+import glob
+import logging as log
+import os
+import subprocess
+import sys
+from pathlib import Path
+
+import hjson
+
+from dvsim import SimCfg, utils
+
+# TODO: add dvsim_cfg.hjson to retrieve this info
+version = 0.1
+
+
+# Function to resolve the scratch root directory among the available options:
+# If set on the command line, then use that as a preference.
+# Else, check if $SCRATCH_ROOT env variable exists and is a directory.
+# Else use the default (<cwd>/scratch)
+# Try to create the directory if it does not already exist.
+def resolve_scratch_root(arg_scratch_root):
+    scratch_root = os.environ.get('SCRATCH_ROOT')
+    if arg_scratch_root == "scratch":
+        if scratch_root is None:
+            arg_scratch_root = os.getcwd() + '/' + arg_scratch_root
+        else:
+            # Scratch space could be mounted in a filesystem (such as NFS) on a network drive.
+            # If the network is down, it could cause the access check to hang. So run a
+            # simple ls command with a timeout to prevent the hang.
+            (out,
+             status) = utils.run_cmd_with_timeout(cmd="ls -d " + scratch_root,
+                                                  timeout=5,
+                                                  exit_on_failure=0)
+            if status == 0 and out != "":
+                arg_scratch_root = scratch_root
+            else:
+                arg_scratch_root = os.getcwd() + '/' + arg_scratch_root
+                log.warning(
+                    "Env variable $SCRATCH_ROOT=\"%s\" is not accessible.\n" +
+                    "Using \"%s\" instead.", scratch_root, arg_scratch_root)
+    try:
+        os.system("mkdir -p " + arg_scratch_root)
+    except:
+        log.fatal(
+            "Invalid --scratch-root=\"%s\" switch - failed to create directory!",
+            arg_scratch_root)
+        sys.exit(1)
+    return (arg_scratch_root)
+
+
+# Set and return the current git branch name, unless set on the command line.
+# It runs "git branch --show-current". If that fails to yield a name, it logs
+# a warning and sets the branch name to "default".
+def resolve_branch(arg_branch):
+    if arg_branch is None or arg_branch == "":
+        result = subprocess.run(["git", "branch", "--show-current"],
+                                stdout=subprocess.PIPE)
+        arg_branch = result.stdout.decode("utf-8").strip()
+        if arg_branch == "":
+            log.warning(
+                "Failed to find current git branch. Setting it to \"master\"")
+            arg_branch = "default"
+    return (arg_branch)
+
+
+# Get the project root directory path - this is used to construct the full paths
+def get_proj_root():
+    return os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/..")
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description=__doc__,
+        formatter_class=argparse.RawDescriptionHelpFormatter)
+
+    parser.add_argument(
+        "-f",
+        "--flow",
+        default="sim",
+        metavar="lint|elab|synth|formal|sim",
+        help="which simulation flow' currently only dv flow supported")
+
+    parser.add_argument(
+        "--cfg",
+        default="cfg.hjson",
+        metavar="<file>",
+        help="""simulation configuration file; if this is not supplied, it will
+                              attempt to look for \"<flow>_cfg.hjson\" file in PWD"""
+    )
+
+    parser.add_argument(
+        "items",
+        nargs="*",
+        default=["sanity"],
+        metavar="""regr1, regr2, regr3|test1, test2, test3, ...""",
+        help="""Indicate which regression or tests to run""")
+
+    parser.add_argument(
+        "--list",
+        nargs="+",
+        default=[],
+        metavar="build_modes|run_modes|tests|regressions",
+        help=
+        """Provides a list of modes / tests / regressions available for use."""
+    )
+
+    parser.add_argument("-s",
+                        "--simulator",
+                        default="vcs",
+                        metavar="vcs|xcelium",
+                        help="simulator to use; the default is vcs")
+
+    parser.add_argument(
+        "-sr",
+        "--scratch-root",
+        default="scratch",
+        metavar="path",
+        help="""root scratch directory path where all build / run drectories go;
+                      by default, the tool will create the {scratch_path} = {scratch_root}/{dut}
+                      directory if it doesn't already exist; under {scratch_path}, there will be
+                      {compile_set} set of directories where all the build outputs go and
+                      {test_name} set of directories where the test outputs go"""
+    )
+
+    parser.add_argument(
+        "-br",
+        "--branch",
+        metavar="<github-branch>",
+        help="""GitHub branch name. This is used to construct the build and run
+                              directories""")
+
+    parser.add_argument(
+        "-bo",
+        "--build-opts",
+        nargs="+",
+        default=[],
+        metavar="",
+        help="""pass additional build options over the command line;
+                              note that if there are multiple compile sets identified to be built,
+                              these options will be passed on to all of them"""
+    )
+
+    parser.add_argument(
+        "-bm",
+        "--build-modes",
+        nargs="+",
+        default=[],
+        metavar="",
+        help="""Set build modes on the command line for all tests run as a part
+                              of the regression.""")
+
+    parser.add_argument(
+        "-ro",
+        "--run-opts",
+        nargs="+",
+        default=[],
+        metavar="",
+        help="""pass additional run time options over the command line;
+                              these options will be passed on to all tests scheduled to be run"""
+    )
+
+    parser.add_argument(
+        "-rm",
+        "--run-modes",
+        nargs="+",
+        default=[],
+        metavar="",
+        help="""Set run modes on the command line for all tests run as a part
+                              of the regression.""")
+
+    parser.add_argument(
+        "-bu",
+        "--build-unique",
+        default=False,
+        action='store_true',
+        help=
+        """By default, under the {scratch} directory, there is a {compile_set}
+                              directory created where the build output goes; this can be
+                              uniquified by appending the current timestamp. This is suitable
+                              for the case when a test / regression is already running and you want
+                              to run something else from a different terminal without affecting
+                              the previous one""")
+
+    parser.add_argument(
+        "--build-only",
+        default=False,
+        action='store_true',
+        help="Only build the simulation executables for the givem items.")
+
+    parser.add_argument(
+        "--run-only",
+        default=False,
+        action='store_true',
+        help="Assume sim exec is available and proceed to run step")
+
+    parser.add_argument(
+        "--seeds",
+        nargs="+",
+        default=[],
+        metavar="seed0 seed1 ...",
+        help=
+        """Run tests with a specific seeds; if not specified, tool will use a
+                              randomly generated seed""")
+
+    parser.add_argument(
+        "--reseed",
+        type=int,
+        default=-1,
+        metavar="N",
+        help="""repeat tests with N iterations with different seeds""")
+
+    parser.add_argument("-rx",
+                        "--reseed-multiplier",
+                        type=int,
+                        default=1,
+                        metavar="N",
+                        help="""Multiplier for existing reseed values.""")
+
+    parser.add_argument("-w",
+                        "--waves",
+                        default=False,
+                        action='store_true',
+                        help="Enable dumping of waves")
+
+    parser.add_argument("-d",
+                        "--dump",
+                        default="fsdb",
+                        metavar="fsdb|shm",
+                        help="Dump waves in fsdb or shm.")
+
+    parser.add_argument("-mw",
+                        "--max-waves",
+                        type=int,
+                        default=5,
+                        metavar="N",
+                        help="""enable dumpling of waves for at most N tests;
+                              this includes tests scheduled for run AND automatic rerun"""
+                        )
+
+    parser.add_argument("-c",
+                        "--cov",
+                        default=False,
+                        action='store_true',
+                        help="turn on coverage collection")
+
+    parser.add_argument("-p",
+                        "--profile",
+                        default="none",
+                        metavar="time|mem",
+                        help="turn on simulation profiling")
+
+    parser.add_argument(
+        "--job-prefix",
+        default="",
+        metavar="job-prefix",
+        help="Job prefix before deploying the simulator commands.")
+
+    parser.add_argument("--purge",
+                        default=False,
+                        action='store_true',
+                        help="Clean the scratch directory.")
+
+    parser.add_argument(
+        "-mo",
+        "--max-odirs",
+        type=int,
+        default=5,
+        metavar="N",
+        help="""When tests are run, the older runs are backed up. This switch
+                              limits the number of backup directories being maintained."""
+    )
+
+    parser.add_argument(
+        "--no-rerun",
+        default=False,
+        action='store_true',
+        help=
+        """by default, failing tests will be automatically be rerun with waves;
+                              this option will prevent the rerun from being triggered"""
+    )
+
+    parser.add_argument("--skip-ral",
+                        default=False,
+                        action='store_true',
+                        help="""Skip the ral generation step.""")
+
+    parser.add_argument("-v",
+                        "--verbosity",
+                        default="l",
+                        metavar="n|l|m|h|d",
+                        help="""set verbosity to none/low/medium/high/debug;
+                              this will override any setting added to any of the hjson files
+                              used for config""")
+
+    parser.add_argument("--email",
+                        nargs="+",
+                        default=[],
+                        metavar="",
+                        help="""email the report to specified addresses""")
+
+    parser.add_argument(
+        "--verbose",
+        nargs="?",
+        default=None,
+        const="default",
+        metavar="debug",
+        help="""Print verbose dvsim tool messages. If 'debug' is passed, then the
+                              volume of messages is even higher.""")
+
+    parser.add_argument("--version",
+                        default=False,
+                        action='store_true',
+                        help="Print version and exit")
+
+    parser.add_argument(
+        "-n",
+        "--dry-run",
+        default=False,
+        action='store_true',
+        help=
+        "Print dvsim tool messages only, without actually running any command")
+
+    parser.add_argument(
+        "-pi",
+        "--print-interval",
+        type=int,
+        default=10,
+        metavar="N",
+        help="""Interval in seconds. Print status every N seconds.""")
+
+    parser.add_argument(
+        "-mp",
+        "--max-parallel",
+        type=int,
+        default=32,
+        metavar="N",
+        help="""Run only upto a fixed number of builds/tests at a time.""")
+
+    args = parser.parse_args()
+
+    if args.version:
+        print(version)
+        sys.exit()
+
+    # Add log level 'VERBOSE' between INFO and DEBUG
+    log.addLevelName(utils.VERBOSE, 'VERBOSE')
+
+    log_format = '%(levelname)s: [%(module)s] %(message)s'
+    log_level = log.INFO
+    if args.verbose == "default": log_level = utils.VERBOSE
+    elif args.verbose == "debug": log_level = log.DEBUG
+    log.basicConfig(format=log_format, level=log_level)
+
+    if args.cfg == "cfg.hjson": args.cfg = args.flow + "_" + args.cfg
+    if not os.path.exists(args.cfg):
+        log.fatal("Simulation config file %s appears to be invalid.", args.cfg)
+        sys.exit(1)
+
+    args.scratch_root = resolve_scratch_root(args.scratch_root)
+    args.branch = resolve_branch(args.branch)
+    args.cfg = os.path.abspath(args.cfg)
+
+    cfg = SimCfg.SimCfg(proj_root=get_proj_root(), args=args)
+
+    # sim_cfg_list = dvsim_parser.run(args)
+    # dvsim_scheduler.dispatch(sim_cfg_list)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/util/dvsim/Deploy.py b/util/dvsim/Deploy.py
new file mode 100644
index 0000000..f6b4360
--- /dev/null
+++ b/util/dvsim/Deploy.py
@@ -0,0 +1,455 @@
+#!/usr/bin/env python3
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+r"""
+Classes for constructing and deploying builds and runs.
+"""
+
+import logging as log
+import os
+import pprint
+import re
+import shlex
+import subprocess
+import sys
+import time
+
+import hjson
+
+from .utils import *
+
+
+class Deploy():
+    """
+    Abstraction for deploying builds and runs.
+    """
+
+    # Register the builds and runs with the parent class
+    items = []
+
+    sim_cfg = None
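+    # Scratch-area symlinks to job output dirs, keyed by status character:
+    # 'D'ispatched, 'P'assed, 'F'ailed and 'K'illed (see initialize() below).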
+    links = {}
+
+    dispatch_counter = 0
+    print_interval = 5
+    max_parallel = 32
+    max_odirs = 5
+
+    def __self_str__(self):
+        if log.getLogger().isEnabledFor(VERBOSE):
+            return pprint.pformat(self.__dict__)
+        else:
+            ret = self.cmd
+            if self.sub != []: ret += "\nSub:\n" + str(self.sub)
+            return ret
+
+    def __str__(self):
+        return self.__self_str__()
+
+    def __repr__(self):
+        return self.__self_str__()
+
+    def __add_mandatory_attrs__(self):
+        # Common vars
+        self.cmd = ""
+        self.odir = ""
+        self.log = ""
+
+        # Flag to indicate whether to 'overwrite' if odir already exists,
+        # or to backup the existing one and create a new one.
+        # For builds, we want to overwrite existing to leverage the tools'
+        # incremental / partition compile features. For runs, we may want to
+        # create a new one.
+        self.renew_odir = False
+
+        # List of vars required to be exported to sub-shell
+        self.exports = {}
+
+        # Deploy sub commands
+        self.sub = []
+
+        # Process
+        self.process = None
+        self.log_fd = None
+        self.status = None
+
+        # Mandatory misc attributes common to all deploy items.
+        self.mandatory_misc_attrs.update({
+            "name": False,
+            "build_mode": False,
+            "flow_makefile": False,
+            "exports": False,
+            "dry_run": False
+        })
+
+    def __init__(self, ddict):
+        if not hasattr(self, "target"):
+            log.error(
+                "Class %s does not have the mandatory attribute \"target\" defined",
+                self.__class__.__name__)
+            sys.exit(1)
+
+        ddict_keys = ddict.keys()
+        for key in self.mandatory_cmd_attrs.keys():
+            if self.mandatory_cmd_attrs[key] is False:
+                if key in ddict_keys:
+                    setattr(self, key, ddict[key])
+                    self.mandatory_cmd_attrs[key] = True
+
+        for key in self.mandatory_misc_attrs.keys():
+            if self.mandatory_misc_attrs[key] is False:
+                if key in ddict_keys:
+                    setattr(self, key, ddict[key])
+                    self.mandatory_misc_attrs[key] = True
+
+    def __post_init__(self):
+        # Ensure all mandatory attrs are set
+        for attr in self.mandatory_cmd_attrs.keys():
+            if self.mandatory_cmd_attrs[attr] is False:
+                log.error("Attribute \"%s\" not found for \"%s\".", attr,
+                          self.name)
+                sys.exit(1)
+
+        for attr in self.mandatory_misc_attrs.keys():
+            if self.mandatory_misc_attrs[attr] is False:
+                log.error("Attribute \"%s\" not found for \"%s\".", attr,
+                          self.name)
+                sys.exit(1)
+
+        # Recursively search and replace wildcards
+        self.__dict__ = find_and_substitute_wildcards(self.__dict__,
+                                                      self.__dict__)
+
+        # Set the command, output dir and log
+        self.odir = getattr(self, self.target + "_dir")
+        # Set the output dir link name to the basename of odir (by default)
+        self.odir_ln = os.path.basename(os.path.normpath(self.odir))
+        self.log = self.odir + "/" + self.target + ".log"
+
+        # Construct the command to be deployed.
+        self.cmd = self.construct_cmd()
+
+    def construct_cmd(self):
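+        # The deployed command is a make invocation of the form (illustrative):
+        #   make -f <flow_makefile> <target> attr1="value1" attr2="value2" ...
+        # where the attrs are the resolved mandatory_cmd_attrs.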
+        cmd = "make -f " + self.flow_makefile + " " + self.target
+        if self.dry_run is True:
+            cmd += " -n"
+        for attr in self.mandatory_cmd_attrs.keys():
+            value = getattr(self, attr)
+            if type(value) is list:
+                pretty_value = []
+                for item in value:
+                    pretty_value.append(item.strip())
+                value = " ".join(pretty_value)
+            if type(value) is bool:
+                value = int(value)
+            if type(value) is str:
+                value = value.strip()
+            cmd += " " + attr + "=\"" + str(value) + "\""
+
+        # TODO: If not running locally, redirect stdout and err to the log file
+        # self.cmd += " > " + self.log + " 2>&1 &"
+        return cmd
+
+    def dispatch_cmd(self):
+        self.exports.update(os.environ)
+        args = shlex.split(self.cmd)
+        try:
+            self.odir_limiter()
+            os.system("mkdir -p " + self.odir)
+            os.system("ln -s " + self.odir + " " + Deploy.links['D'] + '/' +
+                      self.odir_ln)
+            f = open(self.log, "w")
+            self.process = subprocess.Popen(args,
+                                            stdout=f,
+                                            stderr=f,
+                                            env=self.exports)
+            self.log_fd = f
+            self.status = "."
+            Deploy.dispatch_counter += 1
+        except IOError:
+            log.error('IO Error: See %s', self.log)
+            if self.log_fd: self.log_fd.close()
+            self.status = "K"
+
+    # Function to back up the previously run output directory, maintaining a
+    # history of a limited number of output directories. It deletes the output
+    # directories with the oldest timestamps once the limit is reached.
+    def odir_limiter(self):
+        # Return if renew_odir flag is False - we'd be reusing the existing odir.
+        if not self.renew_odir: return
+        try:
+            # If output directory exists, back it up.
+            if os.path.exists(self.odir):
+                raw_ts = run_cmd("stat -c '%y' " + self.odir)
+                ts = run_cmd("date '" + Deploy.sim_cfg.ts_format + "' -d \"" +
+                             raw_ts + "\"")
+                os.system('mv ' + self.odir + " " + self.odir + "_" + ts)
+        except IOError:
+            log.error('Failed to back up existing output directory %s',
+                      self.odir)
+
+        # Delete older directories.
+        try:
+            pdir = os.path.realpath(self.odir + "/..")
+            if os.path.exists(pdir):
+                find_cmd = "find " + pdir + " -mindepth 1 -maxdepth 1 -type d "
+                num_dirs = int(run_cmd(find_cmd + " | wc -l"))
+                num_rm_dirs = num_dirs - Deploy.max_odirs
+                if num_rm_dirs > -1:
+                    dirs = run_cmd(find_cmd +
+                                   "-printf '%T+ %p\n' | sort | head -n " +
+                                   str(num_rm_dirs + 1) +
+                                   " | awk '{print $2}'")
+                    dirs = dirs.replace('\n', ' ')
+                    os.system("/usr/bin/rm -rf " + dirs)
+        except IOError:
+            log.error("Failed to delete old run directories!")
+
+    def set_status(self):
+        self.status = 'P'
+        if self.dry_run is False:
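+            # A run passes only if every pass pattern is found in the log and
+            # no fail pattern is found. 'grep -c -m 1' prints the number of
+            # matching lines (capped at 1), so "0" means the pattern is absent.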
+            for pass_pattern in self.pass_patterns:
+                grep_cmd = "grep -c -m 1 -E \'" + pass_pattern + "\' " + self.log
+                (status, rslt) = subprocess.getstatusoutput(grep_cmd)
+                if rslt == "0":
+                    log.log(VERBOSE, "No pass patterns found: %s", self.name)
+                    self.status = 'F'
+
+            for fail_pattern in self.fail_patterns:
+                grep_cmd = "grep -c -m 1 -E \'" + fail_pattern + "\' " + self.log
+                (status, rslt) = subprocess.getstatusoutput(grep_cmd)
+                if rslt != "0":
+                    log.log(VERBOSE, "Fail patterns found: %s", self.name)
+                    self.status = 'F'
+
+    # Recursively set sub-item's status if parent item fails
+    def set_sub_status(self, status):
+        if self.sub == []: return
+        for sub_item in self.sub:
+            sub_item.status = status
+            sub_item.set_sub_status(status)
+
+    def link_odir(self):
+        if self.status == '.':
+            log.error("Method unexpectedly called!")
+        else:
+            cmd = "mv " + Deploy.links['D'] + "/" + self.odir_ln + " " + \
+                  Deploy.links[self.status] + "/."
+            os.system(cmd)
+
+    def get_status(self):
+        if self.status != ".": return
+        if self.process.poll() is not None:
+            self.log_fd.close()
+            if self.process.returncode != 0:
+                # TODO: read the log to diagnose the failure
+                self.status = "F"
+            else:
+                self.set_status()
+
+            log.log(VERBOSE, "Item %s has completed execution: %s", self.name,
+                    self.status)
+            Deploy.dispatch_counter -= 1
+            self.link_odir()
+
+    @staticmethod
+    def initialize(sim_cfg):
+        Deploy.sim_cfg = sim_cfg
+        Deploy.links['D'] = sim_cfg.scratch_path + "/" + "dispatched"
+        Deploy.links['P'] = sim_cfg.scratch_path + "/" + "passed"
+        Deploy.links['F'] = sim_cfg.scratch_path + "/" + "failed"
+        Deploy.links['K'] = sim_cfg.scratch_path + "/" + "killed"
+        for link in Deploy.links.keys():
+            try:
+                os.system("/bin/rm -rf " + Deploy.links[link])
+                os.system("mkdir -p " + Deploy.links[link])
+            except:
+                log.error("Unable to create dir %s", Deploy.links[link])
+                sys.exit(1)
+
+        if hasattr(sim_cfg, 'print_interval'):
+            Deploy.print_interval = sim_cfg.print_interval
+
+        if hasattr(sim_cfg, 'max_parallel'):
+            Deploy.max_parallel = sim_cfg.max_parallel
+
+        if hasattr(sim_cfg, 'max_odirs'):
+            Deploy.max_odirs = sim_cfg.max_odirs
+
+    @staticmethod
+    def deploy(items):
+        def dispatch_items(items):
+            item_names = {}
+            for item in items:
+                if item.target not in item_names.keys():
+                    item_names[item.target] = "["
+                if item.status is None:
+                    item_names[item.target] += "  "
+                    if log.getLogger().isEnabledFor(VERBOSE):
+                        item_names[
+                            item.target] += item.name + ":" + item.log + ",\n"
+                    else:
+                        item_names[item.target] += item.odir_ln + ", "
+                    item.dispatch_cmd()
+                    Deploy.items.append(item)
+
+            for target in item_names.keys():
+                if item_names[target] != "[":
+                    item_names[target] = " [" + item_names[target][3:]
+                    item_names[target] = item_names[target][:-2] + "]"
+                    log.info("[dvsim]: %s:\n%s", target, item_names[target])
+
+        # Dispatch the given items
+        dispatch_items_queue = []
+        if len(items) > Deploy.max_parallel:
+            dispatch_items(items[0:Deploy.max_parallel - 1])
+            dispatch_items_queue = items[Deploy.max_parallel:]
+        else:
+            dispatch_items(items)
+
+        all_done = 0
+        num_secs = 0
+        status = {}
+        status_str = {}
+        targets_done = {}
+
+        while all_done == 0:
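+        # Poll all dispatched items once a second. The status string printed
+        # periodically shows one character per item: '.' running, 'P' passed,
+        # 'F' failed, 'K' killed.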
+            time.sleep(1)
+            num_secs += 1
+            trig_print = False
+            for item in Deploy.items:
+                if item.target not in status.keys():
+                    status[item.target] = {}
+                if item.target not in targets_done.keys():
+                    targets_done[item.target] = False
+                if item not in status[item.target].keys():
+                    status[item.target][item] = ""
+
+                item.get_status()
+                if item.status != status[
+                        item.target][item] and item.status != ".":
+                    trig_print = True
+                    if item.status != "P":
+                        # Kill sub items
+                        item.set_sub_status("K")
+                    dispatch_items_queue.extend(item.sub)
+                status[item.target][item] = item.status
+
+            # Dispatch more from the queue
+            if len(dispatch_items_queue) == 0:
+                all_done = 1
+            else:
+                num_slots = Deploy.max_parallel - Deploy.dispatch_counter
+                if len(dispatch_items_queue) > num_slots:
+                    dispatch_items(dispatch_items_queue[0:num_slots])
+                    dispatch_items_queue = dispatch_items_queue[num_slots:]
+                else:
+                    dispatch_items(dispatch_items_queue)
+                    dispatch_items_queue = []
+
+            status_str = {}
+            for target in status.keys():
+                if target not in status_str.keys(): status_str[target] = "["
+                for item in status[target].keys():
+                    if status[target][item] is not None:
+                        status_str[target] += status[target][item]
+                        if status[target][item] == ".":
+                            all_done = 0
+                status_str[target] += "]"
+
+            # Print the status string periodically
+            if trig_print or (num_secs % Deploy.print_interval) == 0:
+                for target in status_str.keys():
+                    if targets_done[target] is True: continue
+                    log.info("[dvsim]: [%06ds] [%s]: %s", num_secs, target,
+                             status_str[target])
+                    if status_str[target].find(".") == -1:
+                        targets_done[target] = True
+
+
+class CompileSim(Deploy):
+    """
+    Abstraction for building the simulation executable.
+    """
+
+    # Register all builds with the class
+    items = []
+
+    def __init__(self, build_mode, sim_cfg):
+        self.target = "build"
+        self.pass_patterns = []
+        self.fail_patterns = []
+
+        self.mandatory_cmd_attrs = {  # RAL gen
+            "skip_ral": False,
+            "gen_ral_pkg_cmd": False,
+            "gen_ral_pkg_dir": False,
+            "gen_ral_pkg_opts": False,
+
+            # Flist gen
+            "sv_flist_gen_cmd": False,
+            "sv_flist_gen_dir": False,
+            "sv_flist_gen_opts": False,
+
+            # Build
+            "build_dir": False,
+            "build_cmd": False,
+            "build_opts": False
+        }
+
+        self.mandatory_misc_attrs = {}
+
+        # Initialize
+        super().__add_mandatory_attrs__()
+        super().__init__(build_mode.__dict__)
+        super().__init__(sim_cfg.__dict__)
+        self.build_mode = self.name
+        self.__post_init__()
+        CompileSim.items.append(self)
+
+
+class RunTest(Deploy):
+    """
+    Abstraction for running tests. This is one per seed for each test.
+    """
+
+    # Register all runs with the class
+    items = []
+
+    def __init__(self, index, test, sim_cfg):
+        self.target = "run"
+        self.pass_patterns = []
+        self.fail_patterns = []
+
+        self.mandatory_cmd_attrs = {
+            "uvm_test": False,
+            "uvm_test_seq": False,
+            "run_opts": False,
+            "sw_dir": False,
+            "sw_name": False,
+            "run_dir": False,
+            "run_cmd": False,
+            "run_opts": False
+        }
+
+        self.mandatory_misc_attrs = {
+            "run_dir_name": False,
+            "pass_patterns": False,
+            "fail_patterns": False
+        }
+
+        self.index = index
+        self.seed = sim_cfg.get_seed()
+
+        # Initialize
+        super().__add_mandatory_attrs__()
+        super().__init__(test.__dict__)
+        super().__init__(sim_cfg.__dict__)
+        self.test = self.name
+        self.renew_odir = True
+        self.build_mode = test.build_mode.name
+        self.scratch_path = sim_cfg.scratch_path
+        self.__post_init__()
+        # Construct custom odir link name for RunTest items by combining name
+        # and index
+        self.odir_ln = self.run_dir_name
+        RunTest.items.append(self)
diff --git a/util/dvsim/Modes.py b/util/dvsim/Modes.py
new file mode 100644
index 0000000..c7096d2
--- /dev/null
+++ b/util/dvsim/Modes.py
@@ -0,0 +1,552 @@
+#!/usr/bin/env python3
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+r"""
+Classes for specifying modes, tests and regressions.
+"""
+
+import logging as log
+import pprint
+import re
+import sys
+
+import hjson
+
+from .utils import *
+
+
+class Modes():
+    """
+    Abstraction for specifying collections of options called 'modes'. This is
+    the base class, which is extended for run_modes, build_modes, tests and regressions.
+    """
+    def self_str(self):
+        '''
+        This is used to construct the string representation of the entire class object.
+        '''
+        tname = ""
+        if self.type != "": tname = self.type + "_"
+        if self.mname != "": tname += self.mname
+        if log.getLogger().isEnabledFor(VERBOSE):
+            return "\n<---" + tname + ":\n" + pprint.pformat(self.__dict__) + \
+                   "\n--->\n"
+        else:
+            return tname + ":" + self.name
+
+    def __str__(self):
+        return self.self_str()
+
+    def __repr__(self):
+        return self.self_str()
+
+    def __init__(self, mdict):
+        keys = mdict.keys()
+        attrs = self.__dict__.keys()
+
+        if 'name' not in keys:
+            log.error("Key \"name\" missing in mode %s", mdict)
+            sys.exit(1)
+
+        if not hasattr(self, "type"):
+            log.fatal("Key \"type\" is missing or invalid")
+            sys.exit(1)
+
+        if not hasattr(self, "mname"): self.mname = ""
+
+        for key in keys:
+            if key not in attrs:
+                log.error("Key %s in %s is invalid", key, mdict)
+                sys.exit(1)
+            setattr(self, key, mdict[key])
+
+    def get_sub_modes(self):
+        sub_modes = []
+        if hasattr(self, "en_" + self.type + "_modes"):
+            sub_modes = getattr(self, "en_" + self.type + "_modes")
+        return sub_modes
+
+    def set_sub_modes(self, sub_modes):
+        setattr(self, "en_" + self.type + "_modes", sub_modes)
+
+    def merge_mode(self, mode):
+        '''
+        Merge a new mode with self.
+        Merge the sub-modes specified with 'en_*_modes' with self.
+        '''
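+        # Illustrative example: merging mode B into mode A extends A's list
+        # attributes (e.g. build_opts, run_opts) with B's entries, while
+        # scalar attributes must either match or be at their default values.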
+
+        sub_modes = self.get_sub_modes()
+        is_sub_mode = mode.name in sub_modes
+
+        if mode.name != self.name and not is_sub_mode:
+            return False
+
+        # Only merge the lists; if other attribute values conflict, flag an error.
+        attrs = self.__dict__.keys()
+        for attr in attrs:
+            # merge lists together
+            self_attr_val = getattr(self, attr)
+            mode_attr_val = getattr(mode, attr)
+
+            if type(self_attr_val) is list:
+                self_attr_val.extend(mode_attr_val)
+                setattr(self, attr, self_attr_val)
+
+            elif not is_sub_mode or attr not in ["name", "mname"]:
+                self.check_conflict(mode.name, attr, mode_attr_val)
+
+        # Check newly appended sub_modes, remove 'self' and duplicates
+        sub_modes = self.get_sub_modes()
+
+        if sub_modes != []:
+            new_sub_modes = []
+            for sub_mode in sub_modes:
+                if sub_mode != self.name and sub_mode not in new_sub_modes:
+                    new_sub_modes.append(sub_mode)
+            self.set_sub_modes(new_sub_modes)
+        return True
+
+    def check_conflict(self, name, attr, mode_attr_val):
+        self_attr_val = getattr(self, attr)
+        if self_attr_val == mode_attr_val: return
+
+        default_val = None
+        if type(self_attr_val) is int:
+            default_val = -1
+        elif type(self_attr_val) is str:
+            default_val = ""
+
+        if self_attr_val != default_val and mode_attr_val != default_val:
+            log.error(
+                "mode %s cannot be merged into %s due to conflicting %s {%s, %s}",
+                name, self.name, attr, str(self_attr_val), str(mode_attr_val))
+            sys.exit(1)
+        elif self_attr_val == default_val:
+            self_attr_val = mode_attr_val
+            setattr(self, attr, self_attr_val)
+
+    @staticmethod
+    def create_modes(ModeType, mdicts):
+        '''
+        Create modes of type ModeType from a given list of raw dicts.
+        Process dependencies between modes.
+        Return a list of mode objects.
+        '''
+        def merge_sub_modes(mode, parent, objs):
+            # Check if there are modes available to merge
+            sub_modes = mode.get_sub_modes()
+            if sub_modes == []: return
+
+            # Set parent if it is None. If not, check cyclic dependency
+            if parent is None:
+                parent = mode
+            else:
+                if mode.name == parent.name:
+                    log.error("Cyclic dependency when processing mode \"%s\"",
+                              mode.name)
+                    sys.exit(1)
+
+            for sub_mode in sub_modes:
+                # Find the sub_mode obj from str
+                found = False
+                for obj in objs:
+                    if sub_mode == obj.name:
+                        # First recursively merge the sub_modes
+                        merge_sub_modes(obj, parent, objs)
+
+                        # Now merge the sub mode with mode
+                        mode.merge_mode(obj)
+                        found = True
+                        break
+                if not found:
+                    log.error(
+                        "Sub mode \"%s\" added to mode \"%s\" was not found!",
+                        sub_mode, mode.name)
+                    sys.exit(1)
+
+        modes_objs = []
+        # create a default mode if available
+        default_mode = ModeType.get_default_mode()
+        if default_mode is not None: modes_objs.append(default_mode)
+
+        # Process list of raw dicts that represent the modes
+        # Pass 1: Create unique set of modes by merging modes with the same name
+        for mdict in mdicts:
+            # Create a new item
+            new_mode_merged = False
+            new_mode = ModeType(mdict)
+            for mode in modes_objs:
+                # Merge new one with existing if available
+                if mode.name == new_mode.name:
+                    mode.merge_mode(new_mode)
+                    new_mode_merged = True
+                    break
+
+            # Add the new mode to the list if not already appended
+            if not new_mode_merged:
+                modes_objs.append(new_mode)
+                ModeType.item_names.append(new_mode.name)
+
+        # Pass 2: Recursively expand sub modes within parent modes
+        for mode in modes_objs:
+            merge_sub_modes(mode, None, modes_objs)
+
+        # Return the list of objects
+        return modes_objs
+
+    @staticmethod
+    def get_default_mode():
+        return None
+
+    @staticmethod
+    def find_mode(mode_name, modes):
+        '''
+        Given a mode_name string, go through the list of modes and return the
+        mode whose name matches. Return None if nothing was found.
+        '''
+        for mode in modes:
+            if mode_name == mode.name:
+                return mode
+        return None
+
+    @staticmethod
+    def find_and_merge_modes(mode, mode_names, modes, merge_modes=True):
+        '''
+        Find each mode in mode_names within the given list of modes and merge
+        it into mode (unless merge_modes is set to False). Error out and exit
+        if a mode name is not found. Return the list of found mode objects.
+        '''
+        found_mode_objs = []
+        for mode_name in mode_names:
+            sub_mode = Modes.find_mode(mode_name, modes)
+            if sub_mode is not None:
+                found_mode_objs.append(sub_mode)
+                if merge_modes is True: mode.merge_mode(sub_mode)
+            else:
+                log.error("Mode \"%s\" enabled within mode \"%s\" not found!",
+                          mode_name, mode.name)
+                sys.exit(1)
+        return found_mode_objs
+
+
+class BuildModes(Modes):
+    """
+    Build modes.
+    """
+
+    # Maintain a list of build_modes str
+    item_names = []
+
+    def __init__(self, bdict):
+        self.name = ""
+        self.type = "build"
+        if not hasattr(self, "mname"): self.mname = "mode"
+        self.is_sim_mode = 0
+        self.build_opts = []
+        self.run_opts = []
+        self.en_build_modes = []
+
+        super().__init__(bdict)
+        self.en_build_modes = list(set(self.en_build_modes))
+
+    @staticmethod
+    def get_default_mode():
+        return BuildModes({"name": "default"})
+
+
+class RunModes(Modes):
+    """
+    Run modes.
+    """
+
+    # Maintain a list of run_modes str
+    item_names = []
+
+    def __init__(self, rdict):
+        self.name = ""
+        self.type = "run"
+        if not hasattr(self, "mname"): self.mname = "mode"
+        self.reseed = -1
+        self.run_opts = []
+        self.uvm_test = ""
+        self.uvm_test_seq = ""
+        self.build_mode = ""
+        self.en_run_modes = []
+        self.sw_dir = ""
+        self.sw_name = ""
+
+        super().__init__(rdict)
+        self.en_run_modes = list(set(self.en_run_modes))
+
+    @staticmethod
+    def get_default_mode():
+        return None
+
+
+class Tests(RunModes):
+    """
+    Abstraction for tests. The RunModes abstraction can be reused here with a few
+    modifications.
+    """
+
+    # Maintain a list of tests str
+    item_names = []
+
+    # TODO: This info should be passed via hjson
+    defaults = {
+        "reseed": -1,
+        "uvm_test": "",
+        "uvm_test_seq": "",
+        "build_mode": ""
+    }
+
+    def __init__(self, tdict):
+        if not hasattr(self, "mname"): self.mname = "test"
+        super().__init__(tdict)
+
+    @staticmethod
+    def create_tests(tdicts, sim_cfg):
+        '''
+        Create Tests from a given list of raw dicts.
+        TODO: enhance the raw dict to include file scoped defaults.
+        Process the enabled run modes and the build mode set for each test.
+        Return a list of test objects.
+        '''
+        def get_pruned_en_run_modes(test_en_run_modes, global_en_run_modes):
+            pruned_en_run_modes = []
+            for test_en_run_mode in test_en_run_modes:
+                if test_en_run_mode not in global_en_run_modes:
+                    pruned_en_run_modes.append(test_en_run_mode)
+            return pruned_en_run_modes
+
+        tests_objs = []
+        # Pass 1: Create unique set of tests by merging tests with the same name
+        for tdict in tdicts:
+            # Create a new item
+            new_test_merged = False
+            new_test = Tests(tdict)
+            for test in tests_objs:
+                # Merge new one with existing if available
+                if test.name == new_test.name:
+                    test.merge_mode(new_test)
+                    new_test_merged = True
+                    break
+
+            # Add the new test to the list if not already appended
+            if not new_test_merged:
+                tests_objs.append(new_test)
+                Tests.item_names.append(new_test.name)
+
+        # Pass 2: Process dependencies
+        build_modes = []
+        if hasattr(sim_cfg, "build_modes"):
+            build_modes = getattr(sim_cfg, "build_modes")
+
+        run_modes = []
+        if hasattr(sim_cfg, "run_modes"):
+            run_modes = getattr(sim_cfg, "run_modes")
+
+        attrs = Tests.defaults
+        for test_obj in tests_objs:
+            # Unpack run_modes first
+            en_run_modes = get_pruned_en_run_modes(test_obj.en_run_modes,
+                                                   sim_cfg.en_run_modes)
+            Modes.find_and_merge_modes(test_obj, en_run_modes, run_modes)
+
+            # Find and set the missing attributes from sim_cfg.
+            # If not found in sim_cfg either, then throw an error.
+            # TODO: These should be file-scoped
+            for attr in attrs.keys():
+                # Check if attr value is default
+                val = getattr(test_obj, attr)
+                default_val = attrs[attr]
+                if val == default_val:
+                    global_val = None
+                    # Check if we can find a default in sim_cfg
+                    if hasattr(sim_cfg, attr):
+                        global_val = getattr(sim_cfg, attr)
+
+                    if global_val is not None and global_val != default_val:
+                        setattr(test_obj, attr, global_val)
+
+                    else:
+                        # TODO: should we even enforce this?
+                        log.error(
+                            "Required value \"%s\" for the test \"%s\" is missing",
+                            attr, test_obj.name)
+                        sys.exit(1)
+
+            # Unpack the build mode for this test
+            build_mode_objs = Modes.find_and_merge_modes(test_obj,
+                                                         [test_obj.build_mode],
+                                                         build_modes,
+                                                         merge_modes=False)
+            test_obj.build_mode = build_mode_objs[0]
+
+            # Error if set build mode is actually a sim mode
+            if test_obj.build_mode.is_sim_mode:
+                log.error(
+                    "Test \"%s\" uses build_mode %s which is actually a sim mode",
+                    test_obj.name, test_obj.build_mode.name)
+                sys.exit(1)
+
+            # Merge build_mode's run_opts with self
+            test_obj.run_opts.extend(test_obj.build_mode.run_opts)
+
+        # Return the list of tests
+        return tests_objs
+
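+    # For illustration, a hypothetical test entry in an hjson cfg (names are
+    # made up). Attributes left at their defaults (reseed, uvm_test, etc.) are
+    # inherited from sim_cfg, and the build_mode string is replaced with the
+    # corresponding BuildModes object:
+    #
+    #   tests: [
+    #     {
+    #       name: foo_smoke
+    #       uvm_test_seq: foo_smoke_vseq
+    #       en_run_modes: ["foo_common_run_mode"]
+    #     }
+    #   ]
+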
+    @staticmethod
+    def merge_global_opts(tests, global_build_opts, global_run_opts):
+        processed_build_modes = []
+        for test in tests:
+            if test.build_mode.name not in processed_build_modes:
+                test.build_mode.build_opts.extend(global_build_opts)
+                processed_build_modes.append(test.build_mode.name)
+            test.run_opts.extend(global_run_opts)
+
+
+class Regressions(Modes):
+    """
+    Abstraction for test sets / regression sets.
+    """
+
+    # Maintain a list of tests str
+    item_names = []
+
+    # TODO: define __repr__ and __str__ to print list of tests if VERBOSE
+
+    def __init__(self, regdict):
+        self.name = ""
+        self.type = ""
+        if not hasattr(self, "mname"): self.mname = "regression"
+        self.tests = []
+        self.reseed = -1
+        self.test_names = []
+        self.excl_tests = []  # TODO: add support for this
+        self.en_sim_modes = []
+        self.en_run_modes = []
+        self.build_opts = []
+        self.run_opts = []
+        super().__init__(regdict)
+
+    @staticmethod
+    def create_regressions(regdicts, sim_cfg, tests):
+        '''
+        Create Test sets from a given list of raw dicts.
+        Return a list of test set objects.
+        '''
+
+        regressions_objs = []
+        # Pass 1: Create unique set of test sets by merging test sets with the same name
+        for regdict in regdicts:
+            # Create a new item
+            new_regression_merged = False
+            new_regression = Regressions(regdict)
+
+            # Check for name conflicts with tests before merging
+            if new_regression.name in Tests.item_names:
+                log.error("Test names and regression names are required to be unique. " + \
+                          "The regression \"%s\" bears the same name with an existing test. ",
+                          new_regression.name)
+                sys.exit(1)
+
+            for regression in regressions_objs:
+                # Merge new one with existing if available
+                if regression.name == new_regression.name:
+                    regression.merge_mode(new_regression)
+                    new_regression_merged = True
+                    break
+
+            # Add the new regression to the list if not already appended
+            if not new_regression_merged:
+                regressions_objs.append(new_regression)
+                Regressions.item_names.append(new_regression.name)
+
+        # Pass 2: Process dependencies
+        build_modes = []
+        if hasattr(sim_cfg, "build_modes"):
+            build_modes = getattr(sim_cfg, "build_modes")
+
+        run_modes = []
+        if hasattr(sim_cfg, "run_modes"):
+            run_modes = getattr(sim_cfg, "run_modes")
+
+        for regression_obj in regressions_objs:
+            # Unpack the sim modes
+            found_sim_mode_objs = Modes.find_and_merge_modes(
+                regression_obj, regression_obj.en_sim_modes, build_modes,
+                False)
+
+            for sim_mode_obj in found_sim_mode_objs:
+                if sim_mode_obj.is_sim_mode == 0:
+                    log.error(
+                        "Enabled mode \"%s\" within the regression \"%s\" is not a sim mode",
+                        sim_mode_obj.name, regression_obj.name)
+                    sys.exit(1)
+
+                # Check if sim_mode_obj's sub-modes are a part of the
+                # regression's sim modes. If yes, the opts would be duplicated,
+                # so throw an error and exit.
+                for sim_mode_obj_sub in sim_mode_obj.en_build_modes:
+                    if sim_mode_obj_sub in regression_obj.en_sim_modes:
+                        log.error("Regression \"%s\" enables sim_modes \"%s\" and \"%s\". " + \
+                                  "The former is already a sub_mode of the latter.",
+                                  regression_obj.name, sim_mode_obj_sub, sim_mode_obj.name)
+                        sys.exit(1)
+
+                # Check if sim_mode_obj is also passed on the command line, in
+                # which case, skip
+                if sim_mode_obj.name in sim_cfg.en_build_modes:
+                    continue
+
+                # Merge the build and run opts from the sim modes
+                regression_obj.build_opts.extend(sim_mode_obj.build_opts)
+                regression_obj.run_opts.extend(sim_mode_obj.run_opts)
+
+            # Unpack the run_modes
+            # TODO: If there are other params other than run_opts throw an error and exit
+            found_run_mode_objs = Modes.find_and_merge_modes(
+                regression_obj, regression_obj.en_run_modes, run_modes, False)
+
+            # Only merge the run_opts from the run_modes enabled
+            for run_mode_obj in found_run_mode_objs:
+                # Check if run_mode_obj is also passed on the command line, in
+                # which case, skip
+                if run_mode_obj.name in sim_cfg.en_run_modes:
+                    continue
+                regression_obj.run_opts.extend(run_mode_obj.run_opts)
+
+            # Unpack tests
+            if regression_obj.tests == []:
+                log.log(VERBOSE,
+                        "Unpacking all tests in scope for regression \"%s\"",
+                        regression_obj.name)
+                regression_obj.tests = sim_cfg.tests
+                regression_obj.test_names = Tests.item_names
+
+            else:
+                tests_objs = []
+                regression_obj.test_names = regression_obj.tests
+                for test in regression_obj.tests:
+                    test_obj = Modes.find_mode(test, sim_cfg.tests)
+                    if test_obj is None:
+                        log.error(
+                            "Test \"%s\" added to regression \"%s\" not found!",
+                            test, regression_obj.name)
+                        sys.exit(1)
+                    tests_objs.append(test_obj)
+                regression_obj.tests = tests_objs
+
+        # Return the list of regressions
+        return regressions_objs
+
+    def merge_regression_opts(self):
+        processed_build_modes = []
+        for test in self.tests:
+            if test.build_mode.name not in processed_build_modes:
+                test.build_mode.build_opts.extend(self.build_opts)
+                processed_build_modes.append(test.build_mode.name)
+            test.run_opts.extend(self.run_opts)
+
+            # Override reseed if available.
+            if self.reseed != -1:
+                test.reseed = self.reseed
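+
+# For illustration, a hypothetical regression entry in an hjson cfg (names and
+# values are made up). An empty / absent "tests" list pulls in every test in
+# scope instead.
+#
+#   regressions: [
+#     {
+#       name: nightly
+#       tests: ["foo_smoke", "foo_stress"]
+#       reseed: 50
+#       en_sim_modes: ["cov"]
+#     }
+#   ]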
diff --git a/util/dvsim/SimCfg.py b/util/dvsim/SimCfg.py
new file mode 100644
index 0000000..cd460ef
--- /dev/null
+++ b/util/dvsim/SimCfg.py
@@ -0,0 +1,420 @@
+#!/usr/bin/env python3
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+r"""
+Class describing simulation configuration object
+"""
+
+import datetime
+import logging as log
+import pprint
+import random
+import re
+import sys
+
+import hjson
+
+from .Deploy import *
+from .Modes import *
+from .utils import *
+
+
+class SimCfg():
+    """Simulation configuration object
+
+    A simulation configuration object holds the key information required to
+    build and run tests / regressions for a DV environment.
+    """
+    def __str__(self):
+        return pprint.pformat(self.__dict__)
+
+    def __repr__(self):
+        return pprint.pformat(self.__dict__)
+
+    def __init__(self, proj_root, args):
+        # Options set from command line
+        self.flow = args.flow
+        self.cfg_files = []
+        self.cfg_files.append(args.cfg)
+        self.items = args.items
+        self.list_items = args.list
+        self.simulator = args.simulator
+        self.proj_root = proj_root
+        self.scratch_root = args.scratch_root
+        self.branch = args.branch
+        self.build_opts = args.build_opts
+        self.en_build_modes = args.build_modes
+        self.run_opts = args.run_opts
+        self.en_run_modes = args.run_modes
+        self.build_unique = args.build_unique
+        self.build_only = args.build_only
+        self.run_only = args.run_only
+        self.seeds = args.seeds
+        self.reseed_ovrd = args.reseed
+        self.reseed_multiplier = args.reseed_multiplier
+        self.waves = args.waves
+        self.dump = args.dump
+        self.max_waves = args.max_waves
+        self.cov = args.cov
+        self.profile = args.profile
+        self.max_odirs = args.max_odirs
+        self.no_rerun = args.no_rerun
+        self.verbosity = "{" + args.verbosity + "}"
+        self.email = args.email
+        self.verbose = args.verbose
+        self.dry_run = args.dry_run
+        self.skip_ral = args.skip_ral
+        self.job_prefix = args.job_prefix
+        self.print_interval = args.print_interval
+        self.max_parallel = args.max_parallel
+
+        # Set default sim modes for unpacking
+        if self.waves: self.en_build_modes.append("waves")
+        if self.cov: self.en_build_modes.append("cov")
+        if self.profile != 'none': self.en_build_modes.append("profile")
+
+        # Options parsed from the cfg files
+        self.project = ""
+        self.flow_makefile = ""
+        self.scratch_path = ""
+        self.build_dir = ""
+        self.run_dir = ""
+        self.sw_build_dir = ""
+        self.pass_patterns = []
+        self.fail_patterns = []
+        self.name = ""
+        self.dut = ""
+        self.tb = ""
+        self.testplan = ""
+        self.fusesoc_core = ""
+        self.ral_spec = ""
+        self.build_modes = []
+        self.run_modes = []
+        self.regressions = []
+
+        # Options from simulators - for building and running tests
+        self.build_cmd = ""
+        self.build_ex = ""
+        self.flist_gen_cmd = ""
+        self.flist_gen_opts = []
+        self.flist_file = ""
+        self.run_cmd = ""
+        self.dump_file = ""
+        self.exports = []
+
+        # Current timestamp
+        self.ts_format_long = "%A %B %d %Y %I:%M:%S%p %Z"
+        self.ts_format = '+%a.%m.%d.%y__%I.%M.%S%p'
+
+        curr_ts = datetime.datetime.now()
+        self.timestamp_long = curr_ts.strftime(self.ts_format_long)
+        self.timestamp = curr_ts.strftime(self.ts_format)
+
+        # Parse the cfg_file file tree
+        self.parse_sim_cfg(args.cfg)
+
+        # If build_unique is set, then add current timestamp to uniquify it
+        if self.build_unique:
+            self.build_dir += "_" + self.timestamp
+
+        # Make substitutions, while ignoring the following wildcards
+        # TODO: Find a way to set these in sim cfg instead
+        ignored_wildcards = [
+            "build_mode", "index", "test", "seed", "uvm_test", "uvm_test_seq"
+        ]
+        self.__dict__ = find_and_substitute_wildcards(self.__dict__,
+                                                      self.__dict__,
+                                                      ignored_wildcards)
+
+        # Check if there are items to run
+        if self.items == []:
+            log.error(
+                "No items provided for running this simulation / regression")
+            sys.exit(1)
+
+        # Use the default build mode for tests that do not specify it
+        if not hasattr(self, "build_mode"):
+            setattr(self, "build_mode", "default")
+
+        self.process_exports()
+
+        # Create objects from raw dicts - build_modes, sim_modes, run_modes,
+        # tests and regressions
+        self.create_objects()
+
+        # Once all objects are constructed, check if the --list option is
+        # passed. If yes, then simply print the items requested and exit.
+        self.print_list()
+
+        # Look at list of items and build the list of tests to run
+        self.deploy = []
+        self.build_list = []
+        self.run_list = []
+        self.create_build_and_run_list()
+
+        # Process reseed override
+        for test in self.run_list:
+            # Override reseed if available.
+            if self.reseed_ovrd != -1:
+                test.reseed = self.reseed_ovrd
+
+            # Apply reseed multiplier if set on the command line.
+            test.reseed *= self.reseed_multiplier
+
+        # Create deploy objects
+        self.create_deploy_objects()
+
+        # Deploy the builds and runs
+        Deploy.deploy(self.deploy)
+
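+    # Note: construction runs the whole flow end-to-end - the front-end script
+    # (not part of this file) is only expected to parse the command line and
+    # do, e.g. (illustrative):
+    #
+    #   sim_cfg = SimCfg(proj_root, args)
+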
+    def process_exports(self):
+        # Convert 'exports' to dict
+        exports_dict = {}
+        if self.exports != []:
+            for item in self.exports:
+                if type(item) is dict:
+                    exports_dict.update(item)
+                elif type(item) is str:
+                    [key, value] = item.split(':', 1)
+                    if type(key) is not str: key = str(key)
+                    if type(value) is not str: value = str(value)
+                    exports_dict.update({key.strip(): value.strip()})
+                else:
+                    log.error("Type error in \"exports\": %s", str(item))
+                    sys.exit(1)
+        self.exports = exports_dict
+
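+    # Both forms below are accepted in the hjson "exports" list and collapse
+    # into a single dict (names and values are hypothetical):
+    #
+    #   exports: [
+    #     {UVM_NO_RELNOTES: 1}
+    #     "PATH_TO_TOOL: /path/to/tool"
+    #   ]
+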
+    def parse_sim_cfg(self, sim_cfg_file):
+        try:
+            log.debug("Parsing %s", sim_cfg_file)
+            with open(sim_cfg_file, 'r') as f:
+                text = f.read()
+        except IOError:
+            log.fatal("Failed to read \"%s\"", sim_cfg_file)
+            sys.exit(1)
+        self.resolve_hjson_raw(hjson.loads(text, use_decimal=True))
+
+    def resolve_hjson_raw(self, hjson_dict):
+        attrs = self.__dict__.keys()
+        rm_hjson_dict_keys = []
+        import_cfgs = []
+        for key in hjson_dict.keys():
+            if key in attrs:
+                hjson_dict_val = hjson_dict[key]
+                self_val = getattr(self, key)
+                scalar_types = {str: [""], int: [0, -1], bool: [False]}
+
+                # Case 1: the key's value types in the class and hjson_dict
+                # differ - error!
+                if type(hjson_dict_val) != type(self_val):
+                    log.error("Conflicting key types: %s {%s, %s}", key,
+                              type(hjson_dict_val).__name__,
+                              type(self_val).__name__)
+                    sys.exit(1)
+
+                # Case 2: the key's values in the class and hjson_dict are
+                # scalars (str, int or bool) - set if not already set, else
+                # error!
+                elif type(hjson_dict_val) in scalar_types.keys():
+                    defaults = scalar_types[type(hjson_dict_val)]
+                    if self_val == hjson_dict_val:
+                        rm_hjson_dict_keys.append(key)
+                    elif self_val in defaults and hjson_dict_val not in defaults:
+                        setattr(self, key, hjson_dict_val)
+                        rm_hjson_dict_keys.append(key)
+                    elif self_val not in defaults and hjson_dict_val not in defaults:
+                        log.error(
+                            "Conflicting values {%s, %s} encountered for key %s",
+                            str(self_val), str(hjson_dict_val), key)
+                        sys.exit(1)
+
+                # Case 3: key value in class and hjson_dict are lists - merge'em
+                elif type(hjson_dict_val) is list and type(self_val) is list:
+                    self_val.extend(hjson_dict_val)
+                    setattr(self, key, self_val)
+                    rm_hjson_dict_keys.append(key)
+
+                # Case 4: unknown issue
+                else:
+                    log.error(
+                        "Type of \"%s\" (%s) in %s appears to be invalid (should be %s)",
+                        key,
+                        type(hjson_dict_val).__name__, hjson_dict,
+                        type(self_val).__name__)
+                    sys.exit(1)
+            # If key is 'import_cfgs' then add to the list of sim_cfgs to
+            # process
+            elif key == 'import_cfgs':
+                import_cfgs.extend(hjson_dict[key])
+                rm_hjson_dict_keys.append(key)
+
+            else:
+                # add key-value to class
+                setattr(self, key, hjson_dict[key])
+                rm_hjson_dict_keys.append(key)
+
+        # Parse imported sim_cfgs
+        for cfg_file in import_cfgs:
+            if cfg_file not in self.cfg_files:
+                self.cfg_files.append(cfg_file)
+                # Substitute wildcards in the cfg file path since we need to
+                # process it right away.
+                cfg_file = subst_wildcards(cfg_file, self.__dict__)
+                self.parse_sim_cfg(cfg_file)
+            else:
+                log.error("Sim cfg file %s has already been parsed", cfg_file)
+
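+    # Sketch of the merge semantics above with hypothetical values: if this
+    # cfg file sets "build_opts: [-a]" and an imported cfg sets
+    # "build_opts: [-b]", the lists are concatenated. If both set a
+    # non-default scalar such as "dut" to different values, the tool errors
+    # out and exits.
+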
+    def create_objects(self):
+        # Create build and run modes objects
+        build_modes = Modes.create_modes(BuildModes,
+                                         getattr(self, "build_modes"))
+        setattr(self, "build_modes", build_modes)
+
+        run_modes = Modes.create_modes(RunModes, getattr(self, "run_modes"))
+        setattr(self, "run_modes", run_modes)
+
+        # Walk through build modes enabled on the CLI and append the opts
+        for en_build_mode in self.en_build_modes:
+            build_mode_obj = Modes.find_mode(en_build_mode, build_modes)
+            if build_mode_obj is not None:
+                self.build_opts.extend(build_mode_obj.build_opts)
+                self.run_opts.extend(build_mode_obj.run_opts)
+            else:
+                log.error(
+                    "Mode \"%s\" enabled on the the command line is not defined",
+                    en_build_mode)
+                sys.exit(1)
+
+        # Walk through run modes enabled on the CLI and append the opts
+        for en_run_mode in self.en_run_modes:
+            run_mode_obj = Modes.find_mode(en_run_mode, run_modes)
+            if run_mode_obj is not None:
+                self.run_opts.extend(run_mode_obj.run_opts)
+            else:
+                log.error(
+                    "Mode \"%s\" enabled on the the command line is not defined",
+                    en_run_mode)
+                sys.exit(1)
+
+        # Create tests from given list of items
+        tests = Tests.create_tests(getattr(self, "tests"), self)
+        setattr(self, "tests", tests)
+
+        # Create regressions
+        regressions = Regressions.create_regressions(
+            getattr(self, "regressions"), self, tests)
+        setattr(self, "regressions", regressions)
+
+    def print_list(self):
+        if self.list_items != []:
+            for list_item in self.list_items:
+                if hasattr(self, list_item):
+                    items = getattr(self, list_item)
+                    for item in items:
+                        log.info(item)
+                else:
+                    log.error("Item %s does not exist!", list_item)
+                    sys.exit(1)
+            sys.exit(0)
+
+    def create_build_and_run_list(self):
+        # Walk through the list of items to run and create the build and run
+        # objects.
+        # Allow multiple regressions to run as long as they do not enable
+        # sim_modes or run_modes.
+        def get_overlapping_tests(tests, run_list_names):
+            overlapping_tests = []
+            for test in tests:
+                if test.name in run_list_names:
+                    overlapping_tests.append(test)
+            return overlapping_tests
+
+        def prune_items(items, marked_items):
+            pruned_items = []
+            for item in items:
+                if item not in marked_items: pruned_items.append(item)
+            return pruned_items
+
+        items_list = self.items
+        run_list_names = []
+        marked_items = []
+        # Process regressions first
+        for regression in self.regressions:
+            if regression.name in items_list:
+                overlapping_tests = get_overlapping_tests(
+                    regression.tests, run_list_names)
+                if overlapping_tests != []:
+                    log.error("Regression \"%s\" added for run contains tests that overlap with " + \
+                              "other regressions added. This can result in conflicting " + \
+                              "build / run_opts to be set causing unexpected results.",
+                              regression.name)
+                    sys.exit(1)
+
+                self.run_list.extend(regression.tests)
+                # Merge regression's build and run opts with its tests and their
+                # build_modes
+                regression.merge_regression_opts()
+                run_list_names.extend(regression.test_names)
+                marked_items.append(regression.name)
+        items_list = prune_items(items_list, marked_items)
+
+        # Process individual tests
+        for test in self.tests:
+            if test.name in items_list:
+                overlapping_tests = get_overlapping_tests([test],
+                                                          run_list_names)
+                if overlapping_tests == []:
+                    self.run_list.append(test)
+                    run_list_names.append(test.name)
+                    marked_items.append(test.name)
+        items_list = prune_items(items_list, marked_items)
+
+        # Merge the global build and run opts
+        Tests.merge_global_opts(self.run_list, self.build_opts, self.run_opts)
+
+        # Check if all items have been processed
+        if items_list != []:
+            log.error("The items %s added for run were not found! Use the --list switch " + \
+                      "to see a list of available tests / regressions for run", items_list)
+            sys.exit(1)
+
+        # Create the build_list
+        build_list_names = []
+        for test in self.run_list:
+            if test.build_mode.name not in build_list_names:
+                self.build_list.append(test.build_mode)
+                build_list_names.append(test.build_mode.name)
+
+    def create_deploy_objects(self):
+        builds = []
+        build_map = {}
+        Deploy.initialize(self)
+        for build in self.build_list:
+            item = CompileSim(build, self)
+            builds.append(item)
+            build_map[build] = item
+
+        runs = []
+        for test in self.run_list:
+            for num in range(test.reseed):
+                item = RunTest(num, test, self)
+                if not self.build_only:
+                    build_map[test.build_mode].sub.append(item)
+                runs.append(item)
+
+        if self.run_only:
+            self.deploy = runs
+        else:
+            self.deploy = builds
+
+    def get_seed(self):
+        if self.seeds == []:
+            try:
+                # Pre-populate 1000 seeds at a time: od reads 4000 bytes from
+                # /dev/random and prints them as unsigned 4-byte ints
+                self.seeds = run_cmd(
+                    "od -vAn -N4000 -tu < /dev/random | xargs").split()
+                random.shuffle(self.seeds)
+            except:
+                log.error("Failed to generate a list of 1000 random seeds")
+                sys.exit(1)
+        return self.seeds.pop(-1)
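+
+    # Usage sketch (illustrative): callers pop one unique seed per test run.
+    #
+    #   seed = sim_cfg.get_seed()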
diff --git a/util/dvsim/__init__.py b/util/dvsim/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/util/dvsim/__init__.py
diff --git a/util/dvsim/utils.py b/util/dvsim/utils.py
new file mode 100644
index 0000000..cc2c80f
--- /dev/null
+++ b/util/dvsim/utils.py
@@ -0,0 +1,145 @@
+#!/usr/bin/env python3
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+r"""
+Utility functions common across dvsim.
+"""
+
+import logging as log
+import os
+import pprint
+import re
+import shlex
+import subprocess
+import sys
+import time
+from collections import OrderedDict
+
+import hjson
+
+# For verbose logging
+VERBOSE = 15
+
+
+# Run a command and get the result. Exit with an error if the command did not
+# succeed. This is a simpler version of the run_cmd_with_timeout function
+# below.
+def run_cmd(cmd):
+    (status, output) = subprocess.getstatusoutput(cmd)
+    if status:
+        sys.stderr.write("cmd " + cmd + " returned with status " + str(status) +
+                         "\n")
+        sys.exit(status)
+    return output
+
+
+# Run a command with a specified timeout and return a (result, status) tuple.
+# If the command does not finish before the timeout, it is killed and the
+# status remains -1. If the command fails and exit_on_failure is set, exit
+# with the command's status.
+def run_cmd_with_timeout(cmd, timeout=-1, exit_on_failure=1):
+    args = shlex.split(cmd)
+    p = subprocess.Popen(args,
+                         stdout=subprocess.PIPE,
+                         stderr=subprocess.STDOUT)
+
+    # If timeout is set, poll for the process to finish until timeout
+    result = ""
+    status = -1
+    if timeout == -1:
+        p.wait()
+    else:
+        start = time.time()
+        while time.time() - start < timeout:
+            if p.poll() is not None:
+                break
+            time.sleep(0.2)
+
+    # Capture output and status if cmd exited, else kill it
+    if p.poll() is not None:
+        result = p.communicate()[0]
+        status = p.returncode
+    else:
+        log.error("cmd \"%s\" timed out!", cmd)
+        p.kill()
+
+    if status != 0:
+        log.error("cmd \"%s\" exited with status %d", cmd, status)
+        if exit_on_failure == 1: sys.exit(status)
+
+    return (result, status)
+
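+# Usage sketch (illustrative): poll a slow command for up to 2 seconds without
+# exiting on failure.
+#
+#   (result, status) = run_cmd_with_timeout("sleep 5", timeout=2,
+#                                           exit_on_failure=0)
+#   # The command is killed at the 2 second mark and status remains -1.
+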
+
+def subst_wildcards(var, mdict, ignored_wildcards=[]):
+    '''
+    If var has wildcards specified within {..}, find and substitute them.
+    '''
+    def subst(wildcard, mdict):
+        if wildcard in mdict.keys(): return mdict[wildcard]
+        else: return None
+
+    if "{eval_cmd}" in var:
+        idx = var.find("{eval_cmd}") + 11
+        var = subst_wildcards(var[idx:], mdict, ignored_wildcards)
+        var = run_cmd(var)
+    else:
+        match = re.findall(r"{([A-Za-z0-9\_]+)}", var)
+        if len(match) > 0:
+            subst_list = {}
+            for item in match:
+                if item not in ignored_wildcards:
+                    log.debug("Found wildcard in \"%s\": \"%s\"", var, item)
+                    found = subst(item, mdict)
+                    if found is not None:
+                        if type(found) is str:
+                            found = subst_wildcards(found, mdict,
+                                                    ignored_wildcards)
+                        if type(found) is bool:
+                            found = int(found)
+                        subst_list[item] = found
+                    else:
+                        # Check if the wildcard exists as an environment variable
+                        env_var = os.environ.get(item)
+                        if env_var is not None: subst_list[item] = env_var
+                        else:
+                            log.error(
+                                "Substitution for the wildcard \"%s\" not found",
+                                item)
+                            sys.exit(1)
+            for item in subst_list:
+                var = var.replace("{" + item + "}", str(subst_list[item]))
+    return var
+
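+# For example (hypothetical dict):
+#
+#   subst_wildcards("{scratch_root}/{dut}", {"scratch_root": "/tmp/scratch",
+#                                            "dut": "uart"})
+#   # returns "/tmp/scratch/uart". Wildcards that are not found in the dict
+#   # fall back to environment variables before erroring out.
+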
+
+def find_and_substitute_wildcards(sub_dict, full_dict, ignored_wildcards=[]):
+    '''
+    Recursively find key values containing wildcards in sub_dict in full_dict
+    and return resolved sub_dict.
+    '''
+    for key in sub_dict.keys():
+        if type(sub_dict[key]) in [dict, OrderedDict]:
+            # Recursively call this function on sub-dicts
+            sub_dict[key] = find_and_substitute_wildcards(
+                sub_dict[key], full_dict, ignored_wildcards)
+
+        elif type(sub_dict[key]) is list:
+            sub_dict_key_values = list(sub_dict[key])
+            # Loop through the list of key's values and substitute each var
+            # in case it contains a wildcard
+            for i in range(len(sub_dict_key_values)):
+                if type(sub_dict_key_values[i]) in [dict, OrderedDict]:
+                    # Recursively call this function on sub-dicts
+                    sub_dict_key_values[i] = \
+                        find_and_substitute_wildcards(sub_dict_key_values[i],
+                                                      full_dict, ignored_wildcards)
+
+                elif type(sub_dict_key_values[i]) is str:
+                    sub_dict_key_values[i] = subst_wildcards(
+                        sub_dict_key_values[i], full_dict, ignored_wildcards)
+
+            # Set the substituted key values back
+            sub_dict[key] = sub_dict_key_values
+
+        elif type(sub_dict[key]) is str:
+            sub_dict[key] = subst_wildcards(sub_dict[key], full_dict,
+                                            ignored_wildcards)
+
+    return sub_dict
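+
+# For example (hypothetical dict): nested dicts, lists and plain strings are
+# all resolved in place.
+#
+#   cfg = {"root": "/tmp", "paths": ["{root}/a", "{root}/b"]}
+#   find_and_substitute_wildcards(cfg, cfg)
+#   # cfg["paths"] is now ["/tmp/a", "/tmp/b"]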