[dvsim] Split Deploy into Deploy and Launcher

In this commit, the act of launching a job and communicating with it
while it is still running is split out into a separate class called
'Launcher'. It maintains the running environment of a dispatched job
(creating dirs, launching the sub-process, providing APIs to
communicate with the running job).

The Deploy class is reduced to just maintaining a generic runnable job.
It creates the launcher as a child object, allowing the Scheduler to
access the launcher's methods. The Deploy class and its extensions are
also significantly refactored (general cleanup) to further simplify how
the instance members are set.
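
For illustration, the Scheduler now drives a job through its launcher
roughly like this (a minimal sketch using names from this patch; the
polling loop is illustrative, not the Scheduler's actual code):

    import time

    item.launcher.launch()
    status = item.launcher.poll()  # 'D' while running, else 'P' / 'F'
    while status == 'D':
        time.sleep(1)
        status = item.launcher.poll()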

The Launcher class is meant to be an abstract class providing
high-level APIs that each variant needs to implement, in addition to
providing general housekeeping functions. The LocalLauncher class
extends it, providing the launching mechanism locally on the user's
workstation (spawning child processes up to a given limit). This will
be followed up by LSFLauncher and GCPLauncher in subsequent commits /
PRs, which can be chosen based on a switch.
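
A new variant would roughly need to implement the abstract hooks below
(a hypothetical sketch - LSFLauncher does not exist yet):

    class LSFLauncher(Launcher):
        def _do_launch(self):
            # E.g. wrap self.deploy.cmd with 'bsub' and record the
            # submitted job's ID from the tool output.
            ...

        def poll(self):
            # Map the LSF job state to 'D' / 'P' / 'F'; on completion,
            # set self.exit_code, use self._has_passed() and invoke
            # self._post_finish(status).
            ...

        def kill(self):
            # E.g. 'bkill <job id>', then self._post_finish('K').
            ...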

The reason for making this split is to cleanly add support for various
backend compute dispatch systems to which jobs can be launched, such as
LSF and GCP.

Signed-off-by: Srikrishna Iyer <sriyer@google.com>
diff --git a/util/dvsim/Deploy.py b/util/dvsim/Deploy.py
index 4ad769f..d6fc4d2 100644
--- a/util/dvsim/Deploy.py
+++ b/util/dvsim/Deploy.py
@@ -3,68 +3,39 @@
 # SPDX-License-Identifier: Apache-2.0
 
 import logging as log
-import os
 import pprint
 import random
-import re
-import shlex
-import shutil
-import subprocess
-import sys
-from datetime import datetime
-from pathlib import Path
 
+from LocalLauncher import LocalLauncher
 from sim_utils import get_cov_summary_table
 from tabulate import tabulate
-from utils import TS_FORMAT, VERBOSE, find_and_substitute_wildcards, rm_path
-
-
-class DeployError(Exception):
-    def __init__(self, msg):
-        self.msg = msg
+from utils import VERBOSE, find_and_substitute_wildcards, rm_path
 
 
 class Deploy():
     """
-    Abstraction for deploying builds and runs.
+    Abstraction to create and maintain a runnable job (builds, runs, etc.).
     """
 
-    # Misc common deploy settings.
-    max_odirs = 5
+    # Indicate the target for each sub-class.
+    target = None
 
     # List of variable names that are to be treated as "list of commands".
-    # This tells `construct_cmd` that these vars are lists that need to
+    # This tells '_construct_cmd' that these vars are lists that need to
     # be joined with '&&' instead of a space.
     cmds_list_vars = []
 
     def __str__(self):
         return (pprint.pformat(self.__dict__)
-                if log.getLogger().isEnabledFor(VERBOSE) else self.cmd)
+                if log.getLogger().isEnabledFor(VERBOSE) else self.full_name)
 
     def __init__(self, sim_cfg):
-        '''Initialize common class members.'''
+        assert self.target is not None
 
         # Cross ref the whole cfg object for ease.
         self.sim_cfg = sim_cfg
 
-        # Common vars
-        self.identifier = ""
-        self.cmd = ""
-        self.odir = ""
-        self.log = ""
-        self.fail_msg = ""
-
-        # Flag to indicate whether to 'overwrite' if odir already exists,
-        # or to backup the existing one and create a new one.
-        # For builds, we want to overwrite existing to leverage the tools'
-        # incremental / partition compile features. For runs, we may want to
-        # create a new one.
-        self.renew_odir = False
-
-        # List of vars required to be exported to sub-shell
-        self.exports = None
-
-        # A list of jobs on which this job depends
+        # A list of jobs on which this job depends.
         self.dependencies = []
 
         # Indicates whether running this job requires all dependencies to pass.
@@ -72,19 +43,45 @@
         # this current job to run
         self.needs_all_dependencies_passing = True
 
-        # Process
-        self.process = None
-        self.log_fd = None
+        # Declare attributes that need to be extracted from the HJson cfg.
+        self._define_attrs()
 
-        # These are mandatory class attributes that need to be extracted and
-        # set from the sim_cfg object. These are explicitly used to construct
-        # the command for deployment.
+        # Set class instance attributes.
+        self._set_attrs()
+
+        # Check if all attributes that are needed are set.
+        self._check_attrs()
+
+        # Do variable substitutions.
+        self._subst_vars()
+
+        # List of vars required to be exported to sub-shell, as a dict.
+        self.exports = self._process_exports()
+
+        # Construct the job's command.
+        self.cmd = self._construct_cmd()
+
+        # Create the launcher object. Launcher retains the handle to self for
+        # lookup & callbacks.
+        self.launcher = LocalLauncher(self)
+
+    def _define_attrs(self):
+        """Defines the attributes this instance needs to have.
+
+        These attributes are extracted from the Mode object / HJson config with
+        which this instance is created. There are two types of attributes -
+        one contributes to the generation of the command directly; the other
+        provides supplementary information pertaining to the job, such as
+        patterns that determine whether it passed or failed. These are
+        represented as dicts, whose boolean values indicate whether the
+        extraction was successful.
+        """
+        # These attributes are explicitly used to construct the job command.
         self.mandatory_cmd_attrs = {}
 
-        # These are mandatory class attributes that also need to be extracted
-        # and set from the sim_cfg object. Some of these contribute to the
-        # construction of the command. Others are used to determine pass / fail
-        # conditions.
+        # These attributes may indirectly contribute to the construction of the
+        # command (through substitution vars) or other things such as pass /
+        # fail patterns.
         self.mandatory_misc_attrs = {
             "name": False,
             "build_mode": False,
@@ -94,13 +91,13 @@
         }
 
     # Function to parse a dict and extract the mandatory cmd and misc attrs.
-    def parse_dict(self, ddict):
-        if not hasattr(self, "target"):
-            log.error(
-                "Class %s does not have the mandatory attribute \"target\" defined",
-                self.__class__.__name__)
-            sys.exit(1)
+    def _extract_attrs(self, ddict):
+        """Extracts the attributes from the supplied dict.
 
+        'ddict' is typically either the Mode object or the entire config
+        object's dict. It is used to retrieve the instance attributes defined
+        in 'mandatory_cmd_attrs' and 'mandatory_misc_attrs'.
+        """
         ddict_keys = ddict.keys()
         for key in self.mandatory_cmd_attrs.keys():
             if self.mandatory_cmd_attrs[key] is False:
@@ -114,48 +111,63 @@
                     setattr(self, key, ddict[key])
                     self.mandatory_misc_attrs[key] = True
 
-    def __post_init__(self):
-        # Ensure all mandatory attrs are set
+    def _set_attrs(self):
+        """Sets additional attributes.
+
+        Invokes '_extract_attrs()' to read in all the necessary instance
+        attributes. Based on those, some additional instance attributes may
+        be derived. Those are set by this method.
+        """
+        self._extract_attrs(self.sim_cfg.__dict__)
+
+        # Output directory where the artifacts go (used by the launcher).
+        self.odir = getattr(self, self.target + "_dir")
+
+        # Qualified name disambiguates the instance name with other instances
+        # of the same class (example: 'uart_smoke' reseeded multiple times
+        # needs to be disambiguated using the index -> '0.uart_smoke').
+        self.qual_name = self.name
+
+        # Full name disambiguates across multiple cfg being run (example:
+        # 'aes:default', 'uart:default' builds).
+        self.full_name = self.sim_cfg.name + ":" + self.qual_name
+
+        # Pass and fail patterns.
+        self.pass_patterns = []
+        self.fail_patterns = []
+
+    def _check_attrs(self):
+        """Checks if all required class attributes are set.
+
+        Invoked in __init__() after all attributes are extracted and set.
+        """
         for attr in self.mandatory_cmd_attrs.keys():
             if self.mandatory_cmd_attrs[attr] is False:
-                log.error("Attribute \"%s\" not found for \"%s\".", attr,
-                          self.name)
-                sys.exit(1)
+                raise AttributeError("Attribute {!r} not found for "
+                                     "{!r}.".format(attr, self.name))
 
         for attr in self.mandatory_misc_attrs.keys():
             if self.mandatory_misc_attrs[attr] is False:
-                log.error("Attribute \"%s\" not found for \"%s\".", attr,
-                          self.name)
-                sys.exit(1)
+                raise AttributeError("Attribute {!r} not found for "
+                                     "{!r}.".format(attr, self.name))
 
-        # Recursively search and replace wildcards
-        # First pass: search within self dict. We ignore errors since some
-        # substitions may be available in the second pass.
+    def _subst_vars(self, ignored_subst_vars=[]):
+        """Recursively search and replace substitution variables.
+
+        First pass: search within self dict. We ignore errors since some
+        substitutions may be available in the second pass. Second pass: search
+        the entire sim_cfg object."""
+
         self.__dict__ = find_and_substitute_wildcards(self.__dict__,
-                                                      self.__dict__, [], True)
-
-        # Second pass: search in sim_cfg dict, this time not ignoring errors.
+                                                      self.__dict__,
+                                                      ignored_subst_vars, True)
         self.__dict__ = find_and_substitute_wildcards(self.__dict__,
                                                       self.sim_cfg.__dict__,
-                                                      [], False)
-
-        # Set identifier.
-        self.identifier = self.sim_cfg.name + ":" + self.name
-
-        # Set the command, output dir and log
-        self.odir = getattr(self, self.target + "_dir")
-        # Set the output dir link name to the basename of odir (by default)
-        self.odir_ln = os.path.basename(os.path.normpath(self.odir))
-        self.log = self.odir + "/" + self.target + ".log"
-
-        # Make exports more easily mergeable with the current process' env.
-        self._process_exports()
-
-        # If using LSF, redirect stdout and err to the log file
-        self.cmd = self.construct_cmd()
+                                                      ignored_subst_vars,
+                                                      False)
 
     def _process_exports(self):
-        '''Convert 'exports' as a list of dicts in the HJson to a dict.
+        """Convert 'exports' as a list of dicts in the HJson to a dict.
 
         Exports is a list of key-value pairs that are to be exported to the
         subprocess' environment so that the tools can lookup those options.
@@ -163,22 +175,13 @@
         HJson member cannot be an object). This method converts a list of dicts
         into a dict variable, which makes it easy to merge the list of exports
         with the subprocess' env where the ASIC tool is invoked.
-        '''
-        exports_dict = {}
-        if self.exports:
-            try:
-                exports_dict = {
-                    k: str(v)
-                    for item in self.exports for k, v in item.items()
-                }
-            except ValueError as e:
-                log.error(
-                    "%s: exports: \'%s\' Exports key must be a list of dicts!",
-                    e, str(self.exports))
-                sys.exit(1)
-        self.exports = exports_dict
+        """
 
-    def construct_cmd(self):
+        return {k: str(v) for item in self.exports for k, v in item.items()}
+
+    def _construct_cmd(self):
+        """Construct the command that will eventually be launched."""
+
         cmd = "make -f " + self.flow_makefile + " " + self.target
         if self.dry_run is True:
             cmd += " -n"
@@ -200,15 +203,15 @@
         return cmd
 
     def is_equivalent_job(self, item):
-        '''Checks if job that would be dispatched with `item` is equivalent to
-        `self`.
+        """Checks if job that would be dispatched with 'item' is equivalent to
+        'self'.
 
-        Determines if `item` and `self` would behave exactly the same way when
+        Determines if 'item' and 'self' would behave exactly the same way when
         deployed. If so, then there is no point in keeping both. The caller can
-        choose to discard `item` and pick `self` instead. To do so, we check
-        the final resolved `cmd` & the exports. The `name` field will be unique
-        to `item` and `self`, so we take that out of the comparison.
-        '''
+        choose to discard 'item' and pick 'self' instead. To do so, we check
+        the final resolved 'cmd' & the exports. The 'name' field will be unique
+        to 'item' and 'self', so we take that out of the comparison.
+        """
         if type(self) != type(item):
             return False
 
@@ -233,279 +236,38 @@
                 item.name, self.name)
         return True
 
-    def dispatch_cmd(self):
-        # Update the shell's env vars with self.exports. Values in exports must
-        # replace the values in the shell's env vars if the keys match.
-        exports = os.environ.copy()
-        exports.update(self.exports)
+    def pre_launch(self):
+        """Callback to perform additional pre-launch activities.
 
-        # Clear the magic MAKEFLAGS variable from exports if necessary. This
-        # variable is used by recursive Make calls to pass variables from one
-        # level to the next. Here, self.cmd is a call to Make but it's
-        # logically a top-level invocation: we don't want to pollute the flow's
-        # Makefile with Make variables from any wrapper that called dvsim.
-        if 'MAKEFLAGS' in exports:
-            del exports['MAKEFLAGS']
-
-        args = shlex.split(self.cmd)
-        try:
-            # If renew_odir flag is True - then move it.
-            if self.renew_odir:
-                self.odir_limiter(odir=self.odir)
-            os.makedirs(self.odir, exist_ok=True)
-            # Dump all env variables for ease of debug.
-            with open(self.odir + "/env_vars",
-                      "w",
-                      encoding="UTF-8",
-                      errors="surrogateescape") as f:
-                for var in sorted(exports.keys()):
-                    f.write("{}={}\n".format(var, exports[var]))
-                f.close()
-            self._link_odir("D")
-            f = open(self.log, "w", encoding="UTF-8", errors="surrogateescape")
-            f.write("[Executing]:\n{}\n\n".format(self.cmd))
-            f.flush()
-            self.process = subprocess.Popen(args,
-                                            bufsize=4096,
-                                            universal_newlines=True,
-                                            stdout=f,
-                                            stderr=f,
-                                            env=exports)
-            self.log_fd = f
-        except IOError:
-            if self.log_fd:
-                self.log_fd.close()
-            raise DeployError('IO Error: See {}'.format(self.log))
-
-    def odir_limiter(self, odir):
-        """Clean previous output directories.
-
-        When running jobs, we may want to maintain a limited history of
-        previous invocations. This method finds and deletes the output
-        directories at the base of input arg 'odir' with the oldest timestamps,
-        if that limit is reached. It returns a list of directories that
-        remain after deletion.
+        This is invoked by Launcher._pre_launch().
         """
+        pass
 
-        if os.path.exists(odir):
-            # If output directory exists, back it up.
-            ts = datetime.fromtimestamp(os.stat(odir).st_ctime)
-            ts = ts.strftime(TS_FORMAT)
-            shutil.move(odir, odir + "_" + ts)
+    def post_finish(self, status):
+        """Callback to perform additional post-finish activities.
 
-        # Get list of past output directories sorted by creation time.
-        pdir = Path(odir).resolve().parent
-        if not pdir.exists():
-            return []
-
-        dirs = sorted([old for old in pdir.iterdir() if old.is_dir()],
-                      key=os.path.getctime,
-                      reverse=True)
-
-        for old in dirs[self.max_odirs - 1:]:
-            rm_path(old)
-
-        return dirs[0:self.max_odirs - 2]
-
-    def _test_passed(self):
-        """Determine the outcome of the job (P/F if it ran to completion).
-
-        Return True if the job passed, False otherwise. This is called by
-        poll() just after the job finishes.
-
+        This is invoked by Launcher._post_finish().
         """
-        def log_fail_msg(msg):
-            '''Logs the fail msg to the final report.'''
-            self.fail_msg += msg
-            log.log(VERBOSE, msg)
+        pass
 
-        def _find_patterns(patterns, line):
-            '''Helper function that returns true if all or any of the given
-            patterns is found, else False.'''
+    def get_log_path(self):
+        """Returns the log file path."""
 
-            assert patterns
-            for pattern in patterns:
-                match = re.search(r"{}".format(pattern), line)
-                if match:
-                    return pattern
-            return None
-
-        def _get_n_lines(pos, num):
-            "Helper function that returns next N lines starting at pos index."
-
-            return ''.join(lines[pos:pos + num - 1]).strip()
-
-        if self.dry_run:
-            return True
-
-        # Only one fail pattern needs to be seen.
-        failed = False
-        chk_failed = bool(self.fail_patterns)
-
-        # All pass patterns need to be seen, so we replicate the list and remove
-        # patterns as we encounter them.
-        pass_patterns = self.pass_patterns.copy()
-        chk_passed = bool(pass_patterns) and (self.process.returncode == 0)
-
-        try:
-            with open(self.log, "r", encoding="UTF-8") as f:
-                lines = f.readlines()
-        except OSError as e:
-            log_fail_msg("Error opening file {!r}:\n{}".format(self.log, e))
-            return False
-
-        if chk_failed or chk_passed:
-            for cnt, line in enumerate(lines):
-                if chk_failed:
-                    if _find_patterns(self.fail_patterns, line) is not None:
-                        # Print 4 additional lines to help debug more easily.
-                        log_fail_msg("```\n{}\n```\n".format(
-                            _get_n_lines(cnt, 5)))
-                        failed = True
-                        chk_failed = False
-                        chk_passed = False
-
-                if chk_passed:
-                    pattern = _find_patterns(pass_patterns, line)
-                    if pattern is not None:
-                        pass_patterns.remove(pattern)
-                        chk_passed = bool(pass_patterns)
-
-        # If failed, then nothing else to do. Just return.
-        if failed:
-            return False
-
-        # If no fail patterns were seen, but the job returned with non-zero
-        # exit code for whatever reason, then show the last 10 lines of the log
-        # as the failure message, which might help with the debug.
-        if self.process.returncode != 0:
-            msg = ''.join(lines[-10:]).strip()
-            log_fail_msg("Process returned non-zero exit code. "
-                         "Last 10 lines:\n```\n{}\n```\n".format(msg))
-            return False
-
-        # Ensure all pass patterns were seen.
-        if chk_passed:
-            msg = ''.join(lines[-10:]).strip()
-            log_fail_msg("One or more pass patterns not found:\n{}\n"
-                         "Last 10 lines:\n```\n{}\n```\n".format(
-                             pass_patterns, msg))
-            return False
-
-        return True
-
-    def _link_odir(self, status):
-        '''Soft-links the job's directory based on job's status, into
-        dispatched, running, passed, failed or killed directories in the
-        scratch area.'''
-
-        dest = Path(self.sim_cfg.links[status], self.odir_ln)
-
-        # If dest exists, then atomically remove it and link the odir again.
-        while True:
-            try:
-                os.symlink(self.odir, dest)
-                break
-            except FileExistsError:
-                rm_path(dest)
-
-        # Delete the symlink from dispatched directory if it exists.
-        if status != "D":
-            old = Path(self.sim_cfg.links['D'], self.odir_ln)
-            rm_path(old)
-
-    def _on_finish(self, status):
-        '''Called when the process finishes or is killed'''
-        assert status in ['P', 'F', 'K']
-        if status in ['P', 'F']:
-            self._link_odir(status)
-
-    def poll(self):
-        '''Check status of the running process
-
-        This returns 'D', 'P' or 'F'. If 'D', the job is still running. If 'P',
-        the job finished successfully. If 'F', the job finished with an error.
-
-        This function must only be called after running self.dispatch_cmd() and
-        must not be called again once it has returned 'P' or 'F'.
-
-        '''
-        assert self.process is not None
-        if self.process.poll() is None:
-            return 'D'
-        self.log_fd.close()
-
-        status = 'P' if self._test_passed() else 'F'
-
-        log.debug("Item %s has completed execution: %s", self.name, status)
-        self._on_finish(status)
-
-        del self.process
-        self.process = None
-
-        return status
-
-    def kill(self):
-        '''Kill the running process.
-
-        This must be called between dispatching and reaping the process (the
-        same window as poll()).
-
-        '''
-        assert self.process is not None
-        self.kill_remote_job()
-
-        # Try to kill the running process. Send SIGTERM first, wait a bit,
-        # and then send SIGKILL if it didn't work.
-        self.process.terminate()
-        try:
-            self.process.wait(timeout=2)
-        except subprocess.TimeoutExpired:
-            self.process.kill()
-
-        if self.log_fd:
-            self.log_fd.close()
-        self.process = None
-        self._on_finish('K')
-
-    def kill_remote_job(self):
-        '''
-        If jobs are run in remote server, need to use another command to kill them.
-        '''
-        # TODO: Currently only support lsf, may need to add support for GCP later.
-
-        # If use lsf, kill it by job ID.
-        if re.match("^bsub", self.sim_cfg.job_prefix):
-            # get job id from below string
-            # Job <xxxxxx> is submitted to default queue
-            grep_cmd = "grep -m 1 -E \'" + "^Job <" + "\' " + self.log
-            (status, rslt) = subprocess.getstatusoutput(grep_cmd)
-            if rslt != "":
-                job_id = rslt.split('Job <')[1].split('>')[0]
-                try:
-                    subprocess.run(["bkill", job_id], check=True)
-                except Exception as e:
-                    log.error("%s: Failed to run bkill\n", e)
+        return "{}/{}.log".format(self.odir, self.target)
 
 
 class CompileSim(Deploy):
-    """
-    Abstraction for building the simulation executable.
-    """
+    """Abstraction for building the simulation executable."""
 
-    # Register all builds with the class
-    items = []
-
+    target = "build"
     cmds_list_vars = ["pre_build_cmds", "post_build_cmds"]
 
     def __init__(self, build_mode, sim_cfg):
-        # Initialize common vars.
+        self.build_mode_obj = build_mode
         super().__init__(sim_cfg)
 
-        self.target = "build"
-        self.pass_patterns = []
-        self.fail_patterns = []
-
+    def _define_attrs(self):
+        super()._define_attrs()
         self.mandatory_cmd_attrs.update({
             # tool srcs
             "proj_root": False,
@@ -529,45 +291,32 @@
             "build_fail_patterns": False
         })
 
-        super().parse_dict(build_mode.__dict__)
-        # Call this method again with the sim_cfg dict passed as the object,
-        # since it may contain additional mandatory attrs.
-        super().parse_dict(sim_cfg.__dict__)
+    def _set_attrs(self):
+        super()._extract_attrs(self.build_mode_obj.__dict__)
+        super()._set_attrs()
+
+        # 'build_mode' is used as a substitution variable in the HJson.
         self.build_mode = self.name
         self.pass_patterns = self.build_pass_patterns
         self.fail_patterns = self.build_fail_patterns
-        self.__post_init__()
 
-        # Start fail message construction
-        self.fail_msg = "\n**BUILD:** {}<br>\n".format(self.name)
-        log_sub_path = self.log.replace(self.sim_cfg.scratch_path + '/', '')
-        self.fail_msg += "**LOG:** $scratch_path/{}<br>\n".format(log_sub_path)
-
-        CompileSim.items.append(self)
-
-    def dispatch_cmd(self):
+    def pre_launch(self):
         # Delete old coverage database directories before building again. We
         # need to do this because the build directory is not 'renewed'.
         rm_path(self.cov_db_dir)
-        super().dispatch_cmd()
 
 
 class CompileOneShot(Deploy):
-    """
-    Abstraction for building the simulation executable.
-    """
+    """Abstraction for building the design (used by non-DV flows)."""
 
-    # Register all builds with the class
-    items = []
+    target = "build"
 
     def __init__(self, build_mode, sim_cfg):
-        # Initialize common vars.
+        self.build_mode_obj = build_mode
         super().__init__(sim_cfg)
 
-        self.target = "build"
-        self.pass_patterns = []
-        self.fail_patterns = []
-
+    def _define_attrs(self):
+        super()._define_attrs()
         self.mandatory_cmd_attrs.update({
             # tool srcs
             "proj_root": False,
@@ -590,43 +339,36 @@
             "report_opts": False
         })
 
-        super().parse_dict(build_mode.__dict__)
-        # Call this method again with the sim_cfg dict passed as the object,
-        # since it may contain additional mandatory attrs.
-        super().parse_dict(sim_cfg.__dict__)
+    def _set_attrs(self):
+        super()._extract_attrs(self.build_mode_obj.__dict__)
+        super()._set_attrs()
+
+        # 'build_mode' is used as a substitution variable in the HJson.
         self.build_mode = self.name
-        self.__post_init__()
-
-        # Start fail message construction
-        self.fail_msg = "\n**BUILD:** {}<br>\n".format(self.name)
-        log_sub_path = self.log.replace(self.sim_cfg.scratch_path + '/', '')
-        self.fail_msg += "**LOG:** $scratch_path/{}<br>\n".format(log_sub_path)
-
-        CompileOneShot.items.append(self)
 
 
 class RunTest(Deploy):
-    """
-    Abstraction for running tests. This is one per seed for each test.
-    """
+    """Abstraction for running tests. This is one per seed for each test."""
 
+    target = "run"
+
     # Initial seed values when running tests (if available).
     seeds = []
     fixed_seed = None
-
-    # Register all runs with the class
-    items = []
-
     cmds_list_vars = ["pre_run_cmds", "post_run_cmds"]
 
     def __init__(self, index, test, build_job, sim_cfg):
-        # Initialize common vars.
+        self.test_obj = test
+        self.index = index
+        self.seed = RunTest.get_seed()
         super().__init__(sim_cfg)
 
-        self.target = "run"
-        self.pass_patterns = []
-        self.fail_patterns = []
+        if build_job is not None:
+            self.dependencies.append(build_job)
 
+        self.launcher.renew_odir = True
+
+    def _define_attrs(self):
+        super()._define_attrs()
         self.mandatory_cmd_attrs.update({
             # tool srcs
             "proj_root": False,
@@ -650,40 +392,19 @@
             "run_fail_patterns": False
         })
 
-        if build_job is not None:
-            self.dependencies.append(build_job)
+    def _set_attrs(self):
+        super()._extract_attrs(self.test_obj.__dict__)
+        super()._set_attrs()
 
-        self.index = index
-        self.seed = RunTest.get_seed()
-
-        super().parse_dict(test.__dict__)
-        # Call this method again with the sim_cfg dict passed as the object,
-        # since it may contain additional mandatory attrs.
-        super().parse_dict(sim_cfg.__dict__)
+        # 'test' is used as a substitution variable in the HJson.
         self.test = self.name
-        self.renew_odir = True
-        self.build_mode = test.build_mode.name
+        self.build_mode = self.test_obj.build_mode.name
+        self.qual_name = self.run_dir_name + "." + str(self.seed)
+        self.full_name = self.sim_cfg.name + ":" + self.qual_name
         self.pass_patterns = self.run_pass_patterns
         self.fail_patterns = self.run_fail_patterns
-        self.__post_init__()
-        # For output dir link, use run_dir_name instead.
-        self.odir_ln = self.run_dir_name
 
-        # Start fail message construction
-        self.fail_msg = "\n**TEST:** {}, ".format(self.name)
-        self.fail_msg += "**SEED:** {}<br>\n".format(self.seed)
-        log_sub_path = self.log.replace(self.sim_cfg.scratch_path + '/', '')
-        self.fail_msg += "**LOG:** $scratch_path/{}<br>\n".format(log_sub_path)
-
-        RunTest.items.append(self)
-
-    def __post_init__(self):
-        super().__post_init__()
-        # Set identifier.
-        self.identifier = self.sim_cfg.name + ":" + self.run_dir_name
-
-    def _on_finish(self, status):
-        super()._on_finish(status)
+    def post_finish(self, status):
         if status != 'P':
             # Delete the coverage data if available.
             rm_path(self.cov_db_test_dir)
@@ -703,18 +424,15 @@
 
 
 class CovUnr(Deploy):
-    """
-    Abstraction for coverage UNR flow.
-    """
+    """Abstraction for coverage UNR flow."""
 
-    # Register all builds with the class
-    items = []
+    target = "cov_unr"
 
     def __init__(self, sim_cfg):
-        # Initialize common vars.
         super().__init__(sim_cfg)
 
-        self.target = "cov_unr"
+    def _define_attrs(self):
+        super()._define_attrs()
         self.mandatory_cmd_attrs.update({
             # tool srcs
             "proj_root": False,
@@ -735,45 +453,51 @@
             "build_fail_patterns": False
         })
 
-        super().parse_dict(sim_cfg.__dict__)
-        self.__post_init__()
+    def _set_attrs(self):
+        super()._set_attrs()
+        self.qual_name = self.target
+        self.full_name = self.sim_cfg.name + ":" + self.qual_name
 
-        self.pass_patterns = []
-        # Reuse fail_patterns from sim build
+        # Reuse the build_fail_patterns set in the HJson.
         self.fail_patterns = self.build_fail_patterns
 
-        # Start fail message construction
-        self.fail_msg = "\n**COV_UNR:** {}<br>\n".format(self.name)
-        log_sub_path = self.log.replace(self.sim_cfg.scratch_path + '/', '')
-        self.fail_msg += "**LOG:** $scratch_path/{}<br>\n".format(log_sub_path)
-
-        CovUnr.items.append(self)
-
 
 class CovMerge(Deploy):
-    """
-    Abstraction for merging coverage databases. An item of this class is created AFTER
-    the regression is completed.
-    """
+    """Abstraction for merging coverage databases."""
 
-    # Register all builds with the class
-    items = []
+    target = "cov_merge"
 
     def __init__(self, run_items, sim_cfg):
-        # Initialize common vars.
+        # Construct the cov_db_dirs right away from the run_items. This is a
+        # special variable used in the HJson.
+        self.cov_db_dirs = []
+        for run in run_items:
+            if run.cov_db_dir not in self.cov_db_dirs:
+                self.cov_db_dirs.append(run.cov_db_dir)
+
         super().__init__(sim_cfg)
 
         self.dependencies += run_items
+        # Run coverage merge even if one test passes.
         self.needs_all_dependencies_passing = False
 
-        self.target = "cov_merge"
-        self.pass_patterns = []
-        self.fail_patterns = []
+        # TODO: need to move this up.
+        # Prune previous merged cov directories.
+        prev_cov_db_dirs = self.launcher.clean_odirs(
+            odir=self.cov_merge_db_dir)
 
-        # Construct local 'special' variable from cov directories that need to
-        # be merged.
-        self.cov_db_dirs = ""
+        # If a merged cov database exists from a previous run, then consider
+        # that as well for merging, if the --cov-merge-previous command line
+        # switch is passed.
+        if self.sim_cfg.cov_merge_previous:
+            self.cov_db_dirs += [str(item) for item in prev_cov_db_dirs]
 
+        # Append cov_db_dirs to the list of exports.
+        self.exports["cov_db_dirs"] = "\"{}\"".format(" ".join(
+            self.cov_db_dirs))
+
+    def _define_attrs(self):
+        super()._define_attrs()
         self.mandatory_cmd_attrs.update({
             "cov_merge_cmd": False,
             "cov_merge_opts": False
@@ -784,83 +508,26 @@
             "cov_merge_db_dir": False
         })
 
-        super().parse_dict(sim_cfg.__dict__)
-        self.__post_init__()
+    def _set_attrs(self):
+        super()._set_attrs()
+        self.qual_name = self.target
+        self.full_name = self.sim_cfg.name + ":" + self.qual_name
 
-        # Override standard output and log patterns.
+        # For merging coverage db, the precise output dir is set in the HJson.
         self.odir = self.cov_merge_db_dir
-        self.odir_ln = os.path.basename(os.path.normpath(self.odir))
-
-        # Start fail message construction
-        self.fail_msg = "\n**COV_MERGE:** {}<br>\n".format(self.name)
-        log_sub_path = self.log.replace(self.sim_cfg.scratch_path + '/', '')
-        self.fail_msg += "**LOG:** $scratch_path/{}<br>\n".format(log_sub_path)
-
-        CovMerge.items.append(self)
-
-    def __post_init__(self):
-        # Extract cov db dirs from all the sim runs.
-        for item in self.dependencies:
-            if item.target == "run":
-                if item.cov_db_dir not in self.cov_db_dirs:
-                    self.cov_db_dirs += item.cov_db_dir + " "
-
-        # Recursively search and replace wildcards, ignoring cov_db_dirs.
-        # We need to resolve it later based on cov_db_dirs value set below.
-
-        # First pass: search within self dict. We ignore errors since some
-        # substitions may be available in the second pass.
-        self.__dict__ = find_and_substitute_wildcards(
-            self.__dict__,
-            self.__dict__,
-            ignored_wildcards=["cov_db_dirs"],
-            ignore_error=True)
-
-        # Second pass: search in sim_cfg dict, this time not ignoring errors.
-        self.__dict__ = find_and_substitute_wildcards(
-            self.__dict__,
-            self.sim_cfg.__dict__,
-            ignored_wildcards=["cov_db_dirs"],
-            ignore_error=False)
-
-        # Call base class __post_init__ to do checks and substitutions
-        super().__post_init__()
-
-        # Prune previous merged cov directories.
-        prev_cov_db_dirs = self.odir_limiter(odir=self.cov_merge_db_dir)
-
-        # If a merged cov data base exists from a previous run, then consider
-        # that as well for merging, if the --cov-merge-previous command line
-        # switch is passed.
-        if self.sim_cfg.cov_merge_previous:
-            self.cov_db_dirs += " ".join(
-                [str(item) for item in prev_cov_db_dirs])
-
-        # Append cov_db_dirs to the list of exports.
-        self.exports["cov_db_dirs"] = "\"{}\"".format(self.cov_db_dirs)
 
 
 class CovReport(Deploy):
-    """
-    Abstraction for coverage report generation. An item of this class is created AFTER
-    the regression is completed.
-    """
+    """Abstraction for coverage report generation. """
 
-    # Register all builds with the class
-    items = []
+    target = "cov_report"
 
     def __init__(self, merge_job, sim_cfg):
-        # Initialize common vars.
         super().__init__(sim_cfg)
-
         self.dependencies.append(merge_job)
 
-        self.target = "cov_report"
-        self.pass_patterns = []
-        self.fail_patterns = []
-        self.cov_total = ""
-        self.cov_results = ""
-
+    def _define_attrs(self):
+        super()._define_attrs()
         self.mandatory_cmd_attrs.update({
             "cov_report_cmd": False,
             "cov_report_opts": False
@@ -872,30 +539,31 @@
             "cov_report_txt": False
         })
 
-        super().parse_dict(sim_cfg.__dict__)
-        self.__post_init__()
+    def _set_attrs(self):
+        super()._set_attrs()
+        self.qual_name = self.target
+        self.full_name = self.sim_cfg.name + ":" + self.qual_name
 
-        # Start fail message construction
-        self.fail_msg = "\n**COV_REPORT:** {}<br>\n".format(self.name)
-        log_sub_path = self.log.replace(self.sim_cfg.scratch_path + '/', '')
-        self.fail_msg += "**LOG:** $scratch_path/{}<br>\n".format(log_sub_path)
+        # Keep track of coverage results, once the job is finished.
+        self.cov_total = ""
+        self.cov_results = ""
 
-        CovReport.items.append(self)
+    def post_finish(self, status):
+        """Extract the coverage results summary for the dashboard.
 
-    def _test_passed(self):
-        # Add an extra check to Deploy._test_passed where we extract the
-        # coverage results summary for the dashboard (and fail the job if
-        # something goes wrong).
-        if not super()._test_passed():
-            return False
+        If that fails for some reason, report the job as a failure.
+        """
+
+        if self.dry_run or status != 'P':
+            return
 
         results, self.cov_total, ex_msg = get_cov_summary_table(
             self.cov_report_txt, self.sim_cfg.tool)
 
         if ex_msg:
-            self.fail_msg += ex_msg
+            self.launcher.fail_msg += ex_msg
             log.error(ex_msg)
-            return False
+            return
 
         # Succeeded in obtaining the coverage data.
         colalign = (("center", ) * len(results[0]))
@@ -905,26 +573,19 @@
                                     colalign=colalign)
 
         # Delete the cov report - not needed.
-        rm_path(self.log)
-        return True
+        rm_path(self.get_log_path())
 
 
 class CovAnalyze(Deploy):
-    """
-    Abstraction for coverage analysis tool.
-    """
+    """Abstraction for running the coverage analysis tool."""
 
-    # Register all builds with the class
-    items = []
+    target = "cov_analyze"
 
     def __init__(self, sim_cfg):
-        # Initialize common vars.
         super().__init__(sim_cfg)
 
-        self.target = "cov_analyze"
-        self.pass_patterns = []
-        self.fail_patterns = []
-
+    def _define_attrs(self):
+        super()._define_attrs()
         self.mandatory_cmd_attrs.update({
             # tool srcs
             "proj_root": False,
@@ -937,12 +598,7 @@
             "cov_merge_db_dir": False
         })
 
-        super().parse_dict(sim_cfg.__dict__)
-        self.__post_init__()
-
-        # Start fail message construction
-        self.fail_msg = "\n**COV_ANALYZE:** {}<br>\n".format(self.name)
-        log_sub_path = self.log.replace(self.sim_cfg.scratch_path + '/', '')
-        self.fail_msg += "**LOG:** $scratch_path/{}<br>\n".format(log_sub_path)
-
-        CovAnalyze.items.append(self)
+    def _set_attrs(self):
+        super()._set_attrs()
+        self.qual_name = self.target
+        self.full_name = self.sim_cfg.name + ":" + self.qual_name
diff --git a/util/dvsim/Launcher.py b/util/dvsim/Launcher.py
new file mode 100644
index 0000000..fff1891
--- /dev/null
+++ b/util/dvsim/Launcher.py
@@ -0,0 +1,263 @@
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+
+import logging as log
+import os
+import re
+import shutil
+from datetime import datetime
+from pathlib import Path
+
+from utils import TS_FORMAT, VERBOSE, rm_path
+
+
+class LauncherError(Exception):
+    def __init__(self, msg):
+        self.msg = msg
+
+
+class Launcher:
+    """
+    Abstraction for launching and maintaining a job.
+
+    An abstract class that provides methods to prepare a job's environment,
+    launch the job, poll for its completion and finally do some cleanup
+    activities. This class is not meant to be instantiated directly. Each
+    launcher object holds an instance of the deploy object.
+    """
+
+    # If a history of previous invocations is to be maintained, then keep no
+    # more than this many directories.
+    max_odirs = 5
+
+    def __str__(self):
+        return self.deploy.full_name + ":launcher"
+
+    def __init__(self, deploy):
+        # Store the deploy object handle.
+        self.deploy = deploy
+
+        # Return status of the process running the job.
+        self.exit_code = None
+
+        # Flag to indicate whether to 'overwrite' if odir already exists,
+        # or to backup the existing one and create a new one.
+        # For builds, we want to overwrite existing to leverage the tools'
+        # incremental / partition compile features. For runs, we may want to
+        # create a new one.
+        self.renew_odir = False
+
+        # Construct failure message if the test fails.
+        self.fail_msg = "\n**{!r}:** {!r}<br>\n".format(
+            self.deploy.target.upper(), self.deploy.qual_name)
+        self.fail_msg += "**LOG:** {}<br>\n".format(self.deploy.get_log_path())
+
+    def _make_odir(self):
+        """Create the output directory."""
+
+        # If the renew_odir flag is True, back up the existing directory.
+        if self.renew_odir:
+            self.clean_odirs(odir=self.deploy.odir)
+        os.makedirs(self.deploy.odir, exist_ok=True)
+
+    def _dump_env_vars(self, exports):
+        """Write env vars to a file for ease of debug.
+
+        Each launcher variant computes the list of exports and invokes this
+        method right before launching the job.
+        """
+
+        with open(self.deploy.odir + "/env_vars",
+                  "w",
+                  encoding="UTF-8",
+                  errors="surrogateescape") as f:
+            for var in sorted(exports.keys()):
+                f.write("{}={}\n".format(var, exports[var]))
+
+    def _pre_launch(self):
+        """Do pre-launch activities.
+
+        Examples include preparing the job's environment, clearing old
+        runs, creating the output directory, dumping all env variables,
+        etc. This method is invoked by launch() as the first step.
+        """
+
+        self.deploy.pre_launch()
+        self._make_odir()
+
+    def _do_launch(self):
+        """Launch the job."""
+
+        raise NotImplementedError()
+
+    def launch(self):
+        """Launch the job."""
+
+        self._pre_launch()
+        self._do_launch()
+
+    def _post_finish(self, status):
+        """Do post-completion activities, such as preparing the results.
+
+        Must be invoked by poll().
+        """
+
+        assert status in ['P', 'F', 'K']
+        if status in ['P', 'F']:
+            self._link_odir(status)
+        self.deploy.post_finish(status)
+        log.debug("Item %s has completed execution: %s", self, status)
+
+    def poll(self):
+        """Poll the launched job for completion.
+
+        Invokes _has_passed() and _post_finish() when the job completes.
+        """
+
+        raise NotImplementedError()
+
+    def kill(self):
+        """Terminate the job."""
+
+        raise NotImplementedError()
+
+    def _has_passed(self):
+        """Determine the outcome of the job (P/F if it ran to completion).
+
+        Return True if the job passed, False otherwise. This is called by
+        poll() just after the job finishes.
+        """
+        def log_fail_msg(msg):
+            """Logs the fail msg to the final report."""
+
+            self.fail_msg += msg
+            log.log(VERBOSE, msg)
+
+        def _find_patterns(patterns, line):
+            """Helper function that returns the pattern if any of the given
+            patterns is found, else None."""
+
+            assert patterns
+            for pattern in patterns:
+                match = re.search(r"{}".format(pattern), line)
+                if match:
+                    return pattern
+            return None
+
+        def _get_n_lines(pos, num):
+            "Helper function that returns next N lines starting at pos index."
+
+            return ''.join(lines[pos:pos + num - 1]).strip()
+
+        if self.deploy.dry_run:
+            return True
+
+        # Only one fail pattern needs to be seen.
+        failed = False
+        chk_failed = bool(self.deploy.fail_patterns)
+
+        # All pass patterns need to be seen, so we replicate the list and remove
+        # patterns as we encounter them.
+        pass_patterns = self.deploy.pass_patterns.copy()
+        chk_passed = bool(pass_patterns) and (self.exit_code == 0)
+
+        try:
+            with open(self.deploy.get_log_path(), "r", encoding="UTF-8") as f:
+                lines = f.readlines()
+        except OSError as e:
+            log_fail_msg("Error opening file {}:\n{}".format(
+                self.deploy.get_log_path(), e))
+            return False
+
+        if chk_failed or chk_passed:
+            for cnt, line in enumerate(lines):
+                if chk_failed:
+                    if _find_patterns(self.deploy.fail_patterns,
+                                      line) is not None:
+                        # Print 4 additional lines to help debug more easily.
+                        log_fail_msg("```\n{}\n```\n".format(
+                            _get_n_lines(cnt, 5)))
+                        failed = True
+                        chk_failed = False
+                        chk_passed = False
+
+                if chk_passed:
+                    pattern = _find_patterns(pass_patterns, line)
+                    if pattern is not None:
+                        pass_patterns.remove(pattern)
+                        chk_passed = bool(pass_patterns)
+
+        # If failed, then nothing else to do. Just return.
+        if failed:
+            return False
+
+        # If no fail patterns were seen, but the job returned with non-zero
+        # exit code for whatever reason, then show the last 10 lines of the log
+        # as the failure message, which might help with the debug.
+        if self.exit_code != 0:
+            msg = ''.join(lines[-10:]).strip()
+            log_fail_msg("Job returned non-zero exit code. "
+                         "Last 10 lines:\n```\n{}\n```\n".format(msg))
+            return False
+
+        # Ensure all pass patterns were seen.
+        if chk_passed:
+            msg = ''.join(lines[-10:]).strip()
+            log_fail_msg("One or more pass patterns not found:\n{}\n"
+                         "Last 10 lines:\n```\n{}\n```\n".format(
+                             pass_patterns, msg))
+            return False
+
+        return True
+
+    def _link_odir(self, status):
+        """Soft-links the job's directory based on job's status.
+
+        The dispatched, passed and failed directories in the scratch area
+        provide a quick way to get to the job that was executed.
+        """
+
+        dest = Path(self.deploy.sim_cfg.links[status], self.deploy.qual_name)
+
+        # If dest exists, then atomically remove it and link the odir again.
+        while True:
+            try:
+                os.symlink(self.deploy.odir, dest)
+                break
+            except FileExistsError:
+                rm_path(dest)
+
+        # Delete the symlink from dispatched directory if it exists.
+        if status != "D":
+            old = Path(self.deploy.sim_cfg.links['D'], self.deploy.qual_name)
+            rm_path(old)
+
+    def clean_odirs(self, odir):
+        """Clean previous output directories.
+
+        When running jobs, we may want to maintain a limited history of
+        previous invocations. This method finds and deletes the output
+        directories at the base of input arg 'odir' with the oldest timestamps,
+        if that limit is reached. It returns a list of directories that
+        remain after deletion.
+        """
+
+        if not os.path.exists(odir):
+            return []
+
+        # If output directory exists, back it up.
+        ts = datetime.fromtimestamp(os.stat(odir).st_ctime)
+        ts = ts.strftime(TS_FORMAT)
+        shutil.move(odir, odir + "_" + ts)
+
+        # Get list of past output directories sorted by creation time.
+        pdir = Path(odir).resolve().parent
+        dirs = sorted([old for old in pdir.iterdir() if old.is_dir()],
+                      key=os.path.getctime,
+                      reverse=True)
+
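+        # Keep only the newest (max_odirs - 1) directories (the backup
+        # created above counts towards the limit); delete the rest.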
+        for old in dirs[self.max_odirs - 1:]:
+            rm_path(old)
+
+        return dirs[0:self.max_odirs - 2]
diff --git a/util/dvsim/LocalLauncher.py b/util/dvsim/LocalLauncher.py
new file mode 100644
index 0000000..4e56c58
--- /dev/null
+++ b/util/dvsim/LocalLauncher.py
@@ -0,0 +1,139 @@
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+
+import logging as log
+import os
+import re
+import shlex
+import subprocess
+
+from Launcher import Launcher, LauncherError
+
+
+class LocalLauncher(Launcher):
+    """
+    Implementation of Launcher to launch jobs in the user's local workstation.
+    """
+
+    def __init__(self, deploy):
+        '''Initialize common class members.'''
+
+        super().__init__(deploy)
+
+        # Popen object when launching the job.
+        self.process = None
+
+        # File object for the job's log; opened when the job is launched.
+        self.log_fd = None
+
+    def _do_launch(self):
+        # Update the shell's env vars with self.exports. Values in exports must
+        # replace the values in the shell's env vars if the keys match.
+        exports = os.environ.copy()
+        if self.deploy.exports:
+            exports.update(self.deploy.exports)
+
+        # Clear the magic MAKEFLAGS variable from exports if necessary. This
+        # variable is used by recursive Make calls to pass variables from one
+        # level to the next. Here, self.cmd is a call to Make but it's
+        # logically a top-level invocation: we don't want to pollute the flow's
+        # Makefile with Make variables from any wrapper that called dvsim.
+        if 'MAKEFLAGS' in exports:
+            del exports['MAKEFLAGS']
+
+        self._dump_env_vars(exports)
+
+        args = shlex.split(self.deploy.cmd)
+        try:
+            self.log_fd = open(self.deploy.get_log_path(),
+                               "w",
+                               encoding="UTF-8",
+                               errors="surrogateescape")
+            self.log_fd.write("[Executing]:\n{}\n\n".format(self.deploy.cmd))
+            self.log_fd.flush()
+            self.process = subprocess.Popen(args,
+                                            bufsize=4096,
+                                            universal_newlines=True,
+                                            stdout=self.log_fd,
+                                            stderr=self.log_fd,
+                                            env=exports)
+        except (OSError, subprocess.SubprocessError) as e:
+            # Popen may not have run; just close the log fd and re-raise.
+            self._close_process()
+            raise LauncherError('IO Error: {}\nSee {}'.format(
+                e, self.deploy.get_log_path()))
+
+        self._link_odir("D")
+
+    def poll(self):
+        '''Check status of the running process
+
+        This returns 'D', 'P' or 'F'. If 'D', the job is still running. If 'P',
+        the job finished successfully. If 'F', the job finished with an error.
+
+        This function must only be called after launch() and must not be
+        called again once it has returned 'P' or 'F'.
+        '''
+
+        assert self.process is not None
+        if self.process.poll() is None:
+            return 'D'
+
+        self.exit_code = self.process.returncode
+        status = 'P' if self._has_passed() else 'F'
+
+        self._post_finish(status)
+        return status
+
+    def _post_finish(self, status):
+        super()._post_finish(status)
+        self._close_process()
+        self.process = None
+
+    def kill(self):
+        '''Kill the running process.
+
+        This must be called between launching and reaping the process (the
+        same window as poll()).
+
+        '''
+        assert self.process is not None
+        self.kill_remote_job()
+
+        # Try to kill the running process. Send SIGTERM first, wait a bit,
+        # and then send SIGKILL if it didn't work.
+        self.process.terminate()
+        try:
+            self.process.wait(timeout=2)
+        except subprocess.TimeoutExpired:
+            self.process.kill()
+
+        self._post_finish('K')
+
+    def _close_process(self):
+        '''Close the log file descriptor associated with the process.'''
+
+        if self.log_fd:
+            self.log_fd.close()
+            self.log_fd = None
+
+    def kill_remote_job(self):
+        '''
+        If jobs are run on a remote server, a different command is needed
+        to kill them.
+        '''
+        # TODO: Currently only support lsf, may need to add support for GCP later.
+
+        # If using LSF, kill the job by its ID.
+        if re.match("^bsub", self.deploy.sim_cfg.job_prefix):
+            # Get the job ID from a line of the form:
+            # Job <xxxxxx> is submitted to default queue
+            grep_cmd = "grep -m 1 -E \'" + "^Job <" + "\' " + \
+                self.deploy.get_log_path()
+            (status, rslt) = subprocess.getstatusoutput(grep_cmd)
+            if rslt != "":
+                job_id = rslt.split('Job <')[1].split('>')[0]
+                try:
+                    subprocess.run(["bkill", job_id], check=True)
+                except Exception as e:
+                    log.error("%s: Failed to run bkill\n", e)
diff --git a/util/dvsim/Scheduler.py b/util/dvsim/Scheduler.py
index 92c7997..5c4ab64 100644
--- a/util/dvsim/Scheduler.py
+++ b/util/dvsim/Scheduler.py
@@ -7,7 +7,7 @@
 from collections import OrderedDict
 from signal import SIGINT, signal
 
-from Deploy import DeployError
+from Launcher import LauncherError
 from Timer import Timer
 from utils import VERBOSE
 
@@ -42,32 +42,31 @@
     def _kill_item(self, item):
         '''Kill a running item'''
         self._running.remove(item)
-        item.kill()
+        item.launcher.kill()
         self._killed.add(item)
         self.item_to_status[item] = 'K'
 
     def _poll(self, hms):
-        '''Check for running items that have finished
+        '''Check for running items that have finished.
 
         Returns True if something changed.
-
         '''
         to_pass = []
         to_fail = []
 
         for item in self._running:
-            status = item.poll()
+            status = item.launcher.poll()
             assert status in ['D', 'P', 'F']
             if status == 'D':
                 # Still running
                 continue
             elif status == 'P':
                 log.log(VERBOSE, "[%s]: [%s]: [status] [%s: P]", hms,
-                        item.target, item.identifier)
+                        item.target, item.full_name)
                 to_pass.append(item)
             else:
                 log.error("[%s]: [%s]: [status] [%s: F]", hms, item.target,
-                          item.identifier)
+                          item.full_name)
                 to_fail.append(item)
 
         for item in to_pass:
@@ -85,7 +84,6 @@
         '''Dispatch some queued items if possible.
 
         See run() for the format of old_results.
-
         '''
         num_slots = min(Scheduler.slot_limit,
                         Scheduler.max_parallel - len(self._running),
@@ -131,14 +129,14 @@
             return
 
         log.log(VERBOSE, "[%s]: [%s]: [dispatch]:\n%s", hms, self.name,
-                ", ".join(item.identifier for item in to_dispatch))
+                ", ".join(item.full_name for item in to_dispatch))
 
         for item in to_dispatch:
             self._running.add(item)
             self.item_to_status[item] = 'D'
             try:
-                item.dispatch_cmd()
-            except DeployError as err:
+                item.launcher.launch()
+            except LauncherError as err:
                 log.error('{}'.format(err))
                 self._kill_item(item)
 
@@ -167,7 +165,6 @@
 
         If print_status or we've reached a time interval then print current
         status for those jobs that weren't known to be finished already.
-
         '''
         if timer.check_time():
             print_status = True
@@ -195,10 +192,7 @@
         statuses. Every job that appears as a dependency will be in this list
         (because it ran as part of a previous target).
 
-        is_first_tgt is true if this is the first target to run.
-
         Returns the results from this target (in the same format).
-
         '''
         # Catch one SIGINT and tell the runner to quit. On a second, die.
         stop_now = threading.Event()
@@ -271,7 +265,6 @@
         '''Run all items
 
         Returns a map from item to status.
-
         '''
         timer = Timer()
 
diff --git a/util/dvsim/SimCfg.py b/util/dvsim/SimCfg.py
index 89b16be..bb309fe 100644
--- a/util/dvsim/SimCfg.py
+++ b/util/dvsim/SimCfg.py
@@ -62,7 +62,7 @@
         '''Recursively add a single item to the table of results'''
         status = results[item]
         if status == "F":
-            self.fail_msgs.append(item.fail_msg)
+            self.fail_msgs.append(item.launcher.fail_msg)
 
         # Runs get added to the table directly
         if item.target == "run":
diff --git a/util/dvsim/dvsim.py b/util/dvsim/dvsim.py
index 8b23f32..205eedf 100755
--- a/util/dvsim/dvsim.py
+++ b/util/dvsim/dvsim.py
@@ -30,7 +30,8 @@
 from pathlib import Path
 
 from CfgFactory import make_cfg
-from Deploy import Deploy, RunTest
+from Deploy import RunTest
+from Launcher import Launcher
 from Scheduler import Scheduler
 from Timer import Timer
 from utils import (TS_FORMAT, TS_FORMAT_LONG, VERBOSE, rm_path,
@@ -635,7 +636,7 @@
     # Register the common deploy settings.
     Timer.print_interval = args.print_interval
     Scheduler.max_parallel = args.max_parallel
-    Deploy.max_odirs = args.max_odirs
+    Launcher.max_odirs = args.max_odirs
 
     # Build infrastructure from hjson file and create the list of items to
     # be deployed.