[dvsim] Implement LsfLauncher

This is a first-cut implementation of the LsfLauncher. Several items are
left as TODOs; they will be addressed later.

This implementation dispatches all targets (builds, runs, cov, etc.) as
job arrays by default. Builds are run discretely (an array of 1 job),
since each build is assumed to have specific job requirements
(cpu/mem/disk/stack usage settings - these will be added in the future)
that cannot be shared with other builds. The runs pertaining to a build
are dispatched as a single array. The associated changes to the other
sources support the array generation.
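
For illustration, a minimal sketch of the array grouping (the names and
the 100-slot throttle mirror the LsfLauncher code below; treat this as a
sketch rather than the exact implementation):

    # Runs sharing a build are registered under one job name and later
    # submitted as a single LSF job array.
    jobs = {}  # {(cfg, job_name): [launcher, launcher, ...]}

    def register(cfg, job_name, launcher):
        arr = jobs.setdefault((cfg, job_name), [])
        arr.append(launcher)
        return len(arr)  # 1-based index of this job within the array

    def job_array_spec(job_name, job_total, slot_limit=100):
        spec = "{}[1-{}]".format(job_name, job_total)
        if job_total > slot_limit:
            spec += "%{}".format(slot_limit)  # cap concurrent jobs
        return spec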

Job polling is not done by invoking bjobs or bhist, but by looking for
the LSF job output file (unique for each array index), which is written
only AFTER the job is complete. This is a much faster way to test for
completion: bjobs and bhist bring the system to a crawl when invoked
with 20k tests in flight. This largely works for now, but other options,
such as IBM's Platform LSF Python APIs, need to be explored (future
work!).
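
As a sketch, the completion check boils down to the following (the path
is illustrative; the real logic lives in LsfLauncher.poll()):

    from pathlib import Path

    def is_job_done(job_script_out):
        # bsub writes the job script output file (one per array index)
        # only once the job has finished, and the job's own stdout and
        # stderr are redirected elsewhere, so a non-empty file means
        # the job is done.
        out = Path(job_script_out)
        return out.is_file() and out.stat().st_size > 0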

The launcher system to use is selected via the `DVSIM_LAUNCHER`
environment variable. In addition, this PR adds support for a Python
virtualenv to isolate project-specific Python requirements when running
tasks on remote machines that are shared with other projects.
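
A minimal sketch of the selection logic (it mirrors LauncherFactory and
the <PROJECT>_PYTHON_VENV handling in Launcher; names are illustrative):

    import os

    def pick_launcher(is_local=False):
        # --local on the command line overrides the site-level setting.
        if is_local:
            return "local"
        # Valid values: "local", "lsf", "edacloud"; default to "local".
        return os.environ.get("DVSIM_LAUNCHER", "local")

    def project_venv(project):
        # e.g. FOO_PYTHON_VENV points to a virtualenv (or a tarball of
        # one) that remote machines activate before running the job.
        return os.environ.get("{}_PYTHON_VENV".format(project.upper()))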

Signed-off-by: Srikrishna Iyer <sriyer@google.com>
diff --git a/util/dvsim/Deploy.py b/util/dvsim/Deploy.py
index b0114a8..e055b81 100644
--- a/util/dvsim/Deploy.py
+++ b/util/dvsim/Deploy.py
@@ -6,7 +6,7 @@
 import pprint
 import random
 
-from LocalLauncher import LocalLauncher
+from LauncherFactory import get_launcher
 from sim_utils import get_cov_summary_table
 from tabulate import tabulate
 from utils import (VERBOSE, clean_odirs, find_and_substitute_wildcards,
@@ -64,7 +64,7 @@
 
         # Create the launcher object. Launcher retains the handle to self for
         # lookup & callbacks.
-        self.launcher = LocalLauncher(self)
+        self.launcher = get_launcher(self)
 
     def _define_attrs(self):
         """Defines the attributes this instance needs to have.
@@ -133,6 +133,9 @@
         # 'aes:default', 'uart:default' builds.
         self.full_name = self.sim_cfg.name + ":" + self.qual_name
 
+        # Job name is used to group the job by cfg and target.
+        self.job_name = "{}_{}".format(self.sim_cfg.name, self.target)
+
         # Pass and fail patterns.
         self.pass_patterns = []
         self.fail_patterns = []
@@ -298,6 +301,8 @@
 
         # 'build_mode' is used as a substitution variable in the HJson.
         self.build_mode = self.name
+        self.job_name = "{}_{}_{}".format(self.sim_cfg.name, self.target,
+                                          self.build_mode)
         self.pass_patterns = self.build_pass_patterns
         self.fail_patterns = self.build_fail_patterns
 
@@ -346,6 +351,8 @@
 
         # 'build_mode' is used as a substitution variable in the HJson.
         self.build_mode = self.name
+        self.job_name = "{}_{}_{}".format(self.sim_cfg.name, self.target,
+                                          self.build_mode)
 
 
 class RunTest(Deploy):
@@ -366,6 +373,10 @@
         if build_job is not None:
             self.dependencies.append(build_job)
 
+        # We did something wrong if build_mode is not the same as the build_job
+        # arg's name.
+        assert self.build_mode == build_job.name
+
         self.launcher.renew_odir = True
 
     def _define_attrs(self):
@@ -402,6 +413,8 @@
         self.build_mode = self.test_obj.build_mode.name
         self.qual_name = self.run_dir_name + "." + str(self.seed)
         self.full_name = self.sim_cfg.name + ":" + self.qual_name
+        self.job_name = "{}_{}_{}".format(self.sim_cfg.name, self.target,
+                                          self.build_mode)
         self.pass_patterns = self.run_pass_patterns
         self.fail_patterns = self.run_fail_patterns
 
diff --git a/util/dvsim/Launcher.py b/util/dvsim/Launcher.py
index c3d7200..cefcca4 100644
--- a/util/dvsim/Launcher.py
+++ b/util/dvsim/Launcher.py
@@ -25,14 +25,76 @@
     launcher object holds an instance of the deploy object.
     """
 
+    # Points to the python virtual env area.
+    python_venv = None
+
     # If a history of previous invocations is to be maintained, then keep no
     # more than this many directories.
     max_odirs = 5
 
+    # Flag indicating the workspace preparation steps are complete.
+    workspace_prepared = False
+    workspace_prepared_for_cfg = set()
+
+    @staticmethod
+    def set_python_venv(project):
+        '''Activate a python virtualenv if available.
+
+        The env variable <PROJECT>_PYTHON_VENV, if set, points to the path
+        containing the python virtualenv created specifically for this
+        project. We can activate it if needed, before launching jobs using
+        external compute machines.
+
+        This is not applicable when running jobs locally on the user's machine.
+        '''
+
+        if Launcher.python_venv is not None:
+            return
+
+        # If project-specific python virtualenv path is set, then activate it
+        # before running downstream tools. This is more relevant when not
+        # launching locally, but on external machines in a compute farm, which
+        # may not have access to the default python installation area on the
+        # host machine.
+        Launcher.python_venv = os.environ.get("{}_PYTHON_VENV".format(
+            project.upper()))
+
+    @staticmethod
+    def prepare_workspace(project, repo_top, args):
+        '''Prepare the workspace based on the chosen launcher's needs.
+
+        This is done once for the entire duration of the flow run.
+        'project' is the name of the project.
+        'repo_top' is the path to the repository.
+        'args' are the command line args passed to dvsim.
+        '''
+        pass
+
+    @staticmethod
+    def prepare_workspace_for_cfg(cfg):
+        '''Prepare the workspace for a cfg.
+
+        This is invoked once for each cfg.
+        'cfg' is the flow configuration object.
+        '''
+        pass
+
     def __str__(self):
         return self.deploy.full_name + ":launcher"
 
     def __init__(self, deploy):
+        cfg = deploy.sim_cfg
+
+        # One-time preparation of the workspace.
+        if not Launcher.workspace_prepared:
+            self.prepare_workspace(cfg.project, cfg.proj_root, cfg.args)
+            Launcher.workspace_prepared = True
+
+        # One-time preparation of the workspace, specific to the cfg.
+        if cfg not in Launcher.workspace_prepared_for_cfg:
+            self.prepare_workspace_for_cfg(cfg)
+            Launcher.workspace_prepared_for_cfg.add(cfg)
+
         # Store the deploy object handle.
         self.deploy = deploy
 
diff --git a/util/dvsim/LauncherFactory.py b/util/dvsim/LauncherFactory.py
new file mode 100644
index 0000000..8d63723
--- /dev/null
+++ b/util/dvsim/LauncherFactory.py
@@ -0,0 +1,66 @@
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+
+import logging as log
+import os
+import sys
+
+from LocalLauncher import LocalLauncher
+from LsfLauncher import LsfLauncher
+from Scheduler import Scheduler
+
+try:
+    from EdaCloudLauncher import EdaCloudLauncher
+    EDACLOUD_LAUNCHER_EXISTS = True
+except ImportError:
+    EDACLOUD_LAUNCHER_EXISTS = False
+
+# The chosen launcher type.
+launcher_type = None
+
+
+def set_launcher_type(is_local=False):
+    '''Sets the launcher type that will be used to launch the jobs.
+
+    The env variable `DVSIM_LAUNCHER` is used to identify what launcher system
+    to use. This variable is specific to the user's work site. It is meant to
+    be set externally before invoking DVSim. Valid values are [local, lsf,
+    edacloud]. If --local arg is supplied then the local launcher takes
+    precedence.
+    '''
+
+    launcher = os.environ.get("DVSIM_LAUNCHER", "local")
+    if is_local:
+        launcher = "local"
+
+    global launcher_type
+    if launcher == "local":
+        launcher_type = LocalLauncher
+
+    elif launcher == "lsf":
+        launcher_type = LsfLauncher
+
+        # The max_parallel setting is not relevant when dispatching with LSF.
+        Scheduler.max_parallel = sys.maxsize
+
+    # These custom launchers are site specific. They may not be committed to the
+    # open source repo.
+    elif launcher == "edacloud" and EDACLOUD_LAUNCHER_EXISTS:
+        launcher_type = EdaCloudLauncher
+
+    else:
+        log.error("Launcher {} set using DVSIM_LAUNCHER env var does not "
+                  "exist. Using local launcher instead.".format(launcher))
+        launcher_type = LocalLauncher
+
+
+def get_launcher(deploy):
+    '''Returns an instance of a launcher.
+
+    'deploy' is an instance of the deploy class with which the launcher is paired.
+    '''
+
+    global launcher_type
+    assert launcher_type is not None
+    return launcher_type(deploy)
diff --git a/util/dvsim/LocalLauncher.py b/util/dvsim/LocalLauncher.py
index 4e56c58..c93e966 100644
--- a/util/dvsim/LocalLauncher.py
+++ b/util/dvsim/LocalLauncher.py
@@ -2,9 +2,7 @@
 # Licensed under the Apache License, Version 2.0, see LICENSE for details.
 # SPDX-License-Identifier: Apache-2.0
 
-import logging as log
 import os
-import re
 import shlex
 import subprocess
 
@@ -99,7 +97,6 @@
 
         '''
         assert self.process is not None
-        self.kill_remote_job()
 
         # Try to kill the running process. Send SIGTERM first, wait a bit,
         # and then send SIGKILL if it didn't work.
@@ -117,23 +114,3 @@
         assert self.process
         if self.process.stdout:
             self.process.stdout.close()
-
-    def kill_remote_job(self):
-        '''
-        If jobs are run in remote server, need to use another command to kill them.
-        '''
-        # TODO: Currently only support lsf, may need to add support for GCP later.
-
-        # If use lsf, kill it by job ID.
-        if re.match("^bsub", self.deploy.sim_cfg.job_prefix):
-            # get job id from below string
-            # Job <xxxxxx> is submitted to default queue
-            grep_cmd = "grep -m 1 -E \'" + "^Job <" + "\' " + \
-                self.deploy.get_log_path()
-            (status, rslt) = subprocess.getstatusoutput(grep_cmd)
-            if rslt != "":
-                job_id = rslt.split('Job <')[1].split('>')[0]
-                try:
-                    subprocess.run(["bkill", job_id], check=True)
-                except Exception as e:
-                    log.error("%s: Failed to run bkill\n", e)
diff --git a/util/dvsim/LsfLauncher.py b/util/dvsim/LsfLauncher.py
new file mode 100644
index 0000000..1a65fa1
--- /dev/null
+++ b/util/dvsim/LsfLauncher.py
@@ -0,0 +1,406 @@
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+
+import logging as log
+import os
+import re
+import shlex
+import subprocess
+import tarfile
+from pathlib import Path
+
+from Launcher import Launcher, LauncherError
+from utils import VERBOSE, clean_odirs
+
+
+class LsfLauncher(Launcher):
+
+    # A hidden directory specific to a cfg, where we put individual 'job'
+    # scripts.
+    jobs_dir = {}
+
+    # All launcher instances available for lookup.
+    jobs = {}
+
+    # When the job completes, we try to read the job script output to determine
+    # the outcome. It may not have been completely written the first time we
+    # read it so we retry on the next poll, no more than 10 times.
+    max_poll_retries = 10
+
+    # TODO: Add support for build/run/cov job specific resource requirements:
+    #       cpu, mem, disk, stack.
+    # TODO: Allow site-specific job resource usage setting using
+    #       `DVSIM_LSF_CFG` environment variable.
+
+    def __init__(self, deploy):
+        super().__init__(deploy)
+
+        # Set the status. Only update after the job is done - i.e. status will
+        # transition from None to P/F/K.
+        self.status = None
+
+        # Maintain the job script output as an instance variable for polling
+        # and cleanup.
+        self.job_script_out = None
+
+        # If we already opened the job script output file (but have not
+        # determined the outcome), then we maintain the file descriptor rather
+        # than reopening it and starting all over again on the next poll.
+        self.job_script_out_fd = None
+        self.job_script_out_err_msg = []
+        self.job_script_out_err_msg_found = False
+
+        # Set the job id.
+        self.job_id = None
+
+        # Polling retry counter.
+        self.num_poll_retries = 0
+
+        # Add self to the list of jobs.
+        cfg_dict = LsfLauncher.jobs.setdefault(deploy.sim_cfg, {})
+        job_name_list = cfg_dict.setdefault(deploy.job_name, [])
+        job_name_list.append(self)
+
+        # Job's index in the array.
+        self.index = len(job_name_list)
+
+    @staticmethod
+    def prepare_workspace(project, repo_top, args):
+        '''Overrides Launcher.prepare_workspace.'''
+
+        # Since we dispatch to remote machines, a project-specific python
+        # virtualenv, if it exists, needs to be activated when launching the job.
+        Launcher.set_python_venv(project)
+        if Launcher.python_venv is None:
+            return
+
+        # python_venv needs to be a valid tarfile. Extract it into the scratch
+        # area if it has not been extracted already. It is up to the user to
+        # delete it if it is stale.
+        if tarfile.is_tarfile(Launcher.python_venv):
+            path = Path(args.scratch_root, Path(Launcher.python_venv).stem)
+            if not path.is_dir():
+                with tarfile.open(Launcher.python_venv, mode='r') as tar:
+                    tar.extractall(path=args.scratch_root)
+            Launcher.python_venv = path
+
+        else:
+            raise LauncherError("{} is not a valid tar file".format(
+                Launcher.python_venv))
+
+    @staticmethod
+    def prepare_workspace_for_cfg(cfg):
+        '''Overrides Launcher.prepare_workspace_for_cfg.'''
+
+        # Create the job dir.
+        LsfLauncher.jobs_dir[cfg] = Path(cfg.scratch_path, "lsf",
+                                         cfg.timestamp)
+        clean_odirs(odir=LsfLauncher.jobs_dir[cfg], max_odirs=2)
+        os.makedirs(Path(LsfLauncher.jobs_dir[cfg]), exist_ok=True)
+
+    @staticmethod
+    def make_job_array_script_text(cfg, job_name):
+        """Creates the job array script text.
+
+        Once all jobs in the array are dispatched, the job array script is
+        constructed. It is a bash script that takes the job index as a single
+        argument. This index is set in the bsub command as '$LSB_JOBINDEX',
+        which bsub sets as the actual index when launching that job in the
+        array. This script is super simple - it is just a giant case statement
+        that switches on the job index to run that specific job. This is
+        preferred over creating individual per-job scripts, which incur extra
+        file I/O overhead when the scratch area is on NFS, causing a slowdown.
+
+        Returns an iterable representing the lines of the script.
+        """
+
+        lines = ["#!/usr/bin/env bash\nset -e\n"]
+
+        # Activate the python virtualenv if it exists.
+        if Launcher.python_venv:
+            lines += ["source {}/bin/activate\n".format(Launcher.python_venv)]
+
+        lines += ["case $1 in\n"]
+        for job in LsfLauncher.jobs[cfg][job_name]:
+            # Redirect the job's stdout and stderr to its log file.
+            cmd = "{} > {} 2>&1".format(job.deploy.cmd,
+                                        job.deploy.get_log_path())
+            lines += ["  {})\n".format(job.index), "    {};;\n".format(cmd)]
+
+        # Throw error as a sanity check if the job index is invalid.
+        lines += [
+            "  *)\n",
+            "    echo \"ERROR: Illegal job index: $1\" 1>&2; exit 1;;\n",
+            "esac\n"
+        ]
+        if Launcher.python_venv:
+            lines += ["deactivate\n"]
+        return lines
+
+    def launch(self):
+        self._pre_launch()
+
+        # Retrieve the total number of jobs in the array this job belongs to.
+        job_name = self.deploy.job_name
+        cfg = self.deploy.sim_cfg
+        job_total = len(LsfLauncher.jobs[cfg][job_name])
+
+        # The actual launching of the bsub command cannot happen until the
+        # Scheduler has dispatched ALL jobs in the array.
+        if self.index < job_total:
+            return
+
+        # Write the job array script.
+        job_script_wo_idx = Path(LsfLauncher.jobs_dir[cfg], job_name)
+        try:
+            with open(job_script_wo_idx, "w", encoding='utf-8') as f:
+                f.writelines(self.make_job_array_script_text(cfg, job_name))
+        except IOError as e:
+            err_msg = "ERROR: Failed to write job script {}:\n{}".format(
+                job_script_wo_idx, e)
+            self._kill_job_array(err_msg)
+            raise LauncherError(err_msg)
+
+        # Update the shell's env vars with self.exports. Values in exports must
+        # replace the values in the shell's env vars if the keys match.
+        exports = os.environ.copy()
+        if self.deploy.exports:
+            exports.update(self.deploy.exports)
+
+        # Clear the magic MAKEFLAGS variable from exports if necessary. This
+        # variable is used by recursive Make calls to pass variables from one
+        # level to the next. Here, self.cmd is a call to Make but it's
+        # logically a top-level invocation: we don't want to pollute the flow's
+        # Makefile with Make variables from any wrapper that called dvsim.
+        if 'MAKEFLAGS' in exports:
+            del exports['MAKEFLAGS']
+
+        self._dump_env_vars(exports)
+
+        # TODO: Arbitrarily set the max slot-limit to 100.
+        job_array = "{}[1-{}]".format(job_name, job_total)
+        if job_total > 100:
+            job_array += "%100"
+
+        # TODO: This needs to be moved to an HJson.
+        if self.deploy.sim_cfg.tool == "vcs":
+            job_rusage = "\'rusage[vcssim=1,vcssim_dynamic=1:duration=1]\'"
+
+        elif self.deploy.sim_cfg.tool == "xcelium":
+            job_rusage = "\'rusage[xcelium=1,xcelium_dynamic=1:duration=1]\'"
+
+        else:
+            job_rusage = None
+
+        # Launch the job array.
+        cmd = [
+            "bsub",
+            # TODO: LSF project name could be site specific!
+            "-P",
+            cfg.project,
+            "-J",
+            job_array,
+            "-oo",
+            "{}.%I.out".format(job_script_wo_idx),
+            "-eo",
+            "{}.%I.out".format(job_script_wo_idx)
+        ]
+
+        if job_rusage:
+            cmd += ["-R", job_rusage]
+
+        cmd.append(
+            shlex.quote(
+                "/usr/bin/bash {} $LSB_JOBINDEX".format(job_script_wo_idx)))
+
+        try:
+            p = subprocess.run(' '.join(cmd),
+                               check=True,
+                               shell=True,
+                               timeout=60,
+                               stdout=subprocess.PIPE,
+                               stderr=subprocess.PIPE,
+                               env=exports)
+        except subprocess.CalledProcessError as e:
+            # Need to mark all jobs in this range with this fail pattern.
+            err_msg = e.stderr.decode("utf-8").strip()
+            self._kill_job_array(err_msg)
+            raise LauncherError(err_msg)
+
+        # Extract the job ID.
+        result = p.stdout.decode("utf-8").strip()
+        job_id = result.split('Job <')[1].split('>')[0]
+        if not job_id:
+            self._kill_job_array("Job ID not found!")
+            raise LauncherError("Job ID not found!")
+
+        for job in LsfLauncher.jobs[cfg][job_name]:
+            job.job_script_out = Path("{}.{}.out".format(
+                job_script_wo_idx, job.index))
+            job.job_id = "{}[{}]".format(job_id, job.index)
+            job._link_odir("D")
+
+    def _finish(self, status, err_msg=None):
+        '''Helper function that sets the status, exit code and err msg.'''
+
+        assert status in ['P', 'F', 'K']
+        if self.job_script_out_fd:
+            self.job_script_out_fd.close()
+        self.status = status
+        if self.exit_code is None:
+            self.exit_code = 0 if status == 'P' else 1
+        if err_msg:
+            self.fail_msg += err_msg
+            log.log(VERBOSE, err_msg)
+        self._post_finish(status)
+        return status
+
+    def poll(self):
+        # It is possible we may have determined the status already.
+        if self.status:
+            return self.status
+
+        if not self.job_script_out_fd:
+            # If job id is not set, the bsub command has not been sent yet.
+            if not self.job_id:
+                return 'D'
+
+            # If the bsub output file is not created, we are still in
+            # dispatched state.
+            if not self.job_script_out.is_file():
+                return "D"
+
+            # We redirect the job's output to the log file, so the job script
+            # output remains empty until the point it finishes. This is a very
+            # quick way to check if the job has completed. If nothing has been
+            # written to the job script output yet, then the job is still
+            # running.
+            if not self.job_script_out.stat().st_size:
+                return "D"
+
+            # If we got to this point, we can now open the job script output
+            # file for reading.
+            try:
+                self.job_script_out_fd = open(self.job_script_out, "r")
+            except IOError as e:
+                return self._finish(
+                    status="F",
+                    err_msg="ERROR: Failed to open {}\n{}.".format(
+                        self.job_script_out, e))
+
+        # Now that the job has completed, we need to determine its status.
+        #
+        # If the job successfully launched and it failed, the failure message
+        # will appear in its log file (because of the stderr redirection).
+        # But, in some cases, if there is something wrong with the command
+        # itself, bsub might return immediately with an error message, which
+        # will appear in the job script output file. We want to retrieve that
+        # so that we can report the status accurately.
+        #
+        # At this point, we could run bjobs or bhist to determine the status,
+        # but it has been found to be too slow, especially when running 1000s
+        # of jobs. Plus, we have to read the job script output anyway to look
+        # for those error messages.
+        #
+        # So we just read this file to determine the status and extract the
+        # error message, rather than running bjobs or bhist. But there is
+        # one more complication to deal with - if we read the file now, it is
+        # possible that it may not have been fully updated. We will try reading
+        # it anyway. If we are unable to find what we are looking for, then we
+        # will resume reading it at the next poll. We will do this up to
+        # max_poll_retries times before giving up and flagging an error.
+        #
+        # TODO: Consider using the IBM Platform LSF Python APIs instead.
+        #       (deferred due to shortage of time / resources).
+        # TODO: Parse job telemetry data for performance insights.
+
+        exit_code = self._get_job_exit_code()
+        if exit_code is not None:
+            self.exit_code = exit_code
+            status = "F" if exit_code else "P" if self._has_passed() else "F"
+            return self._finish(status=status)
+
+        else:
+            self.num_poll_retries += 1
+            # Fail the test if we have reached the max polling retries.
+            if self.num_poll_retries == LsfLauncher.max_poll_retries:
+                return self._finish(status="F",
+                                    err_msg="ERROR: Reached max retries while "
+                                    "reading job script output {} to determine"
+                                    " the outcome.".format(
+                                        self.job_script_out))
+        return "D"
+
+    def _get_job_exit_code(self):
+        '''Read the job script output to retrieve the exit code.
+
+        Also read the error message if any, which will appear at the beginning
+        of the log file followed by bsub's standard 'email' format output. It
+        looks something like this:
+
+            <stderr messages>
+            ------------------------------------------------------------
+            Sender: LSF System <...>
+            Subject: ...
+            ...
+
+            Successfully completed.
+            <OR>
+            Exited with exit code 1.
+
+            ...
+
+        The extracted stderr messages are logged to self.fail_msg. The line
+        indicating whether it was successful or it failed with an exit code
+        is used to return the exit code.
+
+        Returns the exit code if found, else None.
+        '''
+
+        # Job script output must have been opened already.
+        assert self.job_script_out_fd
+
+        for line in self.job_script_out_fd:
+            if not self.job_script_out_err_msg_found:
+                m = re.match("^Sender", line)
+                if m:
+                    self.job_script_out_err_msg = "".join(
+                        self.job_script_out_err_msg[:-1]).strip()
+                    self.job_script_out_err_msg_found = True
+                else:
+                    self.job_script_out_err_msg.append(line)
+
+            else:
+                m = re.match(r"^Exited with exit code (\d+).\n$", line)
+                if m:
+                    self.fail_msg += self.job_script_out_err_msg
+                    return m.group(1)
+
+                if not self.job_script_out_err_msg:
+                    m = re.match(r"^Successfully completed.\n$", line)
+                    if m:
+                        return 0
+        return None
+
+    def _kill_job_array(self, err_msg):
+        '''If there is an LSF error, then kill all jobs in the array this job
+        belongs to.'''
+
+        for job in LsfLauncher.jobs[self.deploy.sim_cfg][self.deploy.job_name]:
+            job._finish("K", err_msg)
+
+    def kill(self):
+        if self.job_id:
+            try:
+                subprocess.run(["bkill", "-s", "SIGTERM", self.job_id],
+                               check=True,
+                               stdout=subprocess.PIPE,
+                               stderr=subprocess.PIPE)
+            except subprocess.CalledProcessError as e:
+                log.error("Failed to kill job: {}".format(
+                    e.stderr.decode("utf-8").strip()))
+        else:
+            log.error("Job ID for %s not found", self.name)
+
+        self._post_finish('K')
diff --git a/util/dvsim/SimCfg.py b/util/dvsim/SimCfg.py
index 941a1fa..363214d 100644
--- a/util/dvsim/SimCfg.py
+++ b/util/dvsim/SimCfg.py
@@ -449,22 +449,16 @@
         tests A, B with reseed values of 5 and 2, respectively, then the list
         will be ABABAAA).
 
-        build_map is either None or a dictionary from build name to a
-        CompileSim object. If None, this means that we're in "run only" mode,
-        so there are no builds involved at all. Otherwise, the build_mode of
-        each appears in the map to signify the test's dependency on its
-        corresponding CompileSim item (test cannot run until it has been
-        compiled).
-
+        build_map is a dictionary mapping a build mode to a CompileSim object.
         '''
         tagged = []
+
         for test in self.run_list:
-            build_job = (build_map[test.build_mode]
-                         if build_map is not None else None)
+            build_job = build_map[test.build_mode]
             for idx in range(test.reseed):
                 tagged.append((idx, RunTest(idx, test, build_job, self)))
 
-        # Stably sort the tagged list by the 1st coordinate
+        # Stably sort the tagged list by the 1st coordinate.
         tagged.sort(key=lambda x: x[0])
 
         # Return the sorted list of RunTest objects, discarding the indices by
@@ -507,13 +501,16 @@
                 test.build_mode = Modes.find_mode(
                     build_map[test.build_mode].name, self.build_modes)
 
-        if self.run_only:
-            self.builds = []
-            build_map = None
-
         self.runs = ([]
                      if self.build_only else self._expand_run_list(build_map))
 
+        # Discard the build_job dependency that was added earlier if --run-only
+        # switch is passed.
+        if self.run_only:
+            self.builds = []
+            for run in self.runs:
+                run.dependencies = []
+
         self.deploy = self.builds + self.runs
 
         # Create cov_merge and cov_report objects, so long as we've got at
@@ -548,7 +545,7 @@
         '''
         # TODO, Only support VCS
         if self.tool not in ['vcs', 'xcelium']:
-            log.error("Currently only support VCS and Xcelium for coverage UNR")    
+            log.error("Currently only support VCS and Xcelium for coverage UNR")
             sys.exit(1)
         # Create initial set of directories, such as dispatched, passed etc.
         self._create_dirs()
diff --git a/util/dvsim/dvsim.py b/util/dvsim/dvsim.py
index 5d10b85..e0261ea 100755
--- a/util/dvsim/dvsim.py
+++ b/util/dvsim/dvsim.py
@@ -29,9 +29,10 @@
 import textwrap
 from pathlib import Path
 
+import Launcher
+import LauncherFactory
 from CfgFactory import make_cfg
 from Deploy import RunTest
-from Launcher import Launcher
 from Scheduler import Scheduler
 from Timer import Timer
 from utils import (TS_FORMAT, TS_FORMAT_LONG, VERBOSE, rm_path,
@@ -312,6 +313,11 @@
                       help=('Prepend this string when running each tool '
                             'command.'))
 
+    disg.add_argument("--local",
+                      action='store_true',
+                      help=('Force jobs to be dispatched locally onto the '
+                            'user\'s machine.'))
+
     disg.add_argument("--remote",
                       action='store_true',
                       help=('Trigger copying of the repo to scratch area.'))
@@ -637,7 +643,8 @@
     # Register the common deploy settings.
     Timer.print_interval = args.print_interval
     Scheduler.max_parallel = args.max_parallel
-    Launcher.max_odirs = args.max_odirs
+    Launcher.Launcher.max_odirs = args.max_odirs
+    LauncherFactory.set_launcher_type(args.local)
 
     # Build infrastructure from hjson file and create the list of items to
     # be deployed.