[dvsim] PEP8 fixes in dvsim

These should cause no functional change. Detailed list of code changes:

  - Get rid of 'import *': this defeats static analysis tools. Don't
    do it.

  - Add a newline after the colon in one-liners like "if foo: bar", so
    the body sits on its own line

  - Use 'is' rather than '==' when comparing against boolean literals

  - Don't catch exceptions around os.system calls in Deploy.py:
    os.system doesn't raise when the command fails; it returns the
    command's exit status, so check the return value instead (see the
    sketch after this list).

  - Delete some variables that are written but not read.

  - Minor whitespace changes (missing blank lines between functions;
    weird indentation; missing space after '#')

  - Delete autogenerated module docstrings (they didn't contain any
    information; a real docstring might be worthwhile, but at the
    moment it's just noise).

  - Don't use \ as a line continuation character. Use parentheses if
    necessary.

  - Replace code like "foo" + \ "bar" with just "foo" "bar" (Python
    concatenates adjacent string literals, just like C). (I didn't do
    this everywhere, but it came up a lot next to the backslash
    continuations, so I dropped the unnecessary '+' while I was there.)

  - Replace "not foo in bar" with "foo not in bar"

  - Use raw strings for regexes with backslashes (r'a\+', not 'a\+')

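For illustration, here is a hand-written sketch of what the os.system,
string-concatenation and raw-string points amount to (these lines are
not taken verbatim from the diff below; the directory name is made up):

  import logging as log
  import os
  import re

  odir = "/tmp/dvsim_example"  # hypothetical output directory

  # os.system doesn't raise when the command fails; it returns the
  # command's exit status, so check the return value rather than
  # wrapping the call in try/except.
  if os.system("mkdir -p " + odir):
      log.error("Cmd \"mkdir -p %s\" could not be run", odir)

  # Adjacent string literals are concatenated at compile time, so
  # neither the '+' nor a backslash continuation is needed.
  msg = ("In-line entry in use_cfgs list does not contain "
         "a \"name\" key")

  # A raw string keeps the regex backslashes literal.
  cell = re.sub(r"\s*%\s*", "", "<td> 42.00 % </td>")
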
With these changes, you can run:

  find util/dvsim -name '*.py' | xargs flake8

and see no errors.

Signed-off-by: Rupert Swarbrick <rswarbrick@lowrisc.org>
diff --git a/util/dvsim/Deploy.py b/util/dvsim/Deploy.py
index 471f3a3..3080297 100644
--- a/util/dvsim/Deploy.py
+++ b/util/dvsim/Deploy.py
@@ -1,24 +1,22 @@
 # Copyright lowRISC contributors.
 # Licensed under the Apache License, Version 2.0, see LICENSE for details.
 # SPDX-License-Identifier: Apache-2.0
-r"""
-Classes
-"""
 
 import logging as log
+import os
 import pprint
 import random
 import re
 import shlex
+import subprocess
 import sys
 import time
 from collections import OrderedDict
 
-import hjson
 from tabulate import tabulate
 
-from sim_utils import *
-from utils import *
+from sim_utils import get_cov_summary_table
+from utils import VERBOSE, find_and_substitute_wildcards, run_cmd
 
 
 class Deploy():
@@ -47,7 +45,8 @@
             return pprint.pformat(self.__dict__)
         else:
             ret = self.cmd
-            if self.sub != []: ret += "\nSub:\n" + str(self.sub)
+            if self.sub != []:
+                ret += "\nSub:\n" + str(self.sub)
             return ret
 
     def __str__(self):
@@ -104,13 +103,13 @@
 
         ddict_keys = ddict.keys()
         for key in self.mandatory_cmd_attrs.keys():
-            if self.mandatory_cmd_attrs[key] == False:
+            if self.mandatory_cmd_attrs[key] is False:
                 if key in ddict_keys:
                     setattr(self, key, ddict[key])
                     self.mandatory_cmd_attrs[key] = True
 
         for key in self.mandatory_misc_attrs.keys():
-            if self.mandatory_misc_attrs[key] == False:
+            if self.mandatory_misc_attrs[key] is False:
                 if key in ddict_keys:
                     setattr(self, key, ddict[key])
                     self.mandatory_misc_attrs[key] = True
@@ -171,7 +170,8 @@
         args = shlex.split(self.cmd)
         try:
             # If renew_odir flag is True - then move it.
-            if self.renew_odir: self.odir_limiter(odir=self.odir)
+            if self.renew_odir:
+                self.odir_limiter(odir=self.odir)
             os.system("mkdir -p " + self.odir)
             # Dump all env variables for ease of debug.
             with open(self.odir + "/env_vars", "w") as f:
@@ -192,7 +192,8 @@
             Deploy.dispatch_counter += 1
         except IOError:
             log.error('IO Error: See %s', self.log)
-            if self.log_fd: self.log_fd.close()
+            if self.log_fd:
+                self.log_fd.close()
             self.status = "K"
 
     def odir_limiter(self, odir, max_odirs=-1):
@@ -233,7 +234,8 @@
                 dirs = dirs.replace('\n', ' ')
                 list_dirs = dirs.split()
                 num_dirs = len(list_dirs)
-                if max_odirs == -1: max_odirs = self.max_odirs
+                if max_odirs == -1:
+                    max_odirs = self.max_odirs
                 num_rm_dirs = num_dirs - max_odirs
                 if num_rm_dirs > -1:
                     rm_dirs = run_cmd(find_cmd +
@@ -276,7 +278,8 @@
                 self.status = "F"
 
             # Return if status is fail - no need to look for pass patterns.
-            if self.status == 'F': return
+            if self.status == 'F':
+                return
 
             # If fail patterns were not found, ensure pass patterns indeed were.
             for pass_pattern in self.pass_patterns:
@@ -292,7 +295,6 @@
 
     # Recursively set sub-item's status if parent item fails
     def set_sub_status(self, status):
-        if self.sub == []: return
         for sub_item in self.sub:
             sub_item.status = status
             sub_item.set_sub_status(status)
@@ -305,13 +307,12 @@
             new_link = self.sim_cfg.links[self.status] + "/" + self.odir_ln
             cmd = "ln -s " + self.odir + " " + new_link + "; "
             cmd += "rm " + old_link
-            try:
-                os.system(cmd)
-            except Exception as e:
+            if os.system(cmd):
                 log.error("Cmd \"%s\" could not be run", cmd)
 
     def get_status(self):
-        if self.status != "D": return
+        if self.status != "D":
+            return
         if self.process.poll() is not None:
             self.log_fd.close()
             self.set_status()
@@ -328,7 +329,8 @@
         if self.status == "D" and self.process.poll() is None:
             self.kill_remote_job()
             self.process.kill()
-            if self.log_fd: self.log_fd.close()
+            if self.log_fd:
+                self.log_fd.close()
             self.status = "K"
         # recurisvely kill sub target
         elif len(self.sub):
@@ -339,7 +341,7 @@
         '''
         If jobs are run in remote server, need to use another command to kill them.
         '''
-        #TODO: Currently only support lsf, may need to add support for GCP later.
+        # TODO: Currently only support lsf, may need to add support for GCP later.
 
         # If use lsf, kill it by job ID.
         if re.match("^bsub", self.sim_cfg.job_prefix):
@@ -350,7 +352,7 @@
             if rslt != "":
                 job_id = rslt.split('Job <')[1].split('>')[0]
                 try:
-                    p = subprocess.run(["bkill", job_id], check=True)
+                    subprocess.run(["bkill", job_id], check=True)
                 except Exception as e:
                     log.error("%s: Failed to run bkill\n", e)
 
@@ -367,8 +369,10 @@
 
         incr_hh = False
         Deploy.ss, incr_mm = _incr_ovf_60(Deploy.ss)
-        if incr_mm: Deploy.mm, incr_hh = _incr_ovf_60(Deploy.mm)
-        if incr_hh: Deploy.hh += 1
+        if incr_mm:
+            Deploy.mm, incr_hh = _incr_ovf_60(Deploy.mm)
+        if incr_hh:
+            Deploy.hh += 1
 
     @staticmethod
     def deploy(items):
@@ -470,7 +474,8 @@
         while not all_done:
             # Get status of dispatched items.
             for item in dispatched_items:
-                if item.status == "D": item.get_status()
+                if item.status == "D":
+                    item.get_status()
                 if item.status != status[item.target][item]:
                     print_status_flag = True
                     if item.status != "D":
@@ -716,7 +721,8 @@
         # first. If --fixed-seed <val> is also passed, the subsequent tests
         # (once the custom seeds are consumed) will be run with the fixed seed.
         if not RunTest.seeds:
-            if RunTest.fixed_seed: return RunTest.fixed_seed
+            if RunTest.fixed_seed:
+                return RunTest.fixed_seed
             for i in range(1000):
                 seed = random.getrandbits(32)
                 RunTest.seeds.append(seed)
diff --git a/util/dvsim/FlowCfg.py b/util/dvsim/FlowCfg.py
index ddda8ae..11967db 100644
--- a/util/dvsim/FlowCfg.py
+++ b/util/dvsim/FlowCfg.py
@@ -1,19 +1,19 @@
 # Copyright lowRISC contributors.
 # Licensed under the Apache License, Version 2.0, see LICENSE for details.
 # SPDX-License-Identifier: Apache-2.0
-r"""
-Class describing a flow configuration object
-"""
 
 import datetime
 import logging as log
+import os
 import pprint
 from shutil import which
+import subprocess
+import sys
 
 import hjson
 
-from Deploy import *
-from utils import *
+from Deploy import Deploy
+from utils import VERBOSE, md_results_to_html, parse_hjson, subst_wildcards
 
 
 # Interface class for extensions.
@@ -148,8 +148,7 @@
         # This is a master cfg only if it has a single key called "use_cfgs"
         # which contains a list of actual flow cfgs.
         hjson_cfg_dict_keys = hjson_dict.keys()
-        return ("use_cfgs" in hjson_cfg_dict_keys and \
-                type(hjson_dict["use_cfgs"]) is list)
+        return ("use_cfgs" in hjson_cfg_dict_keys and type(hjson_dict["use_cfgs"]) is list)
 
     def resolve_hjson_raw(self, hjson_dict):
         attrs = self.__dict__.keys()
@@ -176,10 +175,10 @@
                     defaults = scalar_types[type(hjson_dict_val)]
                     if self_val == hjson_dict_val:
                         rm_hjson_dict_keys.append(key)
-                    elif self_val in defaults and not hjson_dict_val in defaults:
+                    elif self_val in defaults and hjson_dict_val not in defaults:
                         setattr(self, key, hjson_dict_val)
                         rm_hjson_dict_keys.append(key)
-                    elif not self_val in defaults and not hjson_dict_val in defaults:
+                    elif self_val not in defaults and hjson_dict_val not in defaults:
                         # check if key exists in command line args, use that, or
                         # throw conflicting error
                         # TODO, may throw the conflicting error but choose one and proceed rather
@@ -236,7 +235,7 @@
 
         # Parse imported cfgs
         for cfg_file in import_cfgs:
-            if not cfg_file in self.imported_cfg_files:
+            if cfg_file not in self.imported_cfg_files:
                 self.imported_cfg_files.append(cfg_file)
                 # Substitute wildcards in cfg_file files since we need to process
                 # them right away.
@@ -262,7 +261,8 @@
                 elif type(entry) is dict:
                     # Treat this as a cfg expanded in-line
                     temp_cfg_file = self._conv_inline_cfg_to_hjson(entry)
-                    if not temp_cfg_file: continue
+                    if not temp_cfg_file:
+                        continue
                     self.cfgs.append(
                         self.create_instance(temp_cfg_file, self.proj_root,
                                              self.args))
@@ -292,14 +292,14 @@
 
         name = idict["name"] if "name" in idict.keys() else None
         if not name:
-            log.error(
-                "In-line entry in use_cfgs list does not contain " + \
-                "a \"name\" key (will be skipped!):\n%s", idict)
+            log.error("In-line entry in use_cfgs list does not contain "
+                      "a \"name\" key (will be skipped!):\n%s",
+                      idict)
             return None
 
         # Check if temp cfg file already exists
-        temp_cfg_file = self.scratch_root + "/." + self.branch + "__" + \
-                        name + "_cfg.hjson"
+        temp_cfg_file = (self.scratch_root + "/." + self.branch + "__" +
+                         name + "_cfg.hjson")
 
         # Create the file and dump the dict as hjson
         log.log(VERBOSE, "Dumping inline cfg \"%s\" in hjson to:\n%s", name,
@@ -308,9 +308,9 @@
             with open(temp_cfg_file, "w") as f:
                 f.write(hjson.dumps(idict, for_json=True))
         except Exception as e:
-            log.error(
-                "Failed to hjson-dump temp cfg file\"%s\" for \"%s\"" + \
-                "(will be skipped!) due to:\n%s", temp_cfg_file, name, e)
+            log.error("Failed to hjson-dump temp cfg file\"%s\" for \"%s\""
+                      "(will be skipped!) due to:\n%s",
+                      temp_cfg_file, name, e)
             return None
 
         # Return the temp cfg file created
@@ -331,8 +331,7 @@
 
             # Process override one by one
             for item in overrides:
-                if type(item) is dict and set(item.keys()) == set(
-                    ["name", "value"]):
+                if type(item) is dict and set(item.keys()) == {"name", "value"}:
                     ov_name = item["name"]
                     ov_value = item["value"]
                     if ov_name not in overrides_dict.keys():
@@ -344,7 +343,7 @@
                             ov_name, overrides_dict[ov_name], ov_value)
                         sys.exit(1)
                 else:
-                    log.error("\"overrides\" is a list of dicts with {\"name\": <name>, " + \
+                    log.error("\"overrides\" is a list of dicts with {\"name\": <name>, "
                               "\"value\": <value>} pairs. Found this instead:\n%s",
                               str(item))
                     sys.exit(1)
@@ -358,7 +357,7 @@
                           ov_name, orig_value, ov_value)
                 setattr(self, ov_name, ov_value)
             else:
-                log.error("The type of override value \"%s\" for \"%s\" mismatches " + \
+                log.error("The type of override value \"%s\" for \"%s\" mismatches "
                           "the type of original value \"%s\"",
                           ov_value, ov_name, orig_value)
                 sys.exit(1)
@@ -375,8 +374,10 @@
                     exports_dict.update(item)
                 elif type(item) is str:
                     [key, value] = item.split(':', 1)
-                    if type(key) is not str: key = str(key)
-                    if type(value) is not str: value = str(value)
+                    if type(key) is not str:
+                        key = str(key)
+                    if type(value) is not str:
+                        value = str(value)
                     exports_dict.update({key.strip(): value.strip()})
                 else:
                     log.error("Type error in \"exports\": %s", str(item))
@@ -460,7 +461,8 @@
             results.append(result)
             self.errors_seen |= item.errors_seen
 
-        if self.is_master_cfg: self.gen_results_summary()
+        if self.is_master_cfg:
+            self.gen_results_summary()
         self.gen_email_html_summary()
 
     def gen_results_summary(self):
@@ -469,7 +471,8 @@
         return
 
     def _get_results_page_link(self, link_text):
-        if not self.args.publish: return link_text
+        if not self.args.publish:
+            return link_text
         results_page_url = self.results_server_page.replace(
             self.results_server_prefix, self.results_server_url_prefix)
         return "[%s](%s)" % (link_text, results_page_url)
@@ -543,8 +546,8 @@
                 old_results_ts = ts.strftime(tf)
 
             old_results_dir = self.results_server_path + "/" + old_results_ts
-            cmd = self.results_server_cmd + " mv " + self.results_server_dir + \
-                  " " + old_results_dir
+            cmd = (self.results_server_cmd + " mv " + self.results_server_dir +
+                   " " + old_results_dir)
             log.log(VERBOSE, cmd)
             cmd_output = subprocess.run(cmd,
                                         shell=True,
@@ -579,7 +582,8 @@
         for rdir in results_dirs:
             dirname = rdir.replace(self.results_server_path, '')
             dirname = dirname.replace('/', '')
-            if dirname == "latest": continue
+            if dirname == "latest":
+                continue
             rdirs.append(dirname)
         rdirs.sort(reverse=True)
 
@@ -614,8 +618,8 @@
         rm_cmd += "/bin/rm -rf " + results_html_file + "; "
 
         log.info("Publishing results to %s", results_page_url)
-        cmd = self.results_server_cmd + " cp " + results_html_file + " " + \
-              self.results_server_page + "; " + rm_cmd
+        cmd = (self.results_server_cmd + " cp " + results_html_file + " " +
+               self.results_server_page + "; " + rm_cmd)
         log.log(VERBOSE, cmd)
         try:
             cmd_output = subprocess.run(args=cmd,
@@ -632,7 +636,8 @@
         for item in self.cfgs:
             item._publish_results()
 
-        if self.is_master_cfg: self.publish_results_summary()
+        if self.is_master_cfg:
+            self.publish_results_summary()
 
     def publish_results_summary(self):
         '''Public facing API for publishing md format results to the opentitan web server.
@@ -652,8 +657,8 @@
         rm_cmd = "/bin/rm -rf " + results_html_file + "; "
 
         log.info("Publishing results summary to %s", results_page_url)
-        cmd = self.results_server_cmd + " cp " + results_html_file + " " + \
-              self.results_summary_server_page + "; " + rm_cmd
+        cmd = (self.results_server_cmd + " cp " + results_html_file + " " +
+               self.results_summary_server_page + "; " + rm_cmd)
         log.log(VERBOSE, cmd)
         try:
             cmd_output = subprocess.run(args=cmd,
diff --git a/util/dvsim/LintCfg.py b/util/dvsim/LintCfg.py
index cac0867..3184aab 100644
--- a/util/dvsim/LintCfg.py
+++ b/util/dvsim/LintCfg.py
@@ -5,16 +5,14 @@
 Class describing lint configuration object
 """
 
+import hjson
 import logging as log
-import sys
 from pathlib import Path
 
 from tabulate import tabulate
 
-from Deploy import *
-from Modes import *
 from OneShotCfg import OneShotCfg
-from utils import *
+from utils import subst_wildcards
 
 
 # helper function for printing messages
@@ -24,7 +22,6 @@
         md_results += "### %s\n" % msg_list_name
         md_results += "```\n"
         for msg in msg_list:
-            msg_parts = msg.split()
             md_results += msg + "\n\n"
         md_results += "```\n"
     return md_results
@@ -209,7 +206,7 @@
                                              self.result["lint_errors"])
                 fail_msgs += _print_msg_list("Lint Warnings",
                                              self.result["lint_warnings"])
-                #fail_msgs += _print_msg_list("Lint Infos", results["lint_infos"])
+                # fail_msgs += _print_msg_list("Lint Infos", results["lint_infos"])
 
         if len(table) > 1:
             self.results_md = results_str + tabulate(
diff --git a/util/dvsim/Modes.py b/util/dvsim/Modes.py
index e553aca..0432702 100644
--- a/util/dvsim/Modes.py
+++ b/util/dvsim/Modes.py
@@ -1,18 +1,12 @@
 # Copyright lowRISC contributors.
 # Licensed under the Apache License, Version 2.0, see LICENSE for details.
 # SPDX-License-Identifier: Apache-2.0
-r"""
-Classes
-"""
 
 import logging as log
 import pprint
-import re
 import sys
 
-import hjson
-
-from utils import *
+from utils import VERBOSE
 
 
 class Modes():
@@ -25,8 +19,10 @@
         This is used to construct the string representation of the entire class object.
         '''
         tname = ""
-        if self.type != "": tname = self.type + "_"
-        if self.mname != "": tname += self.mname
+        if self.type != "":
+            tname = self.type + "_"
+        if self.mname != "":
+            tname += self.mname
         if log.getLogger().isEnabledFor(VERBOSE):
             return "\n<---" + tname + ":\n" + pprint.pformat(self.__dict__) + \
                    "\n--->\n"
@@ -43,7 +39,7 @@
         keys = mdict.keys()
         attrs = self.__dict__.keys()
 
-        if not 'name' in keys:
+        if 'name' not in keys:
             log.error("Key \"name\" missing in mode %s", mdict)
             sys.exit(1)
 
@@ -51,7 +47,8 @@
             log.fatal("Key \"type\" is missing or invalid")
             sys.exit(1)
 
-        if not hasattr(self, "mname"): self.mname = ""
+        if not hasattr(self, "mname"):
+            self.mname = ""
 
         for key in keys:
             if key not in attrs:
@@ -100,14 +97,15 @@
         if sub_modes != []:
             new_sub_modes = []
             for sub_mode in sub_modes:
-                if not self.name == sub_mode and not sub_mode in new_sub_modes:
+                if self.name != sub_mode and sub_mode not in new_sub_modes:
                     new_sub_modes.append(sub_mode)
             self.set_sub_modes(new_sub_modes)
         return True
 
     def check_conflict(self, name, attr, mode_attr_val):
         self_attr_val = getattr(self, attr)
-        if self_attr_val == mode_attr_val: return
+        if self_attr_val == mode_attr_val:
+            return
 
         default_val = None
         if type(self_attr_val) is int:
@@ -134,7 +132,8 @@
         def merge_sub_modes(mode, parent, objs):
             # Check if there are modes available to merge
             sub_modes = mode.get_sub_modes()
-            if sub_modes == []: return
+            if sub_modes == []:
+                return
 
             # Set parent if it is None. If not, check cyclic dependency
             if parent is None:
@@ -166,7 +165,8 @@
         modes_objs = []
         # create a default mode if available
         default_mode = ModeType.get_default_mode()
-        if default_mode is not None: modes_objs.append(default_mode)
+        if default_mode is not None:
+            modes_objs.append(default_mode)
 
         # Process list of raw dicts that represent the modes
         # Pass 1: Create unique set of modes by merging modes with the same name
@@ -203,7 +203,6 @@
         Given a mode_name in string, go through list of modes and return the mode with
         the string that matches. Thrown an error and return None if nothing was found.
         '''
-        found = False
         for mode in modes:
             if mode_name == mode.name:
                 return mode
@@ -218,7 +217,8 @@
             sub_mode = Modes.find_mode(mode_name, modes)
             if sub_mode is not None:
                 found_mode_objs.append(sub_mode)
-                if merge_modes is True: mode.merge_mode(sub_mode)
+                if merge_modes is True:
+                    mode.merge_mode(sub_mode)
             else:
                 log.error("Mode \"%s\" enabled within mode \"%s\" not found!",
                           mode_name, mode.name)
@@ -237,7 +237,8 @@
     def __init__(self, bdict):
         self.name = ""
         self.type = "build"
-        if not hasattr(self, "mname"): self.mname = "mode"
+        if not hasattr(self, "mname"):
+            self.mname = "mode"
         self.is_sim_mode = 0
         self.build_opts = []
         self.run_opts = []
@@ -262,7 +263,8 @@
     def __init__(self, rdict):
         self.name = ""
         self.type = "run"
-        if not hasattr(self, "mname"): self.mname = "mode"
+        if not hasattr(self, "mname"):
+            self.mname = "mode"
         self.reseed = -1
         self.run_opts = []
         self.uvm_test = ""
@@ -302,7 +304,8 @@
     }
 
     def __init__(self, tdict):
-        if not hasattr(self, "mname"): self.mname = "test"
+        if not hasattr(self, "mname"):
+            self.mname = "test"
         super().__init__(tdict)
 
     @staticmethod
@@ -413,7 +416,8 @@
     def __init__(self, regdict):
         self.name = ""
         self.type = ""
-        if not hasattr(self, "mname"): self.mname = "regression"
+        if not hasattr(self, "mname"):
+            self.mname = "regression"
         self.tests = []
         self.reseed = -1
         self.test_names = []
@@ -440,7 +444,7 @@
 
             # Check for name conflicts with tests before merging
             if new_regression.name in Tests.item_names:
-                log.error("Test names and regression names are required to be unique. " + \
+                log.error("Test names and regression names are required to be unique. "
                           "The regression \"%s\" bears the same name with an existing test. ",
                           new_regression.name)
                 sys.exit(1)
@@ -484,7 +488,7 @@
                 # Throw an error and exit.
                 for sim_mode_obj_sub in sim_mode_obj.en_build_modes:
                     if sim_mode_obj_sub in regression_obj.en_sim_modes:
-                        log.error("Regression \"%s\" enables sim_modes \"%s\" and \"%s\". " + \
+                        log.error("Regression \"%s\" enables sim_modes \"%s\" and \"%s\". "
                                   "The former is already a sub_mode of the latter.",
                                   regression_obj.name, sim_mode_obj_sub, sim_mode_obj.name)
                         sys.exit(1)
diff --git a/util/dvsim/OneShotCfg.py b/util/dvsim/OneShotCfg.py
index a1e44b0..a82db23 100644
--- a/util/dvsim/OneShotCfg.py
+++ b/util/dvsim/OneShotCfg.py
@@ -6,13 +6,14 @@
 """
 
 import logging as log
+import os
 import sys
 from collections import OrderedDict
 
-from Deploy import *
+from Deploy import CompileOneShot
 from FlowCfg import FlowCfg
-from Modes import *
-from utils import *
+from Modes import BuildModes, Modes
+from utils import find_and_substitute_wildcards
 
 
 class OneShotCfg(FlowCfg):
diff --git a/util/dvsim/SimCfg.py b/util/dvsim/SimCfg.py
index 47d9bf9..7d43d9a 100644
--- a/util/dvsim/SimCfg.py
+++ b/util/dvsim/SimCfg.py
@@ -5,15 +5,19 @@
 Class describing simulation configuration object
 """
 
-import logging as log
+import os
+import subprocess
 import sys
 from collections import OrderedDict
 
-from Deploy import *
+import logging as log
+from tabulate import tabulate
+
+from Deploy import CompileSim, CovAnalyze, CovMerge, CovReport, RunTest, Deploy
 from FlowCfg import FlowCfg
-from Modes import *
-from testplanner import class_defs, testplan_utils
-from utils import *
+from Modes import BuildModes, Modes, Regressions, RunModes, Tests
+from testplanner import testplan_utils
+from utils import VERBOSE, find_and_substitute_wildcards
 
 
 class SimCfg(FlowCfg):
@@ -53,13 +57,18 @@
         self.map_full_testplan = args.map_full_testplan
 
         # Disable cov if --build-only is passed.
-        if self.build_only: self.cov = False
+        if self.build_only:
+            self.cov = False
 
         # Set default sim modes for unpacking
-        if self.waves is True: self.en_build_modes.append("waves")
-        if self.cov is True: self.en_build_modes.append("cov")
-        if self.profile != 'none': self.en_build_modes.append("profile")
-        if self.xprop_off is not True: self.en_build_modes.append("xprop")
+        if self.waves is True:
+            self.en_build_modes.append("waves")
+        if self.cov is True:
+            self.en_build_modes.append("cov")
+        if self.profile != 'none':
+            self.en_build_modes.append("profile")
+        if self.xprop_off is not True:
+            self.en_build_modes.append("xprop")
 
         # Options built from cfg_file files
         self.project = ""
@@ -254,7 +263,8 @@
         def prune_items(items, marked_items):
             pruned_items = []
             for item in items:
-                if item not in marked_items: pruned_items.append(item)
+                if item not in marked_items:
+                    pruned_items.append(item)
             return pruned_items
 
         # Check if there are items to run
@@ -431,7 +441,8 @@
         # TODO: add support for html
         def retrieve_result(name, results):
             for item in results:
-                if name == item["name"]: return item
+                if name == item["name"]:
+                    return item
             return None
 
         def gen_results_sub(items, results, fail_msgs):
@@ -443,7 +454,6 @@
             This list of dicts is directly consumed by the Testplan::results_table
             method for testplan mapping / annotation.
             '''
-            if items == []: return (results, fail_msgs)
             for item in items:
                 if item.status == "F":
                     fail_msgs += item.fail_msg
@@ -454,7 +464,8 @@
                     if result is None:
                         result = {"name": item.name, "passing": 0, "total": 0}
                         results.append(result)
-                    if item.status == "P": result["passing"] += 1
+                    if item.status == "P":
+                        result["passing"] += 1
                     result["total"] += 1
                 (results, fail_msgs) = gen_results_sub(item.sub, results,
                                                        fail_msgs)
@@ -463,7 +474,8 @@
         regr_results = []
         fail_msgs = ""
         deployed_items = self.deploy
-        if self.cov: deployed_items.append(self.cov_merge_deploy)
+        if self.cov:
+            deployed_items.append(self.cov_merge_deploy)
         (regr_results, fail_msgs) = gen_results_sub(deployed_items,
                                                     regr_results, fail_msgs)
 
@@ -536,14 +548,16 @@
 
         # sim summary result has 5 columns from each SimCfg.results_summary
         header = ["Name", "Passing", "Total", "Pass Rate"]
-        if self.cov: header.append('Coverage')
+        if self.cov:
+            header.append('Coverage')
         table = [header]
         colalign = ("center", ) * len(header)
         for item in self.cfgs:
             row = []
             for title in item.results_summary:
                 row.append(item.results_summary[title])
-            if row == []: continue
+            if row == []:
+                continue
             table.append(row)
         self.results_summary_md = "## " + self.results_title + " (Summary)\n"
         self.results_summary_md += "### " + self.timestamp_long + "\n"
@@ -564,8 +578,8 @@
 
             log.info("Publishing coverage results to %s",
                      results_server_dir_url)
-            cmd = self.results_server_cmd + " -m cp -R " + \
-                  self.cov_report_deploy.cov_report_dir + " " + self.results_server_dir
+            cmd = (self.results_server_cmd + " -m cp -R " +
+                   self.cov_report_deploy.cov_report_dir + " " + self.results_server_dir)
             try:
                 cmd_output = subprocess.run(args=cmd,
                                             shell=True,
diff --git a/util/dvsim/SynCfg.py b/util/dvsim/SynCfg.py
index 7479fd6..a901a4d 100644
--- a/util/dvsim/SynCfg.py
+++ b/util/dvsim/SynCfg.py
@@ -6,15 +6,13 @@
 """
 
 import logging as log
-import sys
 from pathlib import Path
 
+import hjson
 from tabulate import tabulate
 
-from Deploy import *
-from Modes import *
 from OneShotCfg import OneShotCfg
-from utils import *
+from utils import subst_wildcards
 
 
 class SynCfg(OneShotCfg):
@@ -243,7 +241,8 @@
 
                     # go through submodules
                     for name in self.result["area"]["instances"].keys():
-                        if name == self.result["top"]: continue
+                        if name == self.result["top"]:
+                            continue
                         row = [name]
                         for field in ["comb", "buf", "reg", "macro", "total"]:
                             row += [
@@ -329,11 +328,11 @@
 
                     total_power = sum(power)
 
-                    row = [_create_entry(power[0], 1.0E-3) + " / " + \
+                    row = [_create_entry(power[0], 1.0E-3) + " / " +
                            _create_entry(power[0], 1.0E-3, total_power),
-                           _create_entry(power[1], 1.0E-3) + " / " + \
+                           _create_entry(power[1], 1.0E-3) + " / " +
                            _create_entry(power[1], 1.0E-3, total_power),
-                           _create_entry(power[2], 1.0E-3) + " / " + \
+                           _create_entry(power[2], 1.0E-3) + " / " +
                            _create_entry(power[2], 1.0E-3, total_power),
                            _create_entry(total_power, 1.0E-3)]
 
diff --git a/util/dvsim/dvsim.py b/util/dvsim/dvsim.py
index 39064ab..0cdfc89 100755
--- a/util/dvsim/dvsim.py
+++ b/util/dvsim/dvsim.py
@@ -18,7 +18,6 @@
 import os
 import subprocess
 import sys
-from pathlib import Path
 from signal import SIGINT, signal
 
 import Deploy
@@ -84,6 +83,7 @@
             arg_branch = "default"
     return (arg_branch)
 
+
 # Get the project root directory path - this is used to construct the full paths
 def get_proj_root():
     cmd = ["git", "rev-parse", "--show-toplevel"]
diff --git a/util/dvsim/sim_utils.py b/util/dvsim/sim_utils.py
index 3a50536..9408778 100644
--- a/util/dvsim/sim_utils.py
+++ b/util/dvsim/sim_utils.py
@@ -66,13 +66,15 @@
             values = []
             cov_total = None
             for metric in items.keys():
-                if items[metric]['total'] == 0: values.append("-- %")
+                if items[metric]['total'] == 0:
+                    values.append("-- %")
                 else:
                     value = items[metric]['covered'] / items[metric][
                         'total'] * 100
                     value = "{0:.2f} %".format(round(value, 2))
                     values.append(value)
-                    if metric == 'Score': cov_total = value
+                    if metric == 'Score':
+                        cov_total = value
             return [metrics, values], cov_total, None
 
     # If we reached here, then we were unable to extract the coverage.
diff --git a/util/dvsim/testplanner.py b/util/dvsim/testplanner.py
index b93eaf5..df55ae3 100755
--- a/util/dvsim/testplanner.py
+++ b/util/dvsim/testplanner.py
@@ -6,12 +6,7 @@
 
 """
 import argparse
-import logging as log
-import os
 import sys
-from pathlib import PurePath
-
-import hjson
 
 from testplanner import testplan_utils
 
diff --git a/util/dvsim/testplanner/class_defs.py b/util/dvsim/testplanner/class_defs.py
index a7c9eed..63bf4a8 100644
--- a/util/dvsim/testplanner/class_defs.py
+++ b/util/dvsim/testplanner/class_defs.py
@@ -32,7 +32,8 @@
         self.desc = desc
         self.milestone = milestone
         self.tests = tests
-        if not self.do_substitutions(substitutions): sys.exit(1)
+        if not self.do_substitutions(substitutions):
+            sys.exit(1)
 
     @staticmethod
     def is_valid_entry(kv_pairs):
@@ -40,7 +41,7 @@
         from it.
         '''
         for field in TestplanEntry.fields:
-            if not field in kv_pairs.keys():
+            if field not in kv_pairs.keys():
                 print(
                     "Error: input key-value pairs does not contain all of the ",
                     "required fields to create an entry:\n", kv_pairs,
@@ -65,7 +66,8 @@
         key=value pairs provided by the substitutions arg. If wildcards are present but no
         replacement is available, then the wildcards are replaced with an empty string.
         '''
-        if substitutions == []: return True
+        if substitutions == []:
+            return True
         for kv_pair in substitutions:
             resolved_tests = []
             [(k, v)] = kv_pair.items()
@@ -98,7 +100,8 @@
                                 return False
                 else:
                     resolved_tests.append(test)
-            if resolved_tests != []: self.tests = resolved_tests
+            if resolved_tests != []:
+                self.tests = resolved_tests
 
         # if wildcards have no available replacements in substitutions arg, then
         # replace with empty string
@@ -108,7 +111,8 @@
             if len(match) > 0:
                 for item in match:
                     resolved_tests.append(test.replace("{" + item + "}", ""))
-        if resolved_tests != []: self.tests = resolved_tests
+        if resolved_tests != []:
+            self.tests = resolved_tests
         return True
 
     def map_regr_results(self, regr_results, map_full_testplan=True):
@@ -183,7 +187,8 @@
     def add_entry(self, entry):
         '''add a new entry into the testplan
         '''
-        if self.entry_exists(entry): sys.exit(1)
+        if self.entry_exists(entry):
+            sys.exit(1)
         self.entries.append(entry)
 
     def sort(self):
@@ -226,7 +231,8 @@
                                            "passing": 0,
                                            "total": 0
                                        }])
-            if ms != "N.A.": totals[ms].tests = []
+            if ms != "N.A.":
+                totals[ms].tests = []
 
         for entry in self.entries:
             regr_results = entry.map_regr_results(regr_results,
@@ -236,7 +242,7 @@
         # extract unmapped tests from regr_results and create 'unmapped' entry
         unmapped_regr_results = []
         for regr_result in regr_results:
-            if not "mapped" in regr_result.keys():
+            if "mapped" not in regr_result.keys():
                 unmapped_regr_results.append(regr_result)
 
         unmapped = TestplanEntry(
@@ -265,7 +271,8 @@
         regressions = {}
         for entry in self.entries:
             # Skip if milestone is "n.a."
-            if entry.milestone not in entry.milestones[1:]: continue
+            if entry.milestone not in entry.milestones[1:]:
+                continue
             # if ms key doesnt exist, create one
             if entry.milestone not in regressions.keys():
                 regressions[entry.milestone] = []
@@ -314,10 +321,13 @@
         for entry in self.entries:
             milestone = entry.milestone
             entry_name = entry.name
-            if milestone == "N.A.": milestone = ""
-            if entry_name == "N.A.": entry_name = ""
+            if milestone == "N.A.":
+                milestone = ""
+            if entry_name == "N.A.":
+                entry_name = ""
             for test in entry.tests:
-                if test["total"] == 0: pass_rate = "-- %"
+                if test["total"] == 0:
+                    pass_rate = "-- %"
                 else:
                     pass_rate = test["passing"] / test["total"] * 100
                     pass_rate = "{0:.2f} %".format(round(pass_rate, 2))
diff --git a/util/dvsim/testplanner/testplan_utils.py b/util/dvsim/testplanner/testplan_utils.py
index baa57aa..34a3c6e 100644
--- a/util/dvsim/testplanner/testplan_utils.py
+++ b/util/dvsim/testplanner/testplan_utils.py
@@ -5,16 +5,14 @@
     The data structure is used for expansion inline within DV plan documentation
     as well as for annotating the regression results.
 """
-import logging as log
 import os
 import sys
-from pathlib import PurePath
 
 import hjson
 import mistletoe
 from tabulate import tabulate
 
-from .class_defs import *
+from .class_defs import Testplan, TestplanEntry
 
 
 def parse_testplan(filename):
@@ -30,7 +28,8 @@
         if key == "import_testplans":
             imported_testplans = obj[key]
         elif key != "entries":
-            if key == "name": name = obj[key]
+            if key == "name":
+                name = obj[key]
             substitutions.append({key: obj[key]})
     for imported_testplan in imported_testplans:
         obj = merge_dicts(
@@ -38,7 +37,8 @@
 
     testplan = Testplan(name=name)
     for entry in obj["entries"]:
-        if not TestplanEntry.is_valid_entry(entry): sys.exit(1)
+        if not TestplanEntry.is_valid_entry(entry):
+            sys.exit(1)
         testplan_entry = TestplanEntry(name=entry["name"],
                                        desc=entry["desc"],
                                        milestone=entry["milestone"],
@@ -110,7 +110,7 @@
 def parse_regr_results(filename):
     obj = parse_hjson(filename)
     # TODO need additional syntax checks
-    if not "test_results" in obj.keys():
+    if "test_results" not in obj.keys():
         print("Error: key \'test_results\' not found")
         sys, exit(1)
     return obj
diff --git a/util/dvsim/utils.py b/util/dvsim/utils.py
index 2d734be..85c1ca0 100644
--- a/util/dvsim/utils.py
+++ b/util/dvsim/utils.py
@@ -7,7 +7,6 @@
 
 import logging as log
 import os
-import pprint
 import re
 import shlex
 import subprocess
@@ -63,7 +62,8 @@
 
     if status != 0:
         log.error("cmd \"%s\" exited with status %d", cmd, status)
-        if exit_on_failure == 1: sys.exit(status)
+        if exit_on_failure == 1:
+            sys.exit(status)
 
     return (result, status)
 
@@ -90,8 +90,10 @@
     If var has wildcards specified within {..}, find and substitute them.
     '''
     def subst(wildcard, mdict):
-        if wildcard in mdict.keys(): return mdict[wildcard]
-        else: return None
+        if wildcard in mdict.keys():
+            return mdict[wildcard]
+        else:
+            return None
 
     if "{eval_cmd}" in var:
         idx = var.find("{eval_cmd}") + 11
@@ -133,7 +135,8 @@
                     else:
                         # Check if the wildcard exists as an environment variable
                         env_var = os.environ.get(item)
-                        if env_var is not None: subst_list[item] = env_var
+                        if env_var is not None:
+                            subst_list[item] = env_var
                         elif not ignore_error:
                             log.error(
                                 "Substitution for the wildcard \"%s\" not found",
@@ -254,7 +257,7 @@
     def color_cell(cell, cclass, indicator="%"):
         op = cell.replace("<td", "<td class=\"" + cclass + "\"")
         # Remove the indicator.
-        op = re.sub(r"\s*" + indicator + "\s*", "", op)
+        op = re.sub(r"\s*" + indicator + r"\s*", "", op)
         return op
 
     # List of 'not applicable' identifiers.
@@ -262,12 +265,11 @@
     na_list_patterns = '|'.join(na_list)
 
     # List of floating point patterns: '0', '0.0' & '.0'
-    fp_patterns = "[\+\-]?\d+\.?\d*"
+    fp_patterns = r"[\+\-]?\d+\.?\d*"
 
     patterns = fp_patterns + '|' + na_list_patterns
     indicators = "%|%u|G|B|E|W|EN|WN"
-    match = re.findall(
-        r"(<td.*>\s*(" + patterns + ")\s+(" + indicators + ")\s*</td>)", text)
+    match = re.findall(r"(<td.*>\s*(" + patterns + r")\s+(" + indicators + r")\s*</td>)", text)
     if len(match) > 0:
         subst_list = {}
         fp_nums = []
@@ -278,20 +280,23 @@
             fp_num = item[1]
             indicator = item[2]
             # Skip if fp_num is already processed.
-            if (fp_num, indicator) in fp_nums: continue
+            if (fp_num, indicator) in fp_nums:
+                continue
             fp_nums.append((fp_num, indicator))
-            if fp_num in na_list: subst = color_cell(cell, "cna", indicator)
+            if fp_num in na_list:
+                subst = color_cell(cell, "cna", indicator)
             else:
                 # Item is a fp num.
                 try:
                     fp = float(fp_num)
                 except ValueError:
-                    log.error("Percentage item \"%s\" in cell \"%s\" is not an " + \
+                    log.error("Percentage item \"%s\" in cell \"%s\" is not an "
                               "integer or a floating point number", fp_num, cell)
                     continue
                 # Percentage, colored.
                 if indicator == "%":
-                    if fp >= 0.0 and fp < 10.0: subst = color_cell(cell, "c0")
+                    if fp >= 0.0 and fp < 10.0:
+                        subst = color_cell(cell, "c0")
                     elif fp >= 10.0 and fp < 20.0:
                         subst = color_cell(cell, "c1")
                     elif fp >= 20.0 and fp < 30.0: