[dvsim tool] "use_cfgs" usage enhancement

- expanded "use_cfgs" to list hjson file targets or complete hjson dicts
inline
- this will allow specification of simpler, repeatitive cfgs such as
lint a lot more concisely

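For illustration, a master cfg's "use_cfgs" list can now mix file
entries with inline dict entries (the "foo" lint entry below is a
hypothetical sketch):

    use_cfgs: ["{proj_root}/hw/ip/aes/dv/aes_sim_cfg.hjson",
               {
                 name: foo
                 flow: lint
               }]
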
Signed-off-by: Srikrishna Iyer <sriyer@google.com>
diff --git a/hw/data/common_project_cfg.hjson b/hw/data/common_project_cfg.hjson
new file mode 100644
index 0000000..f7160a9
--- /dev/null
+++ b/hw/data/common_project_cfg.hjson
@@ -0,0 +1,29 @@
+// Copyright lowRISC contributors.
+// Licensed under the Apache License, Version 2.0, see LICENSE for details.
+// SPDX-License-Identifier: Apache-2.0
+{
+  project:          opentitan
+  doc_server:       docs.opentitan.org
+  results_server:   reports.opentitan.org
+
+  // Default directory structure for the output
+  scratch_base_path:  "{scratch_root}/{dut}.{flow}.{tool}"
+  scratch_path:       "{scratch_base_path}/{branch}"
+  tool_srcs_dir:      "{scratch_path}/{tool}"
+
+  // Results server stuff - indicate what command to use to copy over the results.
+  // Workaround for gsutil to fall back to using python2.7.
+  results_server_prefix:      "gs://"
+  results_server_url_prefix:  "https://"
+  results_server_cmd:         "CLOUDSDK_PYTHON=/usr/bin/python2.7 /usr/bin/gsutil"
+  results_server_css_path:    "{results_server_url_prefix}{results_server}/css/style.css"
+
+  results_server_path: "{results_server_prefix}{results_server}/{rel_path}"
+  results_server_dir:  "{results_server_path}/latest"
+
+  results_server_html: "results.html"
+  results_server_page: "{results_server_dir}/{results_server_html}"
+
+  results_summary_server_html: "summary.html"
+  results_summary_server_page: "{results_server_path}/{results_summary_server_html}"
+}
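
For illustration, with hypothetical values scratch_root=/scratch,
dut=aes, flow=sim, tool=vcs and branch=master, the directory wildcards
above resolve to:

    scratch_base_path:  /scratch/aes.sim.vcs
    scratch_path:       /scratch/aes.sim.vcs/master
    tool_srcs_dir:      /scratch/aes.sim.vcs/master/vcs
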
diff --git a/hw/dv/data/common_sim_cfg.hjson b/hw/dv/data/common_sim_cfg.hjson
index 9fc0380..54501c6 100644
--- a/hw/dv/data/common_sim_cfg.hjson
+++ b/hw/dv/data/common_sim_cfg.hjson
@@ -9,15 +9,13 @@
   flow:             sim
   flow_makefile:    "{proj_root}/hw/dv/data/sim.mk"
 
-  import_cfgs:      ["{proj_root}/hw/dv/data/common_modes.hjson",
+  import_cfgs:      ["{proj_root}/hw/data/common_project_cfg.hjson",
+                     "{proj_root}/hw/dv/data/common_modes.hjson",
                      "{proj_root}/hw/dv/data/fusesoc.hjson",
                      "{proj_root}/hw/dv/data/gen_ral_pkg.hjson",
                      "{proj_root}/hw/dv/data/{tool}/{tool}.hjson"]
 
   // Default directory structure for the output
-  scratch_base_path:  "{scratch_root}/{dut}.{flow}.{tool}"
-  scratch_path:       "{scratch_base_path}/{branch}"
-  tool_srcs_dir:      "{scratch_path}/{tool}"
   build_dir:          "{scratch_path}/{build_mode}"
   run_dir_name:       "{index}.{test}"
   run_dir:            "{scratch_path}/{run_dir_name}/out"
@@ -127,12 +125,4 @@
   // Project defaults for VCS
   vcs_cov_hier: "-cm_hier {tool_srcs_dir}/cover.cfg"
   vcs_cov_excl_files: ["{tool_srcs_dir}/common_cov_excl.el"]
-
-  // Results server stuff - indicate what command to use to copy over the results.
-  // Workaround for gsutil to fall back to using python2.7.
-  results_server_prefix:      "gs://"
-  results_server_url_prefix:  "https://"
-  results_server_cmd:     "CLOUDSDK_PYTHON=/usr/bin/python2.7 /usr/bin/gsutil"
-  results_server_path:    "{results_server_prefix}{results_server}/{rel_path}"
-  results_server_dir:     "{results_server_path}/latest"
 }
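
With the project-wide settings factored out of the sim-specific cfg, a
non-sim flow can inherit the same defaults simply by importing the new
common file. A hypothetical lint cfg sketch (the lint flow itself is
not part of this change):

    {
      name:         foo_lint
      flow:         lint
      import_cfgs:  ["{proj_root}/hw/data/common_project_cfg.hjson"]
    }
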
diff --git a/hw/top_earlgrey/dv/top_earlgrey_master_sim_cfgs.hjson b/hw/top_earlgrey/dv/top_earlgrey_master_sim_cfgs.hjson
index f153899..ee80680 100644
--- a/hw/top_earlgrey/dv/top_earlgrey_master_sim_cfgs.hjson
+++ b/hw/top_earlgrey/dv/top_earlgrey_master_sim_cfgs.hjson
@@ -5,6 +5,11 @@
   // This is the master cfg hjson for DV simulations. It imports ALL individual DV sim
   // cfgs of the IPs and the full chip used in top_earlgrey. This enables the common
   // regression sets to be run in one shot.
+  name: top_earlgrey_set
+
+  import_cfgs: [// Project wide common cfg file
+                "{proj_root}/hw/data/common_project_cfg.hjson"]
+
   use_cfgs: ["{proj_root}/hw/ip/aes/dv/aes_sim_cfg.hjson",
              "{proj_root}/hw/ip/alert_handler/dv/alert_handler_sim_cfg.hjson",
              "{proj_root}/hw/ip/gpio/dv/gpio_sim_cfg.hjson",
diff --git a/util/dvsim/Deploy.py b/util/dvsim/Deploy.py
index 84fb53c..95a4230 100644
--- a/util/dvsim/Deploy.py
+++ b/util/dvsim/Deploy.py
@@ -674,13 +674,13 @@
             except Exception as e:
                 ex_msg = "Failed to parse \"{}\":\n{}".format(
                     self.cov_report_dashboard, str(e))
-                log.fail_msg += ex_msg
+                self.fail_msg += ex_msg
                 log.error(ex_msg)
                 self.status = "F"
 
             if self.cov_results == "":
                 nf_msg = "Coverage summary not found in the reports dashboard!"
-                log.fail_msg += nf_msg
+                self.fail_msg += nf_msg
                 log.error(nf_msg)
                 self.status = "F"
 
diff --git a/util/dvsim/FlowCfg.py b/util/dvsim/FlowCfg.py
index 309be96..306683c 100644
--- a/util/dvsim/FlowCfg.py
+++ b/util/dvsim/FlowCfg.py
@@ -11,6 +11,8 @@
 import pprint
 from shutil import which
 
+import hjson
+
 from .Deploy import *
 from .utils import *
 
@@ -36,6 +38,10 @@
         self.branch = args.branch
         self.job_prefix = args.job_prefix
 
+        # Options set from hjson cfg.
+        self.project = ""
+        self.scratch_path = ""
+
         # Imported cfg files using 'import_cfgs' keyword
         self.imported_cfg_files = []
         self.imported_cfg_files.append(flow_cfg_file)
@@ -55,10 +61,6 @@
         # a special key 'use_cfgs' within the hjson cfg.
         self.is_master_cfg = False
 
-        # Set the partial path to the IP's DV area.
-        self.rel_path = os.path.dirname(flow_cfg_file).replace(
-            self.proj_root + '/', '')
-
         # Timestamp
         self.ts_format_long = args.ts_format_long
         self.timestamp_long = args.timestamp_long
@@ -66,16 +68,21 @@
         self.timestamp = args.timestamp
 
         # Results
+        self.rel_path = ""
         self.results_title = ""
         self.results_server_prefix = ""
         self.results_server_url_prefix = ""
         self.results_server_cmd = ""
+        self.results_server_css_path = ""
         self.results_server_path = ""
         self.results_server_dir = ""
+        self.results_server_html = ""
+        self.results_server_page = ""
+        self.results_summary_server_html = ""
+        self.results_summary_server_page = ""
 
-        # Full results in md text.
+        # Full and summary results in md text.
         self.results_md = ""
-        # Summary results in md text.
         self.results_summary_md = ""
 
     def __post_init__(self):
@@ -117,12 +124,19 @@
         # Resolve the raw hjson dict to build this object
         self.resolve_hjson_raw(hjson_dict)
 
+    def _post_parse_flow_cfg(self):
+        '''Hook to set defaults that are not found in the flow cfg hjson files.
+        It must be called manually after parse_flow_cfg() returns.
+        '''
+        if self.rel_path == "":
+            self.rel_path = os.path.dirname(self.flow_cfg_file).replace(
+                self.proj_root + '/', '')
+
     def check_if_master_cfg(self, hjson_dict):
-        # This is a master cfg only if it has a single key called "use_cfgs"
-        # which contains a list of actual flow cfgs.
+        # This is a master cfg if it has a key called "use_cfgs" which
+        # contains a list of flow cfgs (file paths or inline dicts).
         hjson_cfg_dict_keys = hjson_dict.keys()
-        return (len(hjson_cfg_dict_keys) == 1 and \
-                "use_cfgs" in hjson_cfg_dict_keys and \
+        return ("use_cfgs" in hjson_cfg_dict_keys and \
                 type(hjson_dict["use_cfgs"]) is list)
 
     def resolve_hjson_raw(self, hjson_dict):
@@ -221,12 +235,74 @@
 
         # Parse master cfg files
         if self.is_master_cfg:
-            for cfg_file in use_cfgs:
-                # Substitute wildcards in cfg_file files since we need to process
-                # them right away.
-                cfg_file = subst_wildcards(cfg_file, self.__dict__)
-                self.cfgs.append(
-                    self.create_instance(cfg_file, self.proj_root, self.args))
+            for entry in use_cfgs:
+                if type(entry) is str:
+                    # Treat this as a file entry
+                    # Substitute wildcards in cfg_file files since we need to process
+                    # them right away.
+                    cfg_file = subst_wildcards(entry,
+                                               self.__dict__,
+                                               ignore_error=True)
+                    self.cfgs.append(
+                        self.create_instance(cfg_file, self.proj_root,
+                                             self.args))
+
+                elif type(entry) is dict:
+                    # Treat this as a cfg expanded in-line
+                    temp_cfg_file = self._conv_inline_cfg_to_hjson(entry)
+                    if not temp_cfg_file: continue
+                    self.cfgs.append(
+                        self.create_instance(temp_cfg_file, self.proj_root,
+                                             self.args))
+
+                    # Delete the temp_cfg_file once the instance is created.
+                    # os.system("rm") would not raise on failure, so use
+                    # os.remove(), which raises OSError if the removal fails.
+                    try:
+                        log.log(VERBOSE, "Deleting temp cfg file:\n%s",
+                                temp_cfg_file)
+                        os.remove(temp_cfg_file)
+                    except OSError:
+                        log.error("Failed to remove temp cfg file:\n%s",
+                                  temp_cfg_file)
+
+                else:
+                    log.error(
+                        "Type of entry \"%s\" in the \"use_cfgs\" key is invalid: %s",
+                        entry, str(type(entry)))
+                    sys.exit(1)
+
+    def _conv_inline_cfg_to_hjson(self, idict):
+        '''Dump a temp hjson file in the scratch space from input dict.
+        This method is to be called only by a master cfg'''
+
+        if not self.is_master_cfg:
+            log.fatal("This method can only be called by a master cfg")
+            sys.exit(1)
+
+        name = idict["name"] if "name" in idict.keys() else None
+        if not name:
+            log.error(
+                "In-line entry in use_cfgs list does not contain " + \
+                "a \"name\" key (will be skipped!):\n%s", idict)
+            return None
+
+        # Construct the temp cfg file path in the scratch area
+        temp_cfg_file = self.scratch_root + "/." + self.branch + "__" + \
+                        name + "_cfg.hjson"
+
+        # Create the file and dump the dict as hjson
+        log.log(VERBOSE, "Dumping inline cfg \"%s\" in hjson to:\n%s", name,
+                temp_cfg_file)
+        try:
+            with open(temp_cfg_file, "w") as f:
+                f.write(hjson.dumps(idict, for_json=True))
+        except Exception as e:
+            log.error(
+                "Failed to hjson-dump temp cfg file\"%s\" for \"%s\"" + \
+                "(will be skipped!) due to:\n%s", temp_cfg_file, name, e)
+            return None
+
+        # Return the temp cfg file created
+        return temp_cfg_file
 
     def _process_overrides(self):
         # Look through the dict and find available overrides.
@@ -256,7 +332,7 @@
                             ov_name, overrides_dict[ov_name], ov_value)
                         sys.exit(1)
                 else:
-                    log.error("\"overrides\" is is a list of dicts with {\"name\": <name>, " \
+                    log.error("\"overrides\" is a list of dicts with {\"name\": <name>, " + \
                               "\"value\": <value>} pairs. Found this instead:\n%s",
                               str(item))
                     sys.exit(1)
@@ -364,47 +440,14 @@
         '''
         return
 
-    def publish_results_summary(self):
-        '''Public facing API for publishing md format results to the opentitan web server.
-        '''
-        results_html_file = 'summary.html'
-        # master cfg doesn't have server info, instead, get it from cfgs[0]
-        path = self.cfgs[0].results_server_prefix + self.cfgs[0].results_server + '/' + \
-               self.rel_path
-        results_page = path + '/' + results_html_file
-        results_page_url = results_page.replace(
-            self.cfgs[0].results_server_prefix,
-            self.cfgs[0].results_server_url_prefix)
-
-        # Assume that a 'style.css' is available at root path
-        css_path = (
-            (len(self.rel_path.split("/")) + 2) * "../") + "css/style.css"
-
-        # Publish the results page.
-        # First, write the results html file temporarily to the scratch area.
-        f = open(results_html_file, 'w')
-        f.write(
-            md_results_to_html(self.results_title, css_path,
-                               self.results_summary_md))
-        f.close()
-        rm_cmd = "rm -rf " + results_html_file + "; "
-
-        log.info("Publishing results summary to %s", results_page_url)
-        cmd = self.cfgs[0].results_server_cmd + " cp " + results_html_file + " " + \
-              results_page + "; " + rm_cmd
-        log.log(VERBOSE, cmd)
-        try:
-            cmd_output = subprocess.run(args=cmd,
-                                        shell=True,
-                                        stdout=subprocess.PIPE,
-                                        stderr=subprocess.STDOUT)
-            log.log(VERBOSE, cmd_output.stdout.decode("utf-8"))
-        except Exception as e:
-            log.error("%s: Failed to publish results:\n\"%s\"", e, str(cmd))
+    def _get_results_page_link(self, link_text):
+        results_page_url = self.results_server_page.replace(
+            self.results_server_prefix, self.results_server_url_prefix)
+        return "[%s](%s)" % (link_text, results_page_url)
 
     def _publish_results(self):
         '''Publish results to the opentitan web server.
-        Results are uploaded to {results_server}/{rel_path}/latest/results.
+        Results are uploaded to {results_server_path}/latest/results.
         If the 'latest' directory exists, then it is renamed to its 'timestamp' directory.
         If the list of directories in this area is > 14, then the oldest entry is removed.
         Links to the last 7 regression results are appended at the end of the results page.
@@ -416,21 +459,16 @@
             return
 
         # Construct the paths
-        results_fname = 'results.html'
-        results_page = self.results_server_dir + '/' + results_fname
-        results_page_url = results_page.replace(self.results_server_prefix,
-                                                self.results_server_url_prefix)
-
-        # Assume that a 'style.css' is available at root path
-        css_path = (
-            (len(self.rel_path.split("/")) + 1) * "../") + "css/style.css"
+        results_page_url = self.results_server_page.replace(
+            self.results_server_prefix, self.results_server_url_prefix)
 
         # Timeformat for moving the dir
         tf = "%Y.%m.%d_%H.%M.%S"
 
-        # Extract the timestamp of the existing results_page
-        cmd = self.results_server_cmd + " ls -L " + results_page + " | " + \
-              "grep \'Creation time:\'"
+        # Extract the timestamp of the existing results page
+        cmd = self.results_server_cmd + " ls -L " + self.results_server_page + \
+            " | grep \'Creation time:\'"
+
         log.log(VERBOSE, cmd)
         cmd_output = subprocess.run(cmd,
                                     shell=True,
@@ -510,7 +548,7 @@
             for i in range(len(rdirs)):
                 if i < 7:
                     rdir_url = self.results_server_path + '/' + rdirs[
-                        i] + "/" + results_fname
+                        i] + "/" + self.results_server_html
                     rdir_url = rdir_url.replace(self.results_server_prefix,
                                                 self.results_server_url_prefix)
                     history_txt += "- [{}]({})\n".format(rdirs[i], rdir_url)
@@ -525,15 +563,17 @@
 
         # Publish the results page.
         # First, write the results html file temporarily to the scratch area.
-        results_html_file = self.results_file + ".html"
+        results_html_file = self.scratch_path + "/results_" + self.timestamp + ".html"
         f = open(results_html_file, 'w')
-        f.write(md_results_to_html(self.results_title, css_path, results_md))
+        f.write(
+            md_results_to_html(self.results_title,
+                               self.results_server_css_path, results_md))
         f.close()
-        rm_cmd += "rm -rf " + results_html_file + "; "
+        rm_cmd += "/bin/rm -rf " + results_html_file + "; "
 
         log.info("Publishing results to %s", results_page_url)
         cmd = self.results_server_cmd + " cp " + results_html_file + " " + \
-              results_page + "; " + rm_cmd
+              self.results_server_page + "; " + rm_cmd
         log.log(VERBOSE, cmd)
         try:
             cmd_output = subprocess.run(args=cmd,
@@ -551,3 +591,33 @@
             item._publish_results()
 
         if self.is_master_cfg: self.publish_results_summary()
+
+    def publish_results_summary(self):
+        '''Public facing API for publishing md format results to the opentitan web server.
+        '''
+        results_html_file = "summary_" + self.timestamp + ".html"
+        results_page_url = self.results_summary_server_page.replace(
+            self.results_server_prefix, self.results_server_url_prefix)
+
+        # Publish the results page.
+        # First, write the results html file temporarily to the current dir.
+        f = open(results_html_file, 'w')
+        f.write(
+            md_results_to_html(self.results_title,
+                               self.results_server_css_path,
+                               self.results_summary_md))
+        f.close()
+        rm_cmd = "/bin/rm -rf " + results_html_file + "; "
+
+        log.info("Publishing results summary to %s", results_page_url)
+        cmd = self.results_server_cmd + " cp " + results_html_file + " " + \
+              self.results_summary_server_page + "; " + rm_cmd
+        log.log(VERBOSE, cmd)
+        try:
+            cmd_output = subprocess.run(args=cmd,
+                                        shell=True,
+                                        stdout=subprocess.PIPE,
+                                        stderr=subprocess.STDOUT)
+            log.log(VERBOSE, cmd_output.stdout.decode("utf-8"))
+        except Exception as e:
+            log.error("%s: Failed to publish results:\n\"%s\"", e, str(cmd))
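
The new "use_cfgs" handling, boiled down to a minimal self-contained
sketch (load_use_cfgs and create_instance are illustrative names, not
dvsim APIs):

    import os
    import tempfile

    import hjson

    def load_use_cfgs(use_cfgs, create_instance):
        '''File entries are instantiated directly; inline dict entries
        are dumped to a temp hjson file, instantiated, then removed.'''
        cfgs = []
        for entry in use_cfgs:
            if isinstance(entry, str):
                cfgs.append(create_instance(entry))
            elif isinstance(entry, dict):
                fd, path = tempfile.mkstemp(suffix="_cfg.hjson")
                with os.fdopen(fd, "w") as f:
                    f.write(hjson.dumps(entry, for_json=True))
                try:
                    cfgs.append(create_instance(path))
                finally:
                    os.remove(path)
            else:
                raise TypeError("use_cfgs entries must be str or dict")
        return cfgs
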
diff --git a/util/dvsim/SimCfg.py b/util/dvsim/SimCfg.py
index 9374c2d..d83b84b 100644
--- a/util/dvsim/SimCfg.py
+++ b/util/dvsim/SimCfg.py
@@ -65,7 +65,6 @@
         self.project = ""
         self.flow = ""
         self.flow_makefile = ""
-        self.scratch_path = ""
         self.build_dir = ""
         self.run_dir = ""
         self.sw_build_dir = ""
@@ -104,9 +103,7 @@
 
         # Parse the cfg_file file tree
         self.parse_flow_cfg(flow_cfg_file)
-
-        # Stop here if this is a master cfg list
-        if self.is_master_cfg: return
+        self._post_parse_flow_cfg()
 
         # If build_unique is set, then add current timestamp to uniquify it
         if self.build_unique:
@@ -123,14 +120,11 @@
         ]
         self.__dict__ = find_and_substitute_wildcards(self.__dict__,
                                                       self.__dict__,
-                                                      ignored_wildcards)
+                                                      ignored_wildcards,
+                                                      self.is_master_cfg)
 
-        # TODO only support VCS coverage now
-        if self.tool != "vcs" and self.cov is True:
-            self.cov = False
-            log.warning(
-                "Coverage collection with tool \"%s\" is not supported yet",
-                self.tool)
+        # Set the title for simulation results.
+        self.results_title = self.name.upper() + " Simulation Results"
 
         # Print info
         log.info("Scratch path for %s: %s", self.name, self.scratch_path)
@@ -150,8 +144,15 @@
         self._process_exports()
 
         # Create objects from raw dicts - build_modes, sim_modes, run_modes,
-        # tests and regressions
-        self._create_objects()
+        # tests and regressions, only if not a master cfg obj
+        if not self.is_master_cfg:
+            # TODO: hack to prevent coverage collection if tool != vcs
+            if self.cov and self.tool != "vcs":
+                self.cov = False
+                log.warning(
+                    "Coverage collection with tool \"%s\" is not supported yet",
+                    self.tool)
+            self._create_objects()
 
         # Post init checks
         self.__post_init__()
@@ -159,7 +160,6 @@
     def __post_init__(self):
         # Run some post init checks
         super().__post_init__()
-        self.results_title = self.name.upper() + " Simulation Results"
 
     @staticmethod
     def create_instance(flow_cfg_file, proj_root, args):
@@ -298,9 +298,10 @@
 
         # Check if all items has been processed
         if items_list != []:
-            log.error("The items %s added for run were not found in \n%s!" + \
-                      "\nUse the --list switch to see a list of available tests / regressions.", \
-                      items_list, self.flow_cfg_file)
+            log.error(
+                "The items %s added for run were not found in \n%s!\n"
+                "Use the --list switch to see a list of available "
+                "tests / regressions.", items_list, self.flow_cfg_file)
             sys.exit(1)
 
         # Process reseed override and create the build_list
@@ -400,6 +401,26 @@
         if self.cov:
             Deploy.deploy(self.cov_deploys)
 
+    def _cov_analyze(self):
+        '''Use the last regression coverage data to open up the GUI tool to
+        analyze the coverage.
+        '''
+        cov_analyze_deploy = CovAnalyze(self)
+        try:
+            proc = subprocess.Popen(args=cov_analyze_deploy.cmd,
+                                    shell=True,
+                                    close_fds=True)
+        except Exception as e:
+            log.fatal("Failed to run coverage analysis cmd:\n\"%s\"\n%s",
+                      cov_analyze_deploy.cmd, e)
+            sys.exit(1)
+
+    def cov_analyze(self):
+        '''Public facing API for analyzing coverage.
+        '''
+        for item in self.cfgs:
+            item._cov_analyze()
+
     def _gen_results(self):
         '''
         The function is called after the regression has completed. It collates the
@@ -482,18 +503,19 @@
             results_str += self.cov_report_deploy.cov_results
             self.results_summary["Coverage"] = self.cov_report_deploy.cov_total
         else:
-            self.results_summary["Coverage"] = "N.A. %"
+            self.results_summary["Coverage"] = "-- %"
 
         # append link of detail result to block name
-        self.results_summary["Name"] = self.append_result_link(self.results_summary["Name"])
+        self.results_summary["Name"] = self._get_results_page_link(
+            self.results_summary["Name"])
 
         # Append failures for triage
         self.results_md = results_str + fail_msgs
 
         # Write results to the scratch area
-        self.results_file = self.scratch_path + "/results_" + self.timestamp + ".md"
-        log.info("Detailed results are available at %s", self.results_file)
-        f = open(self.results_file, 'w')
+        results_file = self.scratch_path + "/results_" + self.timestamp + ".md"
+        log.info("Detailed results are available at %s", results_file)
+        f = open(results_file, 'w')
         f.write(self.results_md)
         f.close()
 
@@ -511,7 +533,7 @@
             for title in item.results_summary:
                 row.append(item.results_summary[title])
             table.append(row)
-        self.results_summary_md = "## Simulation Summary Results\n"
+        self.results_summary_md = "## " + self.results_title + " (Summary)\n"
         self.results_summary_md += "### " + self.timestamp_long + "\n"
         self.results_summary_md += tabulate(table,
                                             headers="firstrow",
@@ -520,32 +542,6 @@
         print(self.results_summary_md)
         return self.results_summary_md
 
-    def append_result_link(self, link_name):
-        results_page = self.results_server_dir + '/' + 'results.html'
-        results_page_url = results_page.replace(self.results_server_prefix,
-                                                self.results_server_url_prefix)
-        return "[%s](%s)" % (link_name, results_page_url)
-
-    def _cov_analyze(self):
-        '''Use the last regression coverage data to open up the GUI tool to
-        analyze the coverage.
-        '''
-        cov_analyze_deploy = CovAnalyze(self)
-        try:
-            proc = subprocess.Popen(args=cov_analyze_deploy.cmd,
-                                    shell=True,
-                                    close_fds=True)
-        except Exception as e:
-            log.fatal("Failed to run coverage analysis cmd:\n\"%s\"\n%s",
-                      cov_analyze_deploy.cmd, e)
-            sys.exit(1)
-
-    def cov_analyze(self):
-        '''Public facing API for analyzing coverage.
-        '''
-        for item in self.cfgs:
-            item._cov_analyze()
-
     def _publish_results(self):
         '''Publish coverage results to the opentitan web server.'''
         super()._publish_results()
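
The shared _get_results_page_link() in FlowCfg.py replaces SimCfg's
append_result_link(); its effect as a standalone sketch (the example
URL is hypothetical):

    def get_results_page_link(link_text, results_server_page,
                              prefix="gs://", url_prefix="https://"):
        # Swap the storage prefix for the web prefix, e.g.
        #   gs://reports.opentitan.org/hw/ip/aes/dv/latest/results.html
        #   -> https://reports.opentitan.org/hw/ip/aes/dv/latest/results.html
        url = results_server_page.replace(prefix, url_prefix)
        return "[%s](%s)" % (link_text, url)
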
diff --git a/util/dvsim/utils.py b/util/dvsim/utils.py
index 0e95a41..377bd67 100644
--- a/util/dvsim/utils.py
+++ b/util/dvsim/utils.py
@@ -86,7 +86,7 @@
     return hjson_cfg_dict
 
 
-def subst_wildcards(var, mdict, ignored_wildcards=[]):
+def subst_wildcards(var, mdict, ignored_wildcards=[], ignore_error=False):
     '''
     If var has wildcards specified within {..}, find and substitute them.
     '''
@@ -96,7 +96,8 @@
 
     if "{eval_cmd}" in var:
         idx = var.find("{eval_cmd}") + 11
-        subst_var = subst_wildcards(var[idx:], mdict, ignored_wildcards)
+        subst_var = subst_wildcards(var[idx:], mdict, ignored_wildcards,
+                                    ignore_error)
         # If var has wildcards that were ignored, then skip running the command
         # for now, assume that it will be handled later.
         match = re.findall(r"{([A-Za-z0-9\_]+)}", subst_var)
@@ -115,7 +116,8 @@
                             subst_found = []
                             for element in found:
                                 element = subst_wildcards(
-                                    element, mdict, ignored_wildcards)
+                                    element, mdict, ignored_wildcards,
+                                    ignore_error)
                                 subst_found.append(element)
                             # Expand list into a str since list within list is
                             # not supported.
@@ -123,7 +125,8 @@
 
                         elif type(found) is str:
                             found = subst_wildcards(found, mdict,
-                                                    ignored_wildcards)
+                                                    ignored_wildcards,
+                                                    ignore_error)
 
                         elif type(found) is bool:
                             found = int(found)
@@ -132,7 +135,7 @@
                         # Check if the wildcard exists as an environment variable
                         env_var = os.environ.get(item)
                         if env_var is not None: subst_list[item] = env_var
-                        else:
+                        elif not ignore_error:
                             log.error(
                                 "Substitution for the wildcard \"%s\" not found",
                                 item)
@@ -142,7 +145,10 @@
     return var
 
 
-def find_and_substitute_wildcards(sub_dict, full_dict, ignored_wildcards=[]):
+def find_and_substitute_wildcards(sub_dict,
+                                  full_dict,
+                                  ignored_wildcards=[],
+                                  ignore_error=False):
     '''
     Recursively find key values containing wildcards in sub_dict in full_dict
     and return resolved sub_dict.
@@ -151,7 +157,7 @@
         if type(sub_dict[key]) in [dict, OrderedDict]:
             # Recursively call this function in sub-dicts
             sub_dict[key] = find_and_substitute_wildcards(
-                sub_dict[key], full_dict, ignored_wildcards)
+                sub_dict[key], full_dict, ignored_wildcards, ignore_error)
 
         elif type(sub_dict[key]) is list:
             sub_dict_key_values = list(sub_dict[key])
@@ -162,18 +168,19 @@
                     # Recursively call this function in sub-dicts
                     sub_dict_key_values[i] = \
                         find_and_substitute_wildcards(sub_dict_key_values[i],
-                                                      full_dict, ignored_wildcards)
+                                                      full_dict, ignored_wildcards, ignore_error)
 
                 elif type(sub_dict_key_values[i]) is str:
                     sub_dict_key_values[i] = subst_wildcards(
-                        sub_dict_key_values[i], full_dict, ignored_wildcards)
+                        sub_dict_key_values[i], full_dict, ignored_wildcards,
+                        ignore_error)
 
             # Set the substituted key values back
             sub_dict[key] = sub_dict_key_values
 
         elif type(sub_dict[key]) is str:
             sub_dict[key] = subst_wildcards(sub_dict[key], full_dict,
-                                            ignored_wildcards)
+                                            ignored_wildcards, ignore_error)
     return sub_dict
 
 
@@ -217,7 +224,7 @@
         return op
 
     # List of 'not applicable' identifiers.
-    na_list = ['--', 'NA', 'N.A.', 'N.A', 'na', 'n.a.', 'n.a']
+    na_list = ['--', 'NA', 'N.A.', 'N.A', 'N/A', 'na', 'n.a.', 'n.a', 'n/a']
     na_list_patterns = '|'.join(na_list)
 
     # List of floating point patterns: '0', '0.0' & '.0'
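
Finally, a brief illustration of the new ignore_error flag (inputs
hypothetical): unresolved wildcards are left in place rather than
flagged as errors, so a master cfg can substitute what it knows now and
defer the rest to the individual flow cfgs:

    mdict = {"proj_root": "/repo"}
    path = subst_wildcards("{proj_root}/hw/ip/{dut}/dv/{dut}_sim_cfg.hjson",
                           mdict, ignore_error=True)
    # path == "/repo/hw/ip/{dut}/dv/{dut}_sim_cfg.hjson"; "{dut}" is
    # resolved later, once the individual cfg is loaded.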