[dv regr tool] Coverage collection and reporting
- This PR enables coverage collection as part of the regression run
when the --cov switch is passed
- If there are multiple builds as part of the same DUT, it merges the
coverage across them
- It also merges coverage from previous regressions if the
--cov-merge-previous switch is passed
- Finally, it extracts the high-level coverage summary from the VCS
coverage dashboard and prints it as part of the regression report
Another major update in this PR: every percentage in a report table
that is indicated with a '%' sign is automatically colored in the
html report as a heat map, from red for low percentages to green
approaching 100%. This is enabled for regression results as well as
coverage results.
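
For reference, typical invocations with the new switches might look
like the following (the cfg path and any item-selection switches are
illustrative and elided):

  util/dvsim.py <cfg.hjson> --cov
  util/dvsim.py <cfg.hjson> --cov --cov-merge-previous
  util/dvsim.py <cfg.hjson> --cov-analyze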
Signed-off-by: Srikrishna Iyer <sriyer@google.com>
diff --git a/util/dvsim.py b/util/dvsim.py
index d60eb87..ee516a7 100755
--- a/util/dvsim.py
+++ b/util/dvsim.py
@@ -69,7 +69,7 @@
# and sets the branch name to "default"
def resolve_branch(arg_branch):
if arg_branch is None or arg_branch == "":
- result = subprocess.run(["git", "branch", "--show-current"],
+ result = subprocess.run(["git", "rev-parse", "--abbrev-ref", "HEAD"],
stdout=subprocess.PIPE)
arg_branch = result.stdout.decode("utf-8").strip()
if arg_branch == "":
@@ -251,6 +251,20 @@
action='store_true',
help="turn on coverage collection")
+ parser.add_argument(
+ "--cov-merge-previous",
+ default=False,
+ action='store_true',
+ help="""Applicable when --cov switch is enabled. If a previous cov
+ database directory exists, this switch will cause it to be merged with
+ the current cov database.""")
+
+ parser.add_argument(
+ "--cov-analyze",
+ default=False,
+ action='store_true',
+ help="Analyze the coverage from the last regression result.")
+
parser.add_argument("-p",
"--profile",
default="none",
@@ -422,26 +436,34 @@
# and other ASIC flow targets.
cfg = SimCfg.SimCfg(args.cfg, get_proj_root(), args)
- # Purge the scratch path if --purge option is set.
- if args.purge:
- cfg.purge()
-
# List items available for run if --list switch is passed, and exit.
if args.list != []:
cfg.print_list()
sys.exit(0)
- # Create deploy objects.
- cfg.create_deploy_objects()
+    # In simulation mode: if the --cov-analyze switch is passed, then run the
+    # GUI tool.
+ if args.cov_analyze:
+ cfg.cov_analyze()
+ sys.exit(0)
+
+ # Purge the scratch path if --purge option is set.
+ if args.purge:
+ cfg.purge()
# Deploy the builds and runs
- Deploy.Deploy.deploy(cfg.deploy)
+ if args.items != []:
+ # Create deploy objects.
+ cfg.create_deploy_objects()
+ cfg.deploy_objects()
- # Generate results.
- results = cfg.gen_results()
+ # Generate results.
+ results = cfg.gen_results()
- # Publish results
- if args.publish: cfg.publish_results()
+ # Publish results
+ if args.publish: cfg.publish_results()
+ else:
+ log.info("No items specified to be run.")
if __name__ == '__main__':
diff --git a/util/dvsim/Deploy.py b/util/dvsim/Deploy.py
index d57ea4f..c3b38f0 100644
--- a/util/dvsim/Deploy.py
+++ b/util/dvsim/Deploy.py
@@ -16,6 +16,7 @@
import time
import hjson
+from tabulate import tabulate
from .utils import *
@@ -75,7 +76,7 @@
self.log_fd = None
self.status = None
- # These are command, outut directory and log file
+ # These are command, output directory and log file
self.mandatory_misc_attrs.update({
"name": False,
"build_mode": False,
@@ -157,7 +158,8 @@
self.exports.update(os.environ)
args = shlex.split(self.cmd)
try:
- self.odir_limiter()
+            # If the renew_odir flag is True, back up the existing odir first.
+ if self.renew_odir: self.odir_limiter(odir=self.odir)
os.system("mkdir -p " + self.odir)
os.system("ln -s " + self.odir + " " + self.sim_cfg.links['D'] +
'/' + self.odir_ln)
@@ -176,38 +178,57 @@
if self.log_fd: self.log_fd.close()
self.status = "K"
- # Function to backup previously run output directory to maintain a history of
- # limited number of output directories. It deletes the output directory with the
- # oldest timestamp, if the limit is reached.
- def odir_limiter(self):
- # Return if renew_odir flag is False - we'd be reusing the existing odir.
- if not self.renew_odir: return
+    def odir_limiter(self, odir, max_odirs=-1):
+        '''Function to back up a previously run output directory to maintain a
+        limited history of output directories. It deletes the output
+        directories with the oldest timestamps if the limit is reached, and
+        returns the directories that remain after deletion.
+        Arguments:
+        odir: The output directory to back up.
+        max_odirs: Maximum number of output directories to maintain as history.
+
+        Returns:
+        dirs: Space-separated string of directories that remain after deletion.
+        '''
try:
# If output directory exists, back it up.
- if os.path.exists(self.odir):
+ if os.path.exists(odir):
ts = run_cmd("date '+" + self.sim_cfg.ts_format + "' -d \"" +
- "$(stat -c '%y' " + self.odir + ")\"")
- os.system('mv ' + self.odir + " " + self.odir + "_" + ts)
+ "$(stat -c '%y' " + odir + ")\"")
+ os.system('mv ' + odir + " " + odir + "_" + ts)
except IOError:
- log.error('Failed to back up existing output directory %s',
- self.odir)
+ log.error('Failed to back up existing output directory %s', odir)
+ dirs = ""
# Delete older directories.
try:
- pdir = os.path.realpath(self.odir + "/..")
+ pdir = os.path.realpath(odir + "/..")
+ # Fatal out if pdir got set to root.
+ if pdir == "/":
+ log.fatal(
+ "Something went wrong while processing \"%s\": odir = \"%s\"",
+ self.name, odir)
+ sys.exit(1)
+
if os.path.exists(pdir):
find_cmd = "find " + pdir + " -mindepth 1 -maxdepth 1 -type d "
- num_dirs = int(run_cmd(find_cmd + " | wc -l"))
- num_rm_dirs = num_dirs - Deploy.max_odirs
+ dirs = run_cmd(find_cmd)
+ dirs = dirs.replace('\n', ' ')
+ list_dirs = dirs.split()
+ num_dirs = len(list_dirs)
+ if max_odirs == -1: max_odirs = self.max_odirs
+ num_rm_dirs = num_dirs - max_odirs
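+                # num_rm_dirs + 1 dirs are deleted below, which also makes
+                # room for the new output directory about to be created.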
if num_rm_dirs > -1:
- dirs = run_cmd(find_cmd +
- "-printf '%T+ %p\n' | sort | head -n " +
- str(num_rm_dirs + 1) +
- " | awk '{print $2}'")
- dirs = dirs.replace('\n', ' ')
- os.system("/usr/bin/rm -rf " + dirs)
+ rm_dirs = run_cmd(find_cmd +
+ "-printf '%T+ %p\n' | sort | head -n " +
+ str(num_rm_dirs + 1) +
+ " | awk '{print $2}'")
+ rm_dirs = rm_dirs.replace('\n', ' ')
+ dirs = dirs.replace(rm_dirs, "")
+ os.system("/usr/bin/rm -rf " + rm_dirs)
except IOError:
log.error("Failed to delete old run directories!")
+ return dirs
def set_status(self):
self.status = 'P'
@@ -263,10 +284,10 @@
if self.process.poll() is not None:
self.log_fd.close()
if self.process.returncode != 0:
- msg = "Last 5 lines of the log:<br>\n"
+ msg = "Last 10 lines of the log:<br>\n"
self.fail_msg += msg
log.log(VERBOSE, msg)
- get_fail_msg_cmd = "tail -n 5 " + self.log
+ get_fail_msg_cmd = "tail -n 10 " + self.log
msg = run_cmd(get_fail_msg_cmd)
msg = "```\n{}\n```\n".format(msg)
self.fail_msg += msg
@@ -330,7 +351,7 @@
if item not in status[item.target].keys():
status[item.target][item] = ""
- item.get_status()
+ if item.status == ".": item.get_status()
if item.status != status[
item.target][item] and item.status != ".":
trig_print = True
@@ -410,7 +431,7 @@
"build_opts": False
}
- self.mandatory_misc_attrs = {}
+ self.mandatory_misc_attrs = {"cov_db_dir": False}
# Initialize
super().__init__(sim_cfg)
@@ -428,6 +449,12 @@
CompileSim.items.append(self)
+ def dispatch_cmd(self):
+ # Delete previous cov_db_dir if it exists before dispatching new build.
+ if os.path.exists(self.cov_db_dir):
+ os.system("rm -rf " + self.cov_db_dir)
+ super().dispatch_cmd()
+
class RunTest(Deploy):
"""
@@ -458,6 +485,7 @@
self.mandatory_misc_attrs = {
"run_dir_name": False,
+ "cov_db_test_dir": False,
"pass_patterns": False,
"fail_patterns": False
}
@@ -486,6 +514,17 @@
RunTest.items.append(self)
+ def get_status(self):
+ '''Override base class get_status implementation for additional post-status
+ actions.'''
+ super().get_status()
+ if self.status not in [".", "P"]:
+ # Delete the coverage data if available.
+ if os.path.exists(self.cov_db_test_dir):
+ log.log(VERBOSE, "Deleting coverage data of failing test:\n%s",
+ self.cov_db_test_dir)
+ os.system("/usr/bin/rm -rf " + self.cov_db_test_dir)
+
@staticmethod
def get_seed():
if RunTest.seeds == []:
@@ -495,3 +534,192 @@
seed = int.from_bytes(seed, byteorder='little')
RunTest.seeds.append(seed)
return RunTest.seeds.pop(0)
+
+
+class CovMerge(Deploy):
+ """
+ Abstraction for merging coverage databases. An item of this class is created AFTER
+ the regression is completed.
+ """
+
+    # Register all cov merge items with the class
+ items = []
+
+ def __init__(self, sim_cfg):
+ self.target = "cov_merge"
+ self.pass_patterns = []
+ self.fail_patterns = []
+
+ # Construct local 'special' variable from cov directories that need to
+ # be merged.
+ self.cov_db_dirs = ""
+
+ self.mandatory_cmd_attrs = {
+ "cov_merge_cmd": False,
+ "cov_merge_opts": False
+ }
+
+ self.mandatory_misc_attrs = {
+ "cov_merge_dir": False,
+ "cov_merge_db_dir": False
+ }
+
+ # Initialize
+ super().__init__(sim_cfg)
+ super().parse_dict(sim_cfg.__dict__)
+ self.__post_init__()
+
+ # Override standard output and log patterns.
+ self.odir = self.cov_merge_db_dir
+ self.odir_ln = os.path.basename(os.path.normpath(self.odir))
+
+ # Start fail message construction
+ self.fail_msg = "\n**COV_MERGE:** {}<br>\n".format(self.name)
+ log_sub_path = self.log.replace(self.sim_cfg.scratch_path + '/', '')
+ self.fail_msg += "**LOG:** $scratch_path/{}<br>\n".format(log_sub_path)
+
+ CovMerge.items.append(self)
+
+ def __post_init__(self):
+ # Add cov db dirs from all the builds that were kicked off.
+ for bld in self.sim_cfg.builds:
+ self.cov_db_dirs += bld.cov_db_dir + " "
+
+ # Recursively search and replace wildcards, ignoring cov_db_dirs for now.
+ # We need to resolve it later based on cov_db_dirs value set below.
+ self.__dict__ = find_and_substitute_wildcards(
+ self.__dict__, self.__dict__, ignored_wildcards=["cov_db_dirs"])
+
+ # Prune previous merged cov directories.
+ prev_cov_db_dirs = self.odir_limiter(odir=self.cov_merge_db_dir)
+
+        # If a merged cov database exists from a previous run, then consider
+ # that as well for merging, if the --cov-merge-previous command line
+ # switch is passed.
+ if self.sim_cfg.cov_merge_previous:
+ self.cov_db_dirs += prev_cov_db_dirs
+
+ # Call base class __post_init__ to do checks and substitutions
+ super().__post_init__()
+
+
+class CovReport(Deploy):
+ """
+ Abstraction for coverage report generation. An item of this class is created AFTER
+ the regression is completed.
+ """
+
+    # Register all cov report items with the class
+ items = []
+
+ def __init__(self, sim_cfg):
+ self.target = "cov_report"
+ self.pass_patterns = []
+ self.fail_patterns = []
+ self.cov_results = ""
+
+ self.mandatory_cmd_attrs = {
+ "cov_report_cmd": False,
+ "cov_report_opts": False
+ }
+
+ self.mandatory_misc_attrs = {
+ "cov_report_dir": False,
+ "cov_merge_db_dir": False,
+ "cov_report_dashboard": False
+ }
+
+ # Initialize
+ super().__init__(sim_cfg)
+ super().parse_dict(sim_cfg.__dict__)
+ self.__post_init__()
+
+ # Start fail message construction
+ self.fail_msg = "\n**COV_REPORT:** {}<br>\n".format(self.name)
+ log_sub_path = self.log.replace(self.sim_cfg.scratch_path + '/', '')
+ self.fail_msg += "**LOG:** $scratch_path/{}<br>\n".format(log_sub_path)
+
+ CovReport.items.append(self)
+
+ def get_status(self):
+ super().get_status()
+ # Once passed, extract the cov results summary from the dashboard.
+ if self.status == "P":
+ try:
+ with open(self.cov_report_dashboard, 'r') as f:
+ for line in f:
+ match = re.match("total coverage summary", line,
+ re.IGNORECASE)
+ if match:
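+                            # Illustrative excerpt of the dashboard being
+                            # parsed (actual VCS output may differ):
+                            #   Total Coverage Summary
+                            #   SCORE   LINE    COND    TOGGLE
+                            #   89.54   90.16   85.33   83.20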
+ results = []
+ # Metrics on the next line.
+ line = f.readline().strip()
+ results.append(line.split())
+ # Values on the next.
+ line = f.readline().strip()
+ # Pretty up the values - add % sign for ease of post
+ # processing.
+ values = []
+ for val in line.split():
+ val += " %"
+ values.append(val)
+ results.append(values)
+ colalign = (("center", ) * len(values))
+ self.cov_results = tabulate(results,
+ headers="firstrow",
+ tablefmt="pipe",
+ colalign=colalign)
+ break
+
+ except Exception as e:
+ ex_msg = "Failed to parse \"{}\":\n{}".format(
+ self.cov_report_dashboard, str(e))
+                self.fail_msg += ex_msg
+ log.error(ex_msg)
+ self.status = "F"
+
+ if self.cov_results == "":
+ nf_msg = "Coverage summary not found in the reports dashboard!"
+            self.fail_msg += nf_msg
+ log.error(nf_msg)
+ self.status = "F"
+
+ if self.status == "P":
+            # Delete the cov report log - not needed once the summary is
+            # extracted.
+ os.system("rm -rf " + self.log)
+
+
+class CovAnalyze(Deploy):
+ """
+ Abstraction for coverage analysis tool.
+ """
+
+    # Register all cov analyze items with the class
+ items = []
+
+ def __init__(self, sim_cfg):
+ self.target = "cov_analyze"
+ self.pass_patterns = []
+ self.fail_patterns = []
+
+ self.mandatory_cmd_attrs = {
+ "cov_analyze_cmd": False,
+ "cov_analyze_opts": False
+ }
+
+ self.mandatory_misc_attrs = {
+ "cov_analyze_dir": False,
+ "cov_merge_db_dir": False
+ }
+
+ # Initialize
+ super().__init__(sim_cfg)
+ super().parse_dict(sim_cfg.__dict__)
+ self.__post_init__()
+
+ # Start fail message construction
+ self.fail_msg = "\n**COV_ANALYZE:** {}<br>\n".format(self.name)
+ log_sub_path = self.log.replace(self.sim_cfg.scratch_path + '/', '')
+ self.fail_msg += "**LOG:** $scratch_path/{}<br>\n".format(log_sub_path)
+
+ CovAnalyze.items.append(self)
diff --git a/util/dvsim/FlowCfg.py b/util/dvsim/FlowCfg.py
index f483d7e..ad02c06 100644
--- a/util/dvsim/FlowCfg.py
+++ b/util/dvsim/FlowCfg.py
@@ -11,6 +11,7 @@
import pprint
from shutil import which
+from .Deploy import *
from .utils import *
@@ -66,6 +67,11 @@
# Results
self.results_title = ""
+ self.results_server_prefix = ""
+ self.results_server_url_prefix = ""
+ self.results_server_cmd = ""
+ self.results_server_path = ""
+ self.results_server_dir = ""
# Full results in md text.
self.results_md = ""
@@ -315,12 +321,17 @@
else:
self._create_deploy_objects()
+ def deploy_objects(self):
+ '''Public facing API for deploying all available objects.'''
+ Deploy.deploy(self.deploy)
+
def _gen_results(self, fmt="md"):
'''
- The function is called after the flow has executed. It collates the status of
- all run targets and generates a dict. It parses the testplan and maps the generated
- result to the testplan entries to generate a final table (list). It uses the fmt arg
- to dump the final result as a markdown or html.
+ The function is called after the regression has completed. It collates the
+ status of all run targets and generates a dict. It parses the testplan and
+ maps the generated result to the testplan entries to generate a final table
+ (list). It also prints the full list of failures for debug / triage. The
+ final result is in markdown format.
'''
return
@@ -349,9 +360,9 @@
# Construct the paths
results_fname = 'results.html'
- results_root_dir = "gs://" + self.results_server + '/' + self.rel_path
- results_dir = results_root_dir + '/latest'
- results_page = results_dir + '/' + results_fname
+ results_page = self.results_server_dir + '/' + results_fname
+ results_page_url = results_page.replace(self.results_server_prefix,
+ self.results_server_url_prefix)
# Assume that a 'style.css' is available at root path
css_path = (
@@ -361,11 +372,14 @@
tf = "%Y.%m.%d_%H.%M.%S"
# Extract the timestamp of the existing results_page
- cmd = "gsutil ls -L " + results_page + " | " + "grep \'Creation time:\'"
+ cmd = self.results_server_cmd + " ls -L " + results_page + " | " + \
+ "grep \'Creation time:\'"
+ log.log(VERBOSE, cmd)
cmd_output = subprocess.run(cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
+ log.log(VERBOSE, cmd_output.stdout.decode("utf-8"))
old_results_ts = cmd_output.stdout.decode("utf-8")
old_results_ts = old_results_ts.replace("Creation time:", "")
old_results_ts = old_results_ts.strip()
@@ -391,21 +405,28 @@
datetime.timezone.utc) - datetime.timedelta(days=1)
old_results_ts = ts.strftime(tf)
- old_results_dir = results_root_dir + "/" + old_results_ts
- cmd = ["gsutil", "mv", results_dir, old_results_dir]
+ old_results_dir = self.results_server_path + "/" + old_results_ts
+ cmd = self.results_server_cmd + " mv " + self.results_server_dir + \
+ " " + old_results_dir
+ log.log(VERBOSE, cmd)
cmd_output = subprocess.run(cmd,
+ shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
+ log.log(VERBOSE, cmd_output.stdout.decode("utf-8"))
if cmd_output.returncode != 0:
log.error("Failed to mv old results page \"%s\" to \"%s\"!",
- results_dir, old_results_dir)
+ self.results_server_dir, old_results_dir)
# Do an ls in the results root dir to check what directories exist.
results_dirs = []
- cmd = ["gsutil", "ls", results_root_dir]
+ cmd = self.results_server_cmd + " ls " + self.results_server_path
+ log.log(VERBOSE, cmd)
cmd_output = subprocess.run(args=cmd,
+ shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
+ log.log(VERBOSE, cmd_output.stdout.decode("utf-8"))
if cmd_output.returncode == 0:
# Some directories exist. Check if 'latest' is one of them
results_dirs = cmd_output.stdout.decode("utf-8").strip()
@@ -415,11 +436,11 @@
# Start pruning
log.log(VERBOSE, "Pruning %s area to limit last 7 results",
- results_root_dir)
+ self.results_server_path)
rdirs = []
for rdir in results_dirs:
- dirname = rdir.replace(results_root_dir, '')
+ dirname = rdir.replace(self.results_server_path, '')
dirname = dirname.replace('/', '')
if dirname == "latest": continue
rdirs.append(dirname)
@@ -427,18 +448,20 @@
rm_cmd = ""
history_txt = "\n## Past Results\n"
- history_txt += "- [Latest](" + results_page + ")\n"
+ history_txt += "- [Latest](" + results_page_url + ")\n"
if len(rdirs) > 0:
for i in range(len(rdirs)):
if i < 7:
- rdir_url = results_root_dir + '/' + rdirs[
+ rdir_url = self.results_server_path + '/' + rdirs[
i] + "/" + results_fname
+ rdir_url = rdir_url.replace(self.results_server_prefix,
+ self.results_server_url_prefix)
history_txt += "- [{}]({})\n".format(rdirs[i], rdir_url)
elif i > 14:
- rm_cmd += results_root_dir + '/' + rdirs[i] + " "
+ rm_cmd += self.results_server_path + '/' + rdirs[i] + " "
if rm_cmd != "":
- rm_cmd = "gsutil rm -r " + rm_cmd + "; "
+ rm_cmd = self.results_server_cmd + " -m rm -r " + rm_cmd + "; "
# Append the history to the results.
results_md = self.results_md + history_txt
@@ -451,15 +474,16 @@
f.close()
rm_cmd += "rm -rf " + results_html_file + "; "
- log.info("Publishing results to %s",
- results_page.replace("gs://", "https://"))
- cmd = "gsutil cp " + results_html_file + " " + results_page + "; " + rm_cmd
-
+ log.info("Publishing results to %s", results_page_url)
+ cmd = self.results_server_cmd + " cp " + results_html_file + " " + \
+ results_page + "; " + rm_cmd
+ log.log(VERBOSE, cmd)
try:
cmd_output = subprocess.run(args=cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
+ log.log(VERBOSE, cmd_output.stdout.decode("utf-8"))
except Exception as e:
log.error("%s: Failed to publish results:\n\"%s\"", e, str(cmd))
diff --git a/util/dvsim/SimCfg.py b/util/dvsim/SimCfg.py
index 8849436..20aa1a4 100644
--- a/util/dvsim/SimCfg.py
+++ b/util/dvsim/SimCfg.py
@@ -44,6 +44,7 @@
self.dump = args.dump
self.max_waves = args.max_waves
self.cov = args.cov
+ self.cov_merge_previous = args.cov_merge_previous
self.profile = args.profile
self.xprop_off = args.xprop_off
self.no_rerun = args.no_rerun
@@ -93,6 +94,12 @@
self.build_list = []
self.run_list = []
self.deploy = []
+ self.cov_merge_deploy = None
+ self.cov_report_deploy = None
+
+ # If is_master_cfg is set, then each cfg will have its own cov_deploy.
+ # Maintain an array of those in cov_deploys.
+ self.cov_deploys = []
# Parse the cfg_file file tree
self.parse_flow_cfg(flow_cfg_file)
@@ -110,7 +117,8 @@
# Make substitutions, while ignoring the following wildcards
# TODO: Find a way to set these in sim cfg instead
ignored_wildcards = [
- "build_mode", "index", "test", "seed", "uvm_test", "uvm_test_seq"
+ "build_mode", "index", "test", "seed", "uvm_test", "uvm_test_seq",
+ "cov_db_dirs"
]
self.__dict__ = find_and_substitute_wildcards(self.__dict__,
self.__dict__,
@@ -343,20 +351,54 @@
build_map[test.build_mode].sub.append(item)
runs.append(item)
+ self.builds = builds
+ self.runs = runs
if self.run_only is True:
self.deploy = runs
else:
self.deploy = builds
+ # Create cov_merge and cov_report objects
+ if self.cov:
+ self.cov_merge_deploy = CovMerge(self)
+ self.cov_report_deploy = CovReport(self)
+ # Generate reports only if merge was successful; add it as a dependency
+ # of merge.
+ self.cov_merge_deploy.sub.append(self.cov_report_deploy)
+
# Create initial set of directories before kicking off the regression.
self._create_dirs()
+ def create_deploy_objects(self):
+ '''Public facing API for _create_deploy_objects().
+ '''
+ super().create_deploy_objects()
+
+ # Also, create cov_deploys
+ if self.cov:
+ for item in self.cfgs:
+ self.cov_deploys.append(item.cov_merge_deploy)
+
+    # Deploy additional commands as needed. We do this separately for coverage
+    # since that needs to happen at the end.
+ def deploy_objects(self):
+ '''This is a public facing API, so we use "self.cfgs" instead of self.
+ '''
+ # Invoke the base class method to run the regression.
+ super().deploy_objects()
+
+ # If coverage is enabled, then deploy the coverage tasks.
+ if self.cov:
+ Deploy.deploy(self.cov_deploys)
+
def _gen_results(self):
'''
- The function is called after the regression has completed. It collates the status of
- all run targets and generates a dict. It parses the testplan and maps the generated
- result to the testplan entries to generate a final table (list). It uses the fmt arg
- to dump the final result as a markdown or html.
+ The function is called after the regression has completed. It collates the
+ status of all run targets and generates a dict. It parses the testplan and
+ maps the generated result to the testplan entries to generate a final table
+ (list). It also prints the full list of failures for debug / triage. If cov
+ is enabled, then the summary coverage report is also generated. The final
+ result is in markdown format.
'''
# TODO: add support for html
@@ -393,8 +435,10 @@
regr_results = []
fail_msgs = ""
- (regr_results, fail_msgs) = gen_results_sub(self.deploy, regr_results,
- fail_msgs)
+ deployed_items = self.deploy
+ if self.cov: deployed_items.append(self.cov_merge_deploy)
+ (regr_results, fail_msgs) = gen_results_sub(deployed_items,
+ regr_results, fail_msgs)
# Add title if there are indeed failures
if fail_msgs != "":
@@ -420,6 +464,12 @@
map_full_testplan=self.map_full_testplan)
results_str += "\n"
+        # Append coverage results if coverage was enabled.
+ if self.cov and self.cov_report_deploy.status == "P":
+ results_str += "\n## Coverage Results\n"
+ results_str += "\n### [Coverage Dashboard](cov_report/dashboard.html)\n\n"
+ results_str += self.cov_report_deploy.cov_results
+
# Append failures for triage
self.results_md = results_str + fail_msgs
@@ -432,3 +482,45 @@
# Return only the tables
return results_str
+
+ def _cov_analyze(self):
+ '''Use the last regression coverage data to open up the GUI tool to
+ analyze the coverage.
+ '''
+ cov_analyze_deploy = CovAnalyze(self)
+ try:
+ proc = subprocess.Popen(args=cov_analyze_deploy.cmd,
+ shell=True,
+ close_fds=True)
+ except Exception as e:
+ log.fatal("Failed to run coverage analysis cmd:\n\"%s\"\n%s",
+ cov_analyze_deploy.cmd, e)
+ sys.exit(1)
+
+ def cov_analyze(self):
+ '''Public facing API for analyzing coverage.
+ '''
+ for item in self.cfgs:
+ item._cov_analyze()
+
+ def _publish_results(self):
+ '''Publish coverage results to the opentitan web server.'''
+ super()._publish_results()
+
+ if self.cov:
+ results_server_dir_url = self.results_server_dir.replace(
+ self.results_server_prefix, self.results_server_url_prefix)
+
+ log.info("Publishing coverage results to %s",
+ results_server_dir_url)
+ cmd = self.results_server_cmd + " -m cp -R " + \
+ self.cov_report_deploy.cov_report_dir + " " + self.results_server_dir
+ try:
+ cmd_output = subprocess.run(args=cmd,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ log.log(VERBOSE, cmd_output.stdout.decode("utf-8"))
+ except Exception as e:
+ log.error("%s: Failed to publish results:\n\"%s\"", e,
+ str(cmd))
diff --git a/util/dvsim/style.css b/util/dvsim/style.css
index 5f0c390..911d127 100644
--- a/util/dvsim/style.css
+++ b/util/dvsim/style.css
@@ -72,3 +72,64 @@
.results tbody tr:nth-child(even) {
background: #f2f2f2;
}
+
+/* Color encoding for percentages. */
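+/* "cna" marks a not-applicable value; "c0" through "c10" cover percentages
+   from 0 to 100 in steps of 10 (see htmc_color_pc_cells in utils.py). */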
+.cna {
+    color: #000000;
+ background-color: #f8f8f8;
+}
+
+.c0 {
+ color: #ffffff;
+ background-color: #EF6F6F;
+}
+
+.c1 {
+ color: #ffffff;
+ background-color: #EF886F;
+}
+
+.c2 {
+ color: #000000;
+ background-color: #EFA26F;
+}
+
+.c3 {
+ color: #000000;
+ background-color: #EFBB6F;
+}
+
+.c4 {
+ color: #000000;
+ background-color: #EFD56F;
+}
+
+.c5 {
+ color: #000000;
+ background-color: #EEEF6F;
+}
+
+.c6 {
+ color: #000000;
+ background-color: #D5EF6F;
+}
+
+.c7 {
+ color: #000000;
+ background-color: #BBEF6F;
+}
+
+.c8 {
+ color: #000000;
+ background-color: #A2EF6F;
+}
+
+.c9 {
+ color: #000000;
+ background-color: #88EF6F;
+}
+
+.c10 {
+ color: #000000;
+ background-color: #6FEF6F;
+}
diff --git a/util/dvsim/utils.py b/util/dvsim/utils.py
index 0d124dc..0e95a41 100644
--- a/util/dvsim/utils.py
+++ b/util/dvsim/utils.py
@@ -79,8 +79,9 @@
hjson_cfg_dict = hjson.loads(text, use_decimal=True)
f.close()
except Exception as e:
- log.fatal("Failed to parse \"%s\" possibly due to bad path or syntax error.\n%s",
- hjson_file, e)
+ log.fatal(
+ "Failed to parse \"%s\" possibly due to bad path or syntax error.\n%s",
+ hjson_file, e)
sys.exit(1)
return hjson_cfg_dict
@@ -95,15 +96,19 @@
if "{eval_cmd}" in var:
idx = var.find("{eval_cmd}") + 11
- var = subst_wildcards(var[idx:], mdict, ignored_wildcards)
- var = run_cmd(var)
+ subst_var = subst_wildcards(var[idx:], mdict, ignored_wildcards)
+        # If var has wildcards that were ignored, then skip running the command
+        # for now; assume that it will be handled later.
+ match = re.findall(r"{([A-Za-z0-9\_]+)}", subst_var)
+ if len(match) == 0:
+ var = run_cmd(subst_var)
else:
match = re.findall(r"{([A-Za-z0-9\_]+)}", var)
if len(match) > 0:
subst_list = {}
for item in match:
if item not in ignored_wildcards:
- log.debug("Found wildcard in \"%s\": \"%s\"", var, item)
+ log.debug("Found wildcard \"%s\" in \"%s\"", item, var)
found = subst(item, mdict)
if found is not None:
if type(found) is list:
@@ -190,4 +195,68 @@
html_text += "</div>\n"
html_text += "</body>\n"
html_text += "</html>\n"
+ html_text = htmc_color_pc_cells(html_text)
return html_text
+
+
+def htmc_color_pc_cells(text):
+    '''This function finds cells in an html table that contain a '%' sign. It
+    then uses the number in front of the '%' sign to color the cell with a
+    shade from red to green based on the value. These color styles are encoded
+    in ./style.css, which is assumed to be accessible by the final webpage.
+    '''
+
+ # Replace <td> with <td class="color-class"> based on the fp
+ # value. "color-classes" are listed in ./style.css as follows: "cna"
+ # for NA value, "c0" to "c10" for fp value falling between 0.00-9.99,
+    # 10.00-19.99 ... 90.00-99.99, and 100.0 respectively.
+ def color_cell(cell, cclass):
+ op = cell.replace("<td", "<td class=\"" + cclass + "\"")
+ # Remove '%' sign.
+ op = re.sub(r"\s*%\s*", "", op)
+ return op
+
+ # List of 'not applicable' identifiers.
+ na_list = ['--', 'NA', 'N.A.', 'N.A', 'na', 'n.a.', 'n.a']
+ na_list_patterns = '|'.join(na_list)
+
+ # List of floating point patterns: '0', '0.0' & '.0'
+    fp_patterns = r"\d+|\d+\.\d+|\.\d+"
+
+    patterns = fp_patterns + '|' + na_list_patterns
+    match = re.findall(r"(<td.*>\s*(" + patterns + r")\s+%\s*</td>)", text)
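+    # An illustrative match: the cell '<td align="center">85.21 %</td>' yields
+    # the tuple ('<td align="center">85.21 %</td>', '85.21').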
+ if len(match) > 0:
+ subst_list = {}
+ fp_nums = []
+ for item in match:
+ # item is a tuple - first is the full string indicating the table
+ # cell which we want to replace, second is the floating point value.
+ cell = item[0]
+ fp_num = item[1]
+ # Skip if fp_num is already processed.
+ if fp_num in fp_nums: continue
+ fp_nums.append(fp_num)
+ if fp_num in na_list: subst = color_cell(cell, "cna")
+ else:
+ # Item is a fp num.
+ try:
+ fp = float(fp_num)
+ except ValueError:
+ log.error("Percentage item \"%s\" in cell \"%s\" is not an " + \
+ "integer or a floating point number", fp_num, cell)
+ continue
+ if fp >= 0.0 and fp < 10.0: subst = color_cell(cell, "c0")
+ elif fp >= 10.0 and fp < 20.0: subst = color_cell(cell, "c1")
+ elif fp >= 20.0 and fp < 30.0: subst = color_cell(cell, "c2")
+ elif fp >= 30.0 and fp < 40.0: subst = color_cell(cell, "c3")
+ elif fp >= 40.0 and fp < 50.0: subst = color_cell(cell, "c4")
+ elif fp >= 50.0 and fp < 60.0: subst = color_cell(cell, "c5")
+ elif fp >= 60.0 and fp < 70.0: subst = color_cell(cell, "c6")
+ elif fp >= 70.0 and fp < 80.0: subst = color_cell(cell, "c7")
+ elif fp >= 80.0 and fp < 90.0: subst = color_cell(cell, "c8")
+ elif fp >= 90.0 and fp < 100.0: subst = color_cell(cell, "c9")
+ elif fp >= 100.0: subst = color_cell(cell, "c10")
+ subst_list[cell] = subst
+ for item in subst_list:
+ text = text.replace(item, subst_list[item])
+ return text
diff --git a/util/testplanner/class_defs.py b/util/testplanner/class_defs.py
index e5d21e1..05694d9 100644
--- a/util/testplanner/class_defs.py
+++ b/util/testplanner/class_defs.py
@@ -132,12 +132,12 @@
-        # if a test was not found in regr results, indicate 0/1 passing
+        # if a test was not found in regr results, indicate 0/0 passing
        if map_full_testplan and not found:
-            test_results.append({"name": test, "passing": 0, "total": 1})
+            test_results.append({"name": test, "passing": 0, "total": 0})
        # if no written tests were indicated in the testplan, reuse planned
-        # test name and indicate 0/1 passing
+        # test name and indicate 0/0 passing
        if map_full_testplan and self.tests == []:
-            test_results.append({"name": self.name, "passing": 0, "total": 1})
+            test_results.append({"name": self.name, "passing": 0, "total": 0})
# replace tests with test results
self.tests = test_results
@@ -305,7 +305,7 @@
'''
self.map_regr_results(regr_results, map_full_testplan)
table = [[
- "Milestone", "Name", "Tests", "Passing", "Iterations", "Pass Rate"
+ "Milestone", "Name", "Tests", "Passing", "Total", "Pass Rate"
]]
colalign = ("center", "center", "left", "center", "center", "center")
for entry in self.entries:
@@ -314,8 +314,10 @@
if milestone == "N.A.": milestone = ""
if entry_name == "N.A.": entry_name = ""
for test in entry.tests:
- pass_rate = test["passing"] / test["total"] * 100
- pass_rate = "{0:.2f}".format(round(pass_rate, 2))
+ if test["total"] == 0: pass_rate = "-- %"
+ else:
+ pass_rate = test["passing"] / test["total"] * 100
+ pass_rate = "{0:.2f} %".format(round(pass_rate, 2))
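+            # The trailing '%' sign makes these cells eligible for heat-map
+            # coloring in the html report (see htmc_color_pc_cells in
+            # dvsim/utils.py).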
table.append([
milestone, entry_name, test["name"], test["passing"],
test["total"], pass_rate