[dv regr tool] Enabled 2 command line switches
- Enabled the following command line switches:
- --purge: Clean out the scratch area.
This is a standalone switch meant to be run by itself; if any items
are specified to be run, they are ignored.
- --map-full-testplan: Force the results to be mapped onto the
complete testplan. By default, full mapping is disabled; only the
tests run in that regression are mapped, which yields a smaller, more
concise table. See the illustrative invocations and results sketch
after this list.
- Fixes:
- Fixed the results table column alignment.
- Moved the leading '+' of the timestamp format string out of
SimCfg.ts_format and into the 'date' command constructed in Deploy.py.
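
Illustrative invocations (the cfg file path below is hypothetical):

    util/dvsim.py path/to/foo_sim_cfg.hjson --purge
    util/dvsim.py path/to/foo_sim_cfg.hjson --map-full-testplan

For reference, a minimal sketch of the test results structure that
gen_results_sub() builds and Testplan::results_table() consumes, with
made-up test names and counts:

    # One dict per test run; 'passing' / 'total' count the passing vs.
    # attempted iterations (reseeds) of that test.
    test_results = [
        {"name": "foo_smoke", "passing": 48, "total": 50},
        {"name": "foo_csr_rw", "passing": 10, "total": 10},
    ]
    # With --map-full-testplan, planned tests absent from this list are
    # annotated as 0/1 passing; without it, they are simply omitted.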
Signed-off-by: Srikrishna Iyer <sriyer@google.com>
diff --git a/util/dvsim.py b/util/dvsim.py
index 3b07c5b..b80b262 100755
--- a/util/dvsim.py
+++ b/util/dvsim.py
@@ -329,6 +329,13 @@
"Print dvsim tool messages only, without actually running any command")
parser.add_argument(
+ "--map-full-testplan",
+ default=False,
+ action='store_true',
+ help="Force complete testplan annotated results to be shown at the end."
+ )
+
+ parser.add_argument(
"-pi",
"--print-interval",
type=int,
@@ -371,14 +378,21 @@
# be deployed.
cfg = SimCfg.SimCfg(proj_root=get_proj_root(), args=args)
+ # Purge the scratch path if --purge option is set.
+ if args.purge:
+ cfg.do_purge()
+ sys.exit(0)
+
+ # If the --list switch is passed, print the requested items and exit.
+ if args.list != []:
+ cfg.print_list()
+ sys.exit(0)
+
# Deploy the builds and runs
Deploy.Deploy.deploy(cfg.deploy)
# Generate results.
- cfg.gen_results()
-
- # sim_cfg_list = dvsim_parser.run(args)
- # dvsim_scheduler.dispatch(sim_cfg_list)
+ print(cfg.gen_results())
if __name__ == '__main__':
diff --git a/util/dvsim/Deploy.py b/util/dvsim/Deploy.py
index 34eacaf..4b48ca7 100644
--- a/util/dvsim/Deploy.py
+++ b/util/dvsim/Deploy.py
@@ -200,7 +200,7 @@
try:
# If output directory exists, back it up.
if os.path.exists(self.odir):
- ts = run_cmd("date '" + self.sim_cfg.ts_format + "' -d \"" +
+ ts = run_cmd("date '+" + self.sim_cfg.ts_format + "' -d \"" +
"$(stat -c '%y' " + self.odir + ")\"")
os.system('mv ' + self.odir + " " + self.odir + "_" + ts)
except IOError:
diff --git a/util/dvsim/SimCfg.py b/util/dvsim/SimCfg.py
index 0dfea27..ba496a7 100644
--- a/util/dvsim/SimCfg.py
+++ b/util/dvsim/SimCfg.py
@@ -28,9 +28,12 @@
regression framework.
"""
+ # Maintain a list of registered top level cfgs
+ cfgs = []
+
# Static variables - indicate timestamp.
ts_format_long = "%A %B %d %Y %I:%M:%S%p %Z"
- ts_format = '+%a.%m.%d.%y__%I.%M.%S%p'
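+ # The '+' output-format prefix for 'date' now lives at the call site in Deploy.py.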
+ ts_format = "%a.%m.%d.%y__%I.%M.%S%p"
def __str__(self):
return pprint.pformat(self.__dict__)
@@ -69,6 +72,7 @@
self.dry_run = args.dry_run
self.skip_ral = args.skip_ral
self.job_prefix = args.job_prefix
+ self.map_full_testplan = args.map_full_testplan
# Set default sim modes for unpacking
if self.waves is True: self.en_build_modes.append("waves")
@@ -150,10 +154,6 @@
# tests and regressions
self.create_objects()
- # Once all objects are constructed, check if the --list option is passed
- # Is yes, then simply print the items requested and exit.
- self.print_list()
-
# Look at list of items and build the list of tests to run
self.deploy = []
self.build_list = []
@@ -166,6 +166,21 @@
# Print info
log.info("Scratch path: %s", self.scratch_path)
+ # Register self
+ SimCfg.cfgs.append(self)
+
+ def do_purge(self):
+ if self.scratch_path != "":
+ try:
+ log.info("Purging scratch path %s", self.scratch_path)
+ os.system("/bin/rm -rf " + self.scratch_path)
+ except IOError:
+ log.error('Failed to purge scratch directory %s',
+ self.scratch_path)
+
+ # TODO: do not exit here once purging needs to walk all registered cfgs.
+ sys.exit(0)
+
def process_exports(self):
# Convert 'exports' to dict
exports_dict = {}
@@ -306,16 +321,13 @@
setattr(self, "regressions", regressions)
def print_list(self):
- if self.list_items != []:
- for list_item in self.list_items:
- if hasattr(self, list_item):
- items = getattr(self, list_item)
- for item in items:
- log.info(item)
- else:
- log.error("Item %s does not exist!", list_item)
- sys.exit(1)
- sys.exit(0)
+ for list_item in self.list_items:
+ if hasattr(self, list_item):
+ items = getattr(self, list_item)
+ for item in items:
+ log.info(item)
+ else:
+ log.error("Item %s does not exist!", list_item)
def create_build_and_run_list(self):
# Walk through the list of items to run and create the build and run
@@ -429,6 +441,14 @@
return None
def gen_results_sub(items, results):
+ '''
+ Generate the results table from the test runs (builds are ignored).
+ The result is a list of dicts, one per test, each with 3 keys - name,
+ passing and total. 'passing' and 'total' refer to the number of
+ passing iterations (reseeds) and the total attempted for that test.
+ This list of dicts is directly consumed by the Testplan::results_table
+ method for testplan mapping / annotation.
+ '''
if items == []: return results
for item in items:
# Only generate results table for runs.
@@ -443,12 +463,17 @@
return results
# Generate results table for runs.
- regr_results = {}
- regr_results["timestamp"] = self.timestamp_long
- regr_results["test_results"] = gen_results_sub(self.deploy, [])
results_str = "# " + self.name.upper() + " Regression Results\n"
- results_str += " Run on " + regr_results["timestamp"] + "\n"
+ results_str += " Run on " + self.timestamp_long + "\n"
results_str += "\n## Test Results\n"
testplan = testplan_utils.parse_testplan(self.testplan)
- results_str += testplan.results_table(regr_results["test_results"])
- print(results_str)
+ results_str += testplan.results_table(
+ regr_results=gen_results_sub(self.deploy, []),
+ map_full_testplan=self.map_full_testplan)
+
+ # Write results to the scratch area
+ regr_results_file = self.scratch_path + "/regr_results_" + self.timestamp + "." + fmt
+ with open(regr_results_file, 'w') as f:
+ f.write(results_str)
+ return results_str
diff --git a/util/testplanner/class_defs.py b/util/testplanner/class_defs.py
index 02e13e3..322e13d 100644
--- a/util/testplanner/class_defs.py
+++ b/util/testplanner/class_defs.py
@@ -110,7 +110,7 @@
if resolved_tests != []: self.tests = resolved_tests
return True
- def map_regr_results(self, regr_results):
+ def map_regr_results(self, regr_results, map_full_testplan=True):
'''map regression results to tests in this entry
Given a list of regression results (a tuple containing {test name, # passing and
@@ -130,12 +130,12 @@
break
# if a test was not found in regr results, indicate 0/1 passing
- if not found:
+ if map_full_testplan and not found:
test_results.append({"name": test, "passing": 0, "total": 1})
# if no written tests were indicated in the testplan, reuse planned
# test name and indicate 0/1 passing
- if self.tests == []:
+ if map_full_testplan and self.tests == []:
test_results.append({"name": self.name, "passing": 0, "total": 1})
# replace tests with test results
@@ -187,7 +187,7 @@
'''
self.entries = sorted(self.entries, key=lambda entry: entry.milestone)
- def map_regr_results(self, regr_results):
+ def map_regr_results(self, regr_results, map_full_testplan=True):
'''map regression results to testplan entries
'''
def sum_results(totals, entry):
@@ -195,11 +195,24 @@
'''
ms = entry.milestone
for test in entry.tests:
+ # Create dummy tests entry for milestone total
+ if totals[ms].tests == []:
+ totals[ms].tests = [{
+ "name": "TOTAL",
+ "passing": 0,
+ "total": 0
+ }]
+ # Sum milestone total
totals[ms].tests[0]["passing"] += test["passing"]
totals[ms].tests[0]["total"] += test["total"]
+ # Sum grand total
+ if ms != "N.A.":
+ totals["N.A."].tests[0]["passing"] += test["passing"]
+ totals["N.A."].tests[0]["total"] += test["total"]
return totals
totals = {}
+ # Create entry for total in each milestone; & the grand total.
for ms in TestplanEntry.milestones:
totals[ms] = TestplanEntry(name="N.A.",
desc="Total tests",
@@ -209,9 +222,11 @@
"passing": 0,
"total": 0
}])
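+ # Real milestones start with an empty tests list; sum_results then
+ # creates the dummy "TOTAL" entry lazily, only for milestones that
+ # actually map tests.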
+ if ms != "N.A.": totals[ms].tests = []
for entry in self.entries:
- regr_results = entry.map_regr_results(regr_results)
+ regr_results = entry.map_regr_results(regr_results,
+ map_full_testplan)
totals = sum_results(totals, entry)
# extract unmapped tests from regr_results and create 'unmapped' entry
@@ -220,23 +235,17 @@
if not "mapped" in regr_result.keys():
unmapped_regr_results.append(regr_result)
- unmapped = TestplanEntry(name="Unmapped tests",
- desc="""A list of tests in the regression result that are not
+ unmapped = TestplanEntry(
+ name="Unmapped tests",
+ desc="""A list of tests in the regression result that are not
mapped to testplan entries.""",
- milestone="N.A.",
- tests=unmapped_regr_results)
+ milestone="N.A.",
+ tests=unmapped_regr_results)
totals = sum_results(totals, unmapped)
- # Add the grand total: repurpose the milestone = "N.A." key used for it.
- for ms in TestplanEntry.milestones:
- if ms != "N.A.":
- totals["N.A."].tests[0]["passing"] += totals[ms].tests[0][
- "passing"]
- totals["N.A."].tests[0]["total"] += totals[ms].tests[0]["total"]
-
# add total back into 'entries'
- for key in totals.keys():
- if key != "N.A.": self.entries.append(totals[key])
+ for ms in TestplanEntry.milestones[1:]:
+ self.entries.append(totals[ms])
self.sort()
self.entries.append(unmapped)
self.entries.append(totals["N.A."])
@@ -248,11 +257,15 @@
for entry in self.entries:
entry.display()
- def results_table(self, regr_results, tablefmt="github"):
+ def results_table(self,
+ regr_results,
+ map_full_testplan=True,
+ tablefmt="pipe"):
'''Print the mapped regression results into a table.
'''
- self.map_regr_results(regr_results)
+ self.map_regr_results(regr_results, map_full_testplan)
table = [["Milestone", "Name", "Tests", "Results"]]
+ align = ["center", "center", "right", "center"]
for entry in self.entries:
milestone = entry.milestone
entry_name = entry.name
@@ -264,4 +277,7 @@
[milestone, entry_name, test["name"], results_str])
milestone = ""
entry_name = ""
- return tabulate(table, headers="firstrow", tablefmt=tablefmt)
+ return tabulate(table,
+ headers="firstrow",
+ tablefmt=tablefmt,
+ colalign=align)
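
For reference, a minimal sketch of calling the updated results_table()
directly, using the test_results list sketched in the commit message
above (the testplan path is hypothetical, and util/testplanner is
assumed to be on sys.path):

    import testplan_utils  # assumes util/testplanner is on sys.path

    testplan = testplan_utils.parse_testplan('path/to/foo_testplan.hjson')
    print(testplan.results_table(regr_results=test_results,
                                 map_full_testplan=False))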