[dvsim] Run cov_merge / cov_report as part of the main set of jobs

Now that dvsim has dependency support (where a job depends on zero or
more earlier jobs), rather than "job spawning" (where a job may spawn
some other jobs on completion), we can express the correct
dependencies for the cov_merge job. This job should only run if all the
tests passed, so it depends on all of them.

One nice side-effect is that the timer count shown on the console no
longer restarts when we get to coverage. The code is also much
simpler.

Finally, we no longer need to store print_legend as a Scheduler class
attribute (because we only actually run the scheduler once, so we
don't need to avoid printing the legend multiple times).

Signed-off-by: Rupert Swarbrick <rswarbrick@lowrisc.org>
diff --git a/util/dvsim/Deploy.py b/util/dvsim/Deploy.py
index 95db494..af9dd0f 100644
--- a/util/dvsim/Deploy.py
+++ b/util/dvsim/Deploy.py
@@ -734,10 +734,12 @@
     # Register all builds with the class
     items = []
 
-    def __init__(self, sim_cfg):
+    def __init__(self, run_items, sim_cfg):
         # Initialize common vars.
         super().__init__(sim_cfg)
 
+        self.dependencies += run_items
+
         self.target = "cov_merge"
         self.pass_patterns = []
         self.fail_patterns = []
diff --git a/util/dvsim/Scheduler.py b/util/dvsim/Scheduler.py
index 26223de..4eab47e 100644
--- a/util/dvsim/Scheduler.py
+++ b/util/dvsim/Scheduler.py
@@ -243,7 +243,6 @@
 
 class Scheduler:
     '''An object to run one or more Deploy items'''
-    print_legend = True
 
     # Max jobs running at one time
     max_parallel = 16
@@ -274,12 +273,8 @@
         '''
         timer = Timer()
 
-        # Print the legend just once (at the start of the first run)
-        if Scheduler.print_legend:
-            log.info("[legend]: [Q: queued, D: dispatched, "
-                     "P: passed, F: failed, K: killed, T: total]")
-            Scheduler.print_legend = False
-
+        log.info("[legend]: [Q: queued, D: dispatched, "
+                 "P: passed, F: failed, K: killed, T: total]")
         results = {}
         for scheduler in self.schedulers.values():
             results.update(scheduler.run(timer, results))
diff --git a/util/dvsim/SimCfg.py b/util/dvsim/SimCfg.py
index 77a36d7..15cb631 100644
--- a/util/dvsim/SimCfg.py
+++ b/util/dvsim/SimCfg.py
@@ -13,7 +13,6 @@
 from collections import OrderedDict
 
 from Deploy import CompileSim, CovAnalyze, CovMerge, CovReport, CovUnr, RunTest
-import Scheduler
 from FlowCfg import FlowCfg
 from Modes import BuildModes, Modes, Regressions, RunModes, Tests
 from tabulate import tabulate
@@ -181,10 +180,6 @@
         self.cov_report_deploy = None
         self.results_summary = OrderedDict()
 
-        # If is_primary_cfg is set, then each cfg will have its own cov_deploy.
-        # Maintain an array of those in cov_deploys.
-        self.cov_deploys = []
-
         super().__init__(flow_cfg_file, hjson_data, args, mk_config)
 
     def _expand(self):
@@ -542,40 +537,16 @@
 
         self.deploy = self.builds + self.runs
 
-        # Create cov_merge and cov_report objects
-        if self.cov:
-            self.cov_merge_deploy = CovMerge(self)
+        # Create cov_merge and cov_report objects, so long as we've got at
+        # least one run to do.
+        if self.cov and self.runs:
+            self.cov_merge_deploy = CovMerge(self.runs, self)
             self.cov_report_deploy = CovReport(self.cov_merge_deploy, self)
+            self.deploy += [self.cov_merge_deploy, self.cov_report_deploy]
 
         # Create initial set of directories before kicking off the regression.
         self._create_dirs()
 
-    def create_deploy_objects(self):
-        '''Public facing API for _create_deploy_objects().
-        '''
-        super().create_deploy_objects()
-
-        # Also, create cov_deploys
-        if self.cov:
-            for item in self.cfgs:
-                if item.cov:
-                    self.cov_deploys.append(item.cov_merge_deploy)
-                    self.cov_deploys.append(item.cov_report_deploy)
-
-    # deploy additional commands as needed. We do this separated for coverage
-    # since that needs to happen at the end.
-    def deploy_objects(self):
-        '''This is a public facing API, so we use "self.cfgs" instead of self.
-        '''
-        # Invoke the base class method to run the regression.
-        results = super().deploy_objects()
-
-        # If coverage is enabled, then deploy the coverage tasks.
-        if self.cov:
-            results.update(Scheduler.run(self.cov_deploys))
-
-        return results
-
     def _cov_analyze(self):
         '''Use the last regression coverage data to open up the GUI tool to
         analyze the coverage.