[dvsim] Keep dependencies list
This set of changes is aimed at retaining the dependency order across
targets, even if a dependency is not scheduled to run. The
Deploy::dependencies list, once constructed, remains untouched. We
instead change the way the FlowCfg::deploy list is created: if targets
are not required to run (for example, when the --build-only or
--run-only switch is passed), they are not added to the deploy list.
This means the deploy list needs to be constructed correctly, since
the scheduler relies on it to know what to run.
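
As a rough illustration only (a simplified sketch, not the actual
SimCfg code), the selection now looks like this, with the dependency
links left intact:

    deploy = []
    if not run_only:
        deploy += builds    # build jobs are left out with --run-only
    if not build_only:
        deploy += runs      # run jobs are left out with --build-only
    # Each run job still keeps its build job in run.dependencies; the
    # build job is simply never scheduled when --run-only is passed.
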
The scheduler previously walked the dependencies recursively to
determine everything that needed to be run; this required the FlowCfg
to delete dependencies after the fact if flow modifier switches were
passed. That is no longer needed, because the scheduler is now given
the list of things to run explicitly. It also means that when checking
an item's eligibility to be enqueued based on its dependencies'
statuses, the scheduler needs to ignore deps that were not part of the
original deploy list.
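
That eligibility check can be sketched as follows (simplified and
illustrative only: deps_are_ok is a made-up name rather than the real
Scheduler API, and 'P' is taken to mean a passing status):

    def deps_are_ok(item, scheduled_items, item_to_status):
        # Deps that were never scheduled (e.g. the build job under
        # --run-only) are assumed to have completed in an earlier
        # invocation, so they are skipped rather than treated as
        # failures.
        for dep in item.dependencies:
            if dep not in scheduled_items:
                continue
            if item_to_status.get(dep) != 'P':
                return False
        return True
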
The reason for this change is our internal Google Cloud-based
launching system, which runs each job (input -> process -> output) in
an isolated VM instance. The job's inputs and outputs are tarballs
that flit between the user's workstation, Google Cloud Storage, and
the VM instance. To support --run-only in our environment, for
example, the run deploy object needs to be able to provide a pointer
to its build dependency (which would have run in the past) so that the
built simulation executable can be tarballed and uploaded as the run
job's input.

Signed-off-by: Srikrishna Iyer <sriyer@google.com>
diff --git a/util/dvsim/FlowCfg.py b/util/dvsim/FlowCfg.py
index fccb5be..0159177 100644
--- a/util/dvsim/FlowCfg.py
+++ b/util/dvsim/FlowCfg.py
@@ -359,6 +359,11 @@
         deploy = []
         for item in self.cfgs:
             deploy.extend(item.deploy)
+
+        if not deploy:
+            log.fatal("Nothing to run!")
+            sys.exit(1)
+
         return Scheduler(deploy).run()
 
     def _gen_results(self, results):
diff --git a/util/dvsim/Scheduler.py b/util/dvsim/Scheduler.py
index e6db135..f7605bd 100644
--- a/util/dvsim/Scheduler.py
+++ b/util/dvsim/Scheduler.py
@@ -26,6 +26,8 @@
     max_parallel = 16
 
     def __init__(self, items):
+        self.items = items
+
         # 'scheduled[target][cfg]' is a list of Deploy objects for the chosen
         # target and cfg. As items in _scheduled are ready to be run (once
         # their dependencies pass), they are moved to the _queued list, where
@@ -33,8 +35,7 @@
         # When all items (in all cfgs) of a target are done, it is removed from
         # this dictionary.
         self._scheduled = {}
-        for item in items:
-            self.add_to_scheduled(item)
+        self.add_to_scheduled(items)
 
         # Print status periodically using an external status printer.
         self.status_printer = get_status_printer()
@@ -131,19 +132,17 @@
         # We got to the end without anything exploding. Return the results.
         return self.item_to_status
 
-    def add_to_scheduled(self, item):
-        '''Recursively add item and all of its dependencies to _scheduled.
+    def add_to_scheduled(self, items):
+        '''Add items to the list of _scheduled.
 
-        'item' is a Deploy object.
+        'items' is a list of Deploy objects.
 
         '''
-        for dep in item.dependencies:
-            self.add_to_scheduled(dep)
-
-        target_dict = self._scheduled.setdefault(item.target, {})
-        cfg_list = target_dict.setdefault(item.sim_cfg, [])
-        if item not in cfg_list:
-            cfg_list.append(item)
+        for item in items:
+            target_dict = self._scheduled.setdefault(item.target, {})
+            cfg_list = target_dict.setdefault(item.sim_cfg, [])
+            if item not in cfg_list:
+                cfg_list.append(item)
 
     def _remove_from_scheduled(self, item):
         '''Removes the item from _scheduled[target][cfg] list.
@@ -257,6 +256,10 @@
         '''Returns true if ALL dependencies of item are complete.'''
 
         for dep in item.dependencies:
+            # Ignore dependencies that were not scheduled to run.
+            if dep not in self.items:
+                continue
+
             # Has the dep even been enqueued?
             if dep not in self.item_to_status:
                 return False
@@ -277,6 +280,10 @@
         # 'item' can run only if its dependencies have passed (their results
         # should already show up in the item to status map).
         for dep in item.dependencies:
+            # Ignore dependencies that were not scheduled to run.
+            if dep not in self.items:
+                continue
+
             dep_status = self.item_to_status[dep]
             assert dep_status in ['P', 'F', 'K']
 
diff --git a/util/dvsim/SimCfg.py b/util/dvsim/SimCfg.py
index 363214d..f47d000 100644
--- a/util/dvsim/SimCfg.py
+++ b/util/dvsim/SimCfg.py
@@ -504,21 +504,21 @@
         self.runs = ([]
                      if self.build_only else self._expand_run_list(build_map))
 
-        # Discard the build_job dependency that was added earlier if --run-only
-        # switch is passed.
-        if self.run_only:
-            self.builds = []
-            for run in self.runs:
-                run.dependencies = []
+        # Add builds to the list of things to run, only if --run-only switch
+        # is not passed.
+        self.deploy = []
+        if not self.run_only:
+            self.deploy += self.builds
 
-        self.deploy = self.builds + self.runs
+        if not self.build_only:
+            self.deploy += self.runs
 
-        # Create cov_merge and cov_report objects, so long as we've got at
-        # least one run to do.
-        if self.cov and self.runs:
-            self.cov_merge_deploy = CovMerge(self.runs, self)
-            self.cov_report_deploy = CovReport(self.cov_merge_deploy, self)
-            self.deploy += [self.cov_merge_deploy, self.cov_report_deploy]
+            # Create cov_merge and cov_report objects, so long as we've got at
+            # least one run to do.
+            if self.cov and self.runs:
+                self.cov_merge_deploy = CovMerge(self.runs, self)
+                self.cov_report_deploy = CovReport(self.cov_merge_deploy, self)
+                self.deploy += [self.cov_merge_deploy, self.cov_report_deploy]
 
         # Create initial set of directories before kicking off the regression.
         self._create_dirs()