[dvsim] Synthesis target integration
This adds the following features to dvsim:
- SynCfg for synthesis targets
- Dashboard for single synthesis runs (summaries from master configs are
not yet supported)
- Configuration, run, and parsing scripts for the wire-load-model-based
  synthesis flow in DC
Signed-off-by: Michael Schaffner <msf@opentitan.org>
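
With this patch, synthesis runs are launched through dvsim itself rather
than the deleted hw/syn/Makefile. A minimal invocation sketch (illustrative
only; the config path comes from this patch, and the positional-cfg plus
--tool CLI shape is assumed from the util/dvsim/dvsim.py hunk below):

    import subprocess

    subprocess.run(
        [
            "util/dvsim/dvsim.py",
            "hw/top_earlgrey/syn/top_earlgrey_syn_cfg.hjson",  # new SynCfg hjson
            "--tool", "dc",  # 'dc' dispatches to SynCfg (see dvsim.py hunk)
        ],
        check=True,
    )
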
diff --git a/hw/ip/aes/aes.core b/hw/ip/aes/aes.core
index bb79b2a..723d25b 100644
--- a/hw/ip/aes/aes.core
+++ b/hw/ip/aes/aes.core
@@ -86,7 +86,7 @@
syn:
<<: *default_target
- # set default to DC once
+ # TODO: set default to DC once
# this option is available
# olofk/edalize#89
default_tool: icarus
diff --git a/hw/syn/Makefile b/hw/syn/Makefile
deleted file mode 100644
index 4e2932b..0000000
--- a/hw/syn/Makefile
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright lowRISC contributors.
-# Licensed under the Apache License, Version 2.0, see LICENSE for details.
-# SPDX-License-Identifier: Apache-2.0
-#
-# Makefile with synthesis targets for OpenTitan
-
-CORE_ROOT ?= ../../
-REPORT_DIR ?= reports
-# this directory contains the library setup file (setup.tcl)
-LIB_SETUP_SCRIPT ?= `pwd`/../foundry/syn/dc/setup.tcl
-# sources for DC
-TOOL_SRCS = ./tools/dc/start-dc.sh \
- ./tools/dc/run-syn.tcl \
- ./tools/dc/constraints.sdc
-
-IPS ?= ip-aes \
- systems-top_earlgrey
-# ip-alert_handler \
-# ip-flash_ctrl \
-# ip-gpio \
-# ip-hmac \
-# ip-i2c \
-# ip-nmi_gen \
-# ip-padctrl \
-# ip-padring \
-# ip-pinmux \
-# ip-rv_core_ibex \
-# ip-rv_dm \
-# ip-rv_plic_example \
-# ip-rv_timer \
-# ip-spi_device \
-# ip-uart \
-# ip-usbdev \
-# ip-usb_fs_nb_pe \
-# ip-usbuart \
-# tlul-socket_1n \
-# tlul-socket_m1 \
-# tlul-adapter_reg \
-# tlul-adapter_sram \
-# tlul-sram2tlul \
-
-
-ips_syn = $(addsuffix _syn, $(IPS))
-
-######################
-# synthesis targets #
-######################
-
-all: clean
- @echo Discovered synth targets:
- @echo -e "\n $(patsubst %,%\\n,$(strip $(ips_syn)))"
- $(MAKE) $(ips_lint)
-
-$(ips_syn):
- mkdir -p ${REPORT_DIR}
- fusesoc --cores-root ${CORE_ROOT} run --target=syn --tool=icarus --setup lowrisc:$(subst -,:,$(patsubst %_syn,%,$@))
- cp ${TOOL_SRCS} build/lowrisc_*$(subst -,_,$(patsubst %_syn,%,$@))*/syn-icarus/
- export TOP_ENTITY="$(patsubst ip-%,%,$(patsubst tlul-%,%,$(patsubst systems-%,%,$(patsubst %_syn,%,$@))))" && \
- export LIB_SETUP_SCRIPT=${LIB_SETUP_SCRIPT} && \
- cd build/lowrisc_*$(subst -,_,$(patsubst %_syn,%,$@))*/syn-icarus/ && ./start-dc.sh -f run-syn.tcl
- cp build/lowrisc_*$(subst -,_,$(patsubst %_syn,%,$@))*/syn-icarus/REPORTS/*.rpt ${REPORT_DIR}/
-
-clean:
- rm -rf build
- rm -rf ${REPORT_DIR}/*
-
-.PHONY: all $(ips_syn) clean
diff --git a/hw/syn/data/common_syn_cfg.hjson b/hw/syn/data/common_syn_cfg.hjson
new file mode 100644
index 0000000..a7c5a1a
--- /dev/null
+++ b/hw/syn/data/common_syn_cfg.hjson
@@ -0,0 +1,30 @@
+// Copyright lowRISC contributors.
+// Licensed under the Apache License, Version 2.0, see LICENSE for details.
+// SPDX-License-Identifier: Apache-2.0
+{
+ flow: syn
+ flow_makefile: "{proj_root}/hw/syn/data/syn.mk"
+
+ import_cfgs: [// common server configuration for results upload
+ // TODO: check whether this config file can be aligned such that it can
+ // be reused among different flow types
+ // "{proj_root}/hw/dv/data/fusesoc.hjson",
+ "{proj_root}/hw/data/common_project_cfg.hjson",
+ "{proj_root}/hw/syn/data/{tool}.hjson"]
+
+ // Default directory structure for the output
+ dut: "{name}"
+ build_dir: "{scratch_path}/{build_mode}"
+ build_log: "{build_dir}/synthesis.log"
+
+ // We rely on Fusesoc to generate the file list for us
+ sv_flist_gen_cmd: fusesoc
+ fusesoc_core_: "{eval_cmd} echo \"{fusesoc_core}\" | tr ':' '_'"
+
+ // TODO: switch the tool to dc once the corresponding edalize backend is available
+ sv_flist_gen_opts: ["--cores-root {proj_root} ",
+ "run --target={flow} --tool icarus --build-root={build_dir}",
+ "--setup {fusesoc_core}"]
+ sv_flist_gen_dir: "{build_dir}/syn-icarus"
+ sv_flist: "{sv_flist_gen_dir}/{fusesoc_core_}.scr"
+}
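
The fusesoc_core_ wildcard above turns the colon-separated core name into
the underscore-separated stem that names the fusesoc-generated .scr file
list. A rough Python equivalent (the core name is taken from the
top_earlgrey config further below; the {eval_cmd} shell-out is mimicked
directly):

    fusesoc_core = "lowrisc:systems:top_earlgrey:0.1"
    fusesoc_core_ = fusesoc_core.replace(":", "_")  # echo ... | tr ':' '_'
    sv_flist = fusesoc_core_ + ".scr"
    print(sv_flist)  # lowrisc_systems_top_earlgrey_0.1.scr
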
diff --git a/hw/syn/data/dc.hjson b/hw/syn/data/dc.hjson
new file mode 100644
index 0000000..bc57374
--- /dev/null
+++ b/hw/syn/data/dc.hjson
@@ -0,0 +1,26 @@
+// Copyright lowRISC contributors.
+// Licensed under the Apache License, Version 2.0, see LICENSE for details.
+// SPDX-License-Identifier: Apache-2.0
+{
+ // The tool sources include the technology setup file,
+ // the main synthesis run script and the constraints file
+ tool_srcs: ["{proj_root}/hw/foundry/syn/{tool}/setup.tcl"
+ "{proj_root}/hw/syn/tools/{tool}/run-syn.tcl"
+ "{proj_root}/hw/syn/tools/{tool}/constraints.sdc"]
+
+ // Environment variables that are needed in the synthesis script
+ exports: [{"DUT" : "{dut}"},
+ {"BUILD_DIR" : "{build_dir}"},
+ {"SV_FLIST" : "{sv_flist}"}]
+
+ // Tool invocation
+ build_cmd: "dc_shell-xg-t "
+ build_opts: ["-f run-syn.tcl"]
+
+ // DC-specific results parsing script that is called after running synthesis
+ report_cmd: "{proj_root}/hw/syn/tools/{tool}/parse-syn-report.py "
+ report_opts: ["--dut {dut}",
+ "--logpath {build_dir} ",
+ "--reppath {build_dir}/REPORTS/ ",
+ "--outdir {build_dir}"]
+}
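
The exports block passes DUT, BUILD_DIR and SV_FLIST to the tool scripts
through the environment, where run-syn.tcl picks them up via $::env(...).
A sketch of the resulting DC invocation (paths are placeholders; the real
values are substituted by dvsim, and syn.mk tees stdout into {build_log}):

    import os
    import subprocess

    env = dict(
        os.environ,
        DUT="top_earlgrey",
        BUILD_DIR="/path/to/scratch/default",
        SV_FLIST="/path/to/syn-icarus/lowrisc_systems_top_earlgrey_0.1.scr",
    )
    # mirrors build_cmd / build_opts from the hjson above
    with open("synthesis.log", "w") as log_file:
        subprocess.run(["dc_shell-xg-t", "-f", "run-syn.tcl"],
                       env=env, stdout=log_file, check=True)
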
diff --git a/hw/syn/data/syn.mk b/hw/syn/data/syn.mk
new file mode 100644
index 0000000..fb2af5c
--- /dev/null
+++ b/hw/syn/data/syn.mk
@@ -0,0 +1,44 @@
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+.DEFAULT_GOAL := all
+
+all: build
+
+###################
+## build targets ##
+###################
+build: compile_result
+
+gen_sv_flist:
+ @echo "[make]: gen_sv_flist"
+ cd ${build_dir} && ${sv_flist_gen_cmd} ${sv_flist_gen_opts}
+
+pre_compile: gen_sv_flist
+ @echo "[make]: pre_compile"
+ mkdir -p ${build_dir} && env | sort > ${build_dir}/env_vars
+ -cp -Ru ${tool_srcs} ${sv_flist_gen_dir}
+
+compile: pre_compile
+ @echo "[make]: compile"
+ # we check the status in the parse script below
+ cd ${sv_flist_gen_dir} && ${build_cmd} ${build_opts} | tee ${build_log}
+
+post_compile: compile
+ @echo "[make]: post_compile"
+
+# Parse out result
+compile_result: post_compile
+ @echo "[make]: compile_result"
+ ${report_cmd} ${report_opts}
+
+clean:
+ @echo "[make]: clean"
+ rm -rf ${scratch_root}/${dut}/*
+
+.PHONY: all \
+ build \
+ gen_sv_flist \
+ pre_compile \
+ compile \
+ post_compile \
+ compile_result \
+ clean
diff --git a/hw/syn/tools/dc/constraints.sdc b/hw/syn/tools/dc/constraints.sdc
index a3bc2d3..4791cbe 100644
--- a/hw/syn/tools/dc/constraints.sdc
+++ b/hw/syn/tools/dc/constraints.sdc
@@ -9,7 +9,7 @@
set SETUP_CLOCK_UNCERTAINTY 0.5
# TODO: consider splitting this into per-IP .sdc files
-if {$TOP_ENTITY == "top_earlgrey"} {
+if {$DUT == "top_earlgrey"} {
puts "Applying constraints for top level"
@@ -158,4 +158,4 @@
# set a nonzero critical range to be able to spot the violating paths better
# in the report
-set_critical_range 0.5 ${TOP_ENTITY}
+set_critical_range 0.5 ${DUT}
diff --git a/hw/syn/tools/dc/parse-syn-report.py b/hw/syn/tools/dc/parse-syn-report.py
new file mode 100755
index 0000000..f5ed801
--- /dev/null
+++ b/hw/syn/tools/dc/parse-syn-report.py
@@ -0,0 +1,548 @@
+#!/usr/bin/env python3
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+r"""Parses lint report and dump filtered messages in hjson format.
+"""
+import argparse
+import datetime
+import re
+import sys
+from pathlib import Path
+
+import hjson
+import mistletoe
+import numpy as np
+
+# this allows both scientific and fixed point numbers
+FP_NUMBER = r"[-+]?\d+\.\d+[Ee]?[-+]?\d*"
+# fp relative error threshold for report checksums
+CROSSCHECK_REL_TOL = 0.001
+
+
+def _match_strings(full_file, master_key, patterns, results):
+ """
+ This searches for string patterns in the full_file buffer.
+ The argument patterns needs to be a list of tuples with
+ (<error_severity>, <pattern_to_match_for>).
+ """
+ for severity, pattern in patterns:
+ results[master_key][severity] += re.findall(pattern,
+ full_file,
+ flags=re.MULTILINE)
+ return results
+
+
+def _match_fp_number(full_file, master_key, patterns, results):
+ """
+ This extracts numbers from the string buffer full_file.
+ The argument patterns needs to be a list of tuples with
+ (<key>, <pattern_to_match_for>).
+ """
+ for key, pattern in patterns:
+ match = re.findall(pattern, full_file, flags=re.MULTILINE)
+ if len(match) == 1:
+ try:
+ results[master_key][key] = float(match[0])
+ except ValueError as err:
+ results["messages"]["flow_errors"] += ["ValueError: %s" % err]
+ elif len(match) > 0:
+ for item in match:
+ try:
+ results[master_key][key] += [float(item)]
+ except ValueError as err:
+ results["messages"]["flow_errors"] += [
+ "ValueError: %s" % err
+ ]
+ else:
+ results["messages"]["flow_errors"] += [
+ "Pattern %s not found" % pattern
+ ]
+
+ return results
+
+
+def _extract_messages(full_file, results, key):
+ """
+ This extracts error and warning messages from the string buffer full_file.
+ """
+ err_warn_patterns = [("%s_errors" % key, r"^Error: .*"),
+ ("%s_warnings" % key, r"^Warning: .*")]
+ _match_strings(full_file, "messages", err_warn_patterns, results)
+
+ return results
+
+
+def _extract_gate_equiv(full_file, results, key):
+ """
+ This reads out the NAND2 unit gate-equivalent used to normalize area.
+ """
+ try:
+ results[key]["ge"] = float(full_file.strip())
+ except ValueError as err:
+ results["messages"]["flow_errors"] += ["ValueError: %s" % err]
+
+ return results
+
+
+def _rel_err(val, ref):
+ """
+ Calculate relative error with respect to reference
+ """
+ return abs(val - ref) / ref
+
+
+def _extract_area(full_file, results, key):
+ """
+ This extracts detailed area information from the report.
+ Area will be reported in gate equivalents.
+ """
+ # TODO: convert to gate equivalents
+ patterns = [("comb", r"^Combinational area: \s* (\d+\.\d+)"),
+ ("buf", r"^Buf/Inv area: \s* (\d+\.\d+)"),
+ ("reg", r"^Noncombinational area: \s* (\d+\.\d+)"),
+ ("macro", r"^Macro/Black Box area: \s* (\d+\.\d+)"),
+ ("total", r"^Total cell area: \s* (\d+\.\d+)")]
+
+ results = _match_fp_number(full_file, key, patterns, results)
+ # aggregate one level of sub-modules
+ pattern = r"^([\.0-9A-Za-z_\[\]]+){1}(?:(?:/[\.0-9A-Za-z_\[\]]+)*)"
+ for k in range(5):
+ pattern += r"\s+(" + FP_NUMBER + r")"
+ matches = re.findall(pattern, full_file, flags=re.MULTILINE)
+
+ # checksums
+ comb_check = 0.0
+ reg_check = 0.0
+ macro_check = 0.0
+ try:
+ for item in matches:
+ if item[0] not in results[key]["instances"]:
+ results[key]["instances"].update(
+ {item[0]: {
+ "comb": 0.0,
+ "reg": 0.0,
+ "buf": np.nan, # currently not available
+ "macro": 0.0,
+ "total": 0.0,
+ }})
+ results[key]["instances"][item[0]]["comb"] += float(item[3])
+ results[key]["instances"][item[0]]["reg"] += float(item[4])
+ results[key]["instances"][item[0]]["macro"] += float(item[5])
+ results[key]["instances"][item[0]]["total"] += float(item[3]) + \
+ float(item[4]) + \
+ float(item[5])
+ comb_check += float(item[3])
+ reg_check += float(item[4])
+ macro_check += float(item[5])
+
+ except ValueError as err:
+ results["messages"]["flow_errors"] += ["ValueError: %s" % err]
+
+ # cross check whether the above accounting is correct
+ if _rel_err(comb_check, results["area"]["comb"]) > CROSSCHECK_REL_TOL:
+ results["messages"]["flow_errors"] += [
+ "Reporting error: comb_check (%e) != (%e)" %
+ (comb_check, results["area"]["comb"])
+ ]
+ if _rel_err(reg_check, results["area"]["reg"]) > CROSSCHECK_REL_TOL:
+ results["messages"]["flow_errors"] += [
+ "Reporting error: reg_check (%e) != (%e)" %
+ (reg_check, results["area"]["reg"])
+ ]
+ if _rel_err(macro_check, results["area"]["macro"]) > CROSSCHECK_REL_TOL:
+ results["messages"]["flow_errors"] += [
+ "Reporting error: macro_check (%e) != (%e)" %
+ (macro_check, results["area"]["macro"])
+ ]
+
+ return results
+
+
+def _extract_clocks(full_file, results, key):
+ """
+ Parse out the clocks and their period
+ """
+ clocks = re.findall(r"^(.+)\s+(\d+\.?\d*)\s+\{\d+.?\d* \d+.?\d*\}\s+",
+ full_file,
+ flags=re.MULTILINE)
+ try:
+ # create an entry per clock with its period; TNS and WNS are
+ # filled in by _extract_timing afterwards
+ for c in clocks:
+ if c[0].strip() not in results[key]:
+ results[key].update({
+ c[0].strip(): {
+ "tns": 0.0,
+ "wns": 0.0,
+ "period": float(c[1])
+ }
+ })
+ except ValueError as err:
+ results["messages"]["flow_errors"] += ["ValueError: %s" % err]
+
+ return results
+
+
+def _extract_timing(full_file, results, key):
+ """
+ This extracts the TNS and WNS for all defined clocks.
+ """
+ groups = re.findall(r"^ Path Group:\s(.+)\s",
+ full_file,
+ flags=re.MULTILINE)
+
+ slack = re.findall(r"^ slack \(.+\) \s*(" + FP_NUMBER + ")",
+ full_file,
+ flags=re.MULTILINE)
+ try:
+ # get TNS and WNS in that group
+ for k, g in enumerate(groups):
+ g = g.strip()
+ if g not in results[key]:
+ results[key].update(
+ {g: {
+ "tns": 0.0,
+ "wns": 0.0,
+ "period": np.nan
+ }})
+ value = float(slack[k]) if float(slack[k]) < 0.0 else 0.0
+ results[key][g]["wns"] = min(results[key][g]["wns"], value)
+ results[key][g]["tns"] += value
+ except ValueError as err:
+ results["messages"]["flow_errors"] += ["ValueError: %s" % err]
+
+ return results
+
+
+def _match_units(full_file, patterns, key, results):
+ """
+ Compares the match to the units given and stores the corresponding
+ order of magnitude as a floating point value.
+ """
+ for subkey, pattern, units in patterns:
+ match = re.findall(pattern, full_file, flags=re.MULTILINE)
+ try:
+ if match:
+ if len(match[0]) == 2:
+ if match[0][1].strip() in units:
+ results[key][subkey] = float(match[0][0]) * \
+ units[match[0][1].strip()]
+ except ValueError as err:
+ results["messages"]["flow_errors"] += ["ValueError: %s" % err]
+
+ return results
+
+
+def _extract_units(full_file, results, key):
+ """
+ Get the SI units configuration of this run
+ """
+ patterns = [
+ ("voltage", r"^ Voltage Units = (\d+\.?\d*)(nV|uV|mV|V)", {
+ "nV": 1E-9,
+ "uV": 1E-6,
+ "mV": 1E-3,
+ "V": 1E0
+ }),
+ ("capacitance", r"^ Capacitance Units = (\d+\.?\d*)(ff|pf|nf|uf)", {
+ "ff": 1E-15,
+ "pf": 1E-12,
+ "nf": 1E-9,
+ "uf": 1E-6
+ }),
+ ("time", r"^ Time Units = (\d+\.?\d*)(ps|ns|us|ms)", {
+ "ps": 1E-12,
+ "ns": 1E-9,
+ "us": 1E-6,
+ "ms": 1E-3
+ }),
+ ("dynamic", r"^ Dynamic Power Units = (\d+\.?\d*)(pW|nW|uW|mW|W)", {
+ "pW": 1E-12,
+ "nW": 1E-9,
+ "uW": 1E-6,
+ "mW": 1E-3,
+ "W": 1E0
+ }),
+ ("static", r"^ Leakage Power Units = (\d+\.?\d*)(pW|nW|uW|mW|W)", {
+ "pW": 1E-12,
+ "nW": 1E-9,
+ "uW": 1E-6,
+ "mW": 1E-3,
+ "W": 1E0
+ })
+ ]
+
+ _match_units(full_file, patterns, key, results)
+
+ return results
+
+
+def _extract_power(full_file, results, key):
+ """
+ This extracts power estimates for the top module from the report.
+ """
+
+ # extract first 3 columns on that line
+ patterns = [("net", r"^" + results["top"] + r"\s*(" + FP_NUMBER + r")\s*" +
+ FP_NUMBER + r" \s*" + FP_NUMBER),
+ ("int", r"^" + results["top"] + r"\s* " + FP_NUMBER +
+ r" \s*(" + FP_NUMBER + r")\s* " + FP_NUMBER),
+ ("leak", r"^" + results["top"] + r"\s* " + FP_NUMBER +
+ r" \s* " + FP_NUMBER + r" \s*(" + FP_NUMBER + ")")]
+
+ results = _match_fp_number(full_file, key, patterns, results)
+
+ return results
+
+
+def _parse_file(path, name, results, handler, key):
+ """
+ Attempts to open and parse a given report file with the provided handler.
+ """
+ try:
+ with Path(path).joinpath(name).open() as f:
+ full_file = f.read()
+ results = handler(full_file, results, key)
+ except IOError as err:
+ results["messages"]["flow_errors"] += ["IOError: %s" % err]
+ return results
+
+
+def get_results(logpath, reppath, dut):
+ """
+ Parse report and corresponding logfiles and extract error, warning
+ and info messages for each IP present in the result folder
+ """
+
+ results = {
+ "tool": "dc",
+ "top": "",
+ "messages": {
+ "flow_errors": [],
+ "flow_warnings": [],
+ "analyze_errors": [],
+ "analyze_warnings": [],
+ "elab_errors": [],
+ "elab_warnings": [],
+ "compile_errors": [],
+ "compile_warnings": [],
+ },
+ "timing": {
+ # field for each timing group with tns, wns
+ # and the period if this is a clock
+ },
+ "area": {
+ # gate equivalent of a NAND2 gate
+ "ge": np.nan,
+ # summary, in GE
+ "comb": np.nan,
+ "buf": np.nan,
+ "reg": np.nan,
+ "macro": np.nan,
+ "total": np.nan,
+ # hierarchical report with "comb", "buf", "reg", "macro", "total"
+ "instances": {},
+ },
+ "power": {
+ "net": np.nan,
+ "int": np.nan,
+ "leak": np.nan,
+ },
+ "units": {
+ "voltage": np.nan,
+ "capacitance": np.nan,
+ "time": np.nan,
+ "dynamic": np.nan,
+ "static": np.nan,
+ }
+ }
+
+ results["top"] = dut
+
+ # flow messages
+ results = _parse_file(logpath, 'synthesis.log',
+ results, _extract_messages, "flow")
+
+ # messages
+ for rep_type in ["analyze", "elab", "compile"]:
+ results = _parse_file(reppath, '%s.rpt' % rep_type,
+ results, _extract_messages, rep_type)
+
+ # get gate equivalents
+ results = _parse_file(reppath, 'gate_equiv.rpt',
+ results, _extract_gate_equiv, "area")
+ # area
+ results = _parse_file(reppath, 'area.rpt',
+ results, _extract_area, "area")
+ # clocks. this complements the timing report parsed below
+ results = _parse_file(reppath, 'clocks.rpt',
+ results, _extract_clocks, "timing")
+ # timing
+ results = _parse_file(reppath, 'timing.rpt',
+ results, _extract_timing, "timing")
+ # power
+ results = _parse_file(reppath, 'power.rpt',
+ results, _extract_power, "power")
+ # units
+ results = _parse_file(reppath, 'power.rpt',
+ results, _extract_units, "units")
+
+ return results
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="""This script parses DC log and report files from
+ a synthesis run, filters the messages and creates an aggregated result
+ .hjson file with the following fields:
+
+ results = {
+ "tool": "dc",
+ "top" : <name of toplevel>,
+
+ "messages": {
+ "flow_errors" : [],
+ "flow_warnings" : [],
+ "analyze_errors" : [],
+ "analyze_warnings" : [],
+ "elab_errors" : [],
+ "elab_warnings" : [],
+ "compile_errors" : [],
+ "compile_warnings" : [],
+ },
+
+ "timing": {
+ # per timing group (usually a clock domain)
+ # in nanoseconds
+ <group> : {
+ "tns" : <value>,
+ "wns" : <value>,
+ "period" : <value>,
+ ...
+ }
+ },
+
+ "area": {
+ # gate equivalent of a NAND2 gate
+ "ge" : <value>,
+
+ # summary report, in GE
+ "comb" : <value>,
+ "buf" : <value>,
+ "reg" : <value>,
+ "macro" : <value>,
+ "total" : <value>,
+
+ # hierarchical report of the first submodule level
+ "instances" : {
+ <name> : {
+ "comb" : <value>,
+ "buf" : <value>,
+ "reg" : <value>,
+ "macro" : <value>,
+ "total" : <value>,
+ },
+ ...
+ },
+ },
+
+ "power": {
+ "net" : <value>,
+ "int" : <value>,
+ "leak" : <value>,
+ },
+
+ "units": {
+ "voltage" : <value>,
+ "capacitance" : <value>,
+ "time" : <value>,
+ "dynamic" : <value>,
+ "static" : <value>,
+ }
+ }
+
+ The script returns nonzero status if any errors are present.
+ """)
+
+ parser.add_argument(
+ '--dut',
+ type=str,
+ help="""Name of the DUT. This is needed to parse the reports.""")
+
+ parser.add_argument(
+ '--logpath',
+ type=str,
+ help="""Path to log files for the flow.
+ This script expects the following log files to be present:
+
+ - <logpath>/synthesis.log : output of synopsys shell
+
+ """)
+
+ parser.add_argument(
+ '--reppath',
+ type=str,
+ help="""Path to report files of the flow.
+ This script expects the following report files to be present:
+
+ - <reppath>/analyze.rpt : output of analyze command
+ - <reppath>/elab.rpt : output of elab command
+ - <reppath>/compile.rpt : output of compile_ultra
+ - <reppath>/area.rpt : output of report_area
+ - <reppath>/timing.rpt : output of report_timing
+ - <reppath>/power.rpt : output of report_power
+
+ """)
+
+ parser.add_argument('--outdir',
+ type=str,
+ default="./",
+ help="""Output directory for the 'results.hjson' file.
+ Defaults to './'""")
+
+ args = parser.parse_args()
+ results = get_results(args.logpath, args.reppath, args.dut)
+
+ with Path(args.outdir).joinpath("results.hjson").open("w") as results_file:
+ hjson.dump(results,
+ results_file,
+ ensure_ascii=False,
+ for_json=True,
+ use_decimal=True)
+
+ # return nonzero status if any warnings or errors are present
+ # lint infos do not count as failures
+ nr_errors = len(results["messages"]["flow_errors"]) + \
+ len(results["messages"]["analyze_errors"]) + \
+ len(results["messages"]["elab_errors"]) + \
+ len(results["messages"]["compile_errors"])
+
+ print("""------------- Summary -------------
+Flow Warnings: %s
+Flow Errors: %s
+Analyze Warnings: %s
+Analyze Errors: %s
+Elab Warnings: %s
+Elab Errors: %s
+Compile Warnings: %s
+Compile Errors: %s
+-----------------------------------""" %
+ (len(results["messages"]["flow_warnings"]),
+ len(results["messages"]["flow_errors"]),
+ len(results["messages"]["analyze_warnings"]),
+ len(results["messages"]["analyze_errors"]),
+ len(results["messages"]["elab_warnings"]),
+ len(results["messages"]["elab_errors"]),
+ len(results["messages"]["compile_warnings"]),
+ len(results["messages"]["compile_errors"])))
+
+ if nr_errors > 0:
+ print("Synthesis not successful.")
+ sys.exit(1)
+
+ print("Synthesis successful.")
+ sys.exit(0)
+
+
+if __name__ == "__main__":
+ main()
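
The parser above leans on two small building blocks: the FP_NUMBER pattern
for extracting report values and the CROSSCHECK_REL_TOL tolerance for the
area cross-check. A quick standalone sanity check (pattern and tolerance
copied verbatim from the script):

    import re

    FP_NUMBER = r"[-+]?\d+\.\d+[Ee]?[-+]?\d*"
    for s in ("12.5", "-0.25", "1.0E-9"):
        assert re.fullmatch(FP_NUMBER, s)

    CROSSCHECK_REL_TOL = 0.001

    def rel_err(val, ref):
        return abs(val - ref) / ref

    # an aggregation mismatch of 0.05% passes, 1% would be flagged
    assert rel_err(1000.5, 1000.0) < CROSSCHECK_REL_TOL
    assert rel_err(1010.0, 1000.0) > CROSSCHECK_REL_TOL
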
diff --git a/hw/syn/tools/dc/run-syn.tcl b/hw/syn/tools/dc/run-syn.tcl
index 409bbfc..ee77e3a 100644
--- a/hw/syn/tools/dc/run-syn.tcl
+++ b/hw/syn/tools/dc/run-syn.tcl
@@ -9,13 +9,17 @@
#####################
# tool setup
-source [getenv "LIB_SETUP_SCRIPT"]
+source setup.tcl
+
+# path to directory containing the source list file
+set SV_FLIST $::env(SV_FLIST)
+set BUILD_DIR $::env(BUILD_DIR)
# paths
-set WORKLIB "WORK"
-set REPDIR "REPORTS"
-set DDCDIR "DDC"
-set VLOGDIR "NETLISTS"
+set WORKLIB "${BUILD_DIR}/WORK"
+set REPDIR "${BUILD_DIR}/REPORTS"
+set DDCDIR "${BUILD_DIR}/DDC"
+set VLOGDIR "${BUILD_DIR}/NETLISTS"
exec mkdir -p ${REPDIR} ${DDCDIR} ${VLOGDIR} ${WORKLIB}
@@ -28,29 +32,12 @@
# just compile the "core" toplevel at the moment
# might want to switch to top_earlgrey_asic later on (with pads)
-set TOP_ENTITY [getenv "TOP_ENTITY"]
+set DUT $::env(DUT)
-# read source list generated by fusesoc
-set fp [open [ls "*.scr"] r]
-set file_data [read $fp]
-close $fp
-set data [split $file_data "\n"]
-
-# this TECH_LIB_SEL selects the appropriate technology by defining
+# this PRIM_DEFAULT_IMPL selects the appropriate technology by defining
# PRIM_DEFAULT_IMPL=prim_pkg::Impl<tech identifier>
-# TECH_LIB_SEL is set inside the library setup script
+# PRIM_DEFAULT_IMPL is set inside the library setup script
set DEFINE "PRIM_DEFAULT_IMPL=${PRIM_DEFAULT_IMPL} "
-# go through fusesoc file list and separate +defines from actual source files
-set SRC {}
-foreach item $data {
- if {[string range $item 0 7] == "+define+"} {
- set DEFINE "${DEFINE}[string range $item 8 end] "
- } elseif {[string range $item 0 7] == "+incdir+"} {
- lappend search_path "[string range $item 8 end]"
- } else {
- lappend SRC $item
- }
-}
# additional parameters
set PARAMS ""
@@ -63,12 +50,12 @@
remove_design -designs
sh rm -rf $WORKLIB/*
-analyze -define ${DEFINE} -format sv ${SRC}
-elaborate ${TOP_ENTITY} -parameters ${PARAMS}
-link > "${REPDIR}/${TOP_ENTITY}_link.rpt"
+analyze -vcs "-sverilog +define+${DEFINE} -f ${SV_FLIST}" > "${REPDIR}/analyze.rpt"
+elaborate ${DUT} -parameters ${PARAMS} > "${REPDIR}/elab.rpt"
+link > "${REPDIR}/link.rpt"
-write_file -format ddc -hierarchy -output "${DDCDIR}/${TOP_ENTITY}_elab.ddc"
-write_file -format verilog -hierarchy -output "${DDCDIR}/${TOP_ENTITY}_elab.v"
+write_file -format ddc -hierarchy -output "${DDCDIR}/elab.ddc"
+write_file -format verilog -hierarchy -output "${DDCDIR}/elab.v"
#############################
## CLOCK GATING SETUP ##
@@ -98,17 +85,22 @@
# that are only half-finished
# preserve hierarchy for reports
-compile_ultra -gate_clock -scan -no_autoungroup
+compile_ultra -gate_clock -scan -no_autoungroup > "${REPDIR}/compile.rpt"
#################
## REPORTS ##
#################
-report_timing -nosplit -nworst 100 > "${REPDIR}/${TOP_ENTITY}_timing.rpt"
-report_timing -nosplit -nworst 1000 -input -net -trans -cap > "${REPDIR}/${TOP_ENTITY}_timing_long.rpt"
-report_area -hier -nosplit > "${REPDIR}/${TOP_ENTITY}_area.rpt"
-report_power -hier -nosplit > "${REPDIR}/${TOP_ENTITY}_power.rpt"
-report_constraints -all_violators > "${REPDIR}/${TOP_ENTITY}_constraints.rpt"
+# write NAND2 equivalent to file for the reporting scripts
+sh echo ${NAND2_GATE_EQUIVALENT} > "${REPDIR}/gate_equiv.rpt"
+
+report_clocks > "${REPDIR}/clocks.rpt"
+report_timing -nosplit -slack_lesser_than 0.0 > "${REPDIR}/timing.rpt"
+report_area -hier -nosplit > "${REPDIR}/area.rpt"
+report_power -hier -nosplit > "${REPDIR}/power.rpt"
+report_constraints -all_violators > "${REPDIR}/constraints.rpt"
+
+report_timing -nosplit -nworst 1000 -input -net -trans -cap > "${REPDIR}/timing_long.rpt"
#################
## NETLIST ##
@@ -117,8 +109,8 @@
# change_names -rules verilog -hierarchy
# define_name_rules fixbackslashes -allowed "A-Za-z0-9_" -first_restricted "\\" -remove_chars
# change_names -rule fixbackslashes -h
-write_file -format ddc -hierarchy -output "${DDCDIR}/${TOP_ENTITY}_mapped.ddc"
-write_file -format verilog -hierarchy -output "${VLOGDIR}/${TOP_ENTITY}_mapped.v"
+write_file -format ddc -hierarchy -output "${DDCDIR}/mapped.ddc"
+write_file -format verilog -hierarchy -output "${VLOGDIR}/mapped.v"
# ##############################
# ## INCREMENTAL FLATTENING ##
@@ -130,17 +122,17 @@
# ## REPORTS ##
# #################
-# report_timing -nosplit -nworst 100 > "${REPDIR}/${TOP_ENTITY}_flat_timing.rpt"
-# report_timing -nosplit -nworst 1000 -input -net -trans -cap > "${REPDIR}/${TOP_ENTITY}_flat_timing_long.rpt"
-# report_area -hier -nosplit > "${REPDIR}/${TOP_ENTITY}_flat_area.rpt"
-# report_power -hier -nosplit > "${REPDIR}/${TOP_ENTITY}_flat_power.rpt"
-# report_constraints -all_violators > "${REPDIR}/${TOP_ENTITY}_flat_constraints.rpt"
+# report_timing -nosplit -nworst 100 > "${REPDIR}/flat_timing.rpt"
+# report_timing -nosplit -nworst 1000 -input -net -trans -cap > "${REPDIR}/flat_timing_long.rpt"
+# report_area -hier -nosplit > "${REPDIR}/flat_area.rpt"
+# report_power -hier -nosplit > "${REPDIR}/flat_power.rpt"
+# report_constraints -all_violators > "${REPDIR}/flat_constraints.rpt"
# #################
# ## NETLIST ##
# #################
-# write_file -format ddc -hierarchy -output "${DDCDIR}/${TOP_ENTITY}_flat.ddc"
-# write_file -format verilog -hierarchy -output "${VLOGDIR}/${TOP_ENTITY}_flat.v"
+# write_file -format ddc -hierarchy -output "${DDCDIR}/flat.ddc"
+# write_file -format verilog -hierarchy -output "${VLOGDIR}/flat.v"
exit
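
The deleted flist-parsing loop is superseded by analyze -vcs "... -f
${SV_FLIST}", which hands the fusesoc-generated file list to DC directly,
so the +define+/+incdir+ entries no longer need to be split out by hand.
For reference, a Python sketch of the classification the removed Tcl loop
performed (file name assumed):

    defines, incdirs, sources = [], [], []
    with open("lowrisc_systems_top_earlgrey_0.1.scr") as f:
        for line in f:
            item = line.strip()
            if item.startswith("+define+"):
                defines.append(item[len("+define+"):])
            elif item.startswith("+incdir+"):
                incdirs.append(item[len("+incdir+"):])
            elif item:
                sources.append(item)
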
diff --git a/hw/top_earlgrey/syn/top_earlgrey_syn_cfg.hjson b/hw/top_earlgrey/syn/top_earlgrey_syn_cfg.hjson
new file mode 100644
index 0000000..fb03be8
--- /dev/null
+++ b/hw/top_earlgrey/syn/top_earlgrey_syn_cfg.hjson
@@ -0,0 +1,15 @@
+// Copyright lowRISC contributors.
+// Licensed under the Apache License, Version 2.0, see LICENSE for details.
+// SPDX-License-Identifier: Apache-2.0
+{
+ // Top level dut name (sv module).
+ name: top_earlgrey
+
+ // Fusesoc core file used for building the file list.
+ fusesoc_core: lowrisc:systems:top_earlgrey:0.1
+
+ import_cfgs: [// Project wide common synthesis config file
+ "{proj_root}/hw/syn/data/common_syn_cfg.hjson"]
+
+ // TODO: add support for loading the constraints here
+ }
diff --git a/hw/top_earlgrey/top_earlgrey.core b/hw/top_earlgrey/top_earlgrey.core
index 664c511..d34ddb3 100644
--- a/hw/top_earlgrey/top_earlgrey.core
+++ b/hw/top_earlgrey/top_earlgrey.core
@@ -99,7 +99,7 @@
syn:
<<: *default_target
- # set default to DC once
+ # TODO: set default to DC once
# this option is available
# olofk/edalize#89
default_tool: icarus
diff --git a/util/dvsim/Deploy.py b/util/dvsim/Deploy.py
index a856218..e412c40 100644
--- a/util/dvsim/Deploy.py
+++ b/util/dvsim/Deploy.py
@@ -553,10 +553,16 @@
"tool_srcs": False,
"tool_srcs_dir": False,
+ # Flist gen
+ "sv_flist_gen_cmd": False,
+ "sv_flist_gen_dir": False,
+ "sv_flist_gen_opts": False,
+
# Build
"build_dir": False,
"build_cmd": False,
"build_opts": False,
+ "build_log": False,
# Report processing
"report_cmd": False,
diff --git a/util/dvsim/OneShotCfg.py b/util/dvsim/OneShotCfg.py
index 1b01bdb..1157a58 100644
--- a/util/dvsim/OneShotCfg.py
+++ b/util/dvsim/OneShotCfg.py
@@ -26,8 +26,13 @@
self.tool = args.tool
self.email = args.email
self.verbose = args.verbose
+ self.flist_gen_cmd = ""
+ self.flist_gen_opts = []
+ self.sv_flist_gen_dir = ""
+ self.flist_file = ""
self.build_cmd = ""
self.build_opts = []
+ self.build_log = ""
self.report_cmd = ""
self.report_opts = []
self.build_opts.extend(args.build_opts)
diff --git a/util/dvsim/SynCfg.py b/util/dvsim/SynCfg.py
new file mode 100644
index 0000000..7479fd6
--- /dev/null
+++ b/util/dvsim/SynCfg.py
@@ -0,0 +1,363 @@
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+r"""
+Class describing synthesis configuration object
+"""
+
+import logging as log
+import sys
+from pathlib import Path
+
+from tabulate import tabulate
+
+from Deploy import *
+from Modes import *
+from OneShotCfg import OneShotCfg
+from utils import *
+
+
+class SynCfg(OneShotCfg):
+ """Derivative class for synthesis purposes.
+ """
+ def __init__(self, flow_cfg_file, proj_root, args):
+ super().__init__(flow_cfg_file, proj_root, args)
+
+ def __post_init__(self):
+ super().__post_init__()
+ # Set the title for synthesis results.
+ self.results_title = self.name.upper() + " Synthesis Results"
+
+ @staticmethod
+ def create_instance(flow_cfg_file, proj_root, args):
+ '''Create a new instance of this class with the given parameters.
+ '''
+ return SynCfg(flow_cfg_file, proj_root, args)
+
+ def gen_results_summary(self):
+ '''
+ Gathers the aggregated results from all sub configs
+ '''
+
+ # Generate results table for runs.
+ log.info("Create summary of synthesis results")
+
+ results_str = "## " + self.results_title + " (Summary)\n\n"
+ results_str += "### " + self.timestamp_long + "\n\n"
+
+ self.results_summary_md = results_str + "\nNot supported yet.\n"
+
+ print(self.results_summary_md)
+
+ # Return only the tables
+ return self.results_summary_md
+
+ def _gen_results(self):
+ # '''
+ # The function is called after the regression has completed. It looks
+ # for a results.hjson file with the aggregated results from the
+ # synthesis run. The hjson needs to have the following (potentially
+ # empty) fields
+ #
+ # results = {
+ # "tool": "dc",
+ # "top" : <name of toplevel>,
+ #
+ # "messages": {
+ # "flow_errors" : [],
+ # "flow_warnings" : [],
+ # "analyze_errors" : [],
+ # "analyze_warnings" : [],
+ # "elab_errors" : [],
+ # "elab_warnings" : [],
+ # "compile_errors" : [],
+ # "compile_warnings" : [],
+ # },
+ #
+ # "timing": {
+ # # per timing group (usually a clock domain)
+ # # in nanoseconds
+ # <group> : {
+ # "tns" : <value>,
+ # "wns" : <value>,
+ # "period" : <value>,
+ # ...
+ # }
+ # },
+ #
+ # "area": {
+ # # gate equivalent of a NAND2 gate
+ # "ge" : <value>,
+ #
+ # # summary report, in GE
+ # "comb" : <value>,
+ # "buf" : <value>,
+ # "reg" : <value>,
+ # "macro" : <value>,
+ # "total" : <value>,
+ #
+ # # hierarchical report of the first submodule level
+ # "instances" : {
+ # <name> : {
+ # "comb" : <value>,
+ # "buf" : <value>,
+ # "reg" : <value>,
+ # "macro" : <value>,
+ # "total" : <value>,
+ # },
+ # ...
+ # },
+ # },
+ #
+ # "power": {
+ # "net" : <value>,
+ # "int" : <value>,
+ # "leak" : <value>,
+ # },
+ #
+ # "units": {
+ # "voltage" : <value>,
+ # "capacitance" : <value>,
+ # "time" : <value>,
+ # "dynamic" : <value>,
+ # "static" : <value>,
+ # }
+ # }
+ #
+ # note that if this is a master config, the results will
+ # be generated using the _gen_results_summary function
+ # '''
+
+ def _create_entry(val, norm=1.0, total=None, perctag="%"):
+ """
+ Create normalized entry with an optional
+ percentage appended in brackets.
+ """
+ if val is not None and norm is not None:
+ if total is not None:
+ perc = float(val) / float(total) * 100.0
+ entry = "%2.1f %s" % (perc, perctag)
+ else:
+ value = float(val) / norm
+ entry = "%2.1f" % (value)
+ else:
+ entry = "--"
+
+ return entry
+
+ self.result = {}
+
+ # Generate results table for runs.
+ results_str = "## " + self.results_title + "\n\n"
+ results_str += "### " + self.timestamp_long + "\n"
+ results_str += "### Synthesis Tool: " + self.tool.upper() + "\n\n"
+
+ # TODO: extend this to support multiple build modes
+ for mode in self.build_modes:
+
+ # results_str += "## Build Mode: " + mode.name + "\n\n"
+
+ result_data = Path(
+ subst_wildcards(self.build_dir, {"build_mode": mode.name}) +
+ '/results.hjson')
+ log.info("looking for result data file at %s", result_data)
+
+ try:
+ with open(result_data, "r") as results_file:
+ self.result = hjson.load(results_file, use_decimal=True)
+ except IOError as err:
+ log.warning("%s", err)
+ self.result = {
+ "messages": {
+ "flow_errors": ["IOError: %s" % err],
+ "flow_warnings": [],
+ "analyze_errors": [],
+ "analyze_warnings": [],
+ "elab_errors": [],
+ "elab_warnings": [],
+ "compile_errors": [],
+ "compile_warnings": [],
+ },
+ }
+
+ # Message summary
+ # results_str += "### Tool Message Summary\n\n"
+ if "messages" in self.result:
+
+ header = [
+ "Build Mode", "Flow Warnings", "Flow Errors",
+ "Analyze Warnings", "Analyze Errors", "Elab Warnings",
+ "Elab Errors", "Compile Warnings", "Compile Errors"
+ ]
+ colalign = ("left", ) + ("center", ) * (len(header) - 1)
+ table = [header]
+
+ messages = self.result["messages"]
+ table.append([
+ mode.name,
+ str(len(messages["flow_warnings"])) + " W ",
+ str(len(messages["flow_errors"])) + " E ",
+ str(len(messages["analyze_warnings"])) + " W ",
+ str(len(messages["analyze_errors"])) + " E ",
+ str(len(messages["elab_warnings"])) + " W ",
+ str(len(messages["elab_errors"])) + " E ",
+ str(len(messages["compile_warnings"])) + " W ",
+ str(len(messages["compile_errors"])) + " E ",
+ ])
+
+ if len(table) > 1:
+ results_str += tabulate(table,
+ headers="firstrow",
+ tablefmt="pipe",
+ colalign=colalign) + "\n\n"
+ else:
+ results_str += "No messages found\n\n"
+ else:
+ results_str += "No messages found\n\n"
+
+ # Hierarchical Area report
+ results_str += "### Circuit Complexity in [kGE]\n\n"
+ if "area" in self.result:
+
+ header = [
+ "Instance", "Comb ", "Buf/Inv", "Regs", "Macros", "Total",
+ "Total [%]"
+ ]
+ colalign = ("left", ) + ("center", ) * (len(header) - 1)
+ table = [header]
+
+ # print top-level summary first
+ row = ["**" + self.result["top"] + "**"]
+ try:
+ kge = float(self.result["area"]["ge"]) * 1000.0
+
+ for field in ["comb", "buf", "reg", "macro", "total"]:
+ row += [
+ "**" +
+ _create_entry(self.result["area"][field], kge) +
+ "**"
+ ]
+
+ row += ["**--**"]
+ table.append(row)
+
+ # go through submodules
+ for name in self.result["area"]["instances"].keys():
+ if name == self.result["top"]:
+ continue
+ row = [name]
+ for field in ["comb", "buf", "reg", "macro", "total"]:
+ row += [
+ _create_entry(
+ self.result["area"]["instances"][name]
+ [field], kge)
+ ]
+
+ # add percentage of the total area (use the "total" field
+ # explicitly instead of relying on the leaked loop variable)
+ row += [
+ _create_entry(
+ self.result["area"]["instances"][name]["total"],
+ kge, self.result["area"]["total"], "%u")
+ ]
+
+ table.append(row)
+
+ except TypeError:
+ results_str += "Gate equivalent is not properly defined\n\n"
+
+ if len(table) > 1:
+ results_str += tabulate(table,
+ headers="firstrow",
+ tablefmt="pipe",
+ colalign=colalign) + "\n\n"
+ else:
+ results_str += "No area report found\n\n"
+ else:
+ results_str += "No area report found\n\n"
+
+ # Timing report
+ results_str += "### Timing in [ns]\n\n"
+ if "timing" in self.result and "units" in self.result:
+
+ header = ["Clock", "Period", "WNS", "TNS"]
+ colalign = ("left", ) + ("center", ) * (len(header) - 1)
+ table = [header]
+
+ for clock in self.result["timing"].keys():
+ row = [clock]
+ row += [
+ _create_entry(
+ self.result["timing"][clock]["period"],
+ 1.0E-09 / float(self.result["units"]["time"])),
+ _create_entry(
+ self.result["timing"][clock]["wns"], 1.0E-09 /
+ float(self.result["units"]["time"])) + " EN",
+ _create_entry(
+ self.result["timing"][clock]["tns"], 1.0E-09 /
+ float(self.result["units"]["time"])) + " EN"
+ ]
+ table.append(row)
+
+ if len(table) > 1:
+ results_str += tabulate(table,
+ headers="firstrow",
+ tablefmt="pipe",
+ colalign=colalign) + "\n\n"
+ else:
+ results_str += "No timing report found\n\n"
+ else:
+ results_str += "No timing report found\n\n"
+
+ # Power report
+ results_str += "### Power Estimates in [mW]\n\n"
+ if "power" in self.result and "units" in self.result:
+
+ header = ["Network", "Internal", "Leakage", "Total"]
+ colalign = ("center", ) * len(header)
+ table = [header]
+
+ try:
+ self.result["power"]["net"]
+
+ power = [
+ float(self.result["power"]["net"]) *
+ float(self.result["units"]["dynamic"]),
+ float(self.result["power"]["int"]) *
+ float(self.result["units"]["dynamic"]),
+ float(self.result["power"]["leak"]) *
+ float(self.result["units"]["static"])
+ ]
+
+ total_power = sum(power)
+
+ row = [_create_entry(power[0], 1.0E-3) + " / " + \
+ _create_entry(power[0], 1.0E-3, total_power),
+ _create_entry(power[1], 1.0E-3) + " / " + \
+ _create_entry(power[1], 1.0E-3, total_power),
+ _create_entry(power[2], 1.0E-3) + " / " + \
+ _create_entry(power[2], 1.0E-3, total_power),
+ _create_entry(total_power, 1.0E-3)]
+
+ table.append(row)
+ # in case fp values are NoneType
+ except TypeError:
+ results_str += "No power report found\n\n"
+
+ if len(table) > 1:
+ results_str += tabulate(table,
+ headers="firstrow",
+ tablefmt="pipe",
+ colalign=colalign) + "\n\n"
+ else:
+ results_str += "No power report found\n\n"
+
+ # TODO: add support for pie / bar charts for area splits and
+ # QoR history
+
+ self.results_md = results_str
+ # Write results to the scratch area
+ self.results_file = self.scratch_path + "/results_" + self.timestamp + ".md"
+ log.info("Detailed results are available at %s", self.results_file)
+ with open(self.results_file, 'w') as f:
+ f.write(self.results_md)
+
+ return self.results_md
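
The tables above are built around the _create_entry helper, which either
normalizes a raw report value (e.g. area divided by the kGE factor) or
renders it as a percentage of a total; the "%u" tag keeps that percentage
uncolored on the dashboard. A standalone copy to illustrate the outputs
(input values invented):

    def create_entry(val, norm=1.0, total=None, perctag="%"):
        if val is not None and norm is not None:
            if total is not None:
                return "%2.1f %s" % (float(val) / float(total) * 100.0, perctag)
            return "%2.1f" % (float(val) / norm)
        return "--"

    kge = 1.6 * 1000.0  # assumed NAND2 gate-equivalent, scaled to kGE
    print(create_entry(48000.0, kge))                  # 30.0 (area in kGE)
    print(create_entry(48000.0, kge, 160000.0, "%u"))  # 30.0 %u (share of total)
    print(create_entry(None))                          # --
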
diff --git a/util/dvsim/dvsim.py b/util/dvsim/dvsim.py
index d61d304..b832fc8 100755
--- a/util/dvsim/dvsim.py
+++ b/util/dvsim/dvsim.py
@@ -22,6 +22,7 @@
import Deploy
import LintCfg
import SimCfg
+import SynCfg
import utils
# TODO: add dvsim_cfg.hjson to retrieve this info
@@ -128,7 +129,7 @@
parser.add_argument("-t",
"--tool",
default="",
- metavar="vcs|xcelium|ascentlint|...",
+ metavar="vcs|xcelium|ascentlint|dc|...",
help="Override the tool that is set in hjson file")
parser.add_argument(
@@ -476,6 +477,8 @@
# and other ASIC flow targets.
if args.tool == 'ascentlint':
cfg = LintCfg.LintCfg(args.cfg, proj_root, args)
+ elif args.tool == 'dc':
+ cfg = SynCfg.SynCfg(args.cfg, proj_root, args)
else:
cfg = SimCfg.SimCfg(args.cfg, proj_root, args)
diff --git a/util/dvsim/utils.py b/util/dvsim/utils.py
index e42c2a6..2d734be 100644
--- a/util/dvsim/utils.py
+++ b/util/dvsim/utils.py
@@ -206,17 +206,45 @@
def htmc_color_pc_cells(text):
- '''This function finds cells in a html table that contains a "%" sign. It then
- uses the number in front if the % sign to color the cell based on the value
- from a shade from red to green. These color styles are encoded in ./style.css
- which is assumed to be accessible by the final webpage.
+ '''This function finds cells in an html table that contain numerical values
+ (and a few known strings) followed by a single space and an identifier.
+ Depending on the identifier, it shades the cell in a specific way. A set of
+ 12 color palettes for setting those shades are encoded in ./style.css.
+ These are 'cna' (grey), 'c0' (red), 'c1' ... 'c10' (green). 'cna' is used
+ for greying out a box for items marked as 'not applicable', 'c0' is for
+ items that are considered risky (or not yet started), and 'c10' is for
+ items that have completed successfully, or that are 'in good standing'.
+ The shades 'c1' to 'c9' form a gradient from red to lime-green to indicate
+ 'levels of completeness'.
- This function is now augmented to also take "E" or "W" as identifiers along
- with "%". For example, '10 W' is indicative of 10 warnings, and will be color
- coded with yellow. Likewise, "7 E" indicates 7 errors and will be color coded
- with red. A value of 0 in both cases will be color coded with green.
+ These are the supported identifiers: %, %u, G, B, E, W, EN, WN.
+ The shading behavior for these is described below.
- Note that a space between the value and the indicators (%, E, W) is mandatory.
+ %: Coloured percentage, where the number in front of the '%' sign is mapped
+ to a color for the cell ranging from red ('c0') to green ('c10').
+ %u: Uncoloured percentage, where no markup is applied and '%u' is replaced
+ with '%' in the output.
+ G: This stands for 'Good' and results in a green cell.
+ B: This stands for 'Bad' and results in a red cell.
+ E: This stands for 'Errors' and the cell is colored with red if the number
+ in front of the indicator is larger than 0. Otherwise the cell is
+ colored with green.
+ W: This stands for 'Warnings' and the cell is colored with yellow ('c6')
+ if the number in front of the indicator is larger than 0. Otherwise
+ the cell is colored with green.
+ EN: This stands for 'Errors Negative', which behaves the same as 'E' except
+ that the cell is colored red if the number in front of the indicator is
+ negative.
+ WN: This stands for 'Warnings Negative', which behaves the same as 'W'
+ except that the cell is colored yellow if the number in front of the
+ indicator is negative.
+
+ N/A items can have any of the following indicators and need not be
+ preceded by a numerical value:
+
+ '--', 'NA', 'N.A.', 'N.A', 'N/A', 'na', 'n.a.', 'n.a', 'n/a'
+
'''
# Replace <td> with <td class="color-class"> based on the fp
@@ -234,10 +262,10 @@
na_list_patterns = '|'.join(na_list)
# List of floating point patterns: '0', '0.0' & '.0'
- fp_patterns = "\d+|\d+\.\d+|\.\d+"
+ fp_patterns = "[\+\-]?\d+\.?\d*"
patterns = fp_patterns + '|' + na_list_patterns
- indicators = "%|E|W"
+ indicators = "%|%u|G|B|E|W|EN|WN"
match = re.findall(
r"(<td.*>\s*(" + patterns + ")\s+(" + indicators + ")\s*</td>)", text)
if len(match) > 0:
@@ -261,8 +289,8 @@
log.error("Percentage item \"%s\" in cell \"%s\" is not an " + \
"integer or a floating point number", fp_num, cell)
continue
+ # Percentage, colored.
if indicator == "%":
- # Item is a percentage.
if fp >= 0.0 and fp < 10.0: subst = color_cell(cell, "c0")
elif fp >= 10.0 and fp < 20.0:
subst = color_cell(cell, "c1")
@@ -284,15 +312,33 @@
subst = color_cell(cell, "c9")
elif fp >= 100.0:
subst = color_cell(cell, "c10")
- else:
- # Item is a error or a warning num.
- # Use "c6" (yellow) for warnings and "c0" (red) for errors.
- if fp == 0:
+ # Percentage, uncolored.
+ elif indicator == "%u":
+ subst = cell.replace("%u", "%")
+ # Good: green
+ elif indicator == "G":
+ subst = color_cell(cell, "c10", indicator)
+ # Bad: red
+ elif indicator == "B":
+ subst = color_cell(cell, "c0", indicator)
+ # Bad if positive: red for errors, yellow for warnings,
+ # otherwise green.
+ elif indicator in ["E", "W"]:
+ if fp <= 0:
subst = color_cell(cell, "c10", indicator)
elif indicator == "W":
subst = color_cell(cell, "c6", indicator)
elif indicator == "E":
subst = color_cell(cell, "c0", indicator)
+ # Bad if negative: red for errors, yellow for warnings,
+ # otherwise green.
+ elif indicator in ["EN", "WN"]:
+ if fp >= 0:
+ subst = color_cell(cell, "c10", indicator)
+ elif indicator == "WN":
+ subst = color_cell(cell, "c6", indicator)
+ elif indicator == "EN":
+ subst = color_cell(cell, "c0", indicator)
subst_list[cell] = subst
for item in subst_list:
text = text.replace(item, subst_list[item])
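
The new EN/WN identifiers exist so that the negative-slack numbers emitted
by SynCfg (WNS/TNS) are shaded red/yellow while zero or positive values
stay green. Illustrative cells and the shades the rules above assign
(values invented):

    cells = {
        "<td> 85.0 % </td>": "c8",   # colored percentage gradient
        "<td> 25.0 %u </td>": None,  # rendered as "25.0 %", no shading
        "<td> 0 E </td>": "c10",     # zero errors -> green
        "<td> 3 W </td>": "c6",      # warnings -> yellow
        "<td> -1.2 EN </td>": "c0",  # negative slack errors -> red
        "<td> -0.5 WN </td>": "c6",  # negative slack warnings -> yellow
    }
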