Merge "Initial commit of cheriot ibex into hw/matcha"
diff --git a/hw/data/common_project_cfg.hjson b/hw/data/common_project_cfg.hjson
new file mode 100644
index 0000000..6d0edb4
--- /dev/null
+++ b/hw/data/common_project_cfg.hjson
@@ -0,0 +1,48 @@
+// Copyright lowRISC contributors.
+// Licensed under the Apache License, Version 2.0, see LICENSE for details.
+// SPDX-License-Identifier: Apache-2.0
+{
+ project: opentitan
+ repo_server: "github.com/lowrisc/opentitan"
+ doc_server: docs.opentitan.org
+ results_server: reports.opentitan.org
+
+ // Default directory structure for the output
+ scratch_base_path: "{scratch_root}/{branch}"
+ scratch_path: "{scratch_base_path}/{dut}-{flow}-{tool}"
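+  // e.g. with scratch_root=/tmp/scratch and branch=main, the scratch path for
+  // DUT "foo" in the syn flow with tool dc is /tmp/scratch/main/foo-syn-dc (illustrative)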
+
+ // Common data structure
+ build_pass_patterns: []
+ // TODO: Add back FuseSoC fail pattern after
+ // https://github.com/lowRISC/opentitan/issues/7348 is resolved.
+ build_fail_patterns: []
+
+ exports: [
+ { SCRATCH_PATH: "{scratch_path}" },
+ { proj_root: "{proj_root}" }
+ ]
+
+  // Results server configuration - the command used to copy over the results.
+ // Workaround for gsutil to fall back to using python2.7.
+ results_server_prefix: "gs://"
+ results_server_cmd: "/usr/bin/gsutil"
+ results_html_name: "report.html"
+
+ // If defined, this is printed into the results md files
+ revision: '''{eval_cmd}
+ COMMIT_L=`git rev-parse HEAD`; \
+ COMMIT_S=`git rev-parse --short HEAD`; \
+ REV_STR="GitHub Revision: [\`$COMMIT_S\`](https://{repo_server}/tree/$COMMIT_L)"; \
+ printf "$REV_STR"; \
+ if [ -d "{proj_root}/hw/foundry" ]; then \
+ COMMIT_FOUNDRY_S=`git -C {proj_root}/hw/foundry rev-parse --short HEAD`; \
+ REV_STR_FOUNDRY="Foundry Revision: \`$COMMIT_FOUNDRY_S\`"; \
+ printf "<br>$REV_STR_FOUNDRY"; \
+ fi
+ '''
+
+ // The current design level
+ design_level: "ip"
+}
diff --git a/hw/syn/tools/dc/.gitignore b/hw/syn/tools/dc/.gitignore
new file mode 100644
index 0000000..d22bf8a
--- /dev/null
+++ b/hw/syn/tools/dc/.gitignore
@@ -0,0 +1,9 @@
+DDC
+NETLISTS
+REPORTS*
+WORK
+alib*
+*.png
+*.log
+*.rpt
+*.txt
diff --git a/hw/syn/tools/dc/internal_build/README.md b/hw/syn/tools/dc/internal_build/README.md
new file mode 100644
index 0000000..cdfb359
--- /dev/null
+++ b/hw/syn/tools/dc/internal_build/README.md
@@ -0,0 +1,18 @@
+These scripts are based on the kelvin/internal scripts.
+
+Run the following to copy the files needed for synthesis and for dynamic
+power analysis with static switching.
+```
+hw/matcha/hw/syn/tools/dc/internal_build/prep_matcha_syn_workspace.sh
+```
+
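+The script reads `ROOTDIR` from the environment; it must point at the
+workspace root. A typical invocation (the `ROOTDIR` value is illustrative) is:
+```
+ROOTDIR="$HOME/matcha" \
+  hw/matcha/hw/syn/tools/dc/internal_build/prep_matcha_syn_workspace.sh
+```
+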
+While the scripts in this directory run, the flow below run-syn.tcl needs
+to be reworked with more accurate constraints and to follow better
+coding practices.
diff --git a/hw/syn/tools/dc/internal_build/prep_matcha_syn_workspace.sh b/hw/syn/tools/dc/internal_build/prep_matcha_syn_workspace.sh
new file mode 100755
index 0000000..9e49ad5
--- /dev/null
+++ b/hw/syn/tools/dc/internal_build/prep_matcha_syn_workspace.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+# Copy the files required for synthesis.
+# Run this before running syn_matcha_dc.txtpb.
+
+# For now use ${ROOTDIR}/internal to store these scripts
+
+INTERNAL_BUILD_DIR="$(dirname "$(realpath "$0")")"
+
+cp -r "${ROOTDIR}/hw/kelvin/internal/syn/libs" "${INTERNAL_BUILD_DIR}"
+cp "${ROOTDIR}"/internal/matcha_syn_scripts/* "${INTERNAL_BUILD_DIR}"
+
+echo "$INTERNAL_BUILD_DIR"
diff --git a/hw/syn/tools/dc/parse-syn-report.py b/hw/syn/tools/dc/parse-syn-report.py
new file mode 100755
index 0000000..c2cad48
--- /dev/null
+++ b/hw/syn/tools/dc/parse-syn-report.py
@@ -0,0 +1,648 @@
+#!/usr/bin/env python3
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+r"""Parses lint report and dump filtered messages in hjson format.
+"""
+import argparse
+import re
+import sys
+from pathlib import Path
+
+import hjson
+
+# this allows both scientific and fixed point numbers
+FP_NUMBER = r"[-+]?\d+\.\d+[Ee]?[-+]?\d*"
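+# e.g. matches "1.00", "-3.5e-2" and "+2.5E3", but not "1" (no decimal point)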
+# fp relative error threshold for report checksums
+CROSSCHECK_REL_TOL = 0.001
+
+
+def _match_fp_number(full_file, patterns):
+ """Extract numbers from patterns in full_file (a string)
+
+ patterns is a list of pairs, (key, pattern). Each pattern should be a
+ regular expression with exactly one capture group. Any match for group will
+ be parsed as a float.
+
+ Returns a pair (nums, errs) where nums is a dictionary keyed by the keys in
+ patterns. The value at K is a list of floats matching patterns[K] if there
+ was more than one match. If there was exactly one match for the
+ patterns[K], the value at K is that float (rather than a singleton list).
+
+ errs is a list of error messages (caused by failed float conversions or
+ when there is no match for a pattern).
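+
+    For example, _match_fp_number("slack: -0.25",
+    [("wns", r"slack: (" + FP_NUMBER + r")")]) returns ({"wns": -0.25}, []).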
+
+ """
+ nums = {}
+ errs = []
+ for key, pattern in patterns:
+ floats = []
+ matches = re.findall(pattern, full_file, flags=re.MULTILINE)
+ if not matches:
+ errs.append('Pattern {!r} of key {!r} not found'.format(
+ pattern, key))
+ continue
+
+ for match in matches:
+ try:
+ floats.append(float(match))
+ except ValueError as err:
+ errs.append('ValueError: {}'.format(err))
+
+ if floats:
+ nums[key] = floats[0] if len(floats) == 1 else floats
+
+ return (nums, errs)
+
+
+def _extract_messages(full_file, results, key, args):
+ """
+    This extracts error and warning messages from the string buffer full_file.
+ """
+ err_warn_patterns = [("%s_errors" % key, r"^Error: .*"),
+ ("%s_errors" % key, r"^ERROR: .*"),
+ ("%s_errors" % key, r"^.*command not found.*"),
+ ("%s_warnings" % key, r"^Warning: .*"),
+ ("%s_warnings" % key, r"^WARNING: .*")]
+ for severity, pattern in err_warn_patterns:
+ results['messages'][severity] += re.findall(pattern,
+ full_file,
+ flags=re.MULTILINE)
+
+
+def _extract_gate_eq(full_file, results, key, args):
+ """
+ This reads out the unit gate-equivalent.
+ """
+ try:
+ results[key]["ge"] = float(full_file.strip())
+ except ValueError as err:
+ results["messages"]["flow_errors"] += ["ValueError: %s" % err]
+
+
+def _rel_err(val, ref):
+ """
+ Calculate relative error with respect to reference
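+    For example, _rel_err(1.1, 1.0) is approximately 0.1; if ref is 0.0, nan is returned.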
+ """
+ if ref == 0.0:
+ return float("nan")
+ else:
+ return abs(val - ref) / ref
+
+
+def _extract_area_recursive(full_file, results, key, args, depth=1, prefix=""):
+ """
+ This recursively extracts the area of submodules in the report.
+ """
+ # current depth level of sub-modules
+ pattern = r"^(" + prefix + r"[\.0-9A-Za-z_\[\]]+){1}(?:(?:/[\.0-9A-Za-z_\[\]]+)*)"
+
+ for k in range(5):
+ pattern += r"\s+(" + FP_NUMBER + r")"
+ matches = re.findall(pattern, full_file, flags=re.MULTILINE)
+
+    # we drop the first entry as it always corresponds to the top level,
+    # for which we already parsed out the summary numbers.
+ if matches and depth == 1:
+ matches = matches[1:]
+
+ instances = results[key]['instances']
+ try:
+ for match in matches:
+ name = match[0]
+
+ if name not in instances:
+ instances.update({
+ name: {
+ "comb": 0.0,
+ "reg": 0.0,
+ "buf": float("nan"), # not available here
+ "logic": 0.0,
+ "macro": 0.0,
+ "total": 0.0,
+ "depth": depth
+ }
+ })
+
+            # step one level down if this module was explicitly specified,
+            # or if we have not reached the expansion depth yet
+ if name in args.expand_modules or depth < args.expand_depth:
+ _extract_area_recursive(full_file,
+ results,
+ key,
+ args,
+ depth=depth + 1,
+ prefix=name + "/")
+
+ comb = float(match[3])
+ reg = float(match[4])
+ macro = float(match[5])
+
+ instance = instances[name]
+
+ instance["comb"] += comb
+ instance["reg"] += reg
+ instance["logic"] += comb + reg
+ instance["macro"] += macro
+ instance["total"] += comb + reg + macro
+
+ except ValueError as err:
+ results["messages"]["flow_errors"] += ["ValueError: %s" % err]
+
+
+def _check_area(results, key, args):
+ """
+ Checks whether the calculated area aggregates are
+ consistent among depth levels.
+ """
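+    # e.g. the "comb" area reported for a module should equal the sum of the
+    # "comb" areas of its direct submodules, to within CROSSCHECK_REL_TOL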
+
+ instances = list(results[key]["instances"].values())
+ names = list(results[key]["instances"].keys())
+ for k, inst in enumerate(instances[:-1]):
+ # checksums
+ comb_check = 0.0
+ reg_check = 0.0
+ macro_check = 0.0
+ do_check = False
+ for subinst in instances[k + 1:]:
+ # if the subinst is one level below, add the
+ # numbers to the checksums.
+ if inst['depth'] + 1 == subinst['depth']:
+ comb_check += subinst["comb"]
+ reg_check += subinst["reg"]
+ macro_check += subinst["macro"]
+ do_check = True
+
+ # if the subinst is on the same level or above, stop the check
+ elif inst['depth'] + 1 > subinst['depth']:
+ break
+        # if there were any submodules, perform the checks
+ if do_check:
+ checks = [("comb", comb_check), ("reg", reg_check),
+ ("macro", macro_check)]
+ for name, val in checks:
+ if _rel_err(val, inst[name]) > CROSSCHECK_REL_TOL:
+ results["messages"]["flow_errors"] += [
+ "Reporting error: %s check for %s: (%e) != (%e)" %
+ (name, names[k], val, inst[name])
+ ]
+
+
+def _extract_area(full_file, results, key, args):
+ """
+ This extracts detailed area information from the report.
+ Area will be reported in gate equivalents.
+ """
+
+ # this extracts the top-level summary
+ patterns = [("comb", r"^Combinational area: \s* (\d+\.\d+)"),
+ ("buf", r"^Buf/Inv area: \s* (\d+\.\d+)"),
+ ("reg", r"^Noncombinational area: \s* (\d+\.\d+)"),
+ ("macro", r"^Macro/Black Box area: \s* (\d+\.\d+)"),
+ ("total", r"^Total cell area: \s* (\d+\.\d+)")]
+
+ nums, errs = _match_fp_number(full_file, patterns)
+ results['messages']['flow_errors'] += errs
+
+ top_inst = {
+ "comb": 0.0,
+ "reg": 0.0,
+ "buf": 0.0,
+ "logic": 0.0,
+ "macro": 0.0,
+ "total": 0.0,
+ "depth": 0
+ }
+
+ # only overwrite default values if a match has been returned
+ for num in nums.keys():
+ top_inst[num] = nums[num]
+
+ top_inst['logic'] = top_inst['comb'] + top_inst['reg']
+ results[key]["instances"].update({args.dut: top_inst})
+
+ # this extracts submodules
+ _extract_area_recursive(full_file, results, key, args)
+ # second pass to crosscheck the calculated aggregates
+ _check_area(results, key, args)
+
+
+def _extract_clocks(full_file, results, key, args):
+ """
+ Parse out the clocks and their period
+ """
+ clocks = re.findall(r"^(.+)\s+(\d+\.?\d*)\s+\{\d+.?\d* \d+.?\d*\}\s+",
+ full_file,
+ flags=re.MULTILINE)
+ try:
+ # get clock period
+ for k, c in enumerate(clocks):
+ if c[0].strip() not in results[key]:
+ results[key].update({
+ c[0].strip(): {
+ "tns": 0.0,
+ "wns": 0.0,
+ "period": float(c[1])
+ }
+ })
+ except ValueError as err:
+ results["messages"]["flow_errors"] += ["ValueError: %s" % err]
+
+
+def _extract_timing(full_file, results, key, args):
+ """
+ This extracts the TNS and WNS for all defined clocks.
+ """
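+    # Typical DC report lines matched below (illustrative):
+    #   Path Group: clk_main
+    #   slack (VIOLATED)                              -0.12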
+ groups = re.findall(r"^ Path Group:\s(.+)\s",
+ full_file,
+ flags=re.MULTILINE)
+
+ slack = re.findall(r"^ slack \(.+\) \s*(" + FP_NUMBER + ")",
+ full_file,
+ flags=re.MULTILINE)
+ try:
+ # get TNS and WNS in that group
+ for k, g in enumerate(groups):
+ if g.strip() not in results[key]:
+ results[key].update({
+ g.strip(): {
+ "tns": 0.0,
+ "wns": 0.0,
+ "period": float("nan")
+ }
+ })
+ value = float(slack[k]) if float(slack[k]) < 0.0 else 0.0
+ results[key][g]["wns"] = min(results[key][g]["wns"], value)
+ results[key][g]["tns"] += value
+ except ValueError as err:
+ results["messages"]["flow_errors"] += ["ValueError: %s" % err]
+
+
+def _match_units(full_file, patterns, key, results):
+ """
+ Compares the match to the units given and stores the corresponding
+ order of magnitude as a floating point value.
+ """
+ for subkey, pattern, units in patterns:
+ match = re.findall(pattern, full_file, flags=re.MULTILINE)
+ try:
+ if match:
+ if len(match[0]) == 2:
+ if match[0][1].strip() in units:
+ results[key][subkey] = float(match[0][0]) * \
+ units[match[0][1].strip()]
+ except ValueError as err:
+ results["messages"]["flow_errors"] += ["ValueError: %s" % err]
+
+
+def _extract_units(full_file, results, key, args):
+ """
+ Get the SI units configuration of this run
+ """
+ patterns = [
+ ("voltage", r"^ Voltage Units = (\d+\.?\d*)(nV|uV|mV|V)", {
+ "nV": 1E-9,
+ "uV": 1E-6,
+ "mV": 1E-3,
+ "V": 1E0
+ }),
+ ("capacitance", r"^ Capacitance Units = (\d+\.?\d*)(ff|pf|nf|uf)", {
+ "ff": 1E-15,
+ "pf": 1E-12,
+ "nf": 1E-9,
+ "uf": 1E-6
+ }),
+ ("time", r"^ Time Units = (\d+\.?\d*)(ps|ns|us|ms)", {
+ "ps": 1E-12,
+ "ns": 1E-9,
+ "us": 1E-6,
+ "ms": 1E-3
+ }),
+ ("dynamic", r"^ Dynamic Power Units = (\d+\.?\d*)(pW|nW|uW|mW|W)", {
+ "pW": 1E-12,
+ "nW": 1E-9,
+ "uW": 1E-6,
+ "mW": 1E-3,
+ "W": 1E0
+ }),
+ ("static", r"^ Leakage Power Units = (\d+\.?\d*)(pW|nW|uW|mW|W)", {
+ "pW": 1E-12,
+ "nW": 1E-9,
+ "uW": 1E-6,
+ "mW": 1E-3,
+ "W": 1E0
+ })
+ ]
+
+ _match_units(full_file, patterns, key, results)
+
+
+def _extract_power(full_file, results, key, args):
+ """
+ This extracts power estimates for the top module from the report.
+ """
+
+    # extract the first three numeric columns of the top module's row
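+    # A typical report_power -hier row looks like (illustrative):
+    #   <top>  1.23e-02  4.56e-03  7.89e+03  ...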
+ patterns = [("net", r"^" + results["top"] + r"[a-zA-Z0-9_]*\s*(" + FP_NUMBER + r")\s*" +
+ FP_NUMBER + r"\s*" + FP_NUMBER),
+ ("int", r"^" + results["top"] + r"[a-zA-Z0-9_]*\s*" + FP_NUMBER + r"\s*(" +
+ FP_NUMBER + r")\s*" + FP_NUMBER),
+ ("leak", r"^" + results["top"] + r"[a-zA-Z0-9_]*\s*" + FP_NUMBER + r" \s*" +
+ FP_NUMBER + r"\s*(" + FP_NUMBER + r")")]
+
+ nums, errs = _match_fp_number(full_file, patterns)
+
+ # only overwrite default values if a match has been returned
+ for num_key in nums.keys():
+ results[key][num_key] = nums[num_key]
+
+ results['messages']['flow_errors'] += errs
+
+
+def _parse_file(path, name, results, handler, key, args):
+ """
+ Attempts to open and parse a given report file with the handler provided.
+ """
+ try:
+ with Path(path).joinpath(name).open() as f:
+ full_file = f.read()
+ handler(full_file, results, key, args)
+ except IOError as err:
+ results["messages"]["flow_errors"] += ["IOError: %s" % err]
+
+
+def get_results(args):
+ """
+ Parse report and corresponding logfiles and extract error, warning
+ and info messages for each IP present in the result folder
+ """
+
+ results = {
+ "tool": "dc",
+ "top": "",
+ "messages": {
+ "flow_errors": [],
+ "flow_warnings": [],
+ "analyze_errors": [],
+ "analyze_warnings": [],
+            # Depending on the termination stage,
+            # these message lists may not exist.
+ "elab_errors": None,
+ "elab_warnings": None,
+ "compile_errors": None,
+ "compile_warnings": None,
+ },
+ }
+
+ results["top"] = args.dut
+
+ args.expand_modules = args.expand_modules.strip().split(',')
+
+ # Check whether the termination stage is known and define the
+ # associated reports to be parsed.
+ if args.termination_stage not in ["analyze", "elab", "compile", "reports"]:
+ results['messages']['flow_errors'].append(
+ 'Unknown termination stage {}'.format(args.termination_stage))
+
+ # We always run analysis, and we always look at the synthesis log.
+ rep_types = [(args.log_path, 'synthesis.log', 'flow', _extract_messages),
+ (args.rep_path, 'analyze.rpt', 'analyze', _extract_messages)]
+
+ if args.termination_stage in ["elab", "compile", "reports"]:
+ rep_types += [(args.rep_path, 'elab.rpt', 'elab', _extract_messages)]
+ results["messages"]["elab_errors"] = []
+ results["messages"]["elab_warnings"] = []
+ if args.termination_stage in ["compile", "reports"]:
+ rep_types += [(args.rep_path, 'compile.rpt', 'compile', _extract_messages)]
+ results["messages"]["compile_errors"] = []
+ results["messages"]["compile_warnings"] = []
+ if args.termination_stage in ["reports"]:
+ rep_types += [(args.rep_path, 'gate_equiv.rpt', 'area', _extract_gate_eq),
+ (args.rep_path, 'area.rpt', 'area', _extract_area),
+ (args.rep_path, 'clocks.rpt', 'timing', _extract_clocks),
+ (args.rep_path, 'timing.rpt', 'timing', _extract_timing),
+ (args.rep_path, 'power.rpt', 'power', _extract_power),
+ (args.rep_path, 'power.rpt', 'units', _extract_units)]
+ results.update({
+ "timing": {
+ # field for each timing group with tns, wns
+ # and the period if this is a clock
+ },
+ "area": {
+ # gate equivalent of a NAND2 gate
+ "ge": float("nan"),
+            # hierarchical report with "comb", "buf", "reg", "macro", "total"
+ "instances": {},
+ },
+ "power": {
+ "net": float("nan"),
+ "int": float("nan"),
+ "leak": float("nan"),
+ },
+ "units": {
+ "voltage": float("nan"),
+ "capacitance": float("nan"),
+ "time": float("nan"),
+ "dynamic": float("nan"),
+ "static": float("nan"),
+ }
+ })
+
+ for path, name, key, handler in rep_types:
+ _parse_file(path, name, results, handler, key, args)
+
+ return results
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="""This script parses DC log and report files from
+ a synthesis run, filters the messages and creates an aggregated result
+ .hjson file with the following fields:
+
+ results = {
+ "tool": "dc",
+ "top" : <name of toplevel>,
+
+ "messages": {
+ "flow_errors" : [],
+ "flow_warnings" : [],
+ "analyze_errors" : [],
+ "analyze_warnings" : [],
+ "elab_errors" : [],
+ "elab_warnings" : [],
+ "compile_errors" : [],
+ "compile_warnings" : [],
+ },
+
+ "timing": {
+            # per timing group (usually a clock domain)
+            # in nanoseconds
+ <group> : {
+ "tns" : <value>,
+ "wns" : <value>,
+ "period" : <value>,
+ ...
+ }
+ },
+
+ "area": {
+ # gate equivalent of a NAND2 gate
+ "ge" : <value>,
+
+ # summary report, in GE
+ "comb" : <value>,
+ "buf" : <value>,
+ "reg" : <value>,
+ "macro" : <value>,
+ "total" : <value>,
+
+        # hierarchical report of first submodule level
+ "instances" : {
+ <name> : {
+ "comb" : <value>,
+ "buf" : <value>,
+ "reg" : <value>,
+ "macro" : <value>,
+ "total" : <value>,
+ },
+ ...
+ },
+ },
+
+ "power": {
+ "net" : <value>,
+ "int" : <value>,
+ "leak" : <value>,
+ },
+
+ "units": {
+ "voltage" : <value>,
+ "capacitance" : <value>,
+ "time" : <value>,
+ "dynamic" : <value>,
+ "static" : <value>,
+ }
+ }
+
+ The script returns nonzero status if any errors are present.
+ """)
+
+ parser.add_argument(
+ '--dut',
+ type=str,
+ help="""Name of the DUT. This is needed to parse the reports.""")
+
+ parser.add_argument('--log-path',
+ type=str,
+ help="""
+ Path to log files for the flow.
+ This script expects the following log files to be present:
+
+ - <log-path>/synthesis.log : output of synopsys shell
+
+ """)
+
+ parser.add_argument('--rep-path',
+ type=str,
+ help="""
+ Path to report files of the flow.
+ This script expects the following report
+ files to be present:
+
+ - <rep-path>/analyze.rpt : output of analyze command
+ - <rep-path>/elab.rpt : output of elab command
+ - <rep-path>/compile.rpt : output of compile_ultra
+ - <rep-path>/area.rpt : output of report_area
+ - <rep-path>/timing.rpt : output of report_timing
+ - <rep-path>/power.rpt : output of report_power
+
+ """)
+
+ parser.add_argument('--out-dir',
+ type=str,
+ default="./",
+ help="""Output directory for the 'results.hjson' file.
+ Defaults to './'""")
+
+ parser.add_argument('--expand-depth',
+ type=int,
+ default=1,
+                        help="""Depth to which the hierarchical area report is expanded""")
+
+ parser.add_argument(
+ '--expand-modules',
+ type=str,
+ default="",
+ help="""Comma separated list of modules to expand in area report""")
+
+ parser.add_argument(
+ '--termination-stage',
+ type=str,
+ default="",
+ help="""Can be either 'analyze', 'elab', 'compile' or 'reports'""")
+
+ args = parser.parse_args()
+ results = get_results(args)
+
+ with Path(
+ args.out_dir).joinpath("results.hjson").open("w") as results_file:
+ hjson.dump(results,
+ results_file,
+ ensure_ascii=False,
+ for_json=True,
+ use_decimal=True)
+
+ # helper function
+ def _getlen(x):
+ return len(x) if x is not None else 0
+
+ # return nonzero status if any warnings or errors are present
+ # lint infos do not count as failures
+ nr_errors = (_getlen(results["messages"]["flow_errors"]) +
+ _getlen(results["messages"]["analyze_errors"]) +
+ _getlen(results["messages"]["elab_errors"]) +
+ _getlen(results["messages"]["compile_errors"]))
+
+ print("""------------- Summary -------------
+Flow Warnings: %s
+Flow Errors: %s
+Analyze Warnings: %s
+Analyze Errors: %s
+Elab Warnings: %s
+Elab Errors: %s
+Compile Warnings: %s
+Compile Errors: %s
+-----------------------------------""" %
+ (_getlen(results["messages"]["flow_warnings"]),
+ _getlen(results["messages"]["flow_errors"]),
+ _getlen(results["messages"]["analyze_warnings"]),
+ _getlen(results["messages"]["analyze_errors"]),
+ _getlen(results["messages"]["elab_warnings"]),
+ _getlen(results["messages"]["elab_errors"]),
+ _getlen(results["messages"]["compile_warnings"]),
+ _getlen(results["messages"]["compile_errors"])))
+
+    # TODO: Once file parsing is working, uncomment the error check below.
+    # if nr_errors > 0:
+    #     print("Synthesis not successful.")
+    #     sys.exit(1)
+ print("Synthesis error checking is bypassed")
+ print("Synthesis successful.")
+ sys.exit(0)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/hw/syn/tools/dc/run-syn.tcl b/hw/syn/tools/dc/run-syn.tcl
new file mode 100644
index 0000000..3dd6e4c
--- /dev/null
+++ b/hw/syn/tools/dc/run-syn.tcl
@@ -0,0 +1,267 @@
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Simple tcl script for DC to do some wire-load-model-based test syntheses.
+
+#####################
+## PREPARE FLOW ##
+#####################
+
+proc get_env_var {name} {
+ if {[info exists ::env($name)]} {
+ set val "[set ::env([set name])]"
+ puts "::env($name) = $val"
+ return $val
+ } else {
+ puts "ERROR: Script run without $name environment variable."
+ quit
+ }
+}
+
+set FOUNDRY_ROOT [get_env_var "FOUNDRY_ROOT"]
+set SYN_ROOT [get_env_var "SYN_ROOT"]
+set SV_FLIST [get_env_var "SV_FLIST"]
+set BUILD_DIR [get_env_var "BUILD_DIR"]
+set DUT [get_env_var "DUT"]
+set CONSTRAINT [get_env_var "CONSTRAINT"]
+set FOUNDRY_CONSTRAINT [get_env_var "FOUNDRY_CONSTRAINT"]
+set PARAMS [get_env_var "PARAMS"]
+set POST_ELAB_SCRIPT [get_env_var "POST_ELAB_SCRIPT"]
+set TERMINATION_STAGE [get_env_var "TERMINATION_STAGE"]
+
+########################################
+## BEGIN MODIFIED CODE FOR SIMPLE SYN ##
+########################################
+
+# TODO(stefanhall@): Remove this simple syn code once the flow is better
+# understood, and use a flow more similar to the rest of this file.
+
+# This code block exists to get the matcha syn flow running on edacloud
+# quickly, with minimal backend work.
+
+# Additional notes: the rpt directory is copied to /workspace by syn_matcha_dv.sh
+# so that edacloud stores it; it can then be accessed on edacloud.corp.google.com
+# under the job > info > Data > Outputs.
+
+set script_loc [file dirname [info script]]
+set INTERNAL_BUILD_LOC "$script_loc/internal_build"
+exec mkdir -p ${INTERNAL_BUILD_LOC}
+
+puts "Sourcing compile_design.tcl"
+source ${INTERNAL_BUILD_LOC}/compile_design.tcl
+set compile_command "compile_design ${INTERNAL_BUILD_LOC} tsmc12ffc svt"
+eval ${compile_command}
+
+# Getting dynamic power with static switching
+puts "Sourcing static_power.tcl"
+source ${INTERNAL_BUILD_LOC}/static_power.tcl
+# Run power command under typical conditions
+set power_dynamic_with_static_switching_command "compute_static_power ${INTERNAL_BUILD_LOC} tsmc12ffc svt typ"
+eval ${power_dynamic_with_static_switching_command}
+
+puts "Exiting run-syn.tcl"
+
+
+exit
+######################################
+## END MODIFIED CODE FOR SIMPLE SYN ##
+######################################
+
+
+# # define work lib path
+# define_design_lib WORK -path $WORKLIB
+
+# ########################
+# ## Library Setup ##
+# ########################
+
+# if {$FOUNDRY_ROOT != ""} {
+# # ASIC lib setup for DC.
+# source "${FOUNDRY_ROOT}/syn/dc/setup.tcl"
+# # this PRIM_DEFAULT_IMPL selects the appropriate technology by defining
+# # PRIM_DEFAULT_IMPL=prim_pkg::Impl<tech identifier>
+# # PRIM_DEFAULT_IMPL is set inside the library setup script
+# set DEFINE "PRIM_DEFAULT_IMPL=${PRIM_DEFAULT_IMPL}+${PRIM_STD_CELL_VARIANT}"
+# } else {
+# # GTECH lib setup for DC.
+# source "${SYN_ROOT}/tools/dc/gtech-setup.tcl"
+# # This black-boxes the 1p and 2p memory models (used for GTECH runs only).
+# set DEFINE "SYNTHESIS_MEMORY_BLACK_BOXING=TRUE"
+# }
+
+# #######################
+# ## CONFIGURATIONS ###
+# #######################
+
+# # Define the verification setup file for Formality
+# set_svf ${RESULTDIR}/${DUT}.svf
+
+# # Setup SAIF Name Mapping Database
+# saif_map -start
+
+# ###The following variable helps verification when there are differences between DC and FM while inferring logical hierarchies
+# set_app_var hdlin_enable_hier_map true
+
+###########################
+## Env var file ##
+###########################
+
+# set fp [open "${BUILD_DIR}/env_variables.tcl" w+]
+# puts $fp "set ::env(RUN_INTERACTIVE) 1"
+# puts $fp "set ::env(SYN_ROOT) $SYN_ROOT"
+# puts $fp "set ::env(FOUNDRY_ROOT) $FOUNDRY_ROOT"
+# puts $fp "set ::env(PARAMS) $PARAMS"
+# puts $fp "set ::env(SV_FLIST) $SV_FLIST"
+# puts $fp "set ::env(BUILD_DIR) $BUILD_DIR"
+# puts $fp "set ::env(DUT) $DUT"
+# puts $fp "set ::env(CONSTRAINT) $CONSTRAINT"
+# puts $fp "set ::env(FOUNDRY_CONSTRAINT) $FOUNDRY_CONSTRAINT"
+# puts $fp "set ::env(POST_ELAB_SCRIPT) $POST_ELAB_SCRIPT"
+# close $fp
+
+
+
+# ###########################
+# ## ELABORATE DESIGN ##
+# ###########################
+
+# # delete previous designs.
+# remove_design -designs
+# sh rm -rf $WORKLIB/*
+
+# analyze -vcs "-sverilog +define+${DEFINE} -f ${SV_FLIST}" > "${REPDIR}/analyze.rpt"
+# if { $TERMINATION_STAGE == "analyze" } { exit }
+# elaborate ${DUT} -parameters ${PARAMS} > "${REPDIR}/elab.rpt"
+# link > "${REPDIR}/link.rpt"
+# check_design > "${REPDIR}/check.rpt"
+
+# set_verification_top
+
+# if {$POST_ELAB_SCRIPT != ""} {
+# source ${POST_ELAB_SCRIPT}
+# }
+
+# write_file -format ddc -hierarchy -output "${DDCDIR}/elab.ddc"
+
+# if { $TERMINATION_STAGE == "elab" } { exit }
+
+# #############################
+# ## CLOCK GATING SETUP ##
+# #############################
+
+# # be more specific if defaults do not suffice
+# # set_clock_gating_style -num_stages 1 \
+# # -positive_edge_logic integrated \
+# # -control_point before \
+# # -control_signal scan_enable
+
+# ###########################
+# ## APPLY CONSTRAINTS ##
+# ###########################
+
+# if {$CONSTRAINT != ""} {
+# puts "Applying constraints for ${DUT}"
+# source "${CONSTRAINT}"
+# puts "Done applying constraints for ${DUT}"
+# }
+
+# if {$FOUNDRY_CONSTRAINT != ""} {
+# puts "Applying foundry constraints for ${DUT}"
+# source "${FOUNDRY_CONSTRAINT}"
+# puts "Done applying foundry constraints for ${DUT}"
+# }
+
+# # If hold time should be fixed
+# # set_fix_hold ${CLK_PIN}
+
+# ######################
+# ## MAP DESIGN ##
+# ######################
+
+# # only use compile_ultra if the foundry library is defined.
+# # otherwise we can only do a compile with gtech cells.
+# if {$FOUNDRY_ROOT == ""} {
+# # enable auto ungrouping and boundary optimization for
+# # gtech experiments, in order to approximate actual
+# # implementation runs with compile_ultra.
+# compile -gate_clock \
+# -scan \
+# -boundary_optimization \
+# -auto_ungroup area > "${REPDIR}/compile.rpt"
+# } else {
+# # preserve hierarchy for reports
+# compile_ultra -gate_clock \
+# -scan \
+# -no_autoungroup > "${REPDIR}/compile.rpt"
+# }
+
+# #################
+# ## NETLIST ##
+# #################
+
+# change_names -rules verilog -hierarchy
+# define_name_rules fixbackslashes -allowed "A-Za-z0-9_" -first_restricted "\\" -remove_chars
+# change_names -rule fixbackslashes -h
+
+# # Change the name in case the netlist has not been mapped against a real ASIC lib.
+# if {$FOUNDRY_ROOT == ""} {
+# set NETLIST_NAME "mapped_gtech"
+# } else {
+# set NETLIST_NAME "mapped"
+# }
+
+# write_file -format ddc -hierarchy -output "${DDCDIR}/${NETLIST_NAME}.ddc"
+# write_file -format verilog -hierarchy -output "${VLOGDIR}/${NETLIST_NAME}.v"
+
+# # Write final SDC
+# write_sdc -nosplit ${RESULTDIR}/${DUT}.final.sdc
+# # If SAIF is used, write out SAIF name mapping file for PrimeTime-PX
+# saif_map -type ptpx -write_map ${RESULTDIR}/${DUT}.${NETLIST_NAME}.SAIF.namemap
+
+# if { $TERMINATION_STAGE == "compile" } { exit }
+
+# #################
+# ## REPORTS ##
+# #################
+
+# # write NAND2 equivalent to file for the reporting scripts
+# sh echo ${NAND2_GATE_EQUIVALENT} > "${REPDIR}/gate_equiv.rpt"
+
+# report_clocks > "${REPDIR}/clocks.rpt"
+# report_clock -groups > "${REPDIR}/clock.groups.rpt"
+# report_path_group > "${REPDIR}/path_group.rpt"
+# report_clock_gating -multi_stage -nosplit > "${REPDIR}/clock_gating.rpt"
+# report_timing -nosplit -slack_lesser_than 0.0 > "${REPDIR}/timing.rpt"
+# report_area -hier -nosplit > "${REPDIR}/area.rpt"
+# report_power -hier -nosplit > "${REPDIR}/power.rpt"
+# report_constraints -all_violators > "${REPDIR}/constraints.rpt"
+
+# report_timing -nosplit -nworst 1000 -input -net -trans -cap > "${REPDIR}/timing_long.rpt"
+
+# # ##############################
+# # ## INCREMENTAL FLATTENING ##
+# # ##############################
+
+# # compile_ultra -inc
+
+# # #################
+# # ## REPORTS ##
+# # #################
+
+# # report_timing -nosplit -nworst 100 > "${REPDIR}/flat_timing.rpt"
+# # report_timing -nosplit -nworst 1000 -input -net -trans -cap > "${REPDIR}/flat_timing_long.rpt"
+# # report_area -hier -nosplit > "${REPDIR}/flat_area.rpt"
+# # report_power -hier -nosplit > "${REPDIR}/flat_power.rpt"
+# # report_constraints -all_violators > "${REPDIR}/flat_constraints.rpt"
+
+# # #################
+# # ## NETLIST ##
+# # #################
+
+# # write_file -format ddc -hierarchy -output "${DDCDIR}/flat.ddc"
+# # write_file -format verilog -hierarchy -output "${VLOGDIR}/flat.v"
+
+# if { $RUN_INTERACTIVE == 0 } {
+# exit
+# }
diff --git a/hw/syn/tools/dc/start-dc.sh b/hw/syn/tools/dc/start-dc.sh
new file mode 100755
index 0000000..2d6092a
--- /dev/null
+++ b/hw/syn/tools/dc/start-dc.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+
+# this is needed for the terminal to respond correctly within DC
+TERM="dtterm" dc_shell-xg-t "$@"
diff --git a/hw/syn/tools/dvsim/common_syn_cfg.hjson b/hw/syn/tools/dvsim/common_syn_cfg.hjson
new file mode 100644
index 0000000..1f45987
--- /dev/null
+++ b/hw/syn/tools/dvsim/common_syn_cfg.hjson
@@ -0,0 +1,89 @@
+// Copyright lowRISC contributors.
+// Licensed under the Apache License, Version 2.0, see LICENSE for details.
+// SPDX-License-Identifier: Apache-2.0
+{
+ flow: syn
+ syn_root: "{proj_root}/hw/syn"
+ flow_makefile: "{syn_root}/tools/dvsim/syn.mk"
+
+  // TODO: the path below refers to the foundry area, which does not exist in the open
+  // repo. This forces the closed "foundry" repo to be placed in that area, which may
+  // change in the future.
+ foundry_root: "{proj_root}/hw/foundry"
+
+ import_cfgs: [// common server configuration for results upload
+ // TODO: check whether this config file can be aligned such that it can
+ // be reused among different flow types
+ // "{proj_root}/hw/dv/tools/dvsim/fusesoc.hjson",
+ "{proj_root}/hw/data/common_project_cfg.hjson",
+ "{syn_root}/tools/dvsim/{tool}.hjson"]
+
+ // Default directory structure for the output
+ dut: "{name}"
+ params: ""
+ build_dir: "{scratch_path}/{build_mode}"
+ build_log: "{build_dir}/synthesis.log"
+
+ tool: "dc"
+
+  // We rely on FuseSoC to generate the file list for us
+ sv_flist_gen_cmd: "fusesoc"
+ fusesoc_core_: "{eval_cmd} echo \"{fusesoc_core}\" | tr ':' '_'"
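+  // e.g. a core name "lowrisc:ip:uart:0.1" becomes "lowrisc_ip_uart_0.1" (illustrative)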
+
+ // TODO: switch the tool to dc once the corresponding edalize backend is available
+ sv_flist_gen_opts: ["--cores-root {titan_root}/hw/ip",
+ "--cores-root {titan_root}/hw/vendor/lowrisc_ibex",
+ "--cores-root {titan_root}/hw/dv/sv",
+ "--cores-root {titan_root}/hw/dv/verilator",
+ "--cores-root {titan_root}/hw/formal",
+ "--cores-root {titan_root}/hw/vendor",
+ "--cores-root {proj_root}/../../out/kelvin/hw/kelvin_core",
+ "--cores-root {proj_root}/../../out/matcha/hw/fastvdma_core",
+ "--cores-root {proj_root}/../ip/isp/ispyocto",
+ "--cores-root {proj_root}/../ip/isp/axi2sramcrs",
+ "--cores-root {proj_root}/hw/dv",
+ "--cores-root {proj_root}/hw/ip",
+ "--cores-root {proj_root}/hw/top_matcha",
+ "run"
+ "{sv_flist_gen_flags}",
+ "--target={flow}",
+                      "--tool icarus", // TODO: change the core files to accept DC; icarus is used for now since it generates a usable file list
+ "--build-root={build_dir}",
+ "--setup",
+ "{fusesoc_core}"]
+ sv_flist_gen_dir: "{build_dir}/syn-dc"
+ sv_flist: "{sv_flist_gen_dir}/{fusesoc_core_}.scr"
+ sv_flist_gen_flags: ["--flag=fileset_{design_level}"]
+
+ // Can be used to hook in an additional post elab scripting step.
+ post_elab_script: ""
+
+  // By default we run full synthesis including area/timing/power (ATP) reporting.
+ // This can be overridden with either of the following
+ // values in order to terminate earlier (listed in order):
+ // - "analyze"
+ // - "elab"
+ // - "compile"
+ // - "reports"
+ // Every stage includes the prior stages, and the report parsing script
+ // will expect the associated reports to be available (otherwise an
+ // error will be generated and the flow will fail).
+ termination_stage: "reports"
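+  // e.g. termination_stage: "elab" stops the flow after elaboration (illustrative)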
+
+ // Common pass or fail patterns.
+ build_fail_patterns: [// FuseSoC build error
+ "^ERROR:.*$"]
+
+ exports: [
+ { SYN_ROOT: "{syn_root}" },
+ { FOUNDRY_ROOT: "{foundry_root}" },
+ { BUILD_DIR: "{build_dir}" },
+ { DUT: "{dut}" },
+ { PARAMS: "{params}" },
+ { SV_FLIST: "{sv_flist}" },
+    { POST_ELAB_SCRIPT: "{post_elab_script}" },
+ { TERMINATION_STAGE: "{termination_stage}" }
+ ]
+}
diff --git a/hw/syn/tools/dvsim/dc.hjson b/hw/syn/tools/dvsim/dc.hjson
new file mode 100644
index 0000000..df76e35
--- /dev/null
+++ b/hw/syn/tools/dvsim/dc.hjson
@@ -0,0 +1,39 @@
+// Copyright lowRISC contributors.
+// Licensed under the Apache License, Version 2.0, see LICENSE for details.
+// SPDX-License-Identifier: Apache-2.0
+{
+ // Synopsys Design Compiler
+ tool: dc
+
+ // Environment variables that are needed in the synthesis script
+ exports: [
+ { CONSTRAINT: "{sdc_file}" },
+ { FOUNDRY_CONSTRAINT: "{foundry_sdc_file}" },
+ ]
+
+ // Tool invocation
+ build_cmd: "{job_prefix} dc_shell-xg-t "
+ build_opts: ["-f {syn_root}/tools/dc/run-syn.tcl"]
+
+ // DC-specific results parsing script that is called after running synthesis
+ report_cmd: "{syn_root}/tools/dc/parse-syn-report.py"
+ report_opts: ["--dut {dut}",
+ "--expand-modules {expand_modules}",
+ "--expand-depth {expand_depth}",
+                "--log-path {build_dir}",
+ "--rep-path {build_dir}/REPORTS",
+ "--out-dir {build_dir}",
+ "--termination-stage {termination_stage}"]
+
+ // By default, 1 level of hierarchy is always expanded in the area report.
+ // This can be changed by setting the expansion depth to a higher value,
+ // or by listing explicitly which submodules shall be expanded further.
+ expand_modules: "{name}"
+ expand_depth: 1
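+  // e.g. expand_depth: 2 additionally expands each direct submodule (illustrative)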
+
+ // Restrict the maximum message count in each category
+ max_msg_count: 100
+ // Sanitize the published report
+ sanitize_publish_results: true
+}
diff --git a/hw/syn/tools/dvsim/syn.mk b/hw/syn/tools/dvsim/syn.mk
new file mode 100644
index 0000000..9a63228
--- /dev/null
+++ b/hw/syn/tools/dvsim/syn.mk
@@ -0,0 +1,46 @@
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+
+export SHELL := /bin/bash
+.DEFAULT_GOAL := all
+
+all: build
+
+###################
+## build targets ##
+###################
+build: build_result
+
+pre_build:
+	@echo "[make]: pre_build"
+	mkdir -p ${build_dir}
+	mkdir -p ${sv_flist_gen_dir}
+ifneq (${pre_build_cmds},)
+	cd ${build_dir} && ${pre_build_cmds}
+endif
+
+gen_sv_flist: pre_build
+	@echo "[make]: gen_sv_flist"
+	cd ${build_dir} && ${sv_flist_gen_cmd} ${sv_flist_gen_opts}
+
+do_build: gen_sv_flist
+	@echo "[make]: do_build"
+	cd ${sv_flist_gen_dir} && ${build_cmd} ${build_opts} 2>&1 | tee ${build_log}
+
+post_build: do_build
+ @echo "[make]: post_build"
+ifneq (${post_build_cmds},)
+ cd ${build_dir} && ${post_build_cmds}
+endif
+
+build_result: post_build
+ @echo "[make]: build_result"
+ ${report_cmd} ${report_opts}
+
+.PHONY: build \
+ gen_sv_flist \
+ pre_build \
+ do_build \
+ post_build \
+ build_result
diff --git a/hw/top_matcha/syn/chip_matcha_asic_syn_cfg.hjson b/hw/top_matcha/syn/chip_matcha_asic_syn_cfg.hjson
index 40bd59a..7d156d2 100644
--- a/hw/top_matcha/syn/chip_matcha_asic_syn_cfg.hjson
+++ b/hw/top_matcha/syn/chip_matcha_asic_syn_cfg.hjson
@@ -45,8 +45,9 @@
]
// Timing constraints for this module
- sdc_file: "{proj_root}/hw/top_matcha/syn/asic.constraints.sdc"
+ sdc_file: "{proj_root}/hw/top_matcha/syn/chip_matcha_asic.sdc"
// Technology specific timing constraints for this module
- foundry_sdc_file: "{foundry_root}/top_matcha/syn/foundry.constraints.sdc"
+ foundry_sdc_file: ""
+ // foundry_sdc_file: "{foundry_root}/top_matcha/syn/foundry.constraints.sdc"
}
diff --git a/sw/device/cheriot/soundstream/ml_top.cc b/sw/device/cheriot/soundstream/ml_top.cc
index 270e9e7..2e57a71 100644
--- a/sw/device/cheriot/soundstream/ml_top.cc
+++ b/sw/device/cheriot/soundstream/ml_top.cc
@@ -84,10 +84,10 @@
EventWaiterSource events[2];
const uint32_t* mlTopFinishFutex =
interrupt_futex_get(STATIC_SEALED_VALUE(mlTopFinishInterruptCapability));
- events[0] = {(void*)mlTopFinishFutex, EventWaiterFutex, *mlTopFinishFutex};
+ events[0] = {(void*)mlTopFinishFutex, *mlTopFinishFutex};
const uint32_t* mlTopFaultFutex =
interrupt_futex_get(STATIC_SEALED_VALUE(mlTopFaultInterruptCapability));
- events[1] = {(void*)mlTopFaultFutex, EventWaiterFutex, *mlTopFaultFutex};
+ events[1] = {(void*)mlTopFaultFutex, *mlTopFaultFutex};
for (;;) {
Debug::Assert(multiwaiter_wait(&unlimited, mw, events, 2) == 0, "multiwaiter_wait");