[util] Move design-related helper scripts to util/design
This also updates the paths and consolidates some subfunctions
Signed-off-by: Michael Schaffner <msf@opentitan.org>
diff --git a/util/design/gen-lc-state-enc.py b/util/design/gen-lc-state-enc.py
new file mode 100755
index 0000000..40bf2d6
--- /dev/null
+++ b/util/design/gen-lc-state-enc.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python3
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+r"""Given an ECC encoding matrix, this script generates random life cycle
+state encodings that can be incrementally written to a memory protected with
+the ECC code specified.
+"""
+import argparse
+import logging as log
+import random
+from pathlib import Path
+
+import hjson
+from lib.common import wrapped_docstring
+from lib.LcStEnc import LcStEnc
+from mako.template import Template
+
+# State encoding definition
+LC_STATE_DEFINITION_FILE = "hw/ip/lc_ctrl/data/lc_ctrl_state.hjson"
+# Code templates to render
+TEMPLATES = ["hw/ip/lc_ctrl/rtl/lc_ctrl_state_pkg.sv.tpl"]
+
+
+def main():
+ log.basicConfig(level=log.INFO,
+ format="%(asctime)s - %(message)s",
+ datefmt="%Y-%m-%d %H:%M")
+
+ parser = argparse.ArgumentParser(
+ prog="gen-lc-state-enc",
+ description=wrapped_docstring(),
+ formatter_class=argparse.RawDescriptionHelpFormatter)
+
+ parser.add_argument('-s',
+ '--seed',
+ type=int,
+ metavar='<seed>',
+ help='Custom seed for RNG.')
+
+ args = parser.parse_args()
+
+ with open(LC_STATE_DEFINITION_FILE, 'r') as infile:
+ config = hjson.load(infile)
+
+ # If specified, override the seed for random netlist constant computation.
+    if args.seed is not None:
+ log.warning('Commandline override of seed with {}.'.format(
+ args.seed))
+ config['seed'] = args.seed
+ # Otherwise, we either take it from the .hjson if present, or
+ # randomly generate a new seed if not.
+ else:
+ random.seed()
+ new_seed = random.getrandbits(64)
+ if config.setdefault('seed', new_seed) == new_seed:
+ log.warning(
+ 'No seed specified, setting to {}.'.format(new_seed))
+
+ # validate config and generate encoding
+ lc_st_enc = LcStEnc(config)
+
+ # render all templates
+ for template in TEMPLATES:
+ with open(template, 'r') as tplfile:
+ tpl = Template(tplfile.read())
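+        # Note: Path('x.sv.tpl').stem == 'x.sv', so the rendered output is
+        # written next to the template with the '.tpl' suffix stripped.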
+ with open(
+ Path(template).parent.joinpath(Path(template).stem),
+ 'w') as outfile:
+ outfile.write(tpl.render(lc_st_enc=lc_st_enc))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/util/design/gen-lfsr-seed.py b/util/design/gen-lfsr-seed.py
new file mode 100755
index 0000000..33e5007
--- /dev/null
+++ b/util/design/gen-lfsr-seed.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python3
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+r"""This script generates random seeds and state permutations for LFSRs
+and outputs them them as a packed SV logic vectors suitable for use with
+prim_lfsr.sv.
+"""
+import argparse
+import logging as log
+import random
+
+from lib import common as common
+
+SV_INSTRUCTIONS = """
+------------------------------------------------
+| COPY PASTE THE CODE TEMPLATE BELOW INTO YOUR |
+| RTL CODE, INCLUDING THE COMMENT IN ORDER TO |
+| EASE AUDITABILITY AND REPRODUCIBILITY. |
+------------------------------------------------
+"""
+
+
+def main():
+ log.basicConfig(level=log.INFO,
+ format="%(asctime)s - %(message)s",
+ datefmt="%Y-%m-%d %H:%M")
+
+ parser = argparse.ArgumentParser(
+ prog="gen-lfsre-perm",
+ description=common.wrapped_docstring(),
+ formatter_class=argparse.RawDescriptionHelpFormatter)
+ parser.add_argument('-w',
+ '--width',
+ type=int,
+ default=32,
+ metavar='<#bitwidth>',
+ help='LFSR width.')
+ parser.add_argument('-s',
+ '--seed',
+ type=int,
+ metavar='<seed>',
+ help='Custom seed for RNG.')
+ parser.add_argument('-p',
+ '--prefix',
+ type=str,
+ metavar='name',
+ default="",
+ help='Optional prefix to add to '
+ 'types and parameters. '
+ 'Make sure this is PascalCase.')
+
+ args = parser.parse_args()
+
+ if args.width <= 0:
+ log.error("LFSR width must be nonzero")
+ exit(1)
+
+ if args.seed is None:
+ random.seed()
+ args.seed = random.getrandbits(32)
+
+ random.seed(args.seed)
+
+ print(SV_INSTRUCTIONS)
+
+ type_prefix = common.as_snake_case_prefix(args.prefix)
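+    # E.g. a prefix of "MyBlock" (hypothetical value) yields parameter names
+    # like MyBlockLfsrWidth and type names like my_block_lfsr_seed_t.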
+
+ outstr = '''
+// These LFSR parameters have been generated with
+// $ ./util/design/gen-lfsr-seed.py --width {} --seed {} --prefix "{}"
+parameter int {}LfsrWidth = {};
+typedef logic [{}LfsrWidth-1:0] {}lfsr_seed_t;
+typedef logic [{}LfsrWidth-1:0][$clog2({}LfsrWidth)-1:0] {}lfsr_perm_t;
+parameter {}lfsr_seed_t RndCnst{}LfsrSeedDefault = {};
+parameter {}lfsr_perm_t RndCnst{}LfsrPermDefault = {{
+ {}
+}};
+'''.format(args.width, args.seed, args.prefix, args.prefix, args.width,
+ args.prefix, type_prefix, args.prefix, args.prefix, type_prefix,
+ type_prefix, args.prefix,
+ common.get_random_data_hex_literal(args.width), type_prefix,
+ args.prefix, common.get_random_perm_hex_literal(args.width))
+
+ print(outstr)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/util/design/gen-otp-mmap.py b/util/design/gen-otp-mmap.py
new file mode 100755
index 0000000..7d7684d
--- /dev/null
+++ b/util/design/gen-otp-mmap.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python3
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+r"""Generate RTL and documentation collateral from OTP memory
+map definition file (hjson).
+"""
+import argparse
+import logging as log
+import random
+from pathlib import Path
+
+import hjson
+from lib.common import wrapped_docstring
+from lib.OtpMemMap import OtpMemMap
+from mako.template import Template
+
+TABLE_HEADER_COMMENT = '''<!--
+DO NOT EDIT THIS FILE DIRECTLY.
+It has been generated with ./util/design/gen-otp-mmap.py
+-->
+
+'''
+
+# memory map source
+MMAP_DEFINITION_FILE = "hw/ip/otp_ctrl/data/otp_ctrl_mmap.hjson"
+# documentation tables to generate
+PARTITIONS_TABLE_FILE = "hw/ip/otp_ctrl/doc/otp_ctrl_partitions.md"
+DIGESTS_TABLE_FILE = "hw/ip/otp_ctrl/doc/otp_ctrl_digests.md"
+MMAP_TABLE_FILE = "hw/ip/otp_ctrl/doc/otp_ctrl_mmap.md"
+# code templates to render
+TEMPLATES = [
+ "hw/ip/otp_ctrl/data/otp_ctrl.hjson.tpl",
+ "hw/ip/otp_ctrl/rtl/otp_ctrl_part_pkg.sv.tpl"
+]
+
+
+def main():
+ log.basicConfig(level=log.INFO,
+ format="%(asctime)s - %(message)s",
+ datefmt="%Y-%m-%d %H:%M")
+
+ parser = argparse.ArgumentParser(
+ prog="gen-otp-mmap",
+ description=wrapped_docstring(),
+ formatter_class=argparse.RawDescriptionHelpFormatter)
+
+ # Generator options for compile time random netlist constants
+ parser.add_argument('--seed',
+ type=int,
+ metavar='<seed>',
+ help='Custom seed for RNG to compute default values.')
+
+ args = parser.parse_args()
+
+ with open(MMAP_DEFINITION_FILE, 'r') as infile:
+ config = hjson.load(infile)
+
+ # If specified, override the seed for random netlist constant computation.
+    if args.seed is not None:
+ log.warning('Commandline override of seed with {}.'.format(
+ args.seed))
+ config['seed'] = args.seed
+ # Otherwise, we either take it from the .hjson if present, or
+ # randomly generate a new seed if not.
+ else:
+ random.seed()
+ new_seed = random.getrandbits(64)
+ if config.setdefault('seed', new_seed) == new_seed:
+ log.warning(
+ 'No seed specified, setting to {}.'.format(new_seed))
+
+ otp_mmap = OtpMemMap(config)
+
+ with open(PARTITIONS_TABLE_FILE, 'w') as outfile:
+ outfile.write(TABLE_HEADER_COMMENT +
+ otp_mmap.create_partitions_table())
+
+ with open(DIGESTS_TABLE_FILE, 'w') as outfile:
+ outfile.write(TABLE_HEADER_COMMENT +
+ otp_mmap.create_digests_table())
+
+ with open(MMAP_TABLE_FILE, 'w') as outfile:
+ outfile.write(TABLE_HEADER_COMMENT + otp_mmap.create_mmap_table())
+
+ # render all templates
+ for template in TEMPLATES:
+ with open(template, 'r') as tplfile:
+ tpl = Template(tplfile.read())
+ with open(
+ Path(template).parent.joinpath(Path(template).stem),
+ 'w') as outfile:
+ outfile.write(tpl.render(otp_mmap=otp_mmap))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/util/design/get-lfsr-coeffs.py b/util/design/get-lfsr-coeffs.py
new file mode 100755
index 0000000..9bb84da
--- /dev/null
+++ b/util/design/get-lfsr-coeffs.py
@@ -0,0 +1,185 @@
+#!/usr/bin/env python3
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+
+import argparse
+import os
+import shutil
+import subprocess
+import sys
+
+import wget
+
+USAGE = """./get_lfsr_coeffs.py [-t <temporary folder>] [-o <outfile>] [-f] [--fib]
+
+Downloads LFSR constants from [1] and dumps them in SystemVerilog format
+(for use in prim_lfsr.sv). These coefficients are for a Galois XOR type LFSR
+and cover implementations ranging from 4 to 64 bits.
+
+Alternatively, the script can extract the XNOR Fibonacci type LFSR coefficients
+from Xilinx application note 52 [2] by specifying the --fib switch. Note that
+this depends on the pdftotext utility being available on Linux.
+
+[1] https://users.ece.cmu.edu/~koopman/lfsr/
+
+[2] https://www.xilinx.com/support/documentation/application_notes/xapp052.pdf
+"""
+
+# configuration for Galois
+MIN_LFSR_LEN = 4
+MAX_LFSR_LEN = 64
+BASE_URL = 'https://users.ece.cmu.edu/~koopman/lfsr/'
+
+# configuration for Fibonacci
+FIB_URL = 'https://www.xilinx.com/support/documentation/application_notes/xapp052.pdf'
+PDF_NAME = 'xapp052'
+LINE_FILTER = [
+ 'Table 3: Taps for Maximum-Length LFSR Counters',
+ 'XAPP 052 July 7,1996 (Version 1.1)'
+]
+
+
+# helper function to write out coeffs
+def dump_coeffs(lfsrType, widths, coeffs, outfile):
+ # widths consistency check
+ for k in range(widths[0], widths[-1] + 1):
+ # print("%d -- %d" % (k,widths[k-widths[0]]))
+ if k != widths[k - widths[0]]:
+ print("Error: widths is not consistently increasing")
+ sys.exit(1)
+
+ # select first coefficient in each file and print to SV LUT
+ with outfile:
+ decl_str = "localparam int unsigned %s_LUT_OFF = %d;\n" \
+ % (lfsrType, min(widths))
+ outfile.write(decl_str)
+ decl_str = "localparam logic [%d:0] %s_COEFFS [%d] = '{ " \
+ % (max(widths) - 1, lfsrType, max(widths) - min(widths) + 1)
+ outfile.write(decl_str)
+ comma = ',\n'
+ spaces = ''
+ for k in widths:
+ if k == max(widths):
+ comma = ""
+ if k == min(widths) + 1:
+ spaces += ' ' * len(decl_str)
+ outfile.write("%s%d'h%s%s" %
+ (spaces, max(widths), coeffs[k - widths[0]], comma))
+ outfile.write(' };\n')
+
+
+# converts list with bit positions to a hex bit mask string
+def to_bit_mask(bitPositions):
+
+ bitMask = 0
+ for b in bitPositions:
+ bitMask += 2**(b - 1)
+
+ return "%X" % bitMask
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ prog="get-lfsr-coeffs",
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ usage=USAGE,
+ description=__doc__,
+ epilog='defaults or the filename - can be used for stdin/stdout')
+ parser.add_argument(
+ '-t',
+ '--tempfolder',
+ help="""temporary folder to download the lfsr constant files
+to (defaults to lfsr_tmp)""",
+ default='lfsr_tmp')
+ parser.add_argument('--fib',
+ help='download fibonacci coefficients',
+ action='store_true')
+ parser.add_argument('-f',
+ '--force',
+ help='overwrites tempfolder',
+ action='store_true')
+ parser.add_argument('-o',
+ '--output',
+ type=argparse.FileType('w'),
+ default=sys.stdout,
+ metavar='file',
+ help='Output file (default stdout)')
+
+ args = parser.parse_args()
+
+ if args.force and os.path.exists(args.tempfolder):
+ shutil.rmtree(args.tempfolder)
+
+ if not os.path.exists(args.tempfolder):
+ # download coefficient files
+ os.makedirs(args.tempfolder, exist_ok=args.force)
+ os.chdir(args.tempfolder)
+
+ if args.fib:
+ lfsrType = 'FIB_XNOR'
+
+ wget.download(FIB_URL)
+        # Note: pass a single command string when using shell=True, since
+        # additional list elements are passed to the shell itself rather
+        # than to the command.
+        cmd = 'pdftotext %s.pdf %s.txt' % (PDF_NAME, PDF_NAME)
+        subprocess.call(cmd, shell=True)
+        print("")
+        cmd = 'grep -A 350 "%s" %s.txt > table.txt' % (LINE_FILTER[0],
+                                                       PDF_NAME)
+        subprocess.call(cmd, shell=True)
+
+ # parse the table
+ widths = []
+ coeffs = []
+ columnType = 0
+ with open('table.txt') as infile:
+ for line in infile:
+ line = line.strip()
+ if line and line not in LINE_FILTER:
+ if line == 'n':
+ columnType = 0
+ # yes, this is a typo in the PDF :)
+ elif line == 'XNOR from':
+ columnType = 1
+ elif columnType:
+ tmpCoeffs = [int(c) for c in line.split(',')]
+ coeffs += [tmpCoeffs]
+ else:
+ widths += [int(line)]
+
+ # # printout for checking
+ # for (w,c) in zip(widths,coeffs):
+ # print("width: %d > coeffs: %s" % (w, str(c)))
+
+ # convert to bitmask
+ for k in range(len(coeffs)):
+ coeffs[k] = to_bit_mask(coeffs[k])
+
+ else:
+ lfsrType = 'GAL_XOR'
+
+ for k in range(MIN_LFSR_LEN, MAX_LFSR_LEN + 1):
+ url = '%s%d.txt' % (BASE_URL, k)
+ print("\nDownloading %d bit LFSR coeffs from %s..." % (k, url))
+ wget.download(url)
+ print("")
+
+ widths = []
+ coeffs = []
+ for k in range(MIN_LFSR_LEN, MAX_LFSR_LEN + 1):
+ filename = '%d.txt' % k
+ with open(filename) as infile:
+ # read the first line
+ widths += [k]
+ coeffs += [infile.readline().strip()]
+
+ # write to stdout or file
+ dump_coeffs(lfsrType, widths, coeffs, outfile=args.output)
+ else:
+ print("Temporary directory already exists, abort...")
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/util/design/keccak_rc.py b/util/design/keccak_rc.py
new file mode 100755
index 0000000..1ee97d0
--- /dev/null
+++ b/util/design/keccak_rc.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python3
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+r"""Calculate Round Constant
+"""
+
+import argparse
+import bitarray as ba
+import logging as log
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ prog="keccak round constant generator",
+ description=
+ '''This tool generates the round constants based on the given max round number'''
+ )
+ parser.add_argument(
+ '-r',
+ type=int,
+ default=24,
+        help='''Max Round value. Default is SHA3 Keccak round %(default)s''')
+ parser.add_argument('--verbose', '-v', action='store_true', help='Verbose')
+
+ args = parser.parse_args()
+
+ if (args.verbose):
+ log.basicConfig(format="%(levelname)s: %(message)s", level=log.DEBUG)
+ else:
+ log.basicConfig(format="%(levelname)s: %(message)s")
+
+    if args.r < 1:
+        log.error("Max Round value should be greater than 0")
+        exit(1)
+
+ # Create 0..255 bit array
+ rc = ba.bitarray(256)
+ rc.setall(0)
+
+ r = ba.bitarray('10000000')
+ rc[0] = True # t%255 == 0 -> 1
+ for i in range(1, 256):
+ # Update from t=1 to t=255
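+        # This is the LFSR step from FIPS 202 Algorithm 5 (rc(t)): prepend
+        # a zero and, if the bit that falls out (R[8]) is set, flip bits
+        # 0, 4, 5 and 6, i.e. reduce by x^8 + x^6 + x^5 + x^4 + 1.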
+ r_d = ba.bitarray('0') + r
+ if r_d[8]:
+            # Flip bits 0, 4, 5 and 6
+ r = r_d[0:8] ^ ba.bitarray('10001110')
+ else:
+ r = r_d[0:8]
+
+ rc[i] = r[0]
+
+ ## Print rc
+ print(rc)
+
+    ## Round constants
+
+ for rnd in range(0, args.r):
+ # Let RC=0
+ rndconst = ba.bitarray(64)
+ rndconst.setall(0)
+ # for j [0 .. L] RC[2**j-1] = rc(j+7*rnd)
+ for j in range(0, 7): #0 to 6
+ rndconst[2**j - 1] = rc[(j + 7 * rnd) % 255]
+ print("64'h{}, // Round {}".format(rndhex(rndconst), rnd))
+
+
+def rndhex(bit) -> str:
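+    # rndconst[0] holds the LSB of the constant, so reverse the bitarray
+    # before converting, which yields an MSB-first hex string.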
+ return bit[::-1].tobytes().hex()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/util/design/lib/LcStEnc.py b/util/design/lib/LcStEnc.py
new file mode 100644
index 0000000..9b92ea2
--- /dev/null
+++ b/util/design/lib/LcStEnc.py
@@ -0,0 +1,242 @@
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+r"""Contains life cycle state encoding class which is
+used to generate new life cycle encodings.
+"""
+import logging as log
+import random
+
+from lib.common import (check_int, ecc_encode, get_hd, hd_histogram,
+ is_valid_codeword, scatter_bits)
+
+
+def _is_incremental_codeword(word1, word2):
+ '''Test whether word2 is incremental wrt word1.'''
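+    # E.g. '0101' -> '0111' only sets bits and is incremental, whereas
+    # '0101' -> '0011' clears a bit and is not.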
+ if len(word1) != len(word2):
+ log.error('Words are not of equal size')
+ exit(1)
+
+ _word1 = int(word1, 2)
+ _word2 = int(word2, 2)
+
+ # This basically checks that the second word does not
+ # clear any bits that are set to 1 in the first word.
+ return ((_word1 & _word2) == _word1)
+
+
+def _get_incremental_codewords(config, base_ecc, existing_words):
+ '''Get all possible incremental codewords fulfilling the constraints.'''
+
+ base_data = base_ecc[config['secded']['ecc_width']:]
+
+ # We only need to spin through data bits that have not been set yet.
+ # Hence, we first count how many bits are zero (and hence still
+    # modifiable). Then, we enumerate all possible combinations and scatter
+ # the bits of the enumerated values into the correct bit positions using
+ # the scatter_bits() function.
+ incr_cands = []
+ free_bits = base_data.count('0')
+ for k in range(1, 2**free_bits):
+ # Get incremental dataword by scattering the enumeration bits
+ # into the zero bit positions in base_data.
+ incr_cand = scatter_bits(base_data,
+ format(k, '0' + str(free_bits) + 'b'))
+ incr_cand_ecc = ecc_encode(config, incr_cand)
+
+ # Dataword is correct by construction, but we need to check whether
+ # the ECC bits are incremental.
+ if _is_incremental_codeword(base_ecc, incr_cand_ecc):
+ # Check whether the candidate fulfills the maximum
+ # Hamming weight constraint.
+ if incr_cand_ecc.count('1') <= config['max_hw']:
+ # Check Hamming distance wrt all existing words.
+ for w in existing_words + [base_ecc]:
+ if get_hd(incr_cand_ecc, w) < config['min_hd']:
+ break
+ else:
+ incr_cands.append(incr_cand_ecc)
+
+ return incr_cands
+
+
+def _get_new_state_word_pair(config, existing_words):
+    '''Randomly generate a new incrementally writable word pair.'''
+    while True:
+ # Draw a random number and check whether it is unique and whether
+ # the Hamming weight is in range.
+ width = config['secded']['data_width']
+ ecc_width = config['secded']['ecc_width']
+ base = random.getrandbits(width)
+ base = format(base, '0' + str(width) + 'b')
+ base_cand_ecc = ecc_encode(config, base)
+        # Check that the Hamming weight is in range; with suitable min/max
+        # settings this also excludes the all-zero and all-one words.
+        pop_cnt = base_cand_ecc.count('1')
+        if config['min_hw'] <= pop_cnt <= config['max_hw']:
+
+ # Check Hamming distance wrt all existing words
+ for w in existing_words:
+ if get_hd(base_cand_ecc, w) < config['min_hd']:
+ break
+ else:
+ # Get encoded incremental candidates.
+ incr_cands_ecc = _get_incremental_codewords(
+ config, base_cand_ecc, existing_words)
+            # If there are valid candidates, draw one at random;
+            # otherwise we just start over with a new random base word.
+ if incr_cands_ecc:
+ incr_cand_ecc = random.choice(incr_cands_ecc)
+ log.info('word {}: {}|{} -> {}|{}'.format(
+ int(len(existing_words) / 2),
+ base_cand_ecc[ecc_width:], base_cand_ecc[0:ecc_width],
+ incr_cand_ecc[ecc_width:], incr_cand_ecc[0:ecc_width]))
+ existing_words.append(base_cand_ecc)
+ existing_words.append(incr_cand_ecc)
+ return (base_cand_ecc, incr_cand_ecc)
+
+
+def _validate_words(config, words):
+ '''Validate generated words (base and incremental).'''
+ for k, w in enumerate(words):
+        # Check whether the word is valid wrt the ECC polynomial.
+ if not is_valid_codeword(config, w):
+ log.error('Codeword {} at index {} is not valid'.format(w, k))
+ exit(1)
+ # Check that word fulfills the Hamming weight constraints.
+ pop_cnt = w.count('1')
+ if pop_cnt < config['min_hw'] or pop_cnt > config['max_hw']:
+ log.error(
+ 'Codeword {} at index {} has wrong Hamming weight'.format(
+ w, k))
+ exit(1)
+        # Check Hamming distance wrt all other existing words.
+ # If the constraint is larger than 0 this implies uniqueness.
+ if k < len(words) - 1:
+ for k2, w2 in enumerate(words[k + 1:]):
+ if get_hd(w, w2) < config['min_hd']:
+ log.error(
+ 'Hamming distance between codeword {} at index {} '
+ 'and codeword {} at index {} is too low.'.format(
+ w, k, w2, k + 1 + k2))
+ exit(1)
+
+
+class LcStEnc():
+ '''Life cycle state encoding generator class
+
+ The constructor expects the parsed configuration
+ hjson to be passed in.
+ '''
+
+    def __init__(self, config):
+        '''The constructor validates the configuration dict.'''
+        # These are instance attributes so that multiple generator
+        # instances do not share state.
+        # This holds the config dict.
+        self.config = {}
+        # Holds the generated life cycle words.
+        self.gen = {
+            'ab_words': [],
+            'cd_words': [],
+            'ef_words': [],
+            'stats': [],
+        }
+
+ log.info('')
+ log.info('Generate life cycle state')
+ log.info('')
+
+ if 'seed' not in config:
+ log.error('Missing seed in configuration')
+ exit(1)
+
+ if 'secded' not in config:
+ log.error('Missing secded configuration')
+ exit(1)
+
+ config['secded'].setdefault('data_width', 0)
+ config['secded'].setdefault('ecc_width', 0)
+ config['secded'].setdefault('ecc_matrix', [[]])
+ config.setdefault('num_ab_words', 0)
+ config.setdefault('num_cd_words', 0)
+ config.setdefault('num_ef_words', 0)
+ config.setdefault('min_hw', 0)
+ config.setdefault('max_hw', 0)
+ config.setdefault('min_hd', 0)
+
+ config['seed'] = check_int(config['seed'])
+
+ log.info('Seed: {0:x}'.format(config['seed']))
+ log.info('')
+
+ config['secded']['data_width'] = check_int(
+ config['secded']['data_width'])
+ config['secded']['ecc_width'] = check_int(
+ config['secded']['ecc_width'])
+ config['num_ab_words'] = check_int(config['num_ab_words'])
+ config['num_cd_words'] = check_int(config['num_cd_words'])
+ config['num_ef_words'] = check_int(config['num_ef_words'])
+ config['min_hw'] = check_int(config['min_hw'])
+ config['max_hw'] = check_int(config['max_hw'])
+ config['min_hd'] = check_int(config['min_hd'])
+
+ total_width = config['secded']['data_width'] + config['secded'][
+ 'ecc_width']
+
+ if config['min_hw'] >= total_width or \
+ config['max_hw'] > total_width or \
+ config['min_hw'] >= config['max_hw']:
+ log.error('Hamming weight constraints are inconsistent.')
+ exit(1)
+
+ if config['max_hw'] - config['min_hw'] + 1 < config['min_hd']:
+ log.error('Hamming distance constraint is inconsistent.')
+ exit(1)
+
+ if config['secded']['ecc_width'] != len(
+ config['secded']['ecc_matrix']):
+ log.error('ECC matrix does not have correct number of rows')
+ exit(1)
+
+ log.info('SECDED Matrix:')
+ for i, l in enumerate(config['secded']['ecc_matrix']):
+ log.info('ECC Bit {} Fanin: {}'.format(i, l))
+ for j, e in enumerate(l):
+ e = check_int(e)
+ config['secded']['ecc_matrix'][i][j] = e
+
+ log.info('')
+
+ self.config = config
+
+ # Re-initialize with seed to make results reproducible.
+ random.seed(int(self.config['seed']))
+
+ # Generate new encoding words
+ word_types = ['ab_words', 'cd_words', 'ef_words']
+ existing_words = []
+ for w in word_types:
+ while len(self.gen[w]) < self.config['num_' + w]:
+ new_word = _get_new_state_word_pair(self.config,
+ existing_words)
+ self.gen[w].append(new_word)
+
+ # Validate words (this must not fail at this point).
+ _validate_words(self.config, existing_words)
+
+ # Print out HD histogram
+ self.gen['stats'] = hd_histogram(existing_words)
+
+ log.info('')
+ log.info('Hamming distance histogram:')
+ log.info('')
+ for bar in self.gen['stats']["bars"]:
+ log.info(bar)
+ log.info('')
+ log.info('Minimum HD: {}'.format(self.gen['stats']['min_hd']))
+ log.info('Maximum HD: {}'.format(self.gen['stats']['max_hd']))
+ log.info('Minimum HW: {}'.format(self.gen['stats']['min_hw']))
+ log.info('Maximum HW: {}'.format(self.gen['stats']['max_hw']))
+
+ log.info('')
+ log.info('Successfully generated life cycle state.')
+ log.info('')
diff --git a/util/design/lib/OtpMemMap.py b/util/design/lib/OtpMemMap.py
new file mode 100644
index 0000000..0601481
--- /dev/null
+++ b/util/design/lib/OtpMemMap.py
@@ -0,0 +1,287 @@
+#!/usr/bin/env python3
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+r"""OTP memory map class, used to create the associated RTL and
+documentation, and to create OTP memory images for preloading.
+"""
+
+import logging as log
+import random
+from math import ceil, log2
+
+from tabulate import tabulate
+from lib.common import check_bool, check_int
+
+DIGEST_SUFFIX = "_DIGEST"
+DIGEST_SIZE = 8
+
+
+class OtpMemMap():
+
+    def __init__(self, config):
+        # This holds the config dict (instance attribute, so that
+        # multiple OtpMemMap instances do not share state).
+        self.config = {}
+
+ log.info('')
+ log.info('Parse and translate OTP memory map.')
+ log.info('')
+
+ if "seed" not in config:
+ log.error("Missing seed in configuration.")
+ exit(1)
+
+ config["seed"] = check_int(config["seed"])
+
+ # Initialize RNG.
+ random.seed(int(config['seed']))
+
+ offset = 0
+ num_part = 0
+ for part in config["partitions"]:
+ num_part += 1
+ # Defaults
+ part.setdefault("offset", offset)
+ part.setdefault("name", "unknown_name")
+ part.setdefault("variant", "Unbuffered")
+ part.setdefault("size", "0")
+ part.setdefault("secret", "false")
+ part.setdefault("sw_digest", "false")
+ part.setdefault("hw_digest", "false")
+ part.setdefault("write_lock", "none")
+ part.setdefault("read_lock", "none")
+ part.setdefault("key_sel", "NoKey")
+ log.info("Partition {} at offset {} with size {}".format(
+ part["name"], part["offset"], part["size"]))
+
+ # make sure these are boolean types (simplifies the mako templates)
+ part["secret"] = check_bool(part["secret"])
+ part["sw_digest"] = check_bool(part["sw_digest"])
+ part["hw_digest"] = check_bool(part["hw_digest"])
+ part["bkout_type"] = check_bool(part["bkout_type"])
+
+ # basic checks
+ if part["variant"] not in ["Unbuffered", "Buffered", "LifeCycle"]:
+ log.error("Invalid partition type {}".format(part["variant"]))
+ exit(1)
+
+ if part["key_sel"] not in [
+ "NoKey", "Secret0Key", "Secret1Key", "Secret2Key"
+ ]:
+ log.error("Invalid key sel {}".format(part["key_sel"]))
+ exit(1)
+
+ if check_bool(part["secret"]) and part["key_sel"] == "NoKey":
+ log.error(
+ "A secret partition needs a key select value other than NoKey"
+ )
+ exit(1)
+
+ if part["write_lock"].lower() not in ["digest", "csr", "none"]:
+ log.error("Invalid value for write_lock")
+ exit(1)
+
+ if part["read_lock"].lower() not in ["digest", "csr", "none"]:
+ log.error("Invalid value for read_lock")
+ exit(1)
+
+ if part["sw_digest"] and part["hw_digest"]:
+ log.error(
+ "Partition cannot support both a SW and a HW digest at the same time."
+ )
+ exit(1)
+
+ if part["variant"] == "Unbuffered" and not part["sw_digest"]:
+ log.error(
+ "Unbuffered partitions without digest are not supported at the moment."
+ )
+ exit(1)
+
+ if not part["sw_digest"] and not part["hw_digest"]:
+ if part["write_lock"].lower(
+ ) == "digest" or part["read_lock"].lower() == "digest":
+ log.error(
+ "A partition can only be write/read lockable if it has a hw or sw digest."
+ )
+ exit(1)
+
+ if check_int(part["offset"]) % 8:
+ log.error("Partition offset must be 64bit aligned")
+ exit(1)
+
+ if check_int(part["size"]) % 8:
+ log.error("Partition size must be 64bit aligned")
+ exit(1)
+
+ # Loop over items within a partition
+ for item in part["items"]:
+ item.setdefault("name", "unknown_name")
+ item.setdefault("size", "0")
+ item.setdefault("isdigest", "false")
+ item.setdefault("offset", offset)
+ # Generate random constant to be used when partition has
+ # not been initialized yet or when it is in error state.
+ if check_bool(item.setdefault("rand_inv_default", "false")):
+ inv_default = random.getrandbits(
+ check_int(item["size"]) * 8)
+ else:
+ inv_default = 0
+ item.setdefault(
+ "inv_default", "{}'h{:0X}".format(
+ check_int(item["size"]) * 8, inv_default))
+ log.info("> Item {} at offset {} with size {}".format(
+ item["name"], offset, item["size"]))
+ offset += check_int(item["size"])
+
+ # Place digest at the end of a partition.
+ if part["sw_digest"] or part["hw_digest"]:
+ part["items"].append({
+ "name":
+ part["name"] + DIGEST_SUFFIX,
+ "size":
+ DIGEST_SIZE,
+ "offset":
+ check_int(part["offset"]) + check_int(part["size"]) -
+ DIGEST_SIZE,
+ "isdigest":
+ "True",
+ "inv_default":
+ "{256{1'b1}}"
+ })
+
+ log.info("> Adding digest {} at offset {} with size {}".format(
+ part["name"] + DIGEST_SUFFIX, offset, DIGEST_SIZE))
+ offset += DIGEST_SIZE
+
+ if len(part["items"]) == 0:
+ log.warning("Partition does not contain any items.")
+
+ # check offsets and size
+ if offset > check_int(part["offset"]) + check_int(part["size"]):
+ log.error("Not enough space in partitition "
+ "{} to accommodate all items. Bytes available "
+ "= {}, bytes requested = {}".format(
+ part["name"], part["size"],
+ offset - part["offset"]))
+ exit(1)
+
+ offset = check_int(part["offset"]) + check_int(part["size"])
+
+ otp_size = check_int(config["otp"]["depth"]) * check_int(
+ config["otp"]["width"])
+ config["otp"]["size"] = otp_size
+ config["otp"]["addr_width"] = ceil(
+ log2(check_int(config["otp"]["depth"])))
+ config["otp"]["byte_addr_width"] = ceil(log2(check_int(otp_size)))
+
+ if offset > otp_size:
+ log.error(
+ "OTP is not big enough to store all partitions. "
+ "Bytes available {}, bytes required {}",
+ otp_size, offset)
+ exit(1)
+
+ log.info("Total number of partitions: {}".format(num_part))
+ log.info("Bytes available in OTP: {}".format(otp_size))
+ log.info("Bytes required for partitions: {}".format(offset))
+
+ self.config = config
+
+ log.info('')
+ log.info('Successfully parsed and translated OTP memory map.')
+ log.info('')
+
+ def create_partitions_table(self):
+ header = [
+ "Partition", "Secret", "Buffered", "WR Lockable", "RD Lockable",
+ "Description"
+ ]
+ table = [header]
+ colalign = ("center", ) * len(header)
+
+ for part in self.config["partitions"]:
+ is_secret = "yes" if check_bool(part["secret"]) else "no"
+ is_buffered = "yes" if part["variant"] in [
+ "Buffered", "LifeCycle"
+ ] else "no"
+ wr_lockable = "no"
+ if part["write_lock"].lower() in ["csr", "digest"]:
+ wr_lockable = "yes (" + part["write_lock"] + ")"
+ rd_lockable = "no"
+ if part["read_lock"].lower() in ["csr", "digest"]:
+ rd_lockable = "yes (" + part["read_lock"] + ")"
+ # remove newlines
+ desc = ' '.join(part["desc"].split())
+ row = [
+ part["name"], is_secret, is_buffered, wr_lockable, rd_lockable,
+ desc
+ ]
+ table.append(row)
+
+ return tabulate(table,
+ headers="firstrow",
+ tablefmt="pipe",
+ colalign=colalign)
+
+ def create_mmap_table(self):
+ header = [
+ "Index", "Partition", "Size [B]", "Access Granule", "Item",
+ "Byte Address", "Size [B]"
+ ]
+ table = [header]
+ colalign = ("center", ) * len(header)
+
+ for k, part in enumerate(self.config["partitions"]):
+ for j, item in enumerate(part["items"]):
+ granule = "64bit" if check_bool(part["secret"]) else "32bit"
+
+ if check_bool(item["isdigest"]):
+ granule = "64bit"
+ name = "[{}](#Reg_{}_0)".format(item["name"],
+ item["name"].lower())
+ else:
+ name = item["name"]
+
+ if j == 0:
+ row = [str(k), part["name"], str(part["size"]), granule]
+ else:
+ row = ["", "", "", granule]
+
+ row.extend([
+ name, "0x{:03X}".format(check_int(item["offset"])),
+ str(item["size"])
+ ])
+
+ table.append(row)
+
+ return tabulate(table,
+ headers="firstrow",
+ tablefmt="pipe",
+ colalign=colalign)
+
+ def create_digests_table(self):
+ header = ["Digest Name", " Affected Partition", "Calculated by HW"]
+ table = [header]
+ colalign = ("center", ) * len(header)
+
+ for part in self.config["partitions"]:
+ if check_bool(part["hw_digest"]) or check_bool(part["sw_digest"]):
+ is_hw_digest = "yes" if check_bool(part["hw_digest"]) else "no"
+ for item in part["items"]:
+ if check_bool(item["isdigest"]):
+ name = "[{}](#Reg_{}_0)".format(
+ item["name"], item["name"].lower())
+ row = [name, part["name"], is_hw_digest]
+ table.append(row)
+ break
+ else:
+ log.error(
+ "Partition with digest does not contain a digest item")
+ exit(1)
+
+ return tabulate(table,
+ headers="firstrow",
+ tablefmt="pipe",
+ colalign=colalign)
diff --git a/util/design/lib/common.py b/util/design/lib/common.py
new file mode 100644
index 0000000..0d12e92
--- /dev/null
+++ b/util/design/lib/common.py
@@ -0,0 +1,206 @@
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+r"""Shared subfunctions.
+"""
+import logging as log
+import random
+import sys
+import textwrap
+from math import ceil, log2
+
+
+def wrapped_docstring():
+    '''Return a text-wrapped version of the calling script's docstring.'''
+    # Use the docstring of the top-level script (__main__) rather than the
+    # docstring of this module, since this helper builds the argparse
+    # description of the individual generator scripts.
+    docstr = sys.modules['__main__'].__doc__ or ''
+    paras = []
+    para = []
+    for line in docstr.strip().split('\n'):
+ line = line.strip()
+ if not line:
+ if para:
+ paras.append('\n'.join(para))
+ para = []
+ else:
+ para.append(line)
+ if para:
+ paras.append('\n'.join(para))
+
+ return '\n\n'.join(textwrap.fill(p) for p in paras)
+
+
+def check_bool(x):
+ """check_bool checks if input 'x' either a bool or
+ one of the following strings: ["true", "false"]
+ It returns value as Bool type.
+ """
+ if isinstance(x, bool):
+ return x
+    if x.lower() not in ["true", "false"]:
+ log.error("{} is not a boolean value.".format(x))
+ exit(1)
+ else:
+ return (x.lower() == "true")
+
+
+def check_int(x):
+ """check_int checks if input 'x' is decimal integer.
+ It returns value as an int type.
+ """
+ if isinstance(x, int):
+ return x
+ if not x.isdecimal():
+ log.error("{} is not a decimal number".format(x))
+ exit(1)
+ return int(x)
+
+
+def as_snake_case_prefix(name):
+ """ Convert PascalCase name into snake_case name"""
+ outname = ""
+ for c in name:
+ if c.isupper() and len(outname) > 0:
+ outname += '_'
+ outname += c.lower()
+ return outname + ('_' if name else '')
+
+
+def get_random_data_hex_literal(width):
+ """ Fetch 'width' random bits and return them as hex literal"""
+ width = int(width)
+ literal_str = hex(random.getrandbits(width))
+ literal_str = str(width) + "'h" + literal_str[2:]
+ return literal_str
+
+
+def blockify(s, size, limit):
+ """ Make sure the output does not exceed a certain size per line"""
+
+ str_idx = 2
+ remain = size % (limit * 4)
+ numbits = remain if remain else limit * 4
+ s_list = []
+
+ remain = size
+ while remain > 0:
+ s_incr = int(numbits / 4)
+ s_list.append("{}'h{}".format(numbits, s[str_idx:str_idx + s_incr]))
+ str_idx += s_incr
+ remain -= numbits
+ numbits = limit * 4
+
+ return (",\n ".join(s_list))
+
+
+def get_random_perm_hex_literal(numel):
+ """ Compute a random permutation of 'numel' elements and
+ return as packed hex literal"""
+ num_elements = int(numel)
+ width = int(ceil(log2(num_elements)))
+ idx = [x for x in range(num_elements)]
+ random.shuffle(idx)
+ literal_str = ""
+ for k in idx:
+ literal_str += format(k, '0' + str(width) + 'b')
+ # convert to hex for space efficiency
+ literal_str = hex(int(literal_str, 2))
+ return blockify(literal_str, width * numel, 64)
+
+
+def hist_to_bars(hist, m):
+    '''Convert histogram list into an ASCII bar plot.'''
+ bars = []
+ for i, j in enumerate(hist):
+ bar_prefix = "{:2}: ".format(i)
+ spaces = len(str(m)) - len(bar_prefix)
+ hist_bar = bar_prefix + (" " * spaces)
+ for k in range(j * 20 // max(hist)):
+ hist_bar += "|"
+ hist_bar += " ({:.2f}%)".format(100.0 * j / sum(hist)) if j else "--"
+ bars += [hist_bar]
+ return bars
+
+
+def get_hd(word1, word2):
+ '''Calculate Hamming distance between two words.'''
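+    # E.g. get_hd('1100', '1010') == 2.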
+ if len(word1) != len(word2):
+ log.error('Words are not of equal size')
+ exit(1)
+ return bin(int(word1, 2) ^ int(word2, 2)).count('1')
+
+
+def hd_histogram(existing_words):
+ '''Build Hamming distance histogram'''
+ minimum_hd = len(existing_words[0])
+ maximum_hd = 0
+ minimum_hw = len(existing_words[0])
+ maximum_hw = 0
+ hist = [0] * (len(existing_words[0]) + 1)
+ for i, j in enumerate(existing_words):
+ minimum_hw = min(j.count('1'), minimum_hw)
+ maximum_hw = max(j.count('1'), maximum_hw)
+ if i < len(existing_words) - 1:
+ for k in existing_words[i + 1:]:
+ dist = get_hd(j, k)
+ hist[dist] += 1
+ minimum_hd = min(dist, minimum_hd)
+ maximum_hd = max(dist, maximum_hd)
+
+ stats = {}
+ stats["hist"] = hist
+ stats["bars"] = hist_to_bars(hist, len(existing_words))
+ stats["min_hd"] = minimum_hd
+ stats["max_hd"] = maximum_hd
+ stats["min_hw"] = minimum_hw
+ stats["max_hw"] = maximum_hw
+ return stats
+
+
+def is_valid_codeword(config, codeword):
+ '''Checks whether the bitstring is a valid ECC codeword.'''
+
+ data_width = config['secded']['data_width']
+ ecc_width = config['secded']['ecc_width']
+ if len(codeword) != (data_width + ecc_width):
+ log.error("Invalid codeword length {}".format(len(codeword)))
+ exit(1)
+
+ # Build syndrome and check whether it is zero.
+ syndrome = [0 for k in range(ecc_width)]
+
+    # The bitstring is formatted as "ecc bits[M-1:0]" + "data bits[N-1:0]",
+    # i.e. the ECC bits occupy the leftmost (most significant) character
+    # positions, matching the output of ecc_encode() below.
+ for j, fanin in enumerate(config['secded']['ecc_matrix']):
+ syndrome[j] = int(codeword[ecc_width - 1 - j])
+ for k in fanin:
+ syndrome[j] ^= int(codeword[ecc_width + data_width - 1 - k])
+
+ return sum(syndrome) == 0
+
+
+def ecc_encode(config, dataword):
+ '''Calculate and prepend ECC bits.'''
+ if len(dataword) != config['secded']['data_width']:
+ log.error("Invalid codeword length {}".format(len(dataword)))
+ exit(1)
+
+    # Compute the ECC bits
+ eccbits = ""
+ for fanin in config['secded']['ecc_matrix']:
+ bit = 0
+ for k in fanin:
+ bit ^= int(dataword[config['secded']['data_width'] - 1 - k])
+ eccbits += format(bit, '01b')
+
+ return eccbits[::-1] + dataword
+
+
+def scatter_bits(mask, bits):
+ '''Scatter the bits into unset positions of mask.'''
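+    # E.g. scatter_bits('1010', '01') fills the two zero positions of the
+    # mask with '0' and '1', yielding '1011'.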
+ j = 0
+ scatterword = ''
+ for b in mask:
+ if b == '1':
+ scatterword += '1'
+ else:
+ scatterword += bits[j]
+ j += 1
+
+ return scatterword
\ No newline at end of file
diff --git a/util/design/secded_gen.py b/util/design/secded_gen.py
new file mode 100755
index 0000000..53142b2
--- /dev/null
+++ b/util/design/secded_gen.py
@@ -0,0 +1,393 @@
+#!/usr/bin/env python3
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+r"""SECDED encoder/decoder generator
+
+The current version doesn't optimize the fan-in. It uses the Hsiao code (a
+modified version of Hamming code + parity). Please refer to
+https://arxiv.org/pdf/0803.1217.pdf
+"""
+
+# TODO: Add FPV assertions in the encoder/decoder module
+
+import argparse
+import itertools
+import logging as log
+import math
+import random
+import sys
+import time
+
+COPYRIGHT = """// Copyright lowRISC contributors.
+// Licensed under the Apache License, Version 2.0, see LICENSE for details.
+// SPDX-License-Identifier: Apache-2.0
+//
+"""
+CODE_OPTIONS = ['hsiao', 'hamming']
+
+def min_paritysize(k):
+ # SECDED --> Hamming distance 'd': 4
+ # 2^(m-1) should cover (m+k)
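+    # E.g. k = 32: m = 6 is the smallest value with 2**6 >= 32 + 6 + 1,
+    # so the function returns 7 (6 parity bits plus 1 overall parity bit).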
+ for m in range(2, 10):
+ if 2**m >= (k + m + 1):
+ return m + 1
+ return -1
+
+
+def ideal_fanin(k, m):
+ """Compute Ideal Max Fanin of any bit in the ecc codes."""
+ fanin = 0
+ needed = k
+ for select in range(3, m + 1, 2):
+ combinations = list(itertools.combinations(range(m), select))
+ if len(combinations) <= needed:
+ fanin += int(math.ceil(float(len(combinations) * select) / m))
+ needed -= len(combinations)
+ else:
+ fanin += int(math.ceil(float(needed * select) / m))
+ needed = 0
+ if not needed:
+ break
+ return fanin
+
+
+def calc_fanin(width, codes):
+ """Sum the ones in a column"""
+ fanins = [0] * width
+ log.info("Calc Code: {}".format(codes))
+ for i in codes:
+ for e in i:
+ fanins[e] += 1
+
+ return fanins
+
+
+def print_comb(n,
+ k,
+ m,
+ cur_m,
+ codes,
+ start_cnt,
+ max_width=100,
+ prefix="",
+ first_indent=0):
+ """Print XOR comb.
+
+ @param[max_width] Maximum Width of str
+ @param[prefix] The prepend string at the first line
+ @param[first_indent] The number of character that indented at the first line
+ e.g. first_indent := 2
+ {prefix}in[nn] ...
+ ^ in[nn] ^ in[nn]
+
+ result:
+ {prefix}in[nn] ^ ... in[nn]
+ ^ in[nn] ^ ... in[nn];
+ """
+ outstr = ""
+ line = prefix
+ prepend_len = len(prefix)
+ cnt = start_cnt
+ first = True
+ for j in range(k):
+ temp_str = ""
+ if cur_m in codes[j]:
+ if not first:
+ temp_str += " ^"
+ if first:
+ first = False
+ temp_str += " in[%d]" % (j)
+ temp_len = len(temp_str)
+
+ if len(line) + temp_len > max_width:
+ outstr += line + "\n"
+ line = ' ' * (prepend_len - first_indent) + temp_str
+ else:
+ line += temp_str
+ outstr += line + ";\n"
+ return outstr
+
+
+def print_enc(n, k, m, codes):
+ outstr = ""
+ for i in range(k):
+ outstr += " assign out[%d] = in[%d] ;\n" % (i, i)
+
+ for i in range(m):
+ # Print parity computation
+ outstr += print_comb(n, k, m, i, codes, 0, 100,
+ " assign out[%d] =" % (i + k), 2)
+ return outstr
+
+
+def calc_syndrome(code):
+ log.info("in syncrome {}".format(code))
+ return sum(map((lambda x: 2**x), code))
+
+# return whether an integer is a power of 2
+def is_pow2(n):
+ return (n & (n-1) == 0) and n != 0
+
+def is_odd(n):
+ return (n % 2) > 0
+
+# k = data bits
+# m = parity bits
+# generate hsiao code
+def hsiao_code(k, m):
+ # using itertools combinations, generate odd number of 1 in a row
+
+    required_row = k  # k rows are needed; decremented as rows are acquired
+
+ fanin_ideal = ideal_fanin(k, m)
+ log.info("Ideal Fan-In value: %d" % fanin_ideal)
+
+ # Each entry represents a row in below parity matrix
+ # Entry is tuple and the value inside is the position of ones
+ # e.g. (0,1,2) in m:=7
+ # row -> [1 1 1 0 0 0 0]
+ codes = []
+
+ ## Find code matrix =======================================================
+    # This is the main part that finds the parity matrix.
+    # For example, finding a SECDED code for a 4-bit message means finding
+    # the 4x8 matrix below:
+    # | 1 0 0 0 x x x x |
+    # | 0 1 0 0 x x x x |
+    # | 0 0 1 0 x x x x |
+    # | 0 0 0 1 x x x x |
+    # Multiplying the k-bit message by this code matrix yields the original
+    # message with the parity bits attached.
+    #
+    # Each row must have an even number of ones including the I matrix
+    # (i.e. an odd number excluding it). This simplifies the syndrome
+    # calculation at the decoding stage.
+    # To reduce the maximum fan-in, start with the smallest number of ones
+    # per row (3); fewer ones per row means a smaller fan-in overall.
+
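+    # E.g. with k = 4 and m = 4, the first round (step = 3) already yields
+    # the four required rows: (0,1,2), (0,1,3), (0,2,3), (1,2,3).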
+ for step in range(3, m + 1, 2):
+ # starting from 3 as I matrix represents data
+ # Increased by 2 as number of 1 should be even in a row (odd excluding I)
+
+ # get the list of combinations [0, .., m-1] with `step`
+ # e.g. step := 3 ==> [(0,1,2), (0,1,3), ... ]
+ candidate = list(itertools.combinations(range(m), step))
+
+ if len(candidate) <= required_row:
+            # We still need more rows; use all of these candidates.
+ codes.extend(candidate)
+ required_row -= len(candidate)
+ else:
+ ## Find optimized fan-in ==========================================
+
+            # Calculate the fan-in of each parity bit for the codes
+            # gathered so far.
+ fanins = calc_fanin(m, codes)
+ while required_row != 0:
+                # Shuffle the candidates: taking a random subset at the
+                # end of the round tends to even out and reduce the fan-in.
+
+ # TODO: There should be a clever way to find the subset without
+ # random retrying.
+ # Suggested this algorithm
+ # https://en.wikipedia.org/wiki/Assignment_problem
+ random.shuffle(candidate)
+
+ # Take a subset
+ subset = candidate[0:required_row]
+
+ subset_fanins = calc_fanin(m, subset)
+ # Check if it exceeds Ideal Fan-In
+ ideal = True
+ for i in range(m):
+ if fanins[i] + subset_fanins[i] > fanin_ideal:
+ # Exceeded. Retry
+ ideal = False
+ break
+
+ if ideal:
+ required_row = 0
+
+ # Append to the code matrix
+ codes.extend(subset)
+
+ if required_row == 0:
+ # Found everything!
+ break
+
+ log.info("Hsiao codes {}".format(codes))
+ return codes
+
+# n = total bits
+# k = data bits
+# m = parity bits
+# generate hamming code
+def hamming_code(n, k, m):
+
+    # Construct a list of code tuples. Each tuple corresponds to one data
+    # bit position and lists the parity bits that this bit participates in.
+    # Only the data bits are listed; the parity bits themselves are not.
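+    # E.g. for (n, k, m) = (8, 4, 4), data position 3 participates in
+    # parity bits 0 and 1 (3 = 2^0 + 2^1) and in the overall parity bit,
+    # yielding the tuple (0, 1, 3).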
+ codes = []
+ for pos in range(1, n+1):
+ # this is a valid parity bit position or the final parity bit
+ if (is_pow2(pos) or pos == n):
+ continue
+ else:
+ code = ()
+ for p in range(m):
+
+ # this is the starting parity position
+ parity_pos = 2**p
+
+ # back-track to the closest parity bit multiple and see if it is even or odd
+ # If even, we are in the skip phase, do not include
+ # If odd, we are in the include phase
+ parity_chk = int((pos - (pos % parity_pos)) / parity_pos)
+ log.debug("At position {} parity value {}, {}" \
+ .format(pos, parity_pos, parity_chk))
+
+ # valid for inclusion or final parity bit that includes everything
+ if is_odd(parity_chk) or p == m-1:
+ code = code + (p,)
+ log.info("add {} to tuple {}".format(p, code))
+
+ codes.append(code)
+
+ log.info("Hamming codes {}".format(codes))
+ return codes
+
+
+def print_dec(n, k, m, codes):
+ outstr = ""
+ outstr += " logic single_error;\n"
+ outstr += "\n"
+ outstr += " // Syndrome calculation\n"
+ for i in range(m):
+ # Print combination
+ outstr += print_comb(n, k, m, i, codes, 1, 100,
+ " assign syndrome_o[%d] = in[%d] ^" % (i, k + i),
+ len(" in[%d] ^" % (k + i)) + 2)
+
+ outstr += "\n"
+ outstr += " // Corrected output calculation\n"
+ for i in range(k):
+        synd_v = calc_syndrome(codes[i])
+        outstr += "  assign d_o[%d] = (syndrome_o == %d'h%x) ^ in[%d];\n" % (
+            i, m, synd_v, i)
+ outstr += "\n"
+ outstr += " // err_o calc. bit0: single error, bit1: double error\n"
+ outstr += " assign single_error = ^syndrome_o;\n"
+ outstr += " assign err_o[0] = single_error;\n"
+ outstr += " assign err_o[1] = ~single_error & (|syndrome_o);\n"
+ return outstr
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ prog="secded_gen",
+ description='''This tool generates Single Error Correction Double Error
+        Detection (SECDED) encoder and decoder modules in SystemVerilog.
+ ''')
+ parser.add_argument(
+ '-m',
+ type=int,
+ default=7,
+ help=
+ 'parity length. If fan-in is too big, increasing m helps. (default: %(default)s)'
+ )
+ parser.add_argument(
+ '-k',
+ type=int,
+ default=32,
+ help=
+ 'code length. Minimum \'m\' is calculated by the tool (default: %(default)s)'
+ )
+ parser.add_argument(
+ '-c',
+ default='hsiao',
+ help=
+ 'ECC code used. Options: hsiao / hamming (default: %(default)s)'
+ )
+ parser.add_argument(
+ '--outdir',
+ default='../rtl',
+ help=
+ 'output directory. The output file will be named `prim_secded_<n>_<k>_enc/dec.sv` (default: %(default)s)'
+ )
+ parser.add_argument('--verbose', '-v', action='store_true', help='Verbose')
+
+ args = parser.parse_args()
+
+ if (args.verbose):
+ log.basicConfig(format="%(levelname)s: %(message)s", level=log.DEBUG)
+ else:
+ log.basicConfig(format="%(levelname)s: %(message)s")
+
+ # Error checking
+    if (args.k <= 1 or args.k > 120):
+        log.error("Current tool doesn't support the value k (%d)", args.k)
+        sys.exit(1)
+    k = args.k
+
+    if (args.m <= 1 or args.m > 20):
+        log.error("Current tool doesn't support the value m (%d)", args.m)
+        sys.exit(1)
+
+ # Calculate 'm' (parity size)
+ min_m = min_paritysize(k)
+ if (args.m < min_m):
+ log.warning("given \'m\' argument is smaller than minimum requirement " +
+ "using calculated minimum")
+ m = min_m
+ else:
+ m = args.m
+
+ n = m + k
+ log.info("n(%d), k(%d), m(%d)", n, k, m)
+
+ random.seed(time.time())
+
+ # Error check code selection
+ codes = []
+ name = ''
+ if (args.c == 'hsiao'):
+ codes = hsiao_code(k, m)
+ elif (args.c == 'hamming'):
+ name = '_hamming'
+ codes = hamming_code(n, k, m)
+ else:
+ log.error("Invalid code {} selected, use one of {}".format(args.c, CODE_OPTIONS))
+ return
+
+ # Print Encoder
+ enc_out = print_enc(n, k, m, codes)
+ #log.info(enc_out)
+
+ module_name = "prim_secded%s_%d_%d" % (name, n, k)
+
+ with open(args.outdir + "/" + module_name + "_enc.sv", "w") as f:
+ f.write(COPYRIGHT)
+ f.write("// SECDED Encoder generated by secded_gen.py\n\n")
+
+ f.write("module " + module_name + "_enc (\n")
+ f.write(" input [%d:0] in,\n" % (k - 1))
+ f.write(" output logic [%d:0] out\n" % (n - 1))
+ f.write(");\n\n")
+ f.write(enc_out)
+ f.write("endmodule\n\n")
+
+ dec_out = print_dec(n, k, m, codes)
+
+ with open(args.outdir + "/" + module_name + "_dec.sv", "w") as f:
+ f.write(COPYRIGHT)
+ f.write("// SECDED Decoder generated by secded_gen.py\n\n")
+
+ f.write("module " + module_name + "_dec (\n")
+ f.write(" input [%d:0] in,\n" % (n - 1))
+ f.write(" output logic [%d:0] d_o,\n" % (k - 1))
+ f.write(" output logic [%d:0] syndrome_o,\n" % (m - 1))
+ f.write(" output logic [1:0] err_o\n")
+ f.write(");\n\n")
+ f.write(dec_out)
+ f.write("endmodule\n\n")
+
+if __name__ == "__main__":
+ main()
diff --git a/util/design/sparse-fsm-encode.py b/util/design/sparse-fsm-encode.py
new file mode 100755
index 0000000..a534787
--- /dev/null
+++ b/util/design/sparse-fsm-encode.py
@@ -0,0 +1,307 @@
+#!/usr/bin/env python3
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+r"""This script generates sparse FSM encodings that fulfill a minimum
+Hamming distance requirement. It uses a heuristic that incrementally
+draws random state encodings until a solution has been found.
+
+Depending on the parameterization, the script may not find a solution right
+away. In such cases, the script should be rerun after tweaking the d/m/n
+parameters. E.g. in order to increase the chances for success, the state
+space can be made more sparse by increasing n, or the Hamming distance
+threshold d can be lowered.
+
+Note however that the Hamming distance d should be set to 3 at minimum.
+It is recommended to set this value to 4-5 for security-critical FSMs.
+
+The custom seed s can be used to make subsequent runs of the script
+deterministic. If not specified, the script randomly picks a seed.
+
+"""
+import argparse
+import logging
+import math
+import random
+import sys
+
+from lib.common import get_hd, hd_histogram, wrapped_docstring
+
+MAX_DRAWS = 10000
+MAX_RESTARTS = 10000
+
+SV_INSTRUCTIONS = """
+------------------------------------------------------
+| COPY PASTE THE CODE TEMPLATE BELOW INTO YOUR RTL |
+| IMPLEMENTATION, INCLUDING THE COMMENT AND PRIM_FLOP |
+| IN ORDER TO EASE AUDITABILITY AND REPRODUCIBILITY. |
+------------------------------------------------------
+"""
+
+C_INSTRUCTIONS = """
+------------------------------------------------
+| COPY PASTE THE CODE TEMPLATE BELOW INTO YOUR |
+| C HEADER, INCLUDING THE COMMENT IN ORDER TO |
+| EASE AUDITABILITY AND REPRODUCIBILITY. |
+------------------------------------------------
+"""
+
+RUST_INSTRUCTIONS = """
+------------------------------------------------
+| COPY PASTE THE CODE TEMPLATE BELOW INTO YOUR |
+| RUST FILE, INCLUDING THE COMMENT IN ORDER TO |
+| EASE AUDITABILITY AND REPRODUCIBILITY. |
+------------------------------------------------
+"""
+
+
+def main():
+ logging.basicConfig(level=logging.INFO,
+ format="%(asctime)s - %(message)s",
+ datefmt="%Y-%m-%d %H:%M")
+
+ parser = argparse.ArgumentParser(
+ prog="sparse-fsm-encode",
+ description=wrapped_docstring(),
+ formatter_class=argparse.RawDescriptionHelpFormatter)
+ parser.add_argument(
+ '-d',
+ type=int,
+ default=5,
+ metavar='<minimum HD>',
+ help='Minimum Hamming distance between encoded states.')
+ parser.add_argument('-m',
+ type=int,
+ default=7,
+ metavar='<#states>',
+ help='Number of states to encode.')
+ parser.add_argument('-n',
+ type=int,
+ default=10,
+ metavar='<#nbits>',
+ help='Encoding length [bit].')
+ parser.add_argument('-s',
+ type=int,
+ metavar='<seed>',
+ help='Custom seed for RNG.')
+ parser.add_argument('--language',
+ choices=['sv', 'c', 'rust'],
+ default='sv',
+ help='Choose the language of the generated enum.')
+
+ args = parser.parse_args()
+
+ if args.language in ['c', 'rust']:
+ if args.n not in [8, 16, 32]:
+ logging.error(
+ "When using C or Rust, widths must be a power-of-two "
+ "at least a byte (8 bits) wide. You chose %d." % (args.n, ))
+ sys.exit(1)
+
+ if args.m > 2**args.n:
+ logging.error(
+            'State space 2^%d is not large enough to accommodate %d states.' %
+ (args.n, args.m))
+ sys.exit(1)
+
+ if args.d >= args.n:
+ logging.error(
+ 'State is only %d bits wide, which is not enough to fulfill a '
+ 'minimum Hamming distance constraint of %d. ' % (args.n, args.d))
+ sys.exit(1)
+
+ if args.d <= 0:
+ logging.error('Hamming distance must be > 0.')
+ sys.exit(1)
+
+ if args.d < 3:
+ logging.warning(
+ 'A value of 4-5 is recommended for the minimum Hamming distance '
+ 'constraint. At a minimum, this should be set to 3.')
+
+ # If no seed has been provided, we choose a seed and print it
+ # into the generated output later on such that this run can be
+ # reproduced.
+ if args.s is None:
+ random.seed()
+ args.s = random.getrandbits(32)
+
+ random.seed(args.s)
+
+ # This is a heuristic that opportunistically draws random
+ # state encodings and check whether they fulfill the minimum
+ # Hamming distance constraint.
+ # Other solutions that use a brute-force approach would be
+ # possible as well (see e.g. https://math.stackexchange.com/
+ # questions/891528/generating-a-binary-code-with-maximized-hamming-distance).
+ # However, due to the sparse nature of the state space, this
+ # probabilistic heuristic works pretty well for most practical
+ # cases, and it scales favorably to large N.
+ num_draws = 0
+ num_restarts = 0
+ rnd = random.getrandbits(args.n)
+ encodings = [format(rnd, '0' + str(args.n) + 'b')]
+ while len(encodings) < args.m:
+ # if we iterate for too long, start over.
+ if num_draws >= MAX_DRAWS:
+ num_draws = 0
+ num_restarts += 1
+ rnd = random.getrandbits(args.n)
+ encodings = [format(rnd, '0' + str(args.n) + 'b')]
+ # if we restarted for too many times, abort.
+ if num_restarts >= MAX_RESTARTS:
+ logging.error(
+ 'Did not find a solution after restarting {} times. This is '
+ 'an indicator that not many (or even no) solutions exist for '
+ 'the current parameterization. Rerun the script and/or adjust '
+ 'the d/m/n parameters. E.g. make the state space more sparse by '
+ 'increasing n, or lower the minimum Hamming distance threshold d.'
+ .format(num_restarts))
+ sys.exit(1)
+ num_draws += 1
+ # draw a candidate and check whether it fulfills the minimum
+ # distance requirement with respect to other encodings.
+ rnd = random.getrandbits(args.n)
+ cand = format(rnd, '0' + str(args.n) + 'b')
+ # disallow all-zero and all-one states
+ pop_cnt = cand.count('1')
+ if pop_cnt < args.n and pop_cnt > 0:
+ for k in encodings:
+                # disallow candidates that are the complement of other
+                # states (mask with 2^n - 1, since Python's ~ operator
+                # yields a negative number)
+                if int(cand, 2) == (~int(k, 2) & ((1 << args.n) - 1)):
+ break
+ # disallow candidates that are too close to other states
+ if get_hd(cand, k) < args.d:
+ break
+ else:
+ encodings.append(cand)
+
+ # Get Hamming distance statistics.
+ stats = hd_histogram(encodings)
+
+ if args.language == "sv":
+ print(SV_INSTRUCTIONS)
+ print("// Encoding generated with:\n"
+ "// $ ./util/design/sparse-fsm-encode.py -d {} -m {} -n {} \\\n"
+ "// -s {} --language=sv\n"
+ "//\n"
+ "// Hamming distance histogram:\n"
+ "//".format(args.d, args.m, args.n, args.s))
+ for bar in stats['bars']:
+ print('// ' + bar)
+ print("//\n"
+ "// Minimum Hamming distance: {}\n"
+ "// Maximum Hamming distance: {}\n"
+ "// Minimum Hamming weight: {}\n"
+ "// Maximum Hamming weight: {}\n"
+ "//\n"
+ "localparam int StateWidth = {};\n"
+ "typedef enum logic [StateWidth-1:0] {{".format(
+ stats['min_hd'], stats['max_hd'], stats['min_hw'],
+ stats['max_hw'], args.n))
+ fmt_str = " State{} {}= {}'b{}"
+ state_str = ""
+ for j, k in enumerate(encodings):
+ pad = ""
+ for i in range(len(str(args.m)) - len(str(j))):
+ pad += " "
+ comma = "," if j < len(encodings) - 1 else ""
+ print(fmt_str.format(j, pad, args.n, k) + comma)
+ state_str += " State{}: ;\n".format(j)
+
+ # print FSM template
+ print('''}} state_e;
+
+state_e state_d, state_q;
+
+always_comb begin : p_fsm
+ // Default assignments
+ state_d = state_q;
+
+ unique case (state_q)
+{} default: ; // Consider triggering an error or alert in this case.
+ endcase
+end
+
+// This primitive is used to place a size-only constraint on the
+// flops in order to prevent FSM state encoding optimizations.
+logic [StateWidth-1:0] state_raw_q;
+assign state_q = state_e'(state_raw_q);
+prim_flop #(
+ .Width(StateWidth),
+ .ResetValue(StateWidth'(State0))
+) u_state_regs (
+ .clk_i,
+ .rst_ni,
+ .d_i ( state_d ),
+ .q_o ( state_raw_q )
+);
+'''.format(state_str))
+
+ elif args.language == "c":
+ print(C_INSTRUCTIONS)
+ print("/*\n"
+ " * Encoding generated with\n"
+ " * $ ./util/design/sparse-fsm-encode.py -d {} -m {} -n {} \\\n"
+ " * -s {} --language=c\n"
+ " *\n"
+ " * Hamming distance histogram:\n"
+ " *".format(args.d, args.m, args.n, args.s))
+ for hist_bar in stats['bars']:
+ print(" * " + hist_bar)
+ print(" *\n"
+ " * Minimum Hamming distance: {}\n"
+ " * Maximum Hamming distance: {}\n"
+ " * Minimum Hamming weight: {}\n"
+ " * Maximum Hamming weight: {}\n"
+ " */\n"
+ "typedef enum my_state {{".format(stats['min_hd'],
+ stats['max_hd'],
+ stats['min_hw'],
+ stats['max_hw']))
+ fmt_str = " kMyState{0:} {1:}= 0x{3:0" + str(math.ceil(
+ args.n / 4)) + "x}"
+ for j, k in enumerate(encodings):
+ pad = ""
+ for i in range(len(str(args.m)) - len(str(j))):
+ pad += " "
+ print(fmt_str.format(j, pad, args.n, int(k, 2)) + ",")
+
+ # print FSM template
+ print("} my_state_t;")
+ elif args.language == 'rust':
+ print(RUST_INSTRUCTIONS)
+ print("///```text\n"
+ "/// Encoding generated with\n"
+ "/// $ ./util/design/sparse-fsm-encode.py -d {} -m {} -n {} \\\n"
+ "/// -s {} --language=rust\n"
+ "///\n"
+ "/// Hamming distance histogram:\n"
+ "///".format(args.d, args.m, args.n, args.s))
+ for hist_bar in stats['bars']:
+ print("/// " + hist_bar)
+ print("///\n"
+ "/// Minimum Hamming distance: {}\n"
+ "/// Maximum Hamming distance: {}\n"
+ "/// Minimum Hamming weight: {}\n"
+ "/// Maximum Hamming weight: {}\n"
+ "///```\n"
+ "#[derive(Clone,Copy,Eq,PartialEq,Ord,ParitalOrd,Hash,Debug)]\n"
+ "#[repr(transparent)]\n"
+ "struct MyState(u{});\n"
+ "\n"
+ "impl MyState {{".format(stats['min_hd'], stats['max_hd'],
+ stats['min_hw'], stats['max_hw'],
+ args.n))
+ fmt_str = " const MY_STATE{0:}: MyState {1:}= MyState(0x{3:0" + str(
+ math.ceil(args.n / 4)) + "x})"
+ for j, k in enumerate(encodings):
+ pad = ""
+ for i in range(len(str(args.m)) - len(str(j))):
+ pad += " "
+ print(fmt_str.format(j, pad, args.n, int(k, 2)) + ";")
+ print("}")
+
+
+if __name__ == "__main__":
+ main()