[otp_ctrl/lc_ctrl] Refactor util scripts and share common code

Signed-off-by: Michael Schaffner <msf@opentitan.org>
diff --git a/hw/ip/otp_ctrl/util/LcStEnc.py b/hw/ip/otp_ctrl/util/LcStEnc.py
new file mode 100644
index 0000000..16256d1
--- /dev/null
+++ b/hw/ip/otp_ctrl/util/LcStEnc.py
@@ -0,0 +1,342 @@
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+r"""Contains life cycle state encoding class which is
+used to generate new life cycle encodings.
+"""
+import logging as log
+import random
+
+from common import check_int
+
+
+def _is_valid_codeword(config, codeword):
+    '''Checks whether the bitstring is a valid ECC codeword.'''
+
+    data_width = config['secded']['data_width']
+    ecc_width = config['secded']['ecc_width']
+    if len(codeword) != (data_width + ecc_width):
+        log.error("Invalid codeword length {}".format(len(codeword)))
+        exit(1)
+
+    # Build syndrome and check whether it is zero.
+    syndrome = [0 for k in range(ecc_width)]
+
+    # The bitstring must be formatted as "data bits[N-1:0]" + "ecc bits[M-1:0]".
+    for j, fanin in enumerate(config['secded']['ecc_matrix']):
+        syndrome[j] = int(codeword[ecc_width - 1 - j])
+        for k in fanin:
+            syndrome[j] ^= int(codeword[ecc_width + data_width - 1 - k])
+
+    return sum(syndrome) == 0
+
+
+def _ecc_encode(config, dataword):
+    '''Calculate and prepend ECC bits.'''
+    if len(dataword) != config['secded']['data_width']:
+        log.error("Invalid dataword length {}".format(len(dataword)))
+        exit(1)
+
+    # Build syndrome
+    eccbits = ""
+    for fanin in config['secded']['ecc_matrix']:
+        bit = 0
+        for k in fanin:
+            bit ^= int(dataword[config['secded']['data_width'] - 1 - k])
+        eccbits += format(bit, '01b')
+
+    return eccbits[::-1] + dataword
+
+
+def _is_incremental_codeword(word1, word2):
+    '''Test whether word2 is incremental wrt word1.'''
+    if len(word1) != len(word2):
+        log.error('Words are not of equal size')
+        exit(1)
+
+    _word1 = int(word1, 2)
+    _word2 = int(word2, 2)
+
+    # This basically checks that the second word does not
+    # clear any bits that are set to 1 in the first word.
+    return ((_word1 & _word2) == _word1)
+
+
+def _scatter_bits(mask, bits):
+    '''Scatter the bits into unset positions of mask.'''
+    j = 0
+    scatterword = ''
+    for b in mask:
+        if b == '1':
+            scatterword += '1'
+        else:
+            scatterword += bits[j]
+            j += 1
+
+    return scatterword
+
+
+def _get_hd(word1, word2):
+    '''Calculate Hamming distance between two words.'''
+    if len(word1) != len(word2):
+        log.error('Words are not of equal size')
+        exit(1)
+    return bin(int(word1, 2) ^ int(word2, 2)).count('1')
+
+
+def _get_incremental_codewords(config, base_ecc, existing_words):
+    '''Get all possible incremental codewords fulfilling the constraints.'''
+
+    base_data = base_ecc[config['secded']['ecc_width']:]
+
+    # We only need to spin through data bits that have not been set yet.
+    # Hence, we first count how many bits are zero (and hence still
+    # modifiable). Then, we enumerate all possible combinations and scatter
+    # the bits of the enumerated values into the correct bit positions using
+    # the _scatter_bits() function.
+    incr_cands = []
+    free_bits = base_data.count('0')
+    for k in range(1, 2**free_bits):
+        # Get incremental dataword by scattering the enumeration bits
+        # into the zero bit positions in base_data.
+        incr_cand = _scatter_bits(base_data,
+                                  format(k, '0' + str(free_bits) + 'b'))
+        incr_cand_ecc = _ecc_encode(config, incr_cand)
+
+        # Dataword is correct by construction, but we need to check whether
+        # the ECC bits are incremental.
+        if _is_incremental_codeword(base_ecc, incr_cand_ecc):
+            # Check whether the candidate fulfills the maximum
+            # Hamming weight constraint.
+            if incr_cand_ecc.count('1') <= config['max_hw']:
+                # Check Hamming distance wrt all existing words.
+                for w in existing_words + [base_ecc]:
+                    if _get_hd(incr_cand_ecc, w) < config['min_hd']:
+                        break
+                else:
+                    incr_cands.append(incr_cand_ecc)
+
+    return incr_cands
+
+
+def _get_new_state_word_pair(config, existing_words):
+    '''Randomly generate a new incrementally writable word pair'''
+    while 1:
+        # Draw a random number and check whether it is unique and whether
+        # the Hamming weight is in range.
+        width = config['secded']['data_width']
+        ecc_width = config['secded']['ecc_width']
+        base = random.getrandbits(width)
+        base = format(base, '0' + str(width) + 'b')
+        base_cand_ecc = _ecc_encode(config, base)
+        # disallow all-zero and all-one states
+        pop_cnt = base_cand_ecc.count('1')
+        if pop_cnt >= config['min_hw'] and pop_cnt <= config['max_hw']:
+
+            # Check Hamming distance wrt all existing words
+            for w in existing_words:
+                if _get_hd(base_cand_ecc, w) < config['min_hd']:
+                    break
+            else:
+                # Get encoded incremental candidates.
+                incr_cands_ecc = _get_incremental_codewords(
+                    config, base_cand_ecc, existing_words)
+                # there are valid candidates, draw one at random.
+                # otherwise we just start over.
+                if incr_cands_ecc:
+                    incr_cand_ecc = random.choice(incr_cands_ecc)
+                    log.info('word {}: {}|{} -> {}|{}'.format(
+                        int(len(existing_words) / 2),
+                        base_cand_ecc[ecc_width:], base_cand_ecc[0:ecc_width],
+                        incr_cand_ecc[ecc_width:], incr_cand_ecc[0:ecc_width]))
+                    existing_words.append(base_cand_ecc)
+                    existing_words.append(incr_cand_ecc)
+                    return (base_cand_ecc, incr_cand_ecc)
+
+
+def _validate_words(config, words):
+    '''Validate generated words (base and incremental).'''
+    for k, w in enumerate(words):
+        # Check whether word is valid wrt to ECC polynomial.
+        if not _is_valid_codeword(config, w):
+            log.error('Codeword {} at index {} is not valid'.format(w, k))
+            exit(1)
+        # Check that word fulfills the Hamming weight constraints.
+        pop_cnt = w.count('1')
+        if pop_cnt < config['min_hw'] or pop_cnt > config['max_hw']:
+            log.error(
+                'Codeword {} at index {} has wrong Hamming weight'.format(
+                    w, k))
+            exit(1)
+        # Check Hamming distance wrt to all other existing words.
+        # If the constraint is larger than 0 this implies uniqueness.
+        if k < len(words) - 1:
+            for k2, w2 in enumerate(words[k + 1:]):
+                if _get_hd(w, w2) < config['min_hd']:
+                    log.error(
+                        'Hamming distance between codeword {} at index {} '
+                        'and codeword {} at index {} is too low.'.format(
+                            w, k, w2, k + 1 + k2))
+                    exit(1)
+
+
+def _hist_to_bars(hist, m):
+    '''Convert histogram list into ASCII bar plot'''
+    bars = []
+    for i, j in enumerate(hist):
+        bar_prefix = "{:2}: ".format(i)
+        spaces = len(str(m)) - len(bar_prefix)
+        hist_bar = bar_prefix + (" " * spaces)
+        for k in range(j * 20 // max(hist)):
+            hist_bar += "|"
+        hist_bar += " ({:.2f}%)".format(100.0 * j / sum(hist)) if j else "--"
+        bars += [hist_bar]
+    return bars
+
+
+def hd_histogram(existing_words):
+    '''Build Hamming distance histogram'''
+    minimum_hd = len(existing_words[0])
+    maximum_hd = 0
+    minimum_hw = len(existing_words[0])
+    maximum_hw = 0
+    hist = [0] * (len(existing_words[0]) + 1)
+    for i, j in enumerate(existing_words):
+        minimum_hw = min(j.count('1'), minimum_hw)
+        maximum_hw = max(j.count('1'), maximum_hw)
+        if i < len(existing_words) - 1:
+            for k in existing_words[i + 1:]:
+                dist = _get_hd(j, k)
+                hist[dist] += 1
+                minimum_hd = min(dist, minimum_hd)
+                maximum_hd = max(dist, maximum_hd)
+
+    stats = {}
+    stats["hist"] = hist
+    stats["bars"] = _hist_to_bars(hist, len(existing_words))
+    stats["min_hd"] = minimum_hd
+    stats["max_hd"] = maximum_hd
+    stats["min_hw"] = minimum_hw
+    stats["max_hw"] = maximum_hw
+    return stats
+
+
+class LcStEnc():
+    '''Life cycle state encoding generator class
+
+    The constructor expects the parsed configuration
+    hjson to be passed in.
+    '''
+
+    # This holds the config dict.
+    config = {}
+    # Holds generated life cycle words.
+    gen = {
+        'ab_words': [],
+        'cd_words': [],
+        'ef_words': [],
+        'stats': [],
+    }
+
+    def __init__(self, config):
+        '''The constructor validates the configuration dict.'''
+
+        log.info('')
+        log.info('Generate life cycle state')
+        log.info('')
+
+        if 'seed' not in config:
+            log.error('Missing seed in configuration')
+            exit(1)
+
+        if 'secded' not in config:
+            log.error('Missing secded configuration')
+            exit(1)
+
+        config['secded'].setdefault('data_width', 0)
+        config['secded'].setdefault('ecc_width', 0)
+        config['secded'].setdefault('ecc_matrix', [[]])
+        config.setdefault('num_ab_words', 0)
+        config.setdefault('num_cd_words', 0)
+        config.setdefault('num_ef_words', 0)
+        config.setdefault('min_hw', 0)
+        config.setdefault('max_hw', 0)
+        config.setdefault('min_hd', 0)
+
+        config['seed'] = check_int(config['seed'])
+
+        log.info('Seed: {0:x}'.format(config['seed']))
+        log.info('')
+
+        config['secded']['data_width'] = check_int(
+            config['secded']['data_width'])
+        config['secded']['ecc_width'] = check_int(
+            config['secded']['ecc_width'])
+        config['num_ab_words'] = check_int(config['num_ab_words'])
+        config['num_cd_words'] = check_int(config['num_cd_words'])
+        config['num_ef_words'] = check_int(config['num_ef_words'])
+        config['min_hw'] = check_int(config['min_hw'])
+        config['max_hw'] = check_int(config['max_hw'])
+        config['min_hd'] = check_int(config['min_hd'])
+
+        total_width = config['secded']['data_width'] + config['secded'][
+            'ecc_width']
+
+        if config['min_hw'] >= total_width or \
+           config['max_hw'] > total_width or \
+           config['min_hw'] >= config['max_hw']:
+            log.error('Hamming weight constraints are inconsistent.')
+            exit(1)
+
+        if config['max_hw'] - config['min_hw'] + 1 < config['min_hd']:
+            log.error('Hamming distance constraint is inconsistent.')
+            exit(1)
+
+        if config['secded']['ecc_width'] != len(
+                config['secded']['ecc_matrix']):
+            log.error('ECC matrix does not have correct number of rows')
+            exit(1)
+
+        log.info('SECDED Matrix:')
+        for i, l in enumerate(config['secded']['ecc_matrix']):
+            log.info('ECC Bit {} Fanin: {}'.format(i, l))
+            for j, e in enumerate(l):
+                e = check_int(e)
+                config['secded']['ecc_matrix'][i][j] = e
+
+        log.info('')
+
+        self.config = config
+
+        # Re-initialize with seed to make results reproducible.
+        random.seed(int(self.config['seed']))
+
+        # Generate new encoding words
+        word_types = ['ab_words', 'cd_words', 'ef_words']
+        existing_words = []
+        for w in word_types:
+            while len(self.gen[w]) < self.config['num_' + w]:
+                new_word = _get_new_state_word_pair(self.config,
+                                                    existing_words)
+                self.gen[w].append(new_word)
+
+        # Validate words (this must not fail at this point).
+        _validate_words(self.config, existing_words)
+
+        # Print out HD histogram
+        self.gen['stats'] = hd_histogram(existing_words)
+
+        log.info('')
+        log.info('Hamming distance histogram:')
+        log.info('')
+        for bar in self.gen['stats']["bars"]:
+            log.info(bar)
+        log.info('')
+        log.info('Minimum HD: {}'.format(self.gen['stats']['min_hd']))
+        log.info('Maximum HD: {}'.format(self.gen['stats']['max_hd']))
+        log.info('Minimum HW: {}'.format(self.gen['stats']['min_hw']))
+        log.info('Maximum HW: {}'.format(self.gen['stats']['max_hw']))
+
+        log.info('')
+        log.info('Successfully generated life cycle state.')
+        log.info('')
diff --git a/hw/ip/otp_ctrl/util/OtpMemMap.py b/hw/ip/otp_ctrl/util/OtpMemMap.py
new file mode 100644
index 0000000..d843437
--- /dev/null
+++ b/hw/ip/otp_ctrl/util/OtpMemMap.py
@@ -0,0 +1,288 @@
+#!/usr/bin/env python3
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+r"""OTP memory map class, used to create the associated RTL and
+documentation, and to create OTP memory images for preloading.
+"""
+
+import logging as log
+import random
+from math import ceil, log2
+
+from tabulate import tabulate
+
+from common import check_bool, check_int
+
+DIGEST_SUFFIX = "_DIGEST"
+DIGEST_SIZE = 8
+
+
+class OtpMemMap():
+
+    # This holds the config dict.
+    config = {}
+
+    def __init__(self, config):
+
+        log.info('')
+        log.info('Parse and translate OTP memory map.')
+        log.info('')
+
+        if "seed" not in config:
+            log.error("Missing seed in configuration.")
+            exit(1)
+
+        config["seed"] = check_int(config["seed"])
+
+        # Initialize RNG.
+        random.seed(int(config['seed']))
+
+        offset = 0
+        num_part = 0
+        for part in config["partitions"]:
+            num_part += 1
+            # Defaults
+            part.setdefault("offset", offset)
+            part.setdefault("name", "unknown_name")
+            part.setdefault("variant", "Unbuffered")
+            part.setdefault("size", "0")
+            part.setdefault("secret", "false")
+            part.setdefault("sw_digest", "false")
+            part.setdefault("hw_digest", "false")
+            part.setdefault("write_lock", "none")
+            part.setdefault("read_lock", "none")
+            part.setdefault("key_sel", "NoKey")
+            log.info("Partition {} at offset {} with size {}".format(
+                part["name"], part["offset"], part["size"]))
+
+            # make sure these are boolean types (simplifies the mako templates)
+            part["secret"] = check_bool(part["secret"])
+            part["sw_digest"] = check_bool(part["sw_digest"])
+            part["hw_digest"] = check_bool(part["hw_digest"])
+            part["bkout_type"] = check_bool(part.setdefault("bkout_type", "false"))
+
+            # basic checks
+            if part["variant"] not in ["Unbuffered", "Buffered", "LifeCycle"]:
+                log.error("Invalid partition type {}".format(part["variant"]))
+                exit(1)
+
+            if part["key_sel"] not in [
+                    "NoKey", "Secret0Key", "Secret1Key", "Secret2Key"
+            ]:
+                log.error("Invalid key sel {}".format(part["key_sel"]))
+                exit(1)
+
+            if check_bool(part["secret"]) and part["key_sel"] == "NoKey":
+                log.error(
+                    "A secret partition needs a key select value other than NoKey"
+                )
+                exit(1)
+
+            if part["write_lock"].lower() not in ["digest", "csr", "none"]:
+                log.error("Invalid value for write_lock")
+                exit(1)
+
+            if part["read_lock"].lower() not in ["digest", "csr", "none"]:
+                log.error("Invalid value for read_lock")
+                exit(1)
+
+            if part["sw_digest"] and part["hw_digest"]:
+                log.error(
+                    "Partition cannot support both a SW and a HW digest at the same time."
+                )
+                exit(1)
+
+            if part["variant"] == "Unbuffered" and not part["sw_digest"]:
+                log.error(
+                    "Unbuffered partitions without digest are not supported at the moment."
+                )
+                exit(1)
+
+            if not part["sw_digest"] and not part["hw_digest"]:
+                if part["write_lock"].lower(
+                ) == "digest" or part["read_lock"].lower() == "digest":
+                    log.error(
+                        "A partition can only be write/read lockable if it has a hw or sw digest."
+                    )
+                    exit(1)
+
+            if check_int(part["offset"]) % 8:
+                log.error("Partition offset must be 64bit aligned")
+                exit(1)
+
+            if check_int(part["size"]) % 8:
+                log.error("Partition size must be 64bit aligned")
+                exit(1)
+
+            # Loop over items within a partition
+            for item in part["items"]:
+                item.setdefault("name", "unknown_name")
+                item.setdefault("size", "0")
+                item.setdefault("isdigest", "false")
+                item.setdefault("offset", offset)
+                # Generate random constant to be used when partition has
+                # not been initialized yet or when it is in error state.
+                if check_bool(item.setdefault("rand_inv_default", "false")):
+                    inv_default = random.getrandbits(
+                        check_int(item["size"]) * 8)
+                else:
+                    inv_default = 0
+                item.setdefault(
+                    "inv_default", "{}'h{:0X}".format(
+                        check_int(item["size"]) * 8, inv_default))
+                log.info("> Item {} at offset {} with size {}".format(
+                    item["name"], offset, item["size"]))
+                offset += check_int(item["size"])
+
+            # Place digest at the end of a partition.
+            if part["sw_digest"] or part["hw_digest"]:
+                part["items"].append({
+                    "name":
+                    part["name"] + DIGEST_SUFFIX,
+                    "size":
+                    DIGEST_SIZE,
+                    "offset":
+                    check_int(part["offset"]) + check_int(part["size"]) -
+                    DIGEST_SIZE,
+                    "isdigest":
+                    "True",
+                    "inv_default":
+                    "{256{1'b1}}"
+                })
+
+                log.info("> Adding digest {} at offset {} with size {}".format(
+                    part["name"] + DIGEST_SUFFIX, offset, DIGEST_SIZE))
+                offset += DIGEST_SIZE
+
+            if len(part["items"]) == 0:
+                log.warning("Partition does not contain any items.")
+
+            # check offsets and size
+            if offset > check_int(part["offset"]) + check_int(part["size"]):
+                log.error("Not enough space in partition "
+                          "{} to accommodate all items. Bytes available "
+                          "= {}, bytes requested = {}".format(
+                              part["name"], part["size"],
+                              offset - check_int(part["offset"])))
+                exit(1)
+
+            offset = check_int(part["offset"]) + check_int(part["size"])
+
+        otp_size = check_int(config["otp"]["depth"]) * check_int(
+            config["otp"]["width"])
+        config["otp"]["size"] = otp_size
+        config["otp"]["addr_width"] = ceil(
+            log2(check_int(config["otp"]["depth"])))
+        config["otp"]["byte_addr_width"] = ceil(log2(check_int(otp_size)))
+
+        if offset > otp_size:
+            log.error(
+                "OTP is not big enough to store all partitions. "
+                "Bytes available {}, bytes required {}".format(
+                    otp_size, offset))
+            exit(1)
+
+        log.info("Total number of partitions: {}".format(num_part))
+        log.info("Bytes available in OTP: {}".format(otp_size))
+        log.info("Bytes required for partitions: {}".format(offset))
+
+        self.config = config
+
+        log.info('')
+        log.info('Successfully parsed and translated OTP memory map.')
+        log.info('')
+
+
+    def create_partitions_table(self):
+        header = [
+            "Partition", "Secret", "Buffered", "WR Lockable", "RD Lockable",
+            "Description"
+        ]
+        table = [header]
+        colalign = ("center", ) * len(header)
+
+        for part in self.config["partitions"]:
+            is_secret = "yes" if check_bool(part["secret"]) else "no"
+            is_buffered = "yes" if part["variant"] in [
+                "Buffered", "LifeCycle"
+            ] else "no"
+            wr_lockable = "no"
+            if part["write_lock"].lower() in ["csr", "digest"]:
+                wr_lockable = "yes (" + part["write_lock"] + ")"
+            rd_lockable = "no"
+            if part["read_lock"].lower() in ["csr", "digest"]:
+                rd_lockable = "yes (" + part["read_lock"] + ")"
+            # remove newlines
+            desc = ' '.join(part["desc"].split())
+            row = [
+                part["name"], is_secret, is_buffered, wr_lockable, rd_lockable,
+                desc
+            ]
+            table.append(row)
+
+        return tabulate(table,
+                        headers="firstrow",
+                        tablefmt="pipe",
+                        colalign=colalign)
+
+    def create_mmap_table(self):
+        header = [
+            "Index", "Partition", "Size [B]", "Access Granule", "Item",
+            "Byte Address", "Size [B]"
+        ]
+        table = [header]
+        colalign = ("center", ) * len(header)
+
+        for k, part in enumerate(self.config["partitions"]):
+            for j, item in enumerate(part["items"]):
+                granule = "64bit" if check_bool(part["secret"]) else "32bit"
+
+                if check_bool(item["isdigest"]):
+                    granule = "64bit"
+                    name = "[{}](#Reg_{}_0)".format(item["name"],
+                                                    item["name"].lower())
+                else:
+                    name = item["name"]
+
+                if j == 0:
+                    row = [str(k), part["name"], str(part["size"]), granule]
+                else:
+                    row = ["", "", "", granule]
+
+                row.extend([
+                    name, "0x{:03X}".format(check_int(item["offset"])),
+                    str(item["size"])
+                ])
+
+                table.append(row)
+
+        return tabulate(table,
+                        headers="firstrow",
+                        tablefmt="pipe",
+                        colalign=colalign)
+
+    def create_digests_table(self):
+        header = ["Digest Name", " Affected Partition", "Calculated by HW"]
+        table = [header]
+        colalign = ("center", ) * len(header)
+
+        for part in self.config["partitions"]:
+            if check_bool(part["hw_digest"]) or check_bool(part["sw_digest"]):
+                is_hw_digest = "yes" if check_bool(part["hw_digest"]) else "no"
+                for item in part["items"]:
+                    if check_bool(item["isdigest"]):
+                        name = "[{}](#Reg_{}_0)".format(
+                            item["name"], item["name"].lower())
+                        row = [name, part["name"], is_hw_digest]
+                        table.append(row)
+                        break
+                else:
+                    log.error(
+                        "Partition with digest does not contain a digest item")
+                    exit(1)
+
+        return tabulate(table,
+                        headers="firstrow",
+                        tablefmt="pipe",
+                        colalign=colalign)
diff --git a/hw/ip/otp_ctrl/util/common.py b/hw/ip/otp_ctrl/util/common.py
new file mode 100644
index 0000000..6e482c3
--- /dev/null
+++ b/hw/ip/otp_ctrl/util/common.py
@@ -0,0 +1,53 @@
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+r"""Shared subfunctions.
+"""
+import logging as log
+import textwrap
+
+
+def wrapped_docstring():
+    '''Return a text-wrapped version of the module docstring'''
+    paras = []
+    para = []
+    for line in __doc__.strip().split('\n'):
+        line = line.strip()
+        if not line:
+            if para:
+                paras.append('\n'.join(para))
+                para = []
+        else:
+            para.append(line)
+    if para:
+        paras.append('\n'.join(para))
+
+    return '\n\n'.join(textwrap.fill(p) for p in paras)
+
+
+def check_bool(x):
+    """check_bool checks if input 'x' either a bool or
+       one of the following strings: ["true", "false"]
+
+        It returns value as Bool type.
+    """
+    if isinstance(x, bool):
+        return x
+    if not x.lower() in ["true", "false"]:
+        log.error("{} is not a boolean value.".format(x))
+        exit(1)
+    else:
+        return (x.lower() == "true")
+
+
+def check_int(x):
+    """check_int checks if input 'x' is decimal integer.
+
+        It returns value as an int type.
+    """
+    if isinstance(x, int):
+        return x
+    if not x.isdecimal():
+        log.error("{} is not a decimal number".format(x))
+        exit(1)
+    return int(x)
diff --git a/hw/ip/otp_ctrl/util/gen-lc-state-enc.py b/hw/ip/otp_ctrl/util/gen-lc-state-enc.py
new file mode 100755
index 0000000..d2284e3
--- /dev/null
+++ b/hw/ip/otp_ctrl/util/gen-lc-state-enc.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python3
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+r"""Given an ECC encoding matrix, this script generates random life cycle
+state encodings that can be incrementally written to a memory protected with
+the ECC code specified.
+"""
+import argparse
+import logging as log
+import random
+from pathlib import Path
+
+import hjson
+from mako.template import Template
+
+from LcStEnc import LcStEnc
+from common import wrapped_docstring
+
+# State encoding definition
+LC_STATE_DEFINITION_FILE = "../../lc_ctrl/data/lc_ctrl_state.hjson"
+# Code templates to render
+TEMPLATES = ["../../lc_ctrl/rtl/lc_ctrl_state_pkg.sv.tpl"]
+
+
+def main():
+    log.basicConfig(level=log.INFO,
+                    format="%(asctime)s - %(message)s",
+                    datefmt="%Y-%m-%d %H:%M")
+
+    parser = argparse.ArgumentParser(
+        prog="gen-lc-state-enc",
+        description=wrapped_docstring(),
+        formatter_class=argparse.RawDescriptionHelpFormatter)
+
+    parser.add_argument('-s',
+                        '--seed',
+                        type=int,
+                        metavar='<seed>',
+                        help='Custom seed for RNG.')
+
+    args = parser.parse_args()
+
+    with open(LC_STATE_DEFINITION_FILE, 'r') as infile:
+        config = hjson.load(infile)
+
+        # If specified, override the seed for random netlist constant computation.
+        if args.seed:
+            log.warning('Commandline override of seed with {}.'.format(
+                args.seed))
+            config['seed'] = args.seed
+        # Otherwise, we either take it from the .hjson if present, or
+        # randomly generate a new seed if not.
+        else:
+            random.seed()
+            new_seed = random.getrandbits(64)
+            if config.setdefault('seed', new_seed) == new_seed:
+                log.warning(
+                    'No seed specified, setting to {}.'.format(new_seed))
+
+        # validate config and generate encoding
+        lc_st_enc = LcStEnc(config)
+
+        # render all templates
+        for template in TEMPLATES:
+            with open(template, 'r') as tplfile:
+                tpl = Template(tplfile.read())
+                with open(
+                        Path(template).parent.joinpath(Path(template).stem),
+                        'w') as outfile:
+                    outfile.write(tpl.render(lc_st_enc=lc_st_enc))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/hw/ip/otp_ctrl/util/gen-otp-mmap.py b/hw/ip/otp_ctrl/util/gen-otp-mmap.py
new file mode 100755
index 0000000..333fb57
--- /dev/null
+++ b/hw/ip/otp_ctrl/util/gen-otp-mmap.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python3
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+r"""Generate RTL and documentation collateral from OTP memory
+map definition file (hjson).
+"""
+import argparse
+import logging as log
+import random
+from pathlib import Path
+
+import hjson
+from mako.template import Template
+
+from common import wrapped_docstring
+# Import OTP memory map generator.
+from OtpMemMap import OtpMemMap
+
+TABLE_HEADER_COMMENT = '''<!--
+DO NOT EDIT THIS FILE DIRECTLY.
+It has been generated with hw/ip/otp_ctrl/util/gen-otp-mmap.py
+-->
+
+'''
+
+# memory map source
+MMAP_DEFINITION_FILE = "../data/otp_ctrl_mmap.hjson"
+# documentation tables to generate
+PARTITIONS_TABLE_FILE = "../doc/otp_ctrl_partitions.md"
+DIGESTS_TABLE_FILE = "../doc/otp_ctrl_digests.md"
+MMAP_TABLE_FILE = "../doc/otp_ctrl_mmap.md"
+# code templates to render
+TEMPLATES = ["../data/otp_ctrl.hjson.tpl", "../rtl/otp_ctrl_part_pkg.sv.tpl"]
+
+
+def main():
+    log.basicConfig(level=log.INFO,
+                    format="%(asctime)s - %(message)s",
+                    datefmt="%Y-%m-%d %H:%M")
+
+    parser = argparse.ArgumentParser(
+        prog="gen-otp-mmap",
+        description=wrapped_docstring(),
+        formatter_class=argparse.RawDescriptionHelpFormatter)
+
+    # Generator options for compile time random netlist constants
+    parser.add_argument('--seed',
+                        type=int,
+                        metavar='<seed>',
+                        help='Custom seed for RNG to compute default values.')
+
+    args = parser.parse_args()
+
+    with open(MMAP_DEFINITION_FILE, 'r') as infile:
+        config = hjson.load(infile)
+
+        # If specified, override the seed for random netlist constant computation.
+        if args.seed:
+            log.warning('Commandline override of seed with {}.'.format(
+                args.seed))
+            config['seed'] = args.seed
+        # Otherwise, we either take it from the .hjson if present, or
+        # randomly generate a new seed if not.
+        else:
+            random.seed()
+            new_seed = random.getrandbits(64)
+            if config.setdefault('seed', new_seed) == new_seed:
+                log.warning(
+                    'No seed specified, setting to {}.'.format(new_seed))
+
+        otp_mmap = OtpMemMap(config)
+
+        with open(PARTITIONS_TABLE_FILE, 'w') as outfile:
+            outfile.write(TABLE_HEADER_COMMENT +
+                          otp_mmap.create_partitions_table())
+
+        with open(DIGESTS_TABLE_FILE, 'w') as outfile:
+            outfile.write(TABLE_HEADER_COMMENT +
+                          otp_mmap.create_digests_table())
+
+        with open(MMAP_TABLE_FILE, 'w') as outfile:
+            outfile.write(TABLE_HEADER_COMMENT + otp_mmap.create_mmap_table())
+
+        # render all templates
+        for template in TEMPLATES:
+            with open(template, 'r') as tplfile:
+                tpl = Template(tplfile.read())
+                with open(
+                        Path(template).parent.joinpath(Path(template).stem),
+                        'w') as outfile:
+                    outfile.write(tpl.render(otp_mmap=otp_mmap))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/hw/ip/otp_ctrl/util/translate-mmap.py b/hw/ip/otp_ctrl/util/translate-mmap.py
deleted file mode 100755
index c5f7d13..0000000
--- a/hw/ip/otp_ctrl/util/translate-mmap.py
+++ /dev/null
@@ -1,384 +0,0 @@
-#!/usr/bin/env python3
-# Copyright lowRISC contributors.
-# Licensed under the Apache License, Version 2.0, see LICENSE for details.
-# SPDX-License-Identifier: Apache-2.0
-r"""Convert mako template to Hjson register description
-"""
-import argparse
-import logging as log
-import textwrap
-import random
-from math import ceil, log2
-from pathlib import Path
-
-import hjson
-from mako.template import Template
-from tabulate import tabulate
-
# Name suffix and byte size of the digest item that validate() appends to
# every partition that requests a SW or HW digest.
DIGEST_SUFFIX = "_DIGEST"
DIGEST_SIZE = 8

# Warning banner prepended to every generated markdown table file.
TABLE_HEADER_COMMENT = '''<!--
DO NOT EDIT THIS FILE DIRECTLY.
It has been generated with hw/ip/otp_ctrl/util/translate-mmap.py
-->

'''

# memory map source
MMAP_DEFINITION_FILE = "../data/otp_ctrl_mmap.hjson"
# documentation tables to generate
PARTITIONS_TABLE_FILE = "../doc/otp_ctrl_partitions.md"
DIGESTS_TABLE_FILE = "../doc/otp_ctrl_digests.md"
MMAP_TABLE_FILE = "../doc/otp_ctrl_mmap.md"
# code templates to render (output file name = template name minus ".tpl")
TEMPLATES = ["../data/otp_ctrl.hjson.tpl", "../rtl/otp_ctrl_part_pkg.sv.tpl"]
-
-
def wrapped_docstring(docstring=None):
    '''Return a text-wrapped version of *docstring*.

    When called with no argument (the original call signature), the
    module docstring ``__doc__`` is wrapped. Consecutive non-empty
    lines are merged into paragraphs; each paragraph is re-wrapped
    with textwrap.fill() and paragraphs are joined with blank lines.
    '''
    # Fall back to the module docstring for backward compatibility.
    text = __doc__ if docstring is None else docstring

    paras = []
    para = []
    for line in text.strip().split('\n'):
        line = line.strip()
        if not line:
            # Blank line terminates the current paragraph (if any).
            if para:
                paras.append('\n'.join(para))
                para = []
        else:
            para.append(line)
    # Flush the trailing paragraph.
    if para:
        paras.append('\n'.join(para))

    return '\n\n'.join(textwrap.fill(p) for p in paras)
-
-
def check_bool(x):
    """Coerce *x* to a bool.

    Accepts a native bool, or one of the case-insensitive strings
    "true"/"false". Any other value logs an error and terminates the
    program.
    """
    if isinstance(x, bool):
        return x
    lowered = x.lower()
    if lowered in ("true", "false"):
        return lowered == "true"
    log.error("{} is not a boolean value.".format(x))
    exit(1)
-
-
def check_int(x):
    """Coerce *x* to an int.

    Accepts a native int, or a string consisting solely of decimal
    digits. Any other value logs an error and terminates the program.
    """
    if isinstance(x, int):
        return x
    if x.isdecimal():
        return int(x)
    log.error("{} is not a decimal number".format(x))
    exit(1)
-
-
def validate(config):
    '''Validate and normalize the OTP memory map *config* in place.

    Walks all partitions and their items: fills in defaults, coerces
    string booleans, appends a digest item to partitions that request
    one, computes derived OTP geometry fields on config["otp"], and
    checks that all offsets and sizes are consistent. Any violation
    logs an error and terminates the program via exit(1).
    '''
    # Running byte offset; used both to assign default offsets and to
    # detect overflow of a partition / the whole OTP at the end.
    offset = 0
    num_part = 0
    for part in config["partitions"]:
        num_part += 1
        # Defaults
        part.setdefault("offset", offset)
        part.setdefault("name", "unknown_name")
        part.setdefault("variant", "Unbuffered")
        part.setdefault("size", "0")
        part.setdefault("secret", "false")
        part.setdefault("sw_digest", "false")
        part.setdefault("hw_digest", "false")
        part.setdefault("write_lock", "none")
        part.setdefault("read_lock", "none")
        part.setdefault("key_sel", "NoKey")
        log.info("Partition {} at offset {} with size {}".format(
            part["name"], part["offset"], part["size"]))

        # make sure these are boolean types (simplifies the mako templates)
        part["secret"] = check_bool(part["secret"])
        part["sw_digest"] = check_bool(part["sw_digest"])
        part["hw_digest"] = check_bool(part["hw_digest"])
        # NOTE(review): unlike the keys above, "bkout_type" gets no
        # setdefault, so a partition omitting it raises KeyError here —
        # confirm every partition in the .hjson defines it.
        part["bkout_type"] = check_bool(part["bkout_type"])

        # basic checks
        if part["variant"] not in ["Unbuffered", "Buffered", "LifeCycle"]:
            log.error("Invalid partition type {}".format(part["variant"]))
            exit(1)

        if part["key_sel"] not in [
                "NoKey", "Secret0Key", "Secret1Key", "Secret2Key"
        ]:
            log.error("Invalid key sel {}".format(part["key_sel"]))
            exit(1)

        # Secret partitions are scrambled and hence need a key.
        if check_bool(part["secret"]) and part["key_sel"] == "NoKey":
            log.error(
                "A secret partition needs a key select value other than NoKey")
            exit(1)

        if part["write_lock"].lower() not in ["digest", "csr", "none"]:
            log.error("Invalid value for write_lock")
            exit(1)

        if part["read_lock"].lower() not in ["digest", "csr", "none"]:
            log.error("Invalid value for read_lock")
            exit(1)

        if part["sw_digest"] and part["hw_digest"]:
            log.error(
                "Partition cannot support both a SW and a HW digest at the same time."
            )
            exit(1)

        if part["variant"] == "Unbuffered" and not part["sw_digest"]:
            log.error(
                "Unbuffered partitions without digest are not supported at the moment."
            )
            exit(1)

        # Digest-based locking only makes sense if a digest exists.
        if not part["sw_digest"] and not part["hw_digest"]:
            if part["write_lock"].lower(
            ) == "digest" or part["read_lock"].lower() == "digest":
                log.error(
                    "A partition can only be write/read lockable if it has a hw or sw digest."
                )
                exit(1)

        if check_int(part["offset"]) % 8:
            log.error("Partition offset must be 64bit aligned")
            exit(1)

        if check_int(part["size"]) % 8:
            log.error("Partition size must be 64bit aligned")
            exit(1)

        # Loop over items within a partition
        for item in part["items"]:
            item.setdefault("name", "unknown_name")
            item.setdefault("size", "0")
            item.setdefault("isdigest", "false")
            # Default item offset is the current running offset —
            # presumably items are listed in address order; verify.
            item.setdefault("offset", offset)
            # Generate random constant to be used when partition has
            # not been initialized yet or when it is in error state.
            if check_bool(item.setdefault("rand_inv_default", "false")):
                inv_default = random.getrandbits(check_int(item["size"])*8)
            else:
                inv_default = 0
            # Render the constant as a sized SystemVerilog hex literal.
            item.setdefault("inv_default", "{}'h{:0X}".format(check_int(item["size"])*8, inv_default))
            log.info("> Item {} at offset {} with size {}".format(
                item["name"], offset, item["size"]))
            offset += check_int(item["size"])

        # Place digest at the end of a partition.
        if part["sw_digest"] or part["hw_digest"]:
            part["items"].append({
                "name":
                part["name"] + DIGEST_SUFFIX,
                "size":
                DIGEST_SIZE,
                "offset":
                check_int(part["offset"]) + check_int(part["size"]) -
                DIGEST_SIZE,
                "isdigest":
                "True",
                "inv_default":
                "{256{1'b1}}"
            })

            log.info("> Adding digest {} at offset {} with size {}".format(
                part["name"] + DIGEST_SUFFIX, offset, DIGEST_SIZE))
            offset += DIGEST_SIZE

        if len(part["items"]) == 0:
            log.warning("Partition does not contain any items.")

        # check offsets and size
        if offset > check_int(part["offset"]) + check_int(part["size"]):
            log.error("Not enough space in partitition "
                      "{} to accommodate all items. Bytes available "
                      "= {}, bytes requested = {}".format(
                          part["name"], part["size"], offset - part["offset"]))
            exit(1)

        # Skip ahead to the end of the (possibly padded) partition.
        offset = check_int(part["offset"]) + check_int(part["size"])

    # Derived OTP geometry (size in bytes, word- and byte-address widths).
    otp_size = check_int(config["otp"]["depth"]) * check_int(
        config["otp"]["width"])
    config["otp"]["size"] = otp_size
    config["otp"]["addr_width"] = ceil(log2(check_int(config["otp"]["depth"])))
    config["otp"]["byte_addr_width"] = ceil(log2(check_int(otp_size)))

    if offset > otp_size:
        # NOTE(review): this message uses "{}" placeholders but passes
        # the values as lazy %-style logging args, so they are never
        # interpolated — should be .format(otp_size, offset).
        log.error(
            "OTP is not big enough to store all partitions. Bytes available {}, bytes required {}",
            otp_size, offset)
        exit(1)

    log.info("Total number of partitions: {}".format(num_part))
    log.info("Bytes available in OTP: {}".format(otp_size))
    log.info("Bytes required for partitions: {}".format(offset))
-
-
def create_partitions_table(config):
    '''Render the partition overview as a markdown (pipe) table.'''
    header = [
        "Partition", "Secret", "Buffered", "WR Lockable", "RD Lockable",
        "Description"
    ]
    rows = [header]

    for part in config["partitions"]:
        secret = "yes" if check_bool(part["secret"]) else "no"
        buffered = "yes" if part["variant"] in ["Buffered", "LifeCycle"
                                                ] else "no"
        if part["write_lock"].lower() in ["csr", "digest"]:
            wr_lockable = "yes (" + part["write_lock"] + ")"
        else:
            wr_lockable = "no"
        if part["read_lock"].lower() in ["csr", "digest"]:
            rd_lockable = "yes (" + part["read_lock"] + ")"
        else:
            rd_lockable = "no"
        # Collapse multi-line descriptions into a single line.
        desc = ' '.join(part["desc"].split())
        rows.append(
            [part["name"], secret, buffered, wr_lockable, rd_lockable, desc])

    return tabulate(rows,
                    headers="firstrow",
                    tablefmt="pipe",
                    colalign=("center", ) * len(header))
-
-
def create_mmap_table(config):
    '''Render the full memory map (every item) as a markdown table.'''
    header = [
        "Index", "Partition", "Size [B]", "Access Granule", "Item",
        "Byte Address", "Size [B]"
    ]
    rows = [header]

    for k, part in enumerate(config["partitions"]):
        for j, item in enumerate(part["items"]):
            granule = "64bit" if check_bool(part["secret"]) else "32bit"

            if check_bool(item["isdigest"]):
                # Digest items use 64bit access and link to their
                # register description anchor.
                granule = "64bit"
                name = "[{}](#Reg_{}_0)".format(item["name"],
                                                item["name"].lower())
            else:
                name = item["name"]

            # Only the first item of each partition repeats the
            # partition-level columns.
            if j == 0:
                row = [str(k), part["name"], str(part["size"]), granule]
            else:
                row = ["", "", "", granule]

            row += [
                name, "0x{:03X}".format(check_int(item["offset"])),
                str(item["size"])
            ]
            rows.append(row)

    return tabulate(rows,
                    headers="firstrow",
                    tablefmt="pipe",
                    colalign=("center", ) * len(header))
-
-
def create_digests_table(config):
    '''Render the digest overview as a markdown (pipe) table.'''
    header = ["Digest Name", " Affected Partition", "Calculated by HW"]
    rows = [header]

    for part in config["partitions"]:
        if not (check_bool(part["hw_digest"]) or check_bool(part["sw_digest"])):
            continue
        hw_digest = "yes" if check_bool(part["hw_digest"]) else "no"
        for item in part["items"]:
            if check_bool(item["isdigest"]):
                link = "[{}](#Reg_{}_0)".format(item["name"],
                                                item["name"].lower())
                rows.append([link, part["name"], hw_digest])
                break
        else:
            # A partition that requests a digest must contain the digest
            # item that validate() appends.
            log.error(
                "Partition with digest does not contain a digest item")
            exit(1)

    return tabulate(rows,
                    headers="firstrow",
                    tablefmt="pipe",
                    colalign=("center", ) * len(header))
-
-
def main():
    '''Script entry point.

    Loads the mmap .hjson definition, resolves the RNG seed (CLI
    override > .hjson value > freshly generated), validates the
    config, writes the generated documentation tables, and renders all
    code templates.
    '''
    log.basicConfig(level=log.INFO,
                    format="%(asctime)s - %(message)s",
                    datefmt="%Y-%m-%d %H:%M")

    parser = argparse.ArgumentParser(
        prog="translate-mmap",
        description=wrapped_docstring(),
        formatter_class=argparse.RawDescriptionHelpFormatter)

    # Generator options for compile time random netlist constants
    parser.add_argument(
        '--seed',
        type=int,
        metavar='<seed>',
        help='Custom seed for RNG to compute default values.')

    args = parser.parse_args()

    with open(MMAP_DEFINITION_FILE, 'r') as infile:
        config = hjson.load(infile)

        # If specified, override the seed for random netlist constant computation.
        # NOTE(review): truthiness test means an explicit --seed 0 is
        # silently ignored — confirm whether 0 is a legal seed.
        if args.seed:
            log.warning('Commandline override of seed with {}.'.format(
                args.seed))
            config['seed'] = args.seed
        # Otherwise, we either take it from the .hjson if present, or
        # randomly generate a new seed if not.
        else:
            random.seed()
            new_seed = random.getrandbits(64)
            if config.setdefault('seed', new_seed) == new_seed:
                log.warning(
                    'No seed specified, setting to {}.'.format(new_seed))

        # Initialize RNG.
        random.seed(int(config['seed']))

        validate(config)

        # Write each generated markdown table behind the do-not-edit banner.
        with open(PARTITIONS_TABLE_FILE, 'w') as outfile:
            outfile.write(TABLE_HEADER_COMMENT + create_partitions_table(config))

        with open(DIGESTS_TABLE_FILE, 'w') as outfile:
            outfile.write(TABLE_HEADER_COMMENT + create_digests_table(config))

        with open(MMAP_TABLE_FILE, 'w') as outfile:
            outfile.write(TABLE_HEADER_COMMENT + create_mmap_table(config))

        # render all templates
        for template in TEMPLATES:
            with open(template, 'r') as tplfile:
                tpl = Template(tplfile.read())
                # Output path = template path minus its ".tpl" suffix.
                with open(
                        Path(template).parent.joinpath(Path(template).stem),
                        'w') as outfile:
                    outfile.write(tpl.render(config=config))
-
-
-if __name__ == "__main__":
-    main()