[util/tlgen] Fix flake8 lint errors
Fixed flake8 Python lint errors for the tlgen util, along with related
fixes in util/topgen/merge.py that the same lint run flagged:

- removed unused imports
- marked the intentional re-exports in tlgen/__init__.py with "# noqa: F401"
- replaced comparisons against True/False/None with "is" / "is not"
- rewrote "not x in y" membership tests as "x not in y"
- annotated the remaining bare "except:" clauses with "# noqa: E722"
- reflowed over-long lines and added type hints to a few signatures
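
For reference, the re-export idiom applied in tlgen/__init__.py is sketched
below with a hypothetical package and module name; the trailing marker tells
flake8 that the otherwise "unused" import (F401) is intentional, so the name
stays importable from the package root.

    # mypkg/__init__.py -- hypothetical package, for illustration only
    # Re-export `run` so callers can write `from mypkg import run`.
    # Without the marker, flake8 reports F401 ("imported but unused")
    # because the name is never referenced inside this file.
    from .runner import run  # noqa: F401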
Signed-off-by: Eunchan Kim <eunchan@opentitan.org>
diff --git a/util/tlgen.py b/util/tlgen.py
index c095005..1a3d0fb 100755
--- a/util/tlgen.py
+++ b/util/tlgen.py
@@ -8,11 +8,9 @@
import argparse
import logging as log
import sys
-from pathlib import Path, PurePath
+from pathlib import Path
import hjson
-import mako
-import pkg_resources
import tlgen
@@ -34,10 +32,9 @@
help=
"Target directory. tlgen needs 'rtl/' and 'dv/' directory under the target dir"
)
- parser.add_argument(
- '--ip-path',
- default="",
- help='''
+ parser.add_argument('--ip-path',
+ default="",
+ help='''
Additional path to generated rtl/ or dv/ folders: outdir/ip_path/rtl
Only needed when there are multiple xbar in outdir''')
parser.add_argument('--verbose', '-v', action='store_true', help='Verbose')
diff --git a/util/tlgen/__init__.py b/util/tlgen/__init__.py
index 0fdfeba..60d8102 100644
--- a/util/tlgen/__init__.py
+++ b/util/tlgen/__init__.py
@@ -2,10 +2,10 @@
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
-from .doc import selfdoc
-from .elaborate import elaborate
-from .generate import generate
-from .generate_tb import generate_tb
-from .item import Edge, Node, NodeType
-from .validate import validate
-from .xbar import Xbar
+from .doc import selfdoc # noqa: F401
+from .elaborate import elaborate # noqa: F401
+from .generate import generate # noqa: F401
+from .generate_tb import generate_tb # noqa: F401
+from .item import Edge, Node, NodeType # noqa: F401
+from .validate import validate # noqa: F401
+from .xbar import Xbar # noqa: F401
diff --git a/util/tlgen/doc.py b/util/tlgen/doc.py
index 41cc8c0..a2c1f6f 100644
--- a/util/tlgen/doc.py
+++ b/util/tlgen/doc.py
@@ -5,7 +5,8 @@
"""
import logging as log
-from .validate import *
+from reggen.validate import val_types
+from .validate import root
doc_intro = """
diff --git a/util/tlgen/elaborate.py b/util/tlgen/elaborate.py
index e1c1633..09a32ac 100644
--- a/util/tlgen/elaborate.py
+++ b/util/tlgen/elaborate.py
@@ -2,14 +2,13 @@
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
-import copy
import logging as log
-from .item import Edge, Node, NodeType
+from .item import Node, NodeType
from .xbar import Xbar
-def elaborate(xbar): # xbar: Xbar -> bool
+def elaborate(xbar: Xbar) -> bool:
"""elaborate reads all nodes and edges then
construct internal FIFOs, Sockets.
"""
@@ -23,11 +22,11 @@
process_node(host, xbar)
log.info("Node Processed: " + repr(xbar))
- ## Pipeline
+ # Pipeline
process_pipeline(xbar)
- ## Build address map
- ## Each socket_1n should have address map
+ # Build address map
+ # Each socket_1n should have address map
return True
@@ -124,11 +123,11 @@
# If it is device, it means host and device are directly connected. Ignore now.
# After process node is done, always only one downstream exists in any host node
- if host.pipeline == True and host.pipeline_byp == True:
+ if host.pipeline is True and host.pipeline_byp is True:
# No need to process, same as default
continue
- no_bypass = (host.pipeline == True and host.pipeline_byp == False)
+ no_bypass = (host.pipeline is True and host.pipeline_byp is False)
dnode = host.ds[0].ds
if dnode.node_type == NodeType.ASYNC_FIFO:
@@ -144,7 +143,7 @@
# keep variables separate in case we ever need to differentiate
dnode.dpass = 0 if no_bypass else dnode.dpass
- dnode.hdepth = 0 if host.pipeline == False else dnode.hdepth
+ dnode.hdepth = 0 if host.pipeline is False else dnode.hdepth
dnode.ddepth = dnode.hdepth
for device in xbar.devices:
@@ -156,10 +155,10 @@
# If Socket 1N, find position of the device and follow procedure above
# If it is host, ignore
- if device.pipeline == True and device.pipeline_byp == True:
+ if device.pipeline is True and device.pipeline_byp is True:
continue
- no_bypass = (device.pipeline == True and device.pipeline_byp == False)
+ no_bypass = (device.pipeline is True and device.pipeline_byp is False)
unode = device.us[0].us
if unode.node_type == NodeType.ASYNC_FIFO:
@@ -175,7 +174,7 @@
# keep variables separate in case we ever need to differentiate
unode.hpass = 0 if no_bypass else unode.hpass
- unode.ddepth = 0 if device.pipeline == False else unode.ddepth
+ unode.ddepth = 0 if device.pipeline is False else unode.ddepth
unode.hdepth = unode.ddepth
return xbar
diff --git a/util/tlgen/generate.py b/util/tlgen/generate.py
index 3b39ab0..ff2a177 100644
--- a/util/tlgen/generate.py
+++ b/util/tlgen/generate.py
@@ -12,7 +12,7 @@
from .xbar import Xbar
-def generate(xbar): #xbar: Xbar -> str
+def generate(xbar: Xbar) -> str:
"""generate uses elaborated model then creates top level Xbar module
with prefix.
"""
@@ -21,8 +21,6 @@
filename=resource_filename('tlgen', 'xbar.rtl.sv.tpl'))
xbar_pkg_tpl = Template(
filename=resource_filename('tlgen', 'xbar.pkg.sv.tpl'))
- #xbar_dv_tpl = Template(
- # filename=resource_filename('tlgen', 'xbar.dv.sv.tpl'))
xbar_core_tpl = Template(
filename=resource_filename('tlgen', 'xbar.core.tpl'))
@@ -30,7 +28,7 @@
out_rtl = xbar_rtl_tpl.render(xbar=xbar, ntype=NodeType)
out_pkg = xbar_pkg_tpl.render(xbar=xbar)
out_core = xbar_core_tpl.render(xbar=xbar, ntype=NodeType)
- except:
+ except: # noqa: E722
log.error(exceptions.text_error_template().render())
return (out_rtl, out_pkg, out_core)
diff --git a/util/tlgen/generate_tb.py b/util/tlgen/generate_tb.py
index 58add8e..5fc0674 100644
--- a/util/tlgen/generate_tb.py
+++ b/util/tlgen/generate_tb.py
@@ -7,12 +7,12 @@
from mako import exceptions
from mako.template import Template
from pkg_resources import resource_filename
+from pathlib import Path
-from .item import NodeType
from .xbar import Xbar
-def generate_tb(xbar, dv_path): #xbar: Xbar -> str
+def generate_tb(xbar: Xbar, dv_path: Path) -> str: # xbar: Xbar -> str
# list all the generate files for TB
tb_files = [
"xbar_env_pkg__params.sv", "tb__xbar_connect.sv", "xbar.sim.core",
@@ -46,5 +46,5 @@
with dv_filepath.open(mode='w', encoding='UTF-8') as fout:
try:
fout.write(tpl.render(xbar=xbar))
- except:
+ except: # noqa: E722 for general exception handling
log.error(exceptions.text_error_template().render())
diff --git a/util/tlgen/item.py b/util/tlgen/item.py
index 1c43fa7..685e42c 100644
--- a/util/tlgen/item.py
+++ b/util/tlgen/item.py
@@ -19,8 +19,8 @@
return "U(%s) D(%s)" % (self.us.name, self.ds.name)
-#Edges = List[Edge]
-#Clocks = List[str] # If length is more than one, should be exactly two
+# Edges = List[Edge]
+# Clocks = List[str] # If length is more than one, should be exactly two
# [UpstreamClock, DownstreamClock]
@@ -47,8 +47,8 @@
# If NodeType is Socket out from 1:N then address steering is used
# But this value is also propagated up to a Host from multiple Devices
# Device Node should have address_from, address_to
- #address_from = 0 #: int
- #address_to = 0 #: int
+ # address_from = 0 #: int
+ # address_to = 0 #: int
addr_range = []
us = [] # Edges # Number of Ports depends on the NodeType
diff --git a/util/tlgen/lib.py b/util/tlgen/lib.py
index a5333ff..2f4ef54 100644
--- a/util/tlgen/lib.py
+++ b/util/tlgen/lib.py
@@ -2,7 +2,6 @@
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
-import math
import logging as log
@@ -92,7 +91,7 @@
x for x in xbar["nodes"]
if x["type"] == "device" and not x["name"] == name
]:
- if not "addr_range" in node:
+ if "addr_range" not in node:
# Xbar?
log.info("Xbar type node cannot be compared in this version.",
"Please use in caution")
@@ -129,7 +128,7 @@
addrs = [a for r in device_list for a in r["addr_range"]]
except KeyError:
log.error("Address range is wrong.\n {}".format(
- [x for x in device_list if not "addr_range" in x]))
+ [x for x in device_list if "addr_range" not in x]))
raise SystemExit()
sorted_list = sorted(addrs, key=lambda k: int(k["base_addr"], 0))
diff --git a/util/tlgen/validate.py b/util/tlgen/validate.py
index 7f80b92..5e5e5ba 100644
--- a/util/tlgen/validate.py
+++ b/util/tlgen/validate.py
@@ -5,9 +5,9 @@
from collections import OrderedDict
from functools import partial
-from reggen.validate import check_bool, check_int, check_ln, val_types
+from reggen.validate import check_bool, check_int, val_types
-from .item import Edge, Node, NodeType
+from .item import Node, NodeType
from .xbar import Xbar
from .lib import simplify_addr
@@ -39,13 +39,14 @@
# }
addr = {
'name': 'Address configuration',
- 'description': '''Device Node address configuration. It contains the base address and the size in bytes.
+ 'description':
+ '''Device Node address configuration. It contains the base address and the size in bytes.
''',
'required': {
- 'base_addr': ['d', 'Base address of the device'\
- ' It is required for the device'],
- 'size_byte': ['d', 'Memory space of the device'\
- ' It is required for the device'],
+ 'base_addr':
+ ['d', 'Base address of the device. It is required for the device'],
+ 'size_byte':
+ ['d', 'Memory space of the device. It is required for the device'],
},
'optional': {},
'added': {}
@@ -58,14 +59,19 @@
''',
'required': {
'name': ['s', 'Module instance name'],
- 'type': ['s', 'Module type: {"host", "device", "async", "socket_1n", "socket_m1"}'],
+ 'type': [
+ 's',
+ 'Module type: {"host", "device", "async", "socket_1n", "socket_m1"}'
+ ],
},
'optional': {
'clock': ['s', 'main clock of the port'],
'reset': ['s', 'main reset of the port'],
'pipeline': ['pb', 'If true, pipeline is added in front of the port'],
- 'pipeline_byp': ['pb', 'Pipeline bypass. If true, '\
- 'request/response are not latched'],
+ 'pipeline_byp': [
+ 'pb', 'Pipeline bypass. If true, '
+ 'request/response are not latched'
+ ],
'inst_type': ['s', 'Instance type'],
'xbar': ['pb', 'If true, the node is connected to another Xbar'],
'addr_range': ['lg', addr]
@@ -80,8 +86,10 @@
''',
'required': {
'name': ['s', 'Name of the crossbar'],
- 'clock': ['s', 'Main clock. Internal components use this clock.'\
- ' If not specified, it is assumed to be in main clock domain'],
+ 'clock': [
+ 's', 'Main clock. Internal components use this clock.'
+ ' If not specified, it is assumed to be in main clock domain'
+ ],
'reset': ['s', 'Main reset'],
'connections':
['g', "List of edge. Key is host, entry in value list is device"],
@@ -92,8 +100,10 @@
'type': ['s', 'Indicate Hjson type. "xbar" always if exist'],
},
'added': {
- 'reset_connections': ['g', "Generated by topgen. Key is the reset signal inside IP"\
- " and value is the top reset signal"],
+ 'reset_connections': [
+ 'g', "Generated by topgen. Key is the reset signal inside IP"
+ " and value is the top reset signal"
+ ],
}
}
@@ -107,7 +117,7 @@
# required
for k, v in control["required"].items():
- if not k in obj:
+ if k not in obj:
error += 1
log.error(prefix + " missing required key " + k)
@@ -204,7 +214,7 @@
return len(result) != 0
-def validate(obj): # OrderedDict -> Xbar
+def validate(obj: OrderedDict) -> Xbar: # OrderedDict -> Xbar
xbar = Xbar()
xbar.name = obj["name"].lower()
xbar.clock = obj["clock"].lower()
@@ -300,9 +310,9 @@
def validate_hjson(obj):
- if not "type" in obj:
+ if "type" not in obj:
obj["type"] = "xbar"
- if not "name" in obj:
+ if "name" not in obj:
log.error("Component has no name. Aborting.")
return None, 1
diff --git a/util/tlgen/xbar.py b/util/tlgen/xbar.py
index df8cdce..9df3e1b 100644
--- a/util/tlgen/xbar.py
+++ b/util/tlgen/xbar.py
@@ -3,13 +3,8 @@
# SPDX-License-Identifier: Apache-2.0
import logging as log
-from typing import List
-from .item import Edge, Node, NodeType
-
-#Nodes = List[Node]
-#Edges = List[Edge]
-#Clocks = List[str]
+from .item import Edge, NodeType
class Xbar:
diff --git a/util/topgen/merge.py b/util/topgen/merge.py
index eaa05bd..c531a85 100644
--- a/util/topgen/merge.py
+++ b/util/topgen/merge.py
@@ -27,7 +27,7 @@
"""
ip_list_in_top = [x["name"].lower() for x in top["module"]]
ipname = ip["name"].lower()
- if not ipname in ip_list_in_top:
+ if ipname not in ip_list_in_top:
log.info("TOP doens't use the IP %s. Skip" % ip["name"])
return
@@ -39,7 +39,7 @@
ip_module = top["module"][ip_idx]
# Size
- if not "size" in ip_module:
+ if "size" not in ip_module:
ip_module["size"] = "0x%x" % max(ip["gensize"], 0x1000)
elif int(ip_module["size"], 0) < ip["gensize"]:
log.error(
@@ -251,9 +251,9 @@
("size_byte", "0x1000"),
])],
"xbar": False,
- "pipeline" : "true",
- "pipeline_byp" : "true"
- }) # yapf: disable
+ "pipeline": "true",
+ "pipeline_byp": "true"
+ }) # yapf: disable
else:
# Update if exists
node = nodeobj[0]
@@ -279,17 +279,18 @@
elif len(nodeobj) == 0:
# found in module or memory but node object doesn't exist.
xbar["nodes"].append({
- "name" : device,
- "type" : "device",
- "clock" : deviceobj[0]["clock"],
- "reset" : deviceobj[0]["reset"],
- "inst_type" : deviceobj[0]["type"],
- "addr_range": [OrderedDict([("base_addr", deviceobj[0]["base_addr"]),
- ("size_byte", deviceobj[0]["size"])])],
- "pipeline" : "true",
- "pipeline_byp" : "true",
- "xbar" : True if device in xbar_list else False
- }) # yapf: disable
+ "name": device,
+ "type": "device",
+ "clock": deviceobj[0]["clock"],
+ "reset": deviceobj[0]["reset"],
+ "inst_type": deviceobj[0]["type"],
+ "addr_range": [OrderedDict([
+ ("base_addr", deviceobj[0]["base_addr"]),
+ ("size_byte", deviceobj[0]["size"])])],
+ "pipeline": "true",
+ "pipeline_byp": "true",
+ "xbar": True if device in xbar_list else False
+ }) # yapf: disable
else:
# found and exist in the nodes too
@@ -363,14 +364,14 @@
addr = []
for node in [
x for x in xbar["nodes"]
- if x["type"] == "device" and "xbar" in x and x["xbar"] == False
+ if x["type"] == "device" and "xbar" in x and x["xbar"] is False
]:
addr.extend(node["addr_range"])
# Step 2: visit xbar device ports
xbar_nodes = [
x for x in xbar["nodes"]
- if x["type"] == "device" and "xbar" in x and x["xbar"] == True
+ if x["type"] == "device" and "xbar" in x and x["xbar"] is True
]
# Now call function to get the device range
@@ -402,8 +403,8 @@
for node in host_xbar["nodes"]:
if not node["name"] in devices:
continue
- if "xbar" in node and node["xbar"] == True:
- if not "addr_range" in node:
+ if "xbar" in node and node["xbar"] is True:
+ if "addr_range" not in node:
# Deeper dive into another crossbar
xbar_addr = xbar_cross_node(node["name"], host_xbar, xbars,
visited)
@@ -423,7 +424,6 @@
clks_attr = top['clocks']
groups_in_top = [x["name"].lower() for x in clks_attr['groups']]
-
# Default assignments
for group in clks_attr['groups']:
@@ -432,15 +432,16 @@
group['unique'] = "no"
# if no hardwired clocks, define an empty set
- group['clocks'] = OrderedDict() if 'clocks' not in group else group['clocks']
-
+ group['clocks'] = OrderedDict(
+ ) if 'clocks' not in group else group['clocks']
for ep in top['module'] + top['memory'] + top['xbar']:
clock_connections = OrderedDict()
# if no clock group assigned, default is unique
- ep['clock_group'] = 'secure' if 'clock_group' not in ep else ep['clock_group']
+ ep['clock_group'] = 'secure' if 'clock_group' not in ep else ep[
+ 'clock_group']
ep_grp = ep['clock_group']
# end point names and clocks
@@ -464,7 +465,6 @@
# new group clock name
clk_name = "clk_{}_{}".format(clk, ep_grp)
-
# add clock to a particular group
clks_attr['groups'][cg_idx]['clocks'][clk_name] = clk
@@ -474,6 +474,7 @@
# Add to endpoint structure
ep['clock_connections'] = clock_connections
+
def amend_resets(top):
"""Add a path variable to reset declaration
"""
@@ -488,7 +489,8 @@
if reset["type"] in ["gen"]:
# The resets structure will be used once rstmgr is integrated
# reset_paths[reset["name"]] = "resets.{}_rst_n".format(reset["name"])
- reset_paths[reset["name"]] = "rstmgr_resets.rst_{}_n".format(reset["name"])
+ reset_paths[reset["name"]] = "rstmgr_resets.rst_{}_n".format(
+ reset["name"])
elif reset["type"] == "por":
reset_paths[reset["name"]] = "rst_ni"
else:
@@ -501,10 +503,10 @@
def amend_interrupt(top):
"""Check interrupt_module if exists, or just use all modules
"""
- if not "interrupt_module" in top:
+ if "interrupt_module" not in top:
top["interrupt_module"] = [x["name"] for x in top["module"]]
- if not "interrupt" in top or top["interrupt"] == "":
+ if "interrupt" not in top or top["interrupt"] == "":
top["interrupt"] = []
for m in top["interrupt_module"]:
@@ -523,10 +525,10 @@
def amend_alert(top):
"""Check interrupt_module if exists, or just use all modules
"""
- if not "alert_module" in top:
+ if "alert_module" not in top:
top["alert_module"] = [x["name"] for x in top["module"]]
- if not "alert" in top or top["alert"] == "":
+ if "alert" not in top or top["alert"] == "":
top["alert"] = []
for m in top["alert_module"]:
@@ -547,7 +549,7 @@
"""
pinmux = top["pinmux"]
- if not "dio_modules" in pinmux:
+ if "dio_modules" not in pinmux:
pinmux['dio_modules'] = []
# list out dedicated IO
@@ -559,7 +561,7 @@
# Parse how many signals
m = lib.get_module_by_name(top, mname)
- if sname != None:
+ if sname is not None:
signals = deepcopy([lib.get_signal_by_name(m, sname)])
else:
# Get all module signals
@@ -571,9 +573,10 @@
# convert signal with module name
signals = list(
- map(partial(lib.add_module_prefix_to_signal, module=mname), signals))
+ map(partial(lib.add_module_prefix_to_signal, module=mname),
+ signals))
# Parse how many pads are assigned
- if not "pad" in e:
+ if "pad" not in e:
raise SystemExit("Should catch pad field in validate.py!")
# pads are the list of individual pin, each entry is 1 bit width
@@ -596,7 +599,7 @@
dio_names = [p["name"] for p in pinmux["dio"]]
# Multiplexer IO
- if not "mio_modules" in pinmux:
+ if "mio_modules" not in pinmux:
# Add all modules having available io to Multiplexer IO
pinmux["mio_modules"] = []
@@ -611,11 +614,11 @@
# List up the dedicated IO to exclude from inputs/outputs
# Add port list to `inputs` and `outputs` fields
- if not "inputs" in pinmux:
+ if "inputs" not in pinmux:
pinmux["inputs"] = []
- if not "outputs" in pinmux:
+ if "outputs" not in pinmux:
pinmux["outputs"] = []
- if not "inouts" in pinmux:
+ if "inouts" not in pinmux:
pinmux["inouts"] = []
for e in pinmux["mio_modules"]:
@@ -625,7 +628,7 @@
"Cannot parse signal/module in mio_modules {}".format(e))
# Add all ports from the module to input/outputs
m = lib.get_module_by_name(top, tokens[0])
- if m == None:
+ if m is None:
raise SystemExit("Module {} doesn't exist".format(tokens[0]))
if len(tokens) == 1: