[util] Prep work for shadow reset implementation
- This cleans up the python files behind rstmgr a bit
and converts them into a class (following the clk examples)
- Add some plumbing for resets to self-determine their domains
and their need for shadow resets. This removes the need
for manual bookkeeping in the hjson
- Concentrate some information into the reset class instead
of having it spread across topgen and various dicts. Also
add helper functions to retrieve this information (see the sketch below).
- The level of abstraction is probably a bit too much, but
it is cleaner for now, until we get a chance to organize
things into a "top" class that represents the entire design.
Signed-off-by: Timothy Chen <timothytim@google.com>
[top] Auto generate
Signed-off-by: Timothy Chen <timothytim@google.com>
[top] Some touch-ups based on comments
Signed-off-by: Timothy Chen <timothytim@google.com>
diff --git a/hw/ip/rstmgr/data/rstmgr.hjson.tpl b/hw/ip/rstmgr/data/rstmgr.hjson.tpl
index b3dc5ab..bbcab6a 100644
--- a/hw/ip/rstmgr/data/rstmgr.hjson.tpl
+++ b/hw/ip/rstmgr/data/rstmgr.hjson.tpl
@@ -12,7 +12,7 @@
name: "RSTMGR",
clocking: [
{clock: "clk_i", reset: "rst_ni", primary: true},
-% for clk in clks:
+% for clk in reset_obj.get_clocks():
{clock: "clk_${clk}_i"}
% endfor
]
diff --git a/hw/ip/rstmgr/data/rstmgr.sv.tpl b/hw/ip/rstmgr/data/rstmgr.sv.tpl
index 4c347c5..ca8c891 100644
--- a/hw/ip/rstmgr/data/rstmgr.sv.tpl
+++ b/hw/ip/rstmgr/data/rstmgr.sv.tpl
@@ -16,7 +16,7 @@
// Primary module clocks
input clk_i,
input rst_ni, // this is connected to the top level reset
-% for clk in clks:
+% for clk in reset_obj.get_clocks():
input clk_${clk}_i,
% endfor
@@ -251,14 +251,14 @@
% for i, rst in enumerate(leaf_rsts):
logic [PowerDomains-1:0] rst_${rst['name']}_n;
% for domain in power_domains:
- % if domain in rst['domains']:
+ % if domain in reset_obj.get_reset_domains(rst['name']):
prim_flop_2sync #(
.Width(1),
.ResetValue('0)
) u_${domain.lower()}_${rst['name']} (
.clk_i(clk_${rst['clk']}_i),
.rst_ni(rst_${rst['parent']}_n[Domain${domain}Sel]),
- % if "sw" in rst:
+ % if rst["sw"]:
.d_i(sw_rst_ctrl_n[${rst['name'].upper()}]),
% else:
.d_i(1'b1),
diff --git a/hw/ip/rstmgr/data/rstmgr_pkg.sv.tpl b/hw/ip/rstmgr/data/rstmgr_pkg.sv.tpl
index 426b748..2b16ceb 100644
--- a/hw/ip/rstmgr/data/rstmgr_pkg.sv.tpl
+++ b/hw/ip/rstmgr/data/rstmgr_pkg.sv.tpl
@@ -19,14 +19,14 @@
// positions of software controllable reset bits
% for rst in sw_rsts:
- parameter int ${rst['name'].upper()} = ${loop.index};
+ parameter int ${rst.upper()} = ${loop.index};
% endfor
// resets generated and broadcast
// This should be templatized and generated
typedef struct packed {
% for rst in output_rsts:
- logic [PowerDomains-1:0] rst_${rst['name']}_n;
+ logic [PowerDomains-1:0] rst_${rst}_n;
% endfor
} rstmgr_out_t;
diff --git a/hw/top_earlgrey/data/autogen/top_earlgrey.gen.hjson b/hw/top_earlgrey/data/autogen/top_earlgrey.gen.hjson
index 5cc4c49..0581911 100644
--- a/hw/top_earlgrey/data/autogen/top_earlgrey.gen.hjson
+++ b/hw/top_earlgrey/data/autogen/top_earlgrey.gen.hjson
@@ -175,6 +175,10 @@
name: rst_ni
gen: false
type: ext
+ domains: []
+ shadowed: false
+ sw: false
+ path: rst_ni
}
{
name: por_aon
@@ -184,29 +188,30 @@
[
Aon
]
- clk: aon
+ shadowed: false
+ sw: false
+ path: rstmgr_aon_resets.rst_por_aon_n
+ clock: aon
}
{
name: lc_src
gen: false
type: int
- domains:
- [
- Aon
- "0"
- ]
- clk: io_div4
+ domains: []
+ shadowed: false
+ sw: false
+ path: ""
+ clock: io_div4
}
{
name: sys_src
gen: false
type: int
- domains:
- [
- Aon
- "0"
- ]
- clk: io_div4
+ domains: []
+ shadowed: false
+ sw: false
+ path: ""
+ clock: io_div4
}
{
name: por
@@ -216,8 +221,11 @@
[
Aon
]
+ shadowed: false
+ sw: false
+ path: rstmgr_aon_resets.rst_por_n
parent: por_aon
- clk: main
+ clock: main
}
{
name: por_io
@@ -227,8 +235,11 @@
[
Aon
]
+ shadowed: false
+ sw: false
+ path: rstmgr_aon_resets.rst_por_io_n
parent: por_aon
- clk: io
+ clock: io
}
{
name: por_io_div2
@@ -238,8 +249,11 @@
[
Aon
]
+ shadowed: false
+ sw: false
+ path: rstmgr_aon_resets.rst_por_io_div2_n
parent: por_aon
- clk: io_div2
+ clock: io_div2
}
{
name: por_io_div4
@@ -249,8 +263,11 @@
[
Aon
]
+ shadowed: false
+ sw: false
+ path: rstmgr_aon_resets.rst_por_io_div4_n
parent: por_aon
- clk: io_div4
+ clock: io_div4
}
{
name: por_usb
@@ -260,8 +277,11 @@
[
Aon
]
+ shadowed: false
+ sw: false
+ path: rstmgr_aon_resets.rst_por_usb_n
parent: por_aon
- clk: usb
+ clock: usb
}
{
name: lc
@@ -271,8 +291,11 @@
[
"0"
]
+ shadowed: false
+ sw: false
+ path: rstmgr_aon_resets.rst_lc_n
parent: lc_src
- clk: main
+ clock: main
}
{
name: lc_io_div4
@@ -280,11 +303,14 @@
type: top
domains:
[
- Aon
"0"
+ Aon
]
+ shadowed: false
+ sw: false
+ path: rstmgr_aon_resets.rst_lc_io_div4_n
parent: lc_src
- clk: io_div4
+ clock: io_div4
}
{
name: sys
@@ -294,8 +320,11 @@
[
"0"
]
+ shadowed: true
+ sw: false
+ path: rstmgr_aon_resets.rst_sys_n
parent: sys_src
- clk: main
+ clock: main
}
{
name: sys_io_div4
@@ -303,11 +332,14 @@
type: top
domains:
[
- Aon
"0"
+ Aon
]
+ shadowed: true
+ sw: false
+ path: rstmgr_aon_resets.rst_sys_io_div4_n
parent: sys_src
- clk: io_div4
+ clock: io_div4
}
{
name: sys_aon
@@ -315,11 +347,14 @@
type: top
domains:
[
- Aon
"0"
+ Aon
]
+ shadowed: false
+ sw: false
+ path: rstmgr_aon_resets.rst_sys_aon_n
parent: sys_src
- clk: aon
+ clock: aon
}
{
name: spi_device
@@ -329,9 +364,11 @@
[
"0"
]
+ shadowed: false
+ sw: true
+ path: rstmgr_aon_resets.rst_spi_device_n
parent: sys_src
- clk: io_div2
- sw: 1
+ clock: io_div2
}
{
name: spi_host0
@@ -341,9 +378,11 @@
[
"0"
]
+ shadowed: false
+ sw: true
+ path: rstmgr_aon_resets.rst_spi_host0_n
parent: sys_src
- clk: io
- sw: 1
+ clock: io
}
{
name: spi_host1
@@ -353,9 +392,11 @@
[
"0"
]
+ shadowed: false
+ sw: true
+ path: rstmgr_aon_resets.rst_spi_host1_n
parent: sys_src
- clk: io_div2
- sw: 1
+ clock: io_div2
}
{
name: usb
@@ -365,9 +406,11 @@
[
"0"
]
+ shadowed: false
+ sw: true
+ path: rstmgr_aon_resets.rst_usb_n
parent: sys_src
- clk: usb
- sw: 1
+ clock: usb
}
{
name: i2c0
@@ -377,9 +420,11 @@
[
"0"
]
+ shadowed: false
+ sw: true
+ path: rstmgr_aon_resets.rst_i2c0_n
parent: sys_src
- clk: io_div2
- sw: 1
+ clock: io_div2
}
{
name: i2c1
@@ -389,9 +434,11 @@
[
"0"
]
+ shadowed: false
+ sw: true
+ path: rstmgr_aon_resets.rst_i2c1_n
parent: sys_src
- clk: io_div2
- sw: 1
+ clock: io_div2
}
{
name: i2c2
@@ -401,9 +448,11 @@
[
"0"
]
+ shadowed: false
+ sw: true
+ path: rstmgr_aon_resets.rst_i2c2_n
parent: sys_src
- clk: io_div2
- sw: 1
+ clock: io_div2
}
]
}
@@ -419,7 +468,7 @@
}
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_io_div4_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: sys_io_div4
}
clock_group: secure
clock_connections:
@@ -459,7 +508,7 @@
}
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_io_div4_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: sys_io_div4
}
clock_group: secure
clock_connections:
@@ -499,7 +548,7 @@
}
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_io_div4_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: sys_io_div4
}
clock_group: secure
clock_connections:
@@ -539,7 +588,7 @@
}
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_io_div4_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: sys_io_div4
}
clock_group: secure
clock_connections:
@@ -580,7 +629,7 @@
clock_group: peri
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_io_div4_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: sys_io_div4
}
clock_connections:
{
@@ -621,7 +670,7 @@
clock_group: peri
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_spi_device_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: spi_device
}
clock_connections:
{
@@ -697,8 +746,8 @@
clock_group: peri
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_spi_host0_n[rstmgr_pkg::Domain0Sel]
- rst_core_ni: rstmgr_aon_resets.rst_spi_host0_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: spi_host0
+ rst_core_ni: spi_host0
}
clock_connections:
{
@@ -752,8 +801,8 @@
clock_group: peri
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_spi_host1_n[rstmgr_pkg::Domain0Sel]
- rst_core_ni: rstmgr_aon_resets.rst_spi_host1_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: spi_host1
+ rst_core_ni: spi_host1
}
clock_connections:
{
@@ -804,7 +853,7 @@
clock_group: peri
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_i2c0_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: i2c0
}
clock_connections:
{
@@ -844,7 +893,7 @@
clock_group: peri
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_i2c1_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: i2c1
}
clock_connections:
{
@@ -884,7 +933,7 @@
clock_group: peri
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_i2c2_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: i2c2
}
clock_connections:
{
@@ -924,7 +973,7 @@
clock_group: peri
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_io_div4_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: sys_io_div4
}
clock_connections:
{
@@ -964,7 +1013,7 @@
clock_group: timers
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_io_div4_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: sys_io_div4
}
clock_connections:
{
@@ -1010,9 +1059,9 @@
]
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_io_div4_n[rstmgr_pkg::Domain0Sel]
- rst_aon_ni: rstmgr_aon_resets.rst_sys_aon_n[rstmgr_pkg::Domain0Sel]
- rst_usb_48mhz_ni: rstmgr_aon_resets.rst_usb_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: sys_io_div4
+ rst_aon_ni: sys_aon
+ rst_usb_48mhz_ni: usb
}
clock_connections:
{
@@ -1161,8 +1210,8 @@
clock_group: timers
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_lc_io_div4_n[rstmgr_pkg::Domain0Sel]
- rst_edn_ni: rstmgr_aon_resets.rst_sys_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: lc_io_div4
+ rst_edn_ni: sys
}
base_addrs:
{
@@ -1463,8 +1512,8 @@
clock_group: timers
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_lc_io_div4_n[rstmgr_pkg::Domain0Sel]
- rst_kmac_ni: rstmgr_aon_resets.rst_sys_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: lc_io_div4
+ rst_kmac_ni: sys
}
clock_connections:
{
@@ -1906,8 +1955,8 @@
clock_group: timers
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_io_div4_n[rstmgr_pkg::Domain0Sel]
- rst_edn_ni: rstmgr_aon_resets.rst_sys_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: sys_io_div4
+ rst_edn_ni: sys
}
attr: templated
clock_connections:
@@ -2027,8 +2076,8 @@
clock_group: powerup
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_por_n[rstmgr_pkg::DomainAonSel]
- rst_slow_ni: rstmgr_aon_resets.rst_por_aon_n[rstmgr_pkg::DomainAonSel]
+ rst_ni: por
+ rst_slow_ni: por_aon
}
domain: Aon
attr: templated
@@ -2405,12 +2454,12 @@
clock_group: powerup
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_por_io_div4_n[rstmgr_pkg::DomainAonSel]
- rst_main_ni: rstmgr_aon_resets.rst_por_n[rstmgr_pkg::DomainAonSel]
- rst_io_ni: rstmgr_aon_resets.rst_por_io_n[rstmgr_pkg::DomainAonSel]
- rst_usb_ni: rstmgr_aon_resets.rst_por_usb_n[rstmgr_pkg::DomainAonSel]
- rst_io_div2_ni: rstmgr_aon_resets.rst_por_io_div2_n[rstmgr_pkg::DomainAonSel]
- rst_io_div4_ni: rstmgr_aon_resets.rst_por_io_div4_n[rstmgr_pkg::DomainAonSel]
+ rst_ni: por_io_div4
+ rst_main_ni: por
+ rst_io_ni: por_io
+ rst_usb_ni: por_usb
+ rst_io_div2_ni: por_io_div2
+ rst_io_div4_ni: por_io_div4
}
domain: Aon
attr: templated
@@ -2630,8 +2679,8 @@
clock_group: secure
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_io_div4_n[rstmgr_pkg::DomainAonSel]
- rst_aon_ni: rstmgr_aon_resets.rst_sys_aon_n[rstmgr_pkg::DomainAonSel]
+ rst_ni: sys_io_div4
+ rst_aon_ni: sys_aon
}
domain: Aon
clock_connections:
@@ -2697,8 +2746,8 @@
clock_group: peri
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_io_div4_n[rstmgr_pkg::DomainAonSel]
- rst_slow_ni: rstmgr_aon_resets.rst_sys_aon_n[rstmgr_pkg::DomainAonSel]
+ rst_ni: sys_io_div4
+ rst_slow_ni: sys_aon
}
clock_reset_export:
[
@@ -2769,8 +2818,8 @@
clock_group: powerup
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_io_div4_n[rstmgr_pkg::DomainAonSel]
- rst_core_ni: rstmgr_aon_resets.rst_sys_aon_n[rstmgr_pkg::DomainAonSel]
+ rst_ni: sys_io_div4
+ rst_core_ni: sys_aon
}
domain: Aon
clock_connections:
@@ -2812,8 +2861,8 @@
clock_group: powerup
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_io_div4_n[rstmgr_pkg::DomainAonSel]
- rst_aon_ni: rstmgr_aon_resets.rst_sys_aon_n[rstmgr_pkg::DomainAonSel]
+ rst_ni: sys_io_div4
+ rst_aon_ni: sys_aon
}
domain: Aon
attr: templated
@@ -3065,8 +3114,8 @@
clock_group: timers
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_io_div4_n[rstmgr_pkg::DomainAonSel]
- rst_aon_ni: rstmgr_aon_resets.rst_sys_aon_n[rstmgr_pkg::DomainAonSel]
+ rst_ni: sys_io_div4
+ rst_aon_ni: sys_aon
}
domain: Aon
attr: templated
@@ -3160,7 +3209,7 @@
]
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_io_div4_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: sys_io_div4
}
attr: reggen_only
clock_connections:
@@ -3202,7 +3251,7 @@
]
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_io_div4_n[rstmgr_pkg::DomainAonSel]
+ rst_ni: sys_io_div4
}
domain: Aon
attr: reggen_top
@@ -3296,8 +3345,8 @@
clock_group: infra
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_io_div4_n[rstmgr_pkg::DomainAonSel]
- rst_otp_ni: rstmgr_aon_resets.rst_lc_io_div4_n[rstmgr_pkg::DomainAonSel]
+ rst_ni: sys_io_div4
+ rst_otp_ni: lc_io_div4
}
domain: Aon
param_decl:
@@ -3483,8 +3532,8 @@
clock_group: infra
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_lc_n[rstmgr_pkg::Domain0Sel]
- rst_otp_ni: rstmgr_aon_resets.rst_lc_io_div4_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: lc
+ rst_otp_ni: lc_io_div4
}
base_addrs:
{
@@ -3749,7 +3798,7 @@
clock_group: infra
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_lc_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: lc
}
base_addrs:
{
@@ -3897,7 +3946,7 @@
clock_group: secure
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: sys
}
attr: templated
clock_connections:
@@ -3976,8 +4025,8 @@
clock_group: trans
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_n[rstmgr_pkg::Domain0Sel]
- rst_edn_ni: rstmgr_aon_resets.rst_sys_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: sys
+ rst_edn_ni: sys
}
param_decl:
{
@@ -4177,7 +4226,7 @@
clock_group: trans
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: sys
}
clock_connections:
{
@@ -4235,8 +4284,8 @@
clock_group: trans
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_n[rstmgr_pkg::Domain0Sel]
- rst_edn_ni: rstmgr_aon_resets.rst_sys_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: sys
+ rst_edn_ni: sys
}
clock_connections:
{
@@ -4359,8 +4408,8 @@
clock_group: secure
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_n[rstmgr_pkg::Domain0Sel]
- rst_edn_ni: rstmgr_aon_resets.rst_sys_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: sys
+ rst_edn_ni: sys
}
clock_connections:
{
@@ -4684,7 +4733,7 @@
clock_group: secure
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: sys
}
clock_connections:
{
@@ -4819,7 +4868,7 @@
clock_group: secure
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: sys
}
clock_reset_export:
[
@@ -4958,7 +5007,7 @@
clock_group: secure
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: sys
}
clock_reset_export:
[
@@ -5037,7 +5086,7 @@
clock_group: secure
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: sys
}
clock_connections:
{
@@ -5113,8 +5162,8 @@
clock_group: secure
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_n[rstmgr_pkg::Domain0Sel]
- rst_otp_ni: rstmgr_aon_resets.rst_lc_io_div4_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: sys
+ rst_otp_ni: lc_io_div4
}
param_decl:
{
@@ -5301,9 +5350,9 @@
clock_group: trans
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_n[rstmgr_pkg::Domain0Sel]
- rst_edn_ni: rstmgr_aon_resets.rst_sys_n[rstmgr_pkg::Domain0Sel]
- rst_otp_ni: rstmgr_aon_resets.rst_lc_io_div4_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: sys
+ rst_edn_ni: sys
+ rst_otp_ni: lc_io_div4
}
clock_connections:
{
@@ -5476,7 +5525,7 @@
clock_group: infra
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: sys
}
base_addrs:
{
@@ -5661,8 +5710,8 @@
clock_group: infra
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_n[rstmgr_pkg::Domain0Sel]
- rst_esc_ni: rstmgr_aon_resets.rst_lc_io_div4_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: sys
+ rst_esc_ni: lc_io_div4
}
clock_connections:
{
@@ -6039,7 +6088,7 @@
clock_group: infra
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: sys
}
type: ram_1p_scr
base_addr: 0x10000000
@@ -6140,7 +6189,7 @@
clock_group: infra
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_sys_io_div4_n[rstmgr_pkg::DomainAonSel]
+ rst_ni: sys_io_div4
}
domain: Aon
type: ram_1p_scr
@@ -6241,7 +6290,7 @@
clock_group: infra
reset_connections:
{
- rst_ni: rstmgr_aon_resets.rst_lc_n[rstmgr_pkg::Domain0Sel]
+ rst_ni: lc
}
type: eflash
base_addr: 0x20000000
@@ -7088,8 +7137,8 @@
reset: rst_main_ni
reset_connections:
{
- rst_main_ni: rstmgr_aon_resets.rst_sys_n[rstmgr_pkg::Domain0Sel]
- rst_fixed_ni: rstmgr_aon_resets.rst_sys_io_div4_n[rstmgr_pkg::Domain0Sel]
+ rst_main_ni: sys
+ rst_fixed_ni: sys_io_div4
}
clock_connections:
{
@@ -7875,7 +7924,7 @@
reset: rst_peri_ni
reset_connections:
{
- rst_peri_ni: rstmgr_aon_resets.rst_sys_io_div4_n[rstmgr_pkg::Domain0Sel]
+ rst_peri_ni: sys_io_div4
}
clock_connections:
{
@@ -12980,28 +13029,6 @@
]
}
}
- reset_paths:
- {
- rst_ni: rst_ni
- por_aon: rstmgr_aon_resets.rst_por_aon_n
- por: rstmgr_aon_resets.rst_por_n
- por_io: rstmgr_aon_resets.rst_por_io_n
- por_io_div2: rstmgr_aon_resets.rst_por_io_div2_n
- por_io_div4: rstmgr_aon_resets.rst_por_io_div4_n
- por_usb: rstmgr_aon_resets.rst_por_usb_n
- lc: rstmgr_aon_resets.rst_lc_n
- lc_io_div4: rstmgr_aon_resets.rst_lc_io_div4_n
- sys: rstmgr_aon_resets.rst_sys_n
- sys_io_div4: rstmgr_aon_resets.rst_sys_io_div4_n
- sys_aon: rstmgr_aon_resets.rst_sys_aon_n
- spi_device: rstmgr_aon_resets.rst_spi_device_n
- spi_host0: rstmgr_aon_resets.rst_spi_host0_n
- spi_host1: rstmgr_aon_resets.rst_spi_host1_n
- usb: rstmgr_aon_resets.rst_usb_n
- i2c0: rstmgr_aon_resets.rst_i2c0_n
- i2c1: rstmgr_aon_resets.rst_i2c1_n
- i2c2: rstmgr_aon_resets.rst_i2c2_n
- }
inter_signal:
{
signals:
diff --git a/hw/top_earlgrey/data/top_earlgrey.hjson b/hw/top_earlgrey/data/top_earlgrey.hjson
index 8c75531..ae538d2 100644
--- a/hw/top_earlgrey/data/top_earlgrey.hjson
+++ b/hw/top_earlgrey/data/top_earlgrey.hjson
@@ -141,26 +141,26 @@
//
nodes: [
{ name: "rst_ni", gen: false, type: "ext", }
- { name: "por_aon", gen: false, type: "top", domains: ["Aon" ], clk: "aon" }
- { name: "lc_src", gen: false, type: "int", domains: ["Aon", "0"], clk: "io_div4" }
- { name: "sys_src", gen: false, type: "int", domains: ["Aon", "0"], clk: "io_div4" }
- { name: "por", gen: true, type: "top", domains: ["Aon" ], parent: "por_aon", clk: "main" }
- { name: "por_io", gen: true, type: "top", domains: ["Aon", ], parent: "por_aon", clk: "io" }
- { name: "por_io_div2", gen: true, type: "top", domains: ["Aon", ], parent: "por_aon", clk: "io_div2" }
- { name: "por_io_div4", gen: true , type: "top", domains: ["Aon", ], parent: "por_aon", clk: "io_div4" }
- { name: "por_usb", gen: true, type: "top", domains: ["Aon", ], parent: "por_aon", clk: "usb" }
- { name: "lc", gen: true, type: "top", domains: [ "0"], parent: "lc_src", clk: "main" }
- { name: "lc_io_div4", gen: true, type: "top", domains: ["Aon", "0"], parent: "lc_src", clk: "io_div4" }
- { name: "sys", gen: true, type: "top", domains: [ "0"], parent: "sys_src", clk: "main" }
- { name: "sys_io_div4", gen: true, type: "top", domains: ["Aon", "0"], parent: "sys_src", clk: "io_div4" }
- { name: "sys_aon", gen: true, type: "top", domains: ["Aon", "0"], parent: "sys_src", clk: "aon" }
- { name: "spi_device", gen: true, type: "top", domains: [ "0"], parent: "sys_src", clk: "io_div2", sw: 1 }
- { name: "spi_host0", gen: true, type: "top", domains: [ "0"], parent: "sys_src", clk: "io", sw: 1 }
- { name: "spi_host1", gen: true, type: "top", domains: [ "0"], parent: "sys_src", clk: "io_div2", sw: 1 }
- { name: "usb", gen: true, type: "top", domains: [ "0"], parent: "sys_src", clk: "usb", sw: 1 }
- { name: "i2c0", gen: true, type: "top", domains: [ "0"], parent: "sys_src", clk: "io_div2", sw: 1 },
- { name: "i2c1", gen: true, type: "top", domains: [ "0"], parent: "sys_src", clk: "io_div2", sw: 1 },
- { name: "i2c2", gen: true, type: "top", domains: [ "0"], parent: "sys_src", clk: "io_div2", sw: 1 },
+ { name: "por_aon", gen: false, type: "top", clk: "aon" }
+ { name: "lc_src", gen: false, type: "int", clk: "io_div4" }
+ { name: "sys_src", gen: false, type: "int", clk: "io_div4" }
+ { name: "por", gen: true, type: "top", parent: "por_aon", clk: "main" }
+ { name: "por_io", gen: true, type: "top", parent: "por_aon", clk: "io" }
+ { name: "por_io_div2", gen: true, type: "top", parent: "por_aon", clk: "io_div2" }
+ { name: "por_io_div4", gen: true , type: "top", parent: "por_aon", clk: "io_div4" }
+ { name: "por_usb", gen: true, type: "top", parent: "por_aon", clk: "usb" }
+ { name: "lc", gen: true, type: "top", parent: "lc_src", clk: "main" }
+ { name: "lc_io_div4", gen: true, type: "top", parent: "lc_src", clk: "io_div4" }
+ { name: "sys", gen: true, type: "top", parent: "sys_src", clk: "main" }
+ { name: "sys_io_div4", gen: true, type: "top", parent: "sys_src", clk: "io_div4" }
+ { name: "sys_aon", gen: true, type: "top", parent: "sys_src", clk: "aon" }
+ { name: "spi_device", gen: true, type: "top", parent: "sys_src", clk: "io_div2", sw: true }
+ { name: "spi_host0", gen: true, type: "top", parent: "sys_src", clk: "io", sw: true }
+ { name: "spi_host1", gen: true, type: "top", parent: "sys_src", clk: "io_div2", sw: true }
+ { name: "usb", gen: true, type: "top", parent: "sys_src", clk: "usb", sw: true }
+ { name: "i2c0", gen: true, type: "top", parent: "sys_src", clk: "io_div2", sw: true },
+ { name: "i2c1", gen: true, type: "top", parent: "sys_src", clk: "io_div2", sw: true },
+ { name: "i2c2", gen: true, type: "top", parent: "sys_src", clk: "io_div2", sw: true },
]
}
diff --git a/hw/top_earlgrey/ip/xbar_main/data/autogen/xbar_main.gen.hjson b/hw/top_earlgrey/ip/xbar_main/data/autogen/xbar_main.gen.hjson
index 2d3de21..8c601a4 100644
--- a/hw/top_earlgrey/ip/xbar_main/data/autogen/xbar_main.gen.hjson
+++ b/hw/top_earlgrey/ip/xbar_main/data/autogen/xbar_main.gen.hjson
@@ -17,8 +17,8 @@
reset: rst_main_ni
reset_connections:
{
- rst_main_ni: rstmgr_aon_resets.rst_sys_n[rstmgr_pkg::Domain0Sel]
- rst_fixed_ni: rstmgr_aon_resets.rst_sys_io_div4_n[rstmgr_pkg::Domain0Sel]
+ rst_main_ni: sys
+ rst_fixed_ni: sys_io_div4
}
clock_connections:
{
diff --git a/hw/top_earlgrey/ip/xbar_peri/data/autogen/xbar_peri.gen.hjson b/hw/top_earlgrey/ip/xbar_peri/data/autogen/xbar_peri.gen.hjson
index 946b49a..5800edd 100644
--- a/hw/top_earlgrey/ip/xbar_peri/data/autogen/xbar_peri.gen.hjson
+++ b/hw/top_earlgrey/ip/xbar_peri/data/autogen/xbar_peri.gen.hjson
@@ -16,7 +16,7 @@
reset: rst_peri_ni
reset_connections:
{
- rst_peri_ni: rstmgr_aon_resets.rst_sys_io_div4_n[rstmgr_pkg::Domain0Sel]
+ rst_peri_ni: sys_io_div4
}
clock_connections:
{
diff --git a/util/reggen/ip_block.py b/util/reggen/ip_block.py
index 594e019..06d473c 100644
--- a/util/reggen/ip_block.py
+++ b/util/reggen/ip_block.py
@@ -10,7 +10,7 @@
from .alert import Alert
from .bus_interfaces import BusInterfaces
-from .clocking import Clocking
+from .clocking import Clocking, ClockingItem
from .inter_signal import InterSignal
from .lib import (check_keys, check_name, check_int, check_bool,
check_list, check_optional_str)
@@ -348,3 +348,18 @@
else:
raise ValueError("Signal {} does not exist in IP block {}"
.format(name, self.name))
+
+ def has_shadowed_reg(self) -> bool:
+ '''Return whether any register block contains shadowed registers'''
+
+ for rb in self.reg_blocks.values():
+ if rb.has_shadowed_reg():
+ return True
+
+ # if we are here, then no register block has a shadowed register
+ return False
+
+ def get_primary_clock(self) -> ClockingItem:
+ '''Return the primary clock of the block'''
+
+ return self.clocking.primary
diff --git a/util/reggen/reg_block.py b/util/reggen/reg_block.py
index 275054a..fa9d3c4 100644
--- a/util/reggen/reg_block.py
+++ b/util/reggen/reg_block.py
@@ -419,3 +419,11 @@
def get_addr_width(self) -> int:
'''Calculate the number of bits to address every byte of the block'''
return (self.offset - 1).bit_length()
+
+ def has_shadowed_reg(self) -> bool:
+ '''Return whether the reg block contains shadowed registers'''
+ for r in self.flat_regs:
+ if r.shadowed:
+ return True
+
+ return False
diff --git a/util/topgen.py b/util/topgen.py
index e5d4cee..b45b7c8 100755
--- a/util/topgen.py
+++ b/util/topgen.py
@@ -588,33 +588,19 @@
outputs.append(rtl_path / Path(x))
# Parameters needed for generation
- clks = []
- output_rsts = OrderedDict()
- sw_rsts = OrderedDict()
- leaf_rsts = OrderedDict()
+ reset_obj = topcfg['resets']
# unique clocks
- for rst in topcfg["resets"]["nodes"]:
- if rst['type'] != "ext" and rst['clk'] not in clks:
- clks.append(rst['clk'])
+ clks = reset_obj.get_clocks()
# resets sent to reset struct
- output_rsts = [
- rst for rst in topcfg["resets"]["nodes"] if rst['type'] == "top"
- ]
+ output_rsts = reset_obj.get_top_resets()
# sw controlled resets
- sw_rsts = [
- rst for rst in topcfg["resets"]["nodes"]
- if 'sw' in rst and rst['sw'] == 1
- ]
+ sw_rsts = reset_obj.get_sw_resets()
# leaf resets
- leaf_rsts = [rst for rst in topcfg["resets"]["nodes"] if rst['gen']]
-
- log.info("output resets {}".format(output_rsts))
- log.info("software resets {}".format(sw_rsts))
- log.info("leaf resets {}".format(leaf_rsts))
+ leaf_rsts = reset_obj.get_generated_resets()
# Number of reset requests
n_rstreqs = len(topcfg["reset_requests"])
@@ -631,7 +617,8 @@
sw_rsts=sw_rsts,
output_rsts=output_rsts,
leaf_rsts=leaf_rsts,
- export_rsts=topcfg['exported_rsts'])
+ export_rsts=topcfg['exported_rsts'],
+ reset_obj=topcfg['resets'])
except: # noqa: E722
log.error(exceptions.text_error_template().render())
diff --git a/util/topgen/c.py b/util/topgen/c.py
index 3af6a25..8de153f 100644
--- a/util/topgen/c.py
+++ b/util/topgen/c.py
@@ -388,16 +388,13 @@
# Enumerates the positions of all software controllable resets
def _init_rstmgr_sw_rsts(self):
- sw_rsts = [
- rst for rst in self.top["resets"]["nodes"]
- if 'sw' in rst and rst['sw'] == 1
- ]
+ sw_rsts = self.top['resets'].get_sw_resets()
enum = CEnum(self._top_name +
Name(["reset", "manager", "sw", "resets"]))
for rst in sw_rsts:
- enum.add_constant(Name.from_snake_case(rst["name"]))
+ enum.add_constant(Name.from_snake_case(rst))
enum.add_last_constant("Last valid rstmgr software reset request")
diff --git a/util/topgen/clocks.py b/util/topgen/clocks.py
index 6103ae2..6c62714 100644
--- a/util/topgen/clocks.py
+++ b/util/topgen/clocks.py
@@ -160,3 +160,10 @@
f'{grp.name}: the given source name is '
f'{src_name}, which is unknown.')
grp.add_clock(clk_name, src)
+
+ def get_clock_by_name(self, name: str) -> object:
+
+ ret = self.all_srcs.get(name)
+ if ret is None:
+ raise ValueError(f'{name} is not a valid clock')
+ return ret
diff --git a/util/topgen/lib.py b/util/topgen/lib.py
index 37e5105..9c228fc 100644
--- a/util/topgen/lib.py
+++ b/util/topgen/lib.py
@@ -11,7 +11,6 @@
from typing import Dict, List, Optional, Tuple
import hjson
-
from reggen.ip_block import IpBlock
# Ignore flake8 warning as the function is used in the template
@@ -290,48 +289,16 @@
return "clk_{}_i".format(clk)
-def get_reset_path(reset, domain, reset_cfg):
+def get_reset_path(reset, domain, top):
"""Return the appropriate reset path given name
"""
- # find matching node for reset
- node_match = [node for node in reset_cfg['nodes'] if node['name'] == reset]
- assert len(node_match) == 1
- reset_type = node_match[0]['type']
-
- # find matching path
- hier_path = ""
- if reset_type == "int":
- log.debug("{} used as internal reset".format(reset["name"]))
- else:
- hier_path = reset_cfg['hier_paths'][reset_type]
-
- # find domain selection
- domain_sel = ''
- if reset_type not in ["ext", "int"]:
- domain_sel = "[rstmgr_pkg::Domain{}Sel]".format(domain)
-
- reset_path = ""
- if reset_type == "ext":
- reset_path = reset
- else:
- reset_path = "{}rst_{}_n{}".format(hier_path, reset, domain_sel)
-
- return reset_path
+ return top['resets'].get_path(reset, domain)
def get_unused_resets(top):
"""Return dict of unused resets and associated domain
"""
- unused_resets = OrderedDict()
- unused_resets = {
- reset['name']: domain
- for reset in top['resets']['nodes']
- for domain in top['power']['domains']
- if reset['type'] == 'top' and domain not in reset['domains']
- }
-
- log.debug("Unused resets are {}".format(unused_resets))
- return unused_resets
+ return top['resets'].get_unused_resets(top['power']['domains'])
def is_templated(module):
@@ -512,7 +479,7 @@
return ''.join(acc)
-def is_rom_ctrl (modules):
+def is_rom_ctrl(modules):
'''Return true if rom_ctrl (and thus boot-up rom integrity checking)
exists in the design
'''
diff --git a/util/topgen/merge.py b/util/topgen/merge.py
index 02d14dd..85aded7 100644
--- a/util/topgen/merge.py
+++ b/util/topgen/merge.py
@@ -11,7 +11,7 @@
from topgen import c, lib
from .clocks import Clocks
-
+from .resets import Resets
from reggen.ip_block import IpBlock
from reggen.params import LocalParam, Parameter, RandParameter, MemSizeParameter
@@ -605,17 +605,36 @@
top['inter_module']['connect']['{}.idle'.format(clkmgr_name)].append(entry)
-def amend_resets(top):
+def amend_resets(top, name_to_block):
"""Generate exported reset structure and automatically connect to
intermodule.
+
+ Also iterate through the modules to determine the need for
+ shadowed resets and the associated reset domains.
"""
+ top_resets = Resets(top['resets'], top['clocks'])
rstmgr_name = _find_module_name(top['module'], 'rstmgr')
# Generate exported reset list
exported_rsts = OrderedDict()
for module in top["module"]:
+ block = name_to_block[module['type']]
+ block_clock = block.get_primary_clock()
+ primary_reset = module['reset_connections'][block_clock.reset]
+
+ # shadowed determination
+ if block.has_shadowed_reg():
+ top_resets.mark_reset_shadowed(primary_reset)
+
+ # domain determination
+ if module['domain']:
+ for r in block.clocking.items:
+ if r.reset:
+ reset = module['reset_connections'][r.reset]
+ top_resets.add_reset_domain(reset, module['domain'])
+
# This code is here to ensure if amend_clocks/resets switched order
# everything would still work
export_if = module.get('clock_reset_export', [])
@@ -637,38 +656,12 @@
for intf in top['exported_rsts']:
top['inter_module']['external']['{}.resets_{}'.format(
rstmgr_name, intf)] = "rsts_{}".format(intf)
- """Discover the full path and selection to each reset connection.
- This is done by modifying the reset connection of each end point.
- """
- for end_point in top['module'] + top['memory'] + top['xbar']:
- for port, net in end_point['reset_connections'].items():
- reset_path = lib.get_reset_path(net, end_point['domain'],
- top['resets'])
- end_point['reset_connections'][port] = reset_path
- # reset paths are still needed temporarily until host only modules are properly automated
- reset_paths = OrderedDict()
- reset_hiers = top["resets"]['hier_paths']
+ # reset class objects
+ top["resets"] = top_resets
- for reset in top["resets"]["nodes"]:
- if "type" not in reset:
- log.error("{} missing type field".format(reset["name"]))
- return
-
- if reset["type"] == "top":
- reset_paths[reset["name"]] = "{}rst_{}_n".format(
- reset_hiers["top"], reset["name"])
-
- elif reset["type"] == "ext":
- reset_paths[reset["name"]] = reset_hiers["ext"] + reset['name']
- elif reset["type"] == "int":
- log.info("{} used as internal reset".format(reset["name"]))
- else:
- log.error("{} type is invalid".format(reset["type"]))
-
- top["reset_paths"] = reset_paths
-
- return
+ # The original resets dict is transformed to the reset class
+ assert isinstance(top["resets"], Resets)
def ensure_interrupt_modules(top: OrderedDict, name_to_block: Dict[str, IpBlock]):
@@ -1049,7 +1042,7 @@
# Add path names to declared resets.
# Declare structure for exported resets.
- amend_resets(topcfg)
+ amend_resets(topcfg, name_to_block)
# remove unwanted fields 'debug_mem_base_addr'
topcfg.pop('debug_mem_base_addr', None)
diff --git a/util/topgen/resets.py b/util/topgen/resets.py
new file mode 100644
index 0000000..883de53
--- /dev/null
+++ b/util/topgen/resets.py
@@ -0,0 +1,182 @@
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+
+from typing import Dict, Optional
+from .clocks import Clocks
+
+
+class ResetItem:
+ '''Individual resets'''
+ def __init__(self, hier: Dict[str, str], raw: Dict[str, object], clocks: Clocks):
+ if not raw['name']:
+ raise ValueError('Reset has no name')
+
+ self.name = raw['name']
+ self.gen = raw.get('gen', True)
+ self.rst_type = raw.get('type', 'top')
+
+ self.path = ""
+ if self.rst_type == 'top':
+ self.path = f"{hier['top']}rst_{self.name}_n"
+ elif self.rst_type == 'ext':
+ self.path = f"{hier['ext']}{self.name}"
+
+ # to be constructed later
+ self.domains = []
+ self.shadowed = False
+
+ self.parent = raw.get('parent', "")
+
+ # This can be a source clock or a derived source
+ if self.rst_type != 'ext':
+ self.clock = clocks.get_clock_by_name(raw['clk'])
+ else:
+ self.clock = None
+
+ self.sw = bool(raw.get('sw', 0))
+
+ def _asdict(self) -> Dict[str, object]:
+ ret = {
+ 'name': self.name,
+ 'gen': self.gen,
+ 'type': self.rst_type,
+ 'domains': self.domains,
+ 'shadowed': self.shadowed,
+ 'sw': self.sw,
+ 'path': self.path
+ }
+
+ if self.parent:
+ ret['parent'] = self.parent
+
+ if self.clock:
+ ret['clock'] = self.clock.name
+
+ return ret
+
+
+class Resets:
+ '''Resets for the chip'''
+ def __init__(self, raw: Dict[str, object], clocks: Clocks):
+ self.hier_paths = {}
+ assert isinstance(raw['hier_paths'], dict)
+ for rst_src, path in raw['hier_paths'].items():
+ self.hier_paths[str(rst_src)] = str(path)
+
+ assert isinstance(raw['nodes'], list)
+
+ self.nodes = {}
+ for node in raw['nodes']:
+ assert isinstance(node, dict)
+ reset = ResetItem(self.hier_paths, node, clocks)
+ self.nodes[reset.name] = reset
+
+ def _asdict(self) -> Dict[str, object]:
+ ret = {
+ 'hier_paths': self.hier_paths,
+ 'nodes': list(self.nodes.values())
+ }
+
+ return ret
+
+ def get_reset_by_name(self, name: str) -> ResetItem:
+
+ ret = self.nodes.get(name, None)
+ if ret:
+ return ret
+ else:
+ raise ValueError(f'{name} is not a defined reset')
+
+ def mark_reset_shadowed(self, name: str):
+ '''Mark particular reset as requiring shadow'''
+
+ reset = self.get_reset_by_name(name)
+ reset.shadowed = True
+
+ def get_reset_domains(self, name: str):
+ '''Get available domains for a reset'''
+
+ return self.get_reset_by_name(name).domains
+
+ def get_clocks(self) -> list:
+ '''Get associated clocks'''
+
+ clocks = {}
+ for reset in self.nodes.values():
+ if reset.rst_type != 'ext':
+ clocks[reset.clock.name] = 1
+
+ return clocks.keys()
+
+ def get_generated_resets(self) -> list:
+ '''Get generated resets and return a list of dicts with
+ the related clock
+ '''
+
+ ret = []
+ for reset in self.nodes.values():
+ if reset.gen:
+ entry = {}
+ entry['name'] = reset.name
+ entry['clk'] = reset.clock.name
+ entry['parent'] = reset.parent
+ entry['sw'] = reset.sw
+ ret.append(entry)
+
+ return ret
+
+ def get_top_resets(self) -> list:
+ '''Get resets pushed to the top level'''
+
+ return [reset.name
+ for reset in self.nodes.values()
+ if reset.rst_type == 'top']
+
+ def get_sw_resets(self) -> list:
+ '''Get software controlled resets'''
+
+ return [reset.name
+ for reset in self.nodes.values()
+ if reset.sw]
+
+ def get_path(self, name: str, domain: Optional[str]) -> str:
+ '''Get path to reset'''
+
+ reset = self.get_reset_by_name(name)
+ if reset.rst_type == 'int':
+ raise ValueError(f'Reset {name} is not a reset exported from rstmgr')
+
+ if reset.rst_type == 'ext':
+ return reset.path
+
+ # if a generated reset
+ if domain:
+ return f'{reset.path}[rstmgr_pkg::Domain{domain}Sel]'
+ else:
+ return reset.path
+
+ def get_unused_resets(self, domains: list) -> Dict[str, str]:
+ '''Get unused resets'''
+
+ top_resets = [reset
+ for reset in self.nodes.values()
+ if reset.rst_type == 'top']
+
+ ret = {}
+ for reset in top_resets:
+ for dom in domains:
+ if dom not in reset.domains:
+ ret[reset.name] = dom
+
+ return ret
+
+ def add_reset_domain(self, name: str, domain: str):
+ '''Add a power domain to a particular reset'''
+
+ reset = self.get_reset_by_name(name)
+
+ # Other reset types have hardwired domains
+ if reset.rst_type == 'top':
+ if domain not in reset.domains:
+ reset.domains.append(domain)
diff --git a/util/topgen/templates/chiplevel.sv.tpl b/util/topgen/templates/chiplevel.sv.tpl
index 8fceee6..585ad46 100644
--- a/util/topgen/templates/chiplevel.sv.tpl
+++ b/util/topgen/templates/chiplevel.sv.tpl
@@ -63,12 +63,7 @@
max_sigwidth = len("{}".format(max_sigwidth))
cpu_clk = top['clocks'].hier_paths['top'] + "clk_proc_main"
-cpu_rst = top["reset_paths"]["sys"]
-dm_rst = top["reset_paths"]["lc"]
-esc_clk = top['clocks'].hier_paths['top'] + "clk_io_div4_timers"
-esc_rst = top["reset_paths"]["sys_io_div4"]
-unused_resets = lib.get_unused_resets(top)
unused_im_defs, undriven_im_defs = lib.get_dangling_im_def(top["inter_signal"]["definitions"])
%>\
diff --git a/util/topgen/templates/toplevel.sv.tpl b/util/topgen/templates/toplevel.sv.tpl
index ce897ed..61d356e 100644
--- a/util/topgen/templates/toplevel.sv.tpl
+++ b/util/topgen/templates/toplevel.sv.tpl
@@ -26,10 +26,6 @@
max_sigwidth = len("{}".format(max_sigwidth))
cpu_clk = top['clocks'].hier_paths['top'] + "clk_proc_main"
-cpu_rst = top["reset_paths"]["sys"]
-dm_rst = top["reset_paths"]["lc"]
-esc_clk = top['clocks'].hier_paths['top'] + "clk_io_div4_timers"
-esc_rst = top["reset_paths"]["sys_io_div4"]
unused_resets = lib.get_unused_resets(top)
unused_im_defs, undriven_im_defs = lib.get_dangling_im_def(top["inter_signal"]["definitions"])
@@ -268,7 +264,7 @@
logic unused_d${v.lower()}_rst_${k};
% endfor
% for k, v in unused_resets.items():
- assign unused_d${v.lower()}_rst_${k} = ${lib.get_reset_path(k, v, top['resets'])};
+ assign unused_d${v.lower()}_rst_${k} = ${lib.get_reset_path(k, v, top)};
% endfor
// ibex specific assignments
@@ -335,8 +331,8 @@
% for key in clocks:
.${key} (${clocks[key]}),
% endfor
- % for key, value in resets.items():
- .${key} (${value}),
+ % for port, reset in resets.items():
+ .${port} (${lib.get_reset_path(reset, m['domain'], top)}),
% endfor
.tl_i (${m["name"]}_tl_req),
.tl_o (${m["name"]}_tl_rsp),
@@ -370,8 +366,8 @@
% for key in clocks:
.${key} (${clocks[key]}),
% endfor
- % for key, value in resets.items():
- .${key} (${value}),
+ % for port, reset in resets.items():
+ .${port} (${lib.get_reset_path(reset, m['domain'], top)}),
% endfor
.key_valid_i (${m["inter_signal_list"][1]["top_signame"]}_req.valid),
@@ -425,8 +421,8 @@
% for key in clocks:
.${key} (${clocks[key]}),
% endfor
- % for key, value in resets.items():
- .${key} (${value}),
+ % for port, reset in resets.items():
+ .${port} (${lib.get_reset_path(reset, m['domain'], top)}),
% endfor
.tl_i (${m["name"]}_tl_req),
@@ -453,8 +449,8 @@
% for key in clocks:
.${key} (${clocks[key]}),
% endfor
- % for key, value in resets.items():
- .${key} (${value}),
+ % for port, reset in resets.items():
+ .${port} (${lib.get_reset_path(reset, m['domain'], top)}),
% endfor
.req_i (${m["name"]}_req),
.addr_i (${m["name"]}_addr),
@@ -488,8 +484,8 @@
% for key in clocks:
.${key} (${clocks[key]}),
% endfor
- % for key, value in resets.items():
- .${key} (${value}),
+ % for port, reset in resets.items():
+ .${port} (${lib.get_reset_path(reset, m['domain'], top)}),
% endfor
.tl_i (${m["name"]}_tl_req),
@@ -512,8 +508,8 @@
% for key in clocks:
.${key} (${clocks[key]}),
% endfor
- % for key, value in resets.items():
- .${key} (${value}),
+ % for port, reset in resets.items():
+ .${port} (${lib.get_reset_path(reset, m['domain'], top)}),
% endfor
.host_req_i (flash_host_req),
.host_intg_err_i (flash_host_intg_err),
@@ -664,8 +660,8 @@
% for k, v in m["clock_connections"].items():
.${k} (${v}),
% endfor
- % for k, v in m["reset_connections"].items():
- .${k} (${v})${"," if not loop.last else ""}
+ % for port, reset in m["reset_connections"].items():
+ .${port} (${lib.get_reset_path(reset, m['domain'], top)})${"," if not loop.last else ""}
% endfor
);
@@ -689,8 +685,8 @@
% for k, v in xbar["clock_connections"].items():
.${k} (${v}),
% endfor
- % for k, v in xbar["reset_connections"].items():
- .${k} (${v}),
+ % for port, reset in xbar["reset_connections"].items():
+ .${port} (${lib.get_reset_path(reset, xbar["domain"], top)}),
% endfor
## Inter-module signal
diff --git a/util/topgen/validate.py b/util/topgen/validate.py
index 6879ee1..4152a6b 100644
--- a/util/topgen/validate.py
+++ b/util/topgen/validate.py
@@ -752,21 +752,6 @@
error += 1
return error
- # check that power domain definition is consistent with reset and module definition
- for reset in top['resets']['nodes']:
- if reset['gen']:
- if 'domains' not in reset:
- log.error("{} missing domain definition".format(reset['name']))
- error += 1
- return error
- else:
- for domain in reset['domains']:
- if domain not in top['power']['domains']:
- log.error("{} defined invalid domain {}".format(
- reset['name'], domain))
- error += 1
- return error
-
# Check that each module, xbar, memory has a power domain defined.
# If not, give it a default.
# If there is one defined, check that it is a valid definition