// Copyright lowRISC contributors.
// Licensed under the Apache License, Version 2.0, see LICENSE for details.
// SPDX-License-Identifier: Apache-2.0

`include "prim_assert.sv"

/**
 * Tile-Link UL adapter for SRAM-like devices
 *
 * - BaseAddr is intentionally omitted because multiple memory maps may be used in an SoC.
 *   This means that aliasing can happen if the target device size in the TL-UL crossbar is
 *   bigger than the SRAM size.
 * - At most one of EnableDataIntgGen / EnableDataIntgPt can be enabled. However, it is
 *   possible for both to be disabled.
 *   In that case the module neither generates an integrity response nor passes through any
 *   pre-existing integrity. This might be the case for non-security-critical memories where
 *   there is no stored integrity AND another entity upstream is already generating and
 *   returning integrity.
 *   There is however no case where EnableDataIntgGen and EnableDataIntgPt are both true.
 */
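
// Minimal instantiation sketch. The parameter values, instance name, and connected signal
// names below are purely illustrative placeholders, not taken from any particular top level;
// only the port and parameter names come from this module.
//
//   tlul_adapter_sram #(
//     .SramAw      (10),
//     .SramDw      (32),
//     .Outstanding (1),
//     .ByteAccess  (1)
//   ) u_example_adapter (
//     .clk_i,
//     .rst_ni,
//     .tl_i         (tl_sram_h2d),
//     .tl_o         (tl_sram_d2h),
//     .en_ifetch_i  (tlul_pkg::InstrDis),
//     .req_o        (sram_req),
//     .req_type_o   (),
//     .gnt_i        (sram_gnt),
//     .we_o         (sram_we),
//     .addr_o       (sram_addr),
//     .wdata_o      (sram_wdata),
//     .wmask_o      (sram_wmask),
//     .intg_error_o (),
//     .rdata_i      (sram_rdata),
//     .rvalid_i     (sram_rvalid),
//     .rerror_i     (sram_rerror)
//   );
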
module tlul_adapter_sram import tlul_pkg::*; #(
  parameter int SramAw            = 12,
  parameter int SramDw            = 32, // Must be a multiple of the TL-UL word width
  parameter int Outstanding       = 1,  // Only one request is accepted
  parameter bit ByteAccess        = 1,  // 1: true, 0: false
  parameter bit ErrOnWrite        = 0,  // 1: Writes not allowed, automatically error
  parameter bit ErrOnRead         = 0,  // 1: Reads not allowed, automatically error
  parameter bit CmdIntgCheck      = 0,  // 1: Enable command integrity check
  parameter bit EnableRspIntgGen  = 0,  // 1: Generate response integrity
  parameter bit EnableDataIntgGen = 0,  // 1: Generate response data integrity
  parameter bit EnableDataIntgPt  = 0,  // 1: Passthrough command/response data integrity
  localparam int WidthMult = SramDw / top_pkg::TL_DW,
  localparam int IntgWidth = tlul_pkg::DataIntgWidth * WidthMult,
  localparam int DataOutW  = EnableDataIntgPt ? SramDw + IntgWidth : SramDw
) (
  input clk_i,
  input rst_ni,

  // TL-UL interface
  input  tl_h2d_t tl_i,
  output tl_d2h_t tl_o,

  // control interface
  input tl_instr_en_e en_ifetch_i,

  // SRAM interface
  output logic                req_o,
  output tl_type_e            req_type_o,
  input                       gnt_i,
  output logic                we_o,
  output logic [SramAw-1:0]   addr_o,
  output logic [DataOutW-1:0] wdata_o,
  output logic [DataOutW-1:0] wmask_o,
  output logic                intg_error_o,
  input        [DataOutW-1:0] rdata_i,
  input                       rvalid_i,
  input        [1:0]          rerror_i // 2-bit error [1]: Uncorrectable, [0]: Correctable
);

  localparam int SramByte = SramDw/8;
  localparam int DataBitWidth = prim_util_pkg::vbits(SramByte);
  localparam int WoffsetWidth = (SramByte == top_pkg::TL_DBW) ? 1 :
                                DataBitWidth - prim_util_pkg::vbits(top_pkg::TL_DBW);
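
  // Worked example (hypothetical configuration, for illustration): with SramDw = 64 and
  // top_pkg::TL_DW = 32 (so TL_DBW = 4), SramByte = 8, DataBitWidth = vbits(8) = 3, and
  // WoffsetWidth = 3 - vbits(4) = 1, i.e. a single address bit selects which 32-bit half of
  // each 64-bit SRAM word a TL-UL access targets. When SramByte == TL_DBW, the width is kept
  // at 1 and the offset is tied to zero further below.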

  typedef struct packed {
    logic [top_pkg::TL_DBW-1:0] mask ;    // Byte mask within the TL-UL word
    logic [WoffsetWidth-1:0]    woffset ; // Offset of the TL-UL word within the SRAM word
  } sram_req_t ;

  typedef enum logic [1:0] {
    OpWrite,
    OpRead,
    OpUnknown
  } req_op_e ;

  typedef struct packed {
    req_op_e                    op ;
    logic                       error ;
    logic [top_pkg::TL_SZW-1:0] size ;
    logic [top_pkg::TL_AIW-1:0] source ;
  } req_t ;

  typedef struct packed {
    logic [top_pkg::TL_DW-1:0] data ;
    logic [DataIntgWidth-1:0]  data_intg ;
    logic                      error ;
  } rsp_t ;

  localparam int SramReqFifoWidth = $bits(sram_req_t) ;
  localparam int ReqFifoWidth     = $bits(req_t) ;
  localparam int RspFifoWidth     = $bits(rsp_t) ;

  // FIFO signals in case Outstanding is greater than 1.
  // If a request is latched, {write, source} is pushed to the req fifo.
  // The req fifo is popped when the D channel is acknowledged (v & r).
  // D channel valid is asserted for a write request, or, for a read, once the rsp fifo is
  // not empty.
  logic reqfifo_wvalid, reqfifo_wready;
  logic reqfifo_rvalid, reqfifo_rready;
  req_t reqfifo_wdata,  reqfifo_rdata;

  logic sramreqfifo_wvalid, sramreqfifo_wready;
  logic sramreqfifo_rready;
  sram_req_t sramreqfifo_wdata, sramreqfifo_rdata;

  logic rspfifo_wvalid, rspfifo_wready;
  logic rspfifo_rvalid, rspfifo_rready;
  rsp_t rspfifo_wdata,  rspfifo_rdata;

  logic error_internal; // Internal protocol error checker
  logic intg_error;
  logic wr_attr_error;
  logic instr_error;
  logic wr_vld_error;
  logic rd_vld_error;
  logic tlul_error;     // Error from `tlul_err` module

  logic a_ack, d_ack, sram_ack;
  assign a_ack    = tl_i.a_valid & tl_o.a_ready ;
  assign d_ack    = tl_o.d_valid & tl_i.d_ready ;
  assign sram_ack = req_o        & gnt_i ;

  // Valid handling
  logic d_valid, d_error;
  always_comb begin
    d_valid = 1'b0;

    if (reqfifo_rvalid) begin
      if (reqfifo_rdata.error) begin
        // Return an error response. Assume no request went out to the SRAM.
        d_valid = 1'b1;
      end else if (reqfifo_rdata.op == OpRead) begin
        d_valid = rspfifo_rvalid;
      end else begin
        // Write without error
        d_valid = 1'b1;
      end
    end else begin
      d_valid = 1'b0;
    end
  end

  always_comb begin
    d_error = 1'b0;

    if (reqfifo_rvalid) begin
      if (reqfifo_rdata.op == OpRead) begin
        d_error = rspfifo_rdata.error | reqfifo_rdata.error;
      end else begin
        d_error = reqfifo_rdata.error;
      end
    end else begin
      d_error = 1'b0;
    end
  end
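
  // Summary of the two processes above: an errored request is acknowledged on the D channel
  // immediately (no SRAM response is expected), a write completes as soon as its req fifo
  // entry reaches the head, and a read waits until the rsp fifo holds the returned data. For
  // reads, d_error also folds in the uncorrectable SRAM error captured in rspfifo_rdata.error.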


  tl_d2h_t tl_out;
  assign tl_out = '{
      d_valid  : d_valid ,
      d_opcode : (d_valid && reqfifo_rdata.op != OpRead) ? AccessAck : AccessAckData,
      d_param  : '0,
      d_size   : (d_valid) ? reqfifo_rdata.size : '0,
      d_source : (d_valid) ? reqfifo_rdata.source : '0,
      d_sink   : 1'b0,
      d_data   : (d_valid && rspfifo_rvalid && reqfifo_rdata.op == OpRead)
                 ? rspfifo_rdata.data : '0,
      d_user   : '{default: '1, data_intg: d_valid ? rspfifo_rdata.data_intg : '1},
      d_error  : d_valid && d_error,

      a_ready  : (gnt_i | error_internal) & reqfifo_wready & sramreqfifo_wready
  };


  tlul_rsp_intg_gen #(
    .EnableRspIntgGen(EnableRspIntgGen),
    .EnableDataIntgGen(EnableDataIntgGen)
  ) u_rsp_gen (
    .tl_i(tl_out),
    .tl_o
  );

  // a_ready depends on the FIFO full condition and the grant from the SRAM (or SRAM arbiter).
  // Assemble the response, including read data, write acknowledgement, and errors for
  // unsupported accesses.

  // Output to SRAM:
  //    Generate a request only when no internal error occurs. If an error occurs, the request
  //    is dropped and an error response is returned to the host, so the error is pushed to the
  //    req fifo. In this case, it is assumed the request is granted (may cause an ordering
  //    issue later?)
  assign req_o      = tl_i.a_valid & reqfifo_wready & ~error_internal;
  assign req_type_o = tl_i.a_user.tl_type;
  assign we_o       = tl_i.a_valid & logic'(tl_i.a_opcode inside {PutFullData, PutPartialData});
  assign addr_o     = (tl_i.a_valid) ? tl_i.a_address[DataBitWidth+:SramAw] : '0;
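
  // For example (illustrative numbers): with SramDw = 32 (DataBitWidth = 2) and SramAw = 12,
  // a request to byte address 'h104 drives addr_o = 'h041, i.e. the two byte-offset LSBs are
  // dropped and the next SramAw address bits select the SRAM word.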

  // Support SRAMs wider than the TL-UL word width by mapping the parts of the
  // TL-UL address which are more fine-granular than the SRAM width to the
  // SRAM write mask.
  logic [WoffsetWidth-1:0] woffset;
  if (top_pkg::TL_DW != SramDw) begin : gen_wordwidthadapt
    assign woffset = tl_i.a_address[DataBitWidth-1:prim_util_pkg::vbits(top_pkg::TL_DBW)];
  end else begin : gen_no_wordwidthadapt
    assign woffset = '0;
  end
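
  // Continuing the SramDw = 64 / TL_DW = 32 example from above: DataBitWidth = 3 and
  // vbits(TL_DBW) = 2, so woffset = tl_i.a_address[2], picking the lower or upper 32-bit half
  // of the 64-bit SRAM word. When SramDw == TL_DW the offset is constant zero.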

  // The size of the data/wmask depends on whether passthrough integrity is enabled.
  // If passthrough integrity is enabled, the data is concatenated with the integrity passed
  // through the user bits. Otherwise, it is the data only.
  localparam int DataWidth = EnableDataIntgPt ? top_pkg::TL_DW + DataIntgWidth : top_pkg::TL_DW;

  // Final combined wmask / wdata
  logic [WidthMult-1:0][DataWidth-1:0] wmask_combined;
  logic [WidthMult-1:0][DataWidth-1:0] wdata_combined;

  // Original tlul portion
  logic [WidthMult-1:0][top_pkg::TL_DW-1:0] wmask_int;
  logic [WidthMult-1:0][top_pkg::TL_DW-1:0] wdata_int;

  // Integrity portion
  logic [WidthMult-1:0][DataIntgWidth-1:0] wmask_intg;
  logic [WidthMult-1:0][DataIntgWidth-1:0] wdata_intg;

  always_comb begin
    wmask_int = '0;
    wdata_int = '0;

    if (tl_i.a_valid) begin
      for (int i = 0 ; i < top_pkg::TL_DW/8 ; i++) begin
        wmask_int[woffset][8*i +: 8] = {8{tl_i.a_mask[i]}};
        wdata_int[woffset][8*i +: 8] = (tl_i.a_mask[i] && we_o) ? tl_i.a_data[8*i+:8] : '0;
      end
    end
  end
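
  // For instance, a PutPartialData with a_mask = 4'b0011 expands to
  // wmask_int[woffset] = 32'h0000_ffff, and only a_data[15:0] is copied into wdata_int; the
  // unselected bytes carry a zero mask and zero data.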

  // TODO: The logic below is incomplete. If the adapter detects a write that is NOT
  // the full word, it must read back the other parts of the data from memory and
  // re-generate the integrity.
  // Since that will cause back-pressure to the upstream agent and likely require substantial
  // changes to this module, it is left to a different PR.
  always_comb begin
    wmask_intg = '0;
    wdata_intg = '0;

    if (tl_i.a_valid) begin
      wmask_intg[woffset] = '1;
      wdata_intg[woffset] = tl_i.a_user.data_intg;
    end
  end

  for (genvar i = 0; i < WidthMult; i++) begin : gen_write_output
    if (EnableDataIntgPt) begin : gen_combined_output
      assign wmask_combined[i] = {wmask_intg[i], wmask_int[i]};
      assign wdata_combined[i] = {wdata_intg[i], wdata_int[i]};
    end else begin : gen_ft_output
      logic unused_w;
      assign wmask_combined[i] = wmask_int[i];
      assign wdata_combined[i] = wdata_int[i];
      assign unused_w = |wmask_intg & |wdata_intg;
    end
  end

  assign wmask_o = wmask_combined;
  assign wdata_o = wdata_combined;


  // Begin: Request Error Detection

  // wr_attr_error: Check if the request size and mask are permitted.
  //    Basic checks of size, mask, and address alignment are done in the tlul_err module.
  //    Here, partial writes are additionally flagged if ByteAccess isn't allowed.
  assign wr_attr_error = (tl_i.a_opcode == PutFullData || tl_i.a_opcode == PutPartialData)
                       ? ((ByteAccess == 0) ? (tl_i.a_mask != '1 || tl_i.a_size != 2'h2) : 1'b0)
                       : 1'b0;
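
  // Example: with ByteAccess == 0, a PutPartialData with a_mask = 4'b0011 (or any write with
  // a_size != 2) sets wr_attr_error and is answered with an error response; with
  // ByteAccess == 1 the same write is forwarded to the SRAM using the byte mask built above.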

  // An instruction type transaction is only valid if en_ifetch is enabled
  assign instr_error = tl_i.a_user.tl_type == InstrType &
                       en_ifetch_i == InstrDis;

  if (ErrOnWrite == 1) begin : gen_no_writes
    assign wr_vld_error = tl_i.a_opcode != Get;
  end else begin : gen_writes_allowed
    assign wr_vld_error = 1'b0;
  end

  if (ErrOnRead == 1) begin: gen_no_reads
    assign rd_vld_error = tl_i.a_opcode == Get;
  end else begin : gen_reads_allowed
    assign rd_vld_error = 1'b0;
  end

  if (CmdIntgCheck) begin : gen_cmd_intg_check
    tlul_cmd_intg_chk u_cmd_intg_chk (
      .tl_i,
      .err_o ()
    );

    // TODO: hook up err_o once memory initialization is done
    assign intg_error = '0;
  end else begin : gen_no_cmd_intg_check
    assign intg_error = '0;
  end


  // permanently latch integrity error until reset
  logic intg_error_q;
  always_ff @(posedge clk_i or negedge rst_ni) begin
    if (!rst_ni) begin
      intg_error_q <= '0;
    end else if (intg_error) begin
      intg_error_q <= 1'b1;
    end
  end
  assign intg_error_o = intg_error_q;

  tlul_err u_err (
    .clk_i,
    .rst_ni,
    .tl_i,
    .err_o (tlul_error)
  );

  assign error_internal = wr_attr_error | wr_vld_error | rd_vld_error | instr_error |
                          tlul_error | intg_error | intg_error_q;
  // End: Request Error Detection

  assign reqfifo_wvalid = a_ack ; // Push to the FIFO only when granted
  assign reqfifo_wdata  = '{
    op:     (tl_i.a_opcode != Get) ? OpWrite : OpRead, // To return AccessAck for opcode error
    error:  error_internal,
    size:   tl_i.a_size,
    source: tl_i.a_source
  }; // Store the request only. The data doesn't have to be stored.
  assign reqfifo_rready = d_ack ;

  // Pushed together with the ReqFIFO, popped upon the returning read
  assign sramreqfifo_wdata = '{
    mask    : tl_i.a_mask,
    woffset : woffset
  };
  assign sramreqfifo_wvalid = sram_ack & ~we_o;
  assign sramreqfifo_rready = rspfifo_wvalid;

  assign rspfifo_wvalid = rvalid_i & reqfifo_rvalid;

  // Make sure only requested bytes are forwarded
  logic [WidthMult-1:0][DataWidth-1:0] rdata;
  logic [WidthMult-1:0][DataWidth-1:0] rmask;
  logic [DataWidth-1:0] rdata_tlword;

  // When passing through data integrity, we must feed back the entire
  // read data, otherwise the stored integrity will not calculate correctly.
  if (EnableDataIntgPt) begin : gen_no_rmask
    assign rmask = {DataOutW{|sramreqfifo_rdata.mask}};
  end else begin : gen_rmask
    always_comb begin
      rmask = '0;
      for (int i = 0 ; i < top_pkg::TL_DW/8 ; i++) begin
        rmask[sramreqfifo_rdata.woffset][8*i +: 8] = {8{sramreqfifo_rdata.mask[i]}};
      end
    end
  end

  assign rdata = rdata_i & rmask;
  assign rdata_tlword = rdata[sramreqfifo_rdata.woffset];
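
  // Read-side counterpart of the write-mask handling: e.g. a Get whose a_mask was 4'b0011
  // (with EnableDataIntgPt disabled) yields rmask[woffset] = 32'h0000_ffff, so only the
  // requested bytes of the selected SRAM word are returned and the remaining bytes read as
  // zero.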

  assign rspfifo_wdata = '{
    data      : rdata_tlword[top_pkg::TL_DW-1:0],
    data_intg : EnableDataIntgPt ? rdata_tlword[DataWidth-1 -: DataIntgWidth] : '1,
    error     : rerror_i[1] // Only care about the uncorrectable error
  };
  assign rspfifo_rready = (reqfifo_rdata.op == OpRead & ~reqfifo_rdata.error)
                        ? reqfifo_rready : 1'b0 ;

  // This module only cares about uncorrectable errors.
  logic unused_rerror;
  assign unused_rerror = rerror_i[0];

  // FIFO instances: REQ, RSP

  // The ReqFIFO stores the access type so it can be matched to the response data.
  // For instance, the SRAM accepts a write request but doesn't return an
  // acknowledgement. In that case, it may be hard to determine when the D
  // response for the write should be sent out if reads/writes are
  // interleaved. So, to keep responses in order (even though TL-UL allows
  // out-of-order responses), storing the request is necessary. If the head
  // entry is a write op, it is safe to return the response right away. If it
  // is a read request, then the D response waits until the read data arrives.

  // Notes:
  //    The Outstanding+1 allows the reqfifo to absorb back-to-back transactions
  //    without any wait states. Alternatively, the depth can be kept at
  //    Outstanding as long as the outgoing ready is qualified with the acceptance
  //    of the response in the same cycle. Doing so however creates a path from
  //    ready_i to ready_o, which may not be desirable.
  prim_fifo_sync #(
    .Width   (ReqFifoWidth),
    .Pass    (1'b0),
    .Depth   (Outstanding)
  ) u_reqfifo (
    .clk_i,
    .rst_ni,
    .clr_i   (1'b0),
    .wvalid_i(reqfifo_wvalid),
    .wready_o(reqfifo_wready),
    .wdata_i (reqfifo_wdata),
    .rvalid_o(reqfifo_rvalid),
    .rready_i(reqfifo_rready),
    .rdata_o (reqfifo_rdata),
    .full_o  (),
    .depth_o ()
  );

  // sramreqfifo:
  //    While the ReqFIFO holds the request until it is sent back via TL-UL, the
  //    sramreqfifo only needs to hold the mask and word offset until the read
  //    data returns from memory.
  prim_fifo_sync #(
    .Width   (SramReqFifoWidth),
    .Pass    (1'b0),
    .Depth   (Outstanding)
  ) u_sramreqfifo (
    .clk_i,
    .rst_ni,
    .clr_i   (1'b0),
    .wvalid_i(sramreqfifo_wvalid),
    .wready_o(sramreqfifo_wready),
    .wdata_i (sramreqfifo_wdata),
    .rvalid_o(),
    .rready_i(sramreqfifo_rready),
    .rdata_o (sramreqfifo_rdata),
    .full_o  (),
    .depth_o ()
  );

  // Rationale for having #Outstanding depth in the response FIFO:
  //    In the normal case, if the host or the crossbar accepts the response data,
  //    the response FIFO isn't needed. But if the response can be back-pressured,
  //    the response FIFO must store the returned data so that data from the SRAM
  //    interface is not lost. Remember, the SRAM interface doesn't have a
  //    back-pressure signal such as read_ready.
  prim_fifo_sync #(
    .Width   (RspFifoWidth),
    .Pass    (1'b1),
    .Depth   (Outstanding)
  ) u_rspfifo (
    .clk_i,
    .rst_ni,
    .clr_i   (1'b0),
    .wvalid_i(rspfifo_wvalid),
    .wready_o(rspfifo_wready),
    .wdata_i (rspfifo_wdata),
    .rvalid_o(rspfifo_rvalid),
    .rready_i(rspfifo_rready),
    .rdata_o (rspfifo_rdata),
    .full_o  (),
    .depth_o ()
  );

  // The assertion below fails when SRAM rvalid is asserted even though the ReqFifo is empty.
  `ASSERT(rvalidHighReqFifoEmpty, rvalid_i |-> reqfifo_rvalid)

  // The assertion below fails when the Outstanding value is too small (SRAM rvalid is asserted
  // even though the RspFifo is full).
  `ASSERT(rvalidHighWhenRspFifoFull, rvalid_i |-> rspfifo_wready)

  // If both ErrOnWrite and ErrOnRead are set, this block is useless
  `ASSERT_INIT(adapterNoReadOrWrite, (ErrOnWrite & ErrOnRead) == 0)

  `ASSERT_INIT(SramDwHasByteGranularity_A, SramDw % 8 == 0)
  `ASSERT_INIT(SramDwIsMultipleOfTlulWidth_A, SramDw % top_pkg::TL_DW == 0)

  // These parameter options cannot both be true at the same time
  `ASSERT_INIT(DataIntgOptions_A, ~(EnableDataIntgGen & EnableDataIntgPt))

  // make sure outputs are defined
  `ASSERT_KNOWN(TlOutKnown_A,    tl_o   )
  `ASSERT_KNOWN(ReqOutKnown_A,   req_o  )
  `ASSERT_KNOWN(WeOutKnown_A,    we_o   )
  `ASSERT_KNOWN(AddrOutKnown_A,  addr_o )
  `ASSERT_KNOWN(WdataOutKnown_A, wdata_o)
  `ASSERT_KNOWN(WmaskOutKnown_A, wmask_o)

endmodule