[dv/common] initial support for shadow register

1. Add a tag for shadow register in dv_base_reg
2. Add a knob to control whether the user wants to
  a). write shadow register twice with the same values
  b). only write shadow register once (later use for error case)

Signed-off-by: Cindy Chen <chencindy@google.com>
diff --git a/hw/dv/sv/csr_utils/csr_utils_pkg.sv b/hw/dv/sv/csr_utils/csr_utils_pkg.sv
index 04e98c3..fa3a442 100644
--- a/hw/dv/sv/csr_utils/csr_utils_pkg.sv
+++ b/hw/dv/sv/csr_utils/csr_utils_pkg.sv
@@ -178,12 +178,13 @@
                             input  uvm_path_e   path = UVM_DEFAULT_PATH,
                             input  bit          blocking = default_csr_blocking,
                             input  uint         timeout_ns = default_timeout_ns,
-                            input  uvm_reg_map  map = null);
+                            input  uvm_reg_map  map = null,
+                            input  bit          en_shadow_wr = 1);
     if (blocking) begin
-      csr_update_sub(csr, check, path, timeout_ns, map);
+      csr_update_sub(csr, check, path, timeout_ns, map, en_shadow_wr);
     end else begin
       fork
-        csr_update_sub(csr, check, path, timeout_ns, map);
+        csr_update_sub(csr, check, path, timeout_ns, map, en_shadow_wr);
       join_none
       // Add #0 to ensure that this thread starts executing before any subsequent call
       #0;
@@ -195,7 +196,8 @@
                                 input  uvm_check_e  check = UVM_CHECK,
                                 input  uvm_path_e   path = UVM_DEFAULT_PATH,
                                 input  uint         timeout_ns = default_timeout_ns,
-                                input  uvm_reg_map  map = null);
+                                input  uvm_reg_map  map = null,
+                                input  bit          en_shadow_wr = 1);
     fork
       begin : isolation_fork
         uvm_status_e  status;
@@ -204,7 +206,9 @@
         fork
           begin
             increment_outstanding_access();
+            csr_pre_write_sub(csr, en_shadow_wr);
             csr.update(.status(status), .path(path), .map(map), .prior(100));
+            csr_post_write_sub(csr, en_shadow_wr);
             if (check == UVM_CHECK) begin
               `DV_CHECK_EQ(status, UVM_IS_OK, "", error, msg_id)
             end
@@ -229,16 +233,17 @@
                         input bit            backdoor = 0,
                         input uint           timeout_ns = default_timeout_ns,
                         input bit            predict = 0,
-                        input uvm_reg_map    map = null);
+                        input uvm_reg_map    map = null,
+                        input bit            en_shadow_wr = 1);
     if (backdoor) begin
       csr_poke(csr, value, check, predict);
     end else if (blocking) begin
-      csr_wr_sub(csr, value, check, path, timeout_ns, map);
+      csr_wr_sub(csr, value, check, path, timeout_ns, map, en_shadow_wr);
       if (predict) void'(csr.predict(.value(value), .kind(UVM_PREDICT_WRITE)));
     end else begin
       fork
         begin
-          csr_wr_sub(csr, value, check, path, timeout_ns, map);
+          csr_wr_sub(csr, value, check, path, timeout_ns, map, en_shadow_wr);
           // predict after csr_wr_sub, to ensure predict after enable register overwrite the locked
           // registers' access information
           if (predict) void'(csr.predict(.value(value), .kind(UVM_PREDICT_WRITE)));
@@ -255,7 +260,8 @@
                             input uvm_check_e    check = UVM_CHECK,
                             input uvm_path_e     path = UVM_DEFAULT_PATH,
                             input uint           timeout_ns = default_timeout_ns,
-                            input uvm_reg_map    map = null);
+                            input uvm_reg_map    map = null,
+                            input bit            en_shadow_wr = 1);
     fork
       begin : isolation_fork
         uvm_status_e  status;
@@ -264,7 +270,9 @@
         fork
           begin
             increment_outstanding_access();
+            csr_pre_write_sub(csr, en_shadow_wr);
             csr.write(.status(status), .value(value), .path(path), .map(map), .prior(100));
+            csr_post_write_sub(csr, en_shadow_wr);
             if (check == UVM_CHECK) begin
               `DV_CHECK_EQ(status, UVM_IS_OK, "", error, msg_id)
             end
@@ -281,6 +289,30 @@
     join
   endtask
 
+  task automatic csr_pre_write_sub(ref uvm_reg csr, bit en_shadow_wr);
+    dv_base_reg dv_reg;
+    `downcast(dv_reg, csr, "", fatal, msg_id)
+    if (dv_reg.get_is_shadowed()) begin
+      if (en_shadow_wr) increment_outstanding_access();
+      dv_reg.atomic_en_shadow_wr.get(1);
+      dv_reg.set_en_shadow_wr(en_shadow_wr);
+    end
+  endtask
+
+  task automatic csr_post_write_sub(ref uvm_reg csr, bit en_shadow_wr);
+    dv_base_reg dv_reg;
+    `downcast(dv_reg, csr, "", fatal, msg_id)
+    if (dv_reg.get_is_shadowed()) begin
+      // try setting en_shadow_wr back to its default value 1; this only takes effect once the
+      // shadow reg has finished both writes
+      dv_reg.set_en_shadow_wr(1);
+      dv_reg.atomic_en_shadow_wr.put(1);
+      if (en_shadow_wr) begin
+        decrement_outstanding_access();
+      end
+    end
+  endtask
+
   // backdoor write csr
   task automatic csr_poke(input uvm_reg        csr,
                           input uvm_reg_data_t value,
diff --git a/hw/dv/sv/dv_base_reg/dv_base_reg.sv b/hw/dv/sv/dv_base_reg/dv_base_reg.sv
index 3ad45ee..2428412 100644
--- a/hw/dv/sv/dv_base_reg/dv_base_reg.sv
+++ b/hw/dv/sv/dv_base_reg/dv_base_reg.sv
@@ -8,12 +8,27 @@
   // hence, backdoor write isn't available
   local bit is_ext_reg;
 
-  local dv_base_reg locked_regs[$];
+  local dv_base_reg    locked_regs[$];
+  local uvm_reg_data_t staged_shadow_val;
+  local bit            is_shadowed;
+  local bit            shadow_wr_staged; // stage the first shadow reg write
+  local bit            shadow_update_err;
+  local bit            en_shadow_wr = 1;
+
+  // atomic_shadow_wr: semaphore to guarantee atomicity of the two writes for shadowed registers.
+  // This prevents a parallel thread from writing a different value to the same reg and causing
+  semaphore            atomic_shadow_wr;
+
+  // atomic_en_shadow_wr: semaphore to guarantee setting or resetting en_shadow_wr is unchanged
+  // through the 1st/2nd (or both) writes
+  semaphore            atomic_en_shadow_wr;
 
   function new(string       name = "",
                int unsigned n_bits,
                int          has_coverage);
     super.new(name, n_bits, has_coverage);
+    atomic_en_shadow_wr = new(1);
+    atomic_shadow_wr    = new(1);
   endfunction : new
 
   function void get_dv_base_reg_fields(ref dv_base_reg_field dv_fields[$]);
@@ -63,12 +78,58 @@
     locked_regs_q = locked_regs;
   endfunction
 
-  // post_write callback to handle reg enables
+  // is_shadowed bit is only one-time programmable
+  // once this function is called in RAL auto-generated class, it cannot be changed
+  function void set_is_shadowed();
+    is_shadowed = 1;
+  endfunction
+
+  function void set_en_shadow_wr(bit val);
+    // do not update en_shadow_wr if shadow register write is in process
+    if ((en_shadow_wr ^ val) && shadow_wr_staged) begin
+      `uvm_info(`gfn,
+          $sformatf("unable to %s en_shadow_wr because register already completed first write",
+          val ? "set" : "clear"), UVM_LOW)
+      return;
+    end
+    en_shadow_wr = val;
+  endfunction
+
+  function bit get_is_shadowed();
+    return is_shadowed;
+  endfunction
+
+  function bit get_shadow_update_err();
+    return shadow_update_err;
+  endfunction
+
+  virtual function void clear_shadow_update_err();
+    shadow_update_err = 0;
+  endfunction
+
+  // post_write callback to handle special regs:
+  // - shadow register, enable reg won't be updated until the second write has no error
+  // - enable register, if enable_reg is disabled, change access policy to all the locked_regs
   // TODO: create an `enable_field_access_policy` variable and set the template code during
   // automation.
   virtual task post_write(uvm_reg_item rw);
     dv_base_reg_field fields[$];
     string field_access;
+    if (is_shadowed) begin
+      // first write
+      if (!shadow_wr_staged) begin
+        shadow_wr_staged = 1;
+        staged_shadow_val = rw.value[0];
+        return;
+      end else begin
+        // second write
+        shadow_wr_staged = 0;
+        if (staged_shadow_val != rw.value[0]) begin
+          shadow_update_err = 1;
+          return;
+        end
+      end
+    end
     if (is_enable_reg()) begin
       get_dv_base_reg_fields(fields);
       field_access = fields[0].get_access();
@@ -77,7 +138,7 @@
         "W1C": if (rw.value[0][0] == 1'b1) set_locked_regs_access("RO");
         "W0C": if (rw.value[0][0] == 1'b0) set_locked_regs_access("RO");
         "RO": ; // if RO, it's updated by design, need to predict in scb
-        default:`uvm_fatal(`gtn, $sformatf("enable register invalid access %s", field_access))
+        default:`uvm_fatal(`gfn, $sformatf("enable register invalid access %s", field_access))
       endcase
     end
   endtask
@@ -90,4 +151,49 @@
     return is_ext_reg;
   endfunction
 
+  // if it is a shadowed register, and is enabled to write it twice, this task will write the
+  // register twice with the same value and address.
+  virtual task write(output uvm_status_e     status,
+                     input uvm_reg_data_t    value,
+                     input uvm_path_e        path = UVM_DEFAULT_PATH,
+                     input uvm_reg_map       map = null,
+                     input uvm_sequence_base parent=null,
+                     input int               prior = -1,
+                     input uvm_object        extension = null,
+                     input string            fname = "",
+                     input int               lineno = 0);
+    if (is_shadowed) atomic_shadow_wr.get(1);
+    super.write(status, value, path, map, parent, prior, extension, fname, lineno);
+    if (is_shadowed && en_shadow_wr) begin
+      super.write(status, value, path, map, parent, prior, extension, fname, lineno);
+    end
+    if (is_shadowed) atomic_shadow_wr.put(1);
+  endtask
+
+  // override do_predict function to support shadow_reg:
+  // skip predict if it is shadow_reg's first write, or second write with an update_err
+  virtual function void do_predict (uvm_reg_item      rw,
+                                    uvm_predict_e     kind = UVM_PREDICT_DIRECT,
+                                    uvm_reg_byte_en_t be = -1);
+    if (is_shadowed && (shadow_wr_staged || shadow_update_err) && kind != UVM_PREDICT_READ) begin
+      `uvm_info(`gfn,
+                $sformatf("skip predict csr %s: due to shadow_reg_first_wr=%0b or update_err=%0b",
+                          get_name(), shadow_wr_staged, shadow_update_err), UVM_HIGH)
+      return;
+    end
+    super.do_predict(rw, kind, be);
+  endfunction
+
+  virtual function void reset(string kind = "HARD");
+    super.reset(kind);
+    if (is_shadowed) begin
+      shadow_update_err = 0;
+      shadow_wr_staged  = 0;
+      // in case reset is issued during shadowed writes
+      void'(atomic_shadow_wr.try_get(1));
+      void'(atomic_en_shadow_wr.try_get(1));
+      atomic_shadow_wr.put(1);
+      atomic_en_shadow_wr.put(1);
+    end
+  endfunction
 endclass
diff --git a/util/reggen/uvm_reg.sv.tpl b/util/reggen/uvm_reg.sv.tpl
index e2229ca..339324b 100644
--- a/util/reggen/uvm_reg.sv.tpl
+++ b/util/reggen/uvm_reg.sv.tpl
@@ -210,6 +210,7 @@
   reg_offset =  str(reg_width) + "'h" + "%x" % r.offset
   reg_wen = r.regwen
   reg_tags = r.tags
+  reg_shadowed = r.shadowed
 %>\
       ${reg_name} = ${gen_dv.rcname(block, r)}::type_id::create("${reg_name}");
       ${reg_name}.configure(.blk_parent(this));
@@ -220,6 +221,9 @@
 % if reg_wen:
       ${reg_wen}.add_locked_reg(${reg_name});
 % endif
+% if reg_shadowed:
+      ${reg_name}.set_is_shadowed();
+% endif
 % if reg_tags:
       // create register tags
 % for reg_tag in reg_tags: