[dv] Fix unmapped test and add partial mem write support

Add partial mem write support to the sequence to match the original
plan, but no IP exercises it yet (hmac supports partial mem writes,
but its mem can't be read back for checking).

Signed-off-by: Weicai Yang <weicai@google.com>
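
For reference, when an IP eventually supports partial memory writes, the opt-in is
expected to be a single flag on its environment config, since the sequence below keys
off `cfg.en_mem_byte_write` and otherwise falls back to a full-word mask. A hedged
sketch, assuming the flag lives on `cip_base_env_cfg` (as the `cfg` handle in the
patch suggests) and using made-up `foo_*` names:

```systemverilog
// Hedged sketch, not part of this patch: a block-level env cfg opting into
// partial (byte-masked) memory writes. `foo_env_cfg` and `foo_reg_block` are
// hypothetical; `en_mem_byte_write` is the flag referenced in the patch below.
class foo_env_cfg extends cip_base_env_cfg #(.RAL_T(foo_reg_block));
  `uvm_object_utils(foo_env_cfg)

  function new (string name = "");
    super.new(name);
    en_mem_byte_write = 1'b1;  // this block's memory accepts partial writes
  endfunction
endclass
```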
diff --git a/hw/dv/data/tests/mem_tests.hjson b/hw/dv/data/tests/mem_tests.hjson
index b569540..e758199 100644
--- a/hw/dv/data/tests/mem_tests.hjson
+++ b/hw/dv/data/tests/mem_tests.hjson
@@ -15,9 +15,9 @@
     }
 
     {
-      name: "{name}_mem_partial_read"
+      name: "{name}_mem_partial_access"
       en_run_modes: ["mem_tests_mode"]
-      run_opts: ["+run_mem_partial_read"]
+      run_opts: ["+run_mem_partial_access"]
     }
   ]
 
@@ -25,7 +25,7 @@
     {
       name: sw_access
       tests: ["{name}_mem_walk",
-              "{name}_mem_partial_read"]
+              "{name}_mem_partial_access"]
     }
   ]
 }
diff --git a/hw/dv/sv/cip_lib/cip_base_vseq.sv b/hw/dv/sv/cip_lib/cip_base_vseq.sv
index 89c9644..afddba0 100644
--- a/hw/dv/sv/cip_lib/cip_base_vseq.sv
+++ b/hw/dv/sv/cip_lib/cip_base_vseq.sv
@@ -242,7 +242,7 @@
       "tl_errors":                  run_tl_errors_vseq(num_times);
       "stress_all_with_rand_reset": run_stress_all_with_rand_reset_vseq(num_times);
       "same_csr_outstanding":       run_same_csr_outstanding_vseq(num_times);
-      "mem_partial_read":           run_mem_partial_read_vseq(num_times);
+      "mem_partial_access":         run_mem_partial_access_vseq(num_times);
       default:                      run_csr_vseq_wrapper(num_times);
     endcase
   endtask
@@ -444,12 +444,13 @@
   endtask
 
   // test partial mem read with non-blocking random read/write
-  virtual task run_mem_partial_read_vseq(num_times);
+  virtual task run_mem_partial_access_vseq(num_times);
       bit [TL_DW-1:0]     exp_mem[tl_addr_t];
       tl_addr_t           addr_q[$];
       int                 num_words;
       bit [TL_AW-1:0]     addr;
       bit [TL_DW-1:0]     data;
+      bit [TL_DBW-1:0]    mask;
 
       foreach (cfg.mem_ranges[i]) begin
         num_words += cfg.mem_ranges[i].end_addr - cfg.mem_ranges[i].start_addr;
@@ -465,22 +466,22 @@
                 addr inside {[cfg.mem_ranges[mem_idx].start_addr :
                               cfg.mem_ranges[mem_idx].end_addr]};)
             data = $urandom;
-            tl_access(.addr(addr), .write(1), .data(data), .blocking(0));
+            if (cfg.en_mem_byte_write) mask = get_rand_contiguous_mask();
+            else                       mask = '1;
+            tl_access(.addr(addr), .write(1), .data(data), .mask(mask), .blocking(0));
+
             addr[1:0] = 0;
             exp_mem[addr] = data;
             addr_q.push_back(addr);
           end
           // Randomly pick a previously written address for partial read.
           exp_mem.size > 0: begin // read
-            bit [TL_DBW-1:0] mask;
             bit [TL_SZW-1:0] size;
             bit [TL_DW-1:0]  compare_mask;
             // get all the programmed addresses and randomly pick one
             addr = addr_q[$urandom_range(0, addr_q.size - 1)];
+            mask = get_rand_contiguous_mask();
 
-            `DV_CHECK_STD_RANDOMIZE_WITH_FATAL(mask,
-                // mask must be contiguous, e.g. 'b1001, 'b1010 aren't allowed
-                $countones(mask ^ {mask[TL_DBW-2:0], 1'b0}) <= 2;)
             // calculate compare_mask which is data width wide
             foreach (mask[i]) compare_mask[i*8+:8] = {8{mask[i]}};
             tl_access(.addr(addr), .write(0), .data(data), .mask(mask), .compare_mask(compare_mask),
@@ -488,7 +489,6 @@
           end
         endcase
       end
-
   endtask
 
   virtual task run_alert_rsp_seq_nonblocking();
@@ -504,4 +504,12 @@
       join_none
     end
   endtask
+
+  // TLUL mask must be contiguous, e.g. 'b1001, 'b1010 aren't allowed
+  virtual function bit[TL_DBW-1:0] get_rand_contiguous_mask();
+    bit [TL_DBW-1:0] mask;
+    `DV_CHECK_STD_RANDOMIZE_WITH_FATAL(mask,
+                                       $countones(mask ^ {mask[TL_DBW-2:0], 1'b0}) <= 2;)
+    return mask;
+  endfunction
 endclass
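
The contiguity constraint behind the new `get_rand_contiguous_mask()` helper works by
XOR-ing the mask with a copy of itself shifted left by one bit: every 0/1 boundary in
the mask sets a bit in the XOR result, and a single contiguous run of ones has at most
two boundaries. A standalone sketch of that check, assuming the default 4-lane TL-UL
data bus (TL_DBW == 4):

```systemverilog
// Standalone illustration (not part of the patch) of the contiguity check used
// by get_rand_contiguous_mask(). Assumes a 32-bit TL-UL bus, i.e. TL_DBW == 4.
module contiguous_mask_demo;
  localparam int TL_DBW = 4;
  initial begin
    bit [TL_DBW-1:0] mask;
    bit              is_contiguous;
    for (int m = 0; m < (1 << TL_DBW); m++) begin
      mask = m[TL_DBW-1:0];
      // A contiguous run of 1s has at most two 0<->1 boundaries, so the XOR of
      // the mask with its left-shifted copy has at most two bits set.
      is_contiguous = ($countones(mask ^ {mask[TL_DBW-2:0], 1'b0}) <= 2);
      $display("mask=%b contiguous=%0d", mask, is_contiguous);  // e.g. 0110 -> 1, 1010 -> 0
    end
  end
endmodule
```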
diff --git a/hw/dv/sv/cip_lib/doc/index.md b/hw/dv/sv/cip_lib/doc/index.md
index 5668f78..cf3814d 100644
--- a/hw/dv/sv/cip_lib/doc/index.md
+++ b/hw/dv/sv/cip_lib/doc/index.md
@@ -204,7 +204,7 @@
 * **task run_same_csr_outstanding_vseq**: This task tests the same CSR with
   non-blocking accesses as the regular CSR sequences don't cover that due to
   limitation of uvm_reg.
-* **task run_mem_partial_read_vseq**: This task tests the partial read to the
+* **task run_mem_partial_access_vseq**: This task tests partial accesses to the
   memories by randomizing mask, size, and the 2 LSB bits of the address. It also runs
   with non-blocking access enabled.
   ```
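
For context on the compare step described above: the byte-enable mask is widened into
a data-width `compare_mask` so that only the byte lanes actually read are checked
against the expected data. A standalone sketch of that expansion, assuming the default
TL-UL widths (32-bit data, 4 byte lanes):

```systemverilog
// Standalone illustration (not part of the patch) of how the byte-enable mask
// is widened into the bit-level compare_mask used on partial reads.
module compare_mask_demo;
  localparam int TL_DW  = 32;
  localparam int TL_DBW = TL_DW / 8;
  initial begin
    bit [TL_DBW-1:0] mask;
    bit [TL_DW-1:0]  compare_mask;
    mask         = 4'b0110;      // read only byte lanes 1 and 2
    compare_mask = '0;
    // Replicate each byte-enable bit across the 8 data bits of its lane.
    foreach (mask[i]) compare_mask[i*8 +: 8] = {8{mask[i]}};
    $display("mask=%b compare_mask=%h", mask, compare_mask);  // expect 00ffff00
  end
endmodule
```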
diff --git a/hw/dv/tools/testplans/mem_testplan.hjson b/hw/dv/tools/testplans/mem_testplan.hjson
index f269dd9..1bc47dd 100644
--- a/hw/dv/tools/testplans/mem_testplan.hjson
+++ b/hw/dv/tools/testplans/mem_testplan.hjson
@@ -20,10 +20,10 @@
             Verify partial-accessibility of all memories in the design.
             - Do partial reads and writes into the memories and verify the outcome for
               correctness.
+            - Also test outstanding accesses to the memories
             '''
       milestone: V1
-      // mem_walk does partial writes, so we can reuse that test here
-      tests: ["{name}{intf}_mem_walk"]
+      tests: ["{name}{intf}_mem_partial_access"]
     }
     // TODO: add mem access with reset
   ]
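
On the "outstanding access" bullet added to the testplan: the sequence issues several
TL requests back to back with `blocking == 0`, so multiple transactions are in flight
at once, and then lets them drain before checking. A hedged sketch of that pattern,
assuming the `cip_base_vseq` context from the patch (and that
`csr_utils_pkg::wait_no_outstanding_access()` is available, as used elsewhere in dv):

```systemverilog
// Hedged sketch, not standalone-runnable (assumes the cip_base_vseq context):
// launch several non-blocking writes so they stay outstanding on the TL
// interface, then wait for all responses before any follow-up checks.
for (int i = 0; i < 10; i++) begin
  bit [TL_AW-1:0] addr = cfg.mem_ranges[0].start_addr + i * 4;
  tl_access(.addr(addr), .write(1), .data($urandom), .mask('1), .blocking(0));
end
// Assumption: this drain helper exists in csr_utils_pkg, as used elsewhere in dv.
csr_utils_pkg::wait_no_outstanding_access();
```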