sw/vec: Update RVV intrinsic usage to the latest RVV intrinsics spec

Needed when we update the riscv32-unknown-elf-gdb toolchain: the newer
compiler follows the current RVV intrinsics spec, which prefixes all
vector intrinsics with __riscv_.
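
As a minimal sketch of the rename pattern applied throughout (assuming a
toolchain whose <riscv_vector.h> ships the prefixed names; the foo()
wrapper below is illustrative only, not part of this change):

    #include <riscv_vector.h>

    // Old spelling (pre-prefix intrinsics spec):
    //   size_t vl = vsetvl_e8m1(avl);
    // New spelling used in this change:
    size_t foo(size_t avl) {
      return __riscv_vsetvl_e8m1(avl);  // vl for SEW=8, LMUL=1
    }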

Change-Id: I0276ee3450be644203d95680bfa781b6f20ececa
diff --git a/pw_unit_test_demo/pw_unit_test_demo.cpp b/pw_unit_test_demo/pw_unit_test_demo.cpp
index d7dc3d3..706dacb 100644
--- a/pw_unit_test_demo/pw_unit_test_demo.cpp
+++ b/pw_unit_test_demo/pw_unit_test_demo.cpp
@@ -10,7 +10,7 @@
 namespace {
 
 TEST(SpringbokTest, ExceptBool) {
-  size_t vl = vsetvl_e8m1(32);
+  size_t vl = __riscv_vsetvl_e8m1(32);
   EXPECT_TRUE(vl == 32);
 }
 
diff --git a/test_v_helpers/test_v_helpers.cpp b/test_v_helpers/test_v_helpers.cpp
index b539448..90aa4d2 100644
--- a/test_v_helpers/test_v_helpers.cpp
+++ b/test_v_helpers/test_v_helpers.cpp
@@ -25,13 +25,13 @@
     case VLMUL::LMUL_M1:
       switch (sew) {
         case VSEW::SEW_E8:
-          return vsetvl_e8m1(avl);
+          return __riscv_vsetvl_e8m1(avl);
           break;
         case VSEW::SEW_E16:
-          return vsetvl_e16m1(avl);
+          return __riscv_vsetvl_e16m1(avl);
           break;
         case VSEW::SEW_E32:
-          return vsetvl_e32m1(avl);
+          return __riscv_vsetvl_e32m1(avl);
           break;
         default:
           return -1;
@@ -41,13 +41,13 @@
     case VLMUL::LMUL_M2:
       switch (sew) {
         case VSEW::SEW_E8:
-          return vsetvl_e8m2(avl);
+          return __riscv_vsetvl_e8m2(avl);
           break;
         case VSEW::SEW_E16:
-          return vsetvl_e16m2(avl);
+          return __riscv_vsetvl_e16m2(avl);
           break;
         case VSEW::SEW_E32:
-          return vsetvl_e32m2(avl);
+          return __riscv_vsetvl_e32m2(avl);
           break;
         default:
           return -1;
@@ -56,13 +56,13 @@
     case VLMUL::LMUL_M4:
       switch (sew) {
         case VSEW::SEW_E8:
-          return vsetvl_e8m4(avl);
+          return __riscv_vsetvl_e8m4(avl);
           break;
         case VSEW::SEW_E16:
-          return vsetvl_e16m4(avl);
+          return __riscv_vsetvl_e16m4(avl);
           break;
         case VSEW::SEW_E32:
-          return vsetvl_e32m4(avl);
+          return __riscv_vsetvl_e32m4(avl);
           break;
         default:
           return -1;
@@ -72,13 +72,13 @@
     case VLMUL::LMUL_M8:
       switch (sew) {
         case VSEW::SEW_E8:
-          return vsetvl_e8m8(avl);
+          return __riscv_vsetvl_e8m8(avl);
           break;
         case VSEW::SEW_E16:
-          return vsetvl_e16m8(avl);
+          return __riscv_vsetvl_e16m8(avl);
           break;
         case VSEW::SEW_E32:
-          return vsetvl_e32m8(avl);
+          return __riscv_vsetvl_e32m8(avl);
           break;
         default:
           return -1;
@@ -88,13 +88,13 @@
     case VLMUL::LMUL_MF2:
       switch (sew) {
         case VSEW::SEW_E8:
-          return vsetvl_e8mf2(avl);
+          return __riscv_vsetvl_e8mf2(avl);
           break;
         case VSEW::SEW_E16:
-          return vsetvl_e16mf2(avl);
+          return __riscv_vsetvl_e16mf2(avl);
           break;
         case VSEW::SEW_E32:
-          return vsetvl_e32mf2(avl);
+          return __riscv_vsetvl_e32mf2(avl);
           break;
         default:
           return -1;
@@ -104,10 +104,10 @@
     case VLMUL::LMUL_MF4:
       switch (sew) {
         case VSEW::SEW_E8:
-          return vsetvl_e8mf4(avl);
+          return __riscv_vsetvl_e8mf4(avl);
           break;
         case VSEW::SEW_E16:
-          return vsetvl_e16mf4(avl);
+          return __riscv_vsetvl_e16mf4(avl);
           break;
         default:
           return -1;
@@ -117,7 +117,7 @@
     case VLMUL::LMUL_MF8:
       switch (sew) {
         case VSEW::SEW_E8:
-          return vsetvl_e8mf8(avl);
+          return __riscv_vsetvl_e8mf8(avl);
           break;
         default:
           return -1;
@@ -135,13 +135,13 @@
     case VLMUL::LMUL_M1:
       switch (sew) {
         case VSEW::SEW_E8:
-          return vsetvlmax_e8m1();
+          return __riscv_vsetvlmax_e8m1();
           break;
         case VSEW::SEW_E16:
-          return vsetvlmax_e16m1();
+          return __riscv_vsetvlmax_e16m1();
           break;
         case VSEW::SEW_E32:
-          return vsetvlmax_e32m1();
+          return __riscv_vsetvlmax_e32m1();
           break;
         default:
           return -1;
@@ -151,13 +151,13 @@
     case VLMUL::LMUL_M2:
       switch (sew) {
         case VSEW::SEW_E8:
-          return vsetvlmax_e8m2();
+          return __riscv_vsetvlmax_e8m2();
           break;
         case VSEW::SEW_E16:
-          return vsetvlmax_e16m2();
+          return __riscv_vsetvlmax_e16m2();
           break;
         case VSEW::SEW_E32:
-          return vsetvlmax_e32m2();
+          return __riscv_vsetvlmax_e32m2();
           break;
         default:
           return -1;
@@ -166,13 +166,13 @@
     case VLMUL::LMUL_M4:
       switch (sew) {
         case VSEW::SEW_E8:
-          return vsetvlmax_e8m4();
+          return __riscv_vsetvlmax_e8m4();
           break;
         case VSEW::SEW_E16:
-          return vsetvlmax_e16m4();
+          return __riscv_vsetvlmax_e16m4();
           break;
         case VSEW::SEW_E32:
-          return vsetvlmax_e32m4();
+          return __riscv_vsetvlmax_e32m4();
           break;
         default:
           return -1;
@@ -182,13 +182,13 @@
     case VLMUL::LMUL_M8:
       switch (sew) {
         case VSEW::SEW_E8:
-          return vsetvlmax_e8m8();
+          return __riscv_vsetvlmax_e8m8();
           break;
         case VSEW::SEW_E16:
-          return vsetvlmax_e16m8();
+          return __riscv_vsetvlmax_e16m8();
           break;
         case VSEW::SEW_E32:
-          return vsetvlmax_e32m8();
+          return __riscv_vsetvlmax_e32m8();
           break;
         default:
           return -1;
@@ -198,13 +198,13 @@
     case VLMUL::LMUL_MF2:
       switch (sew) {
         case VSEW::SEW_E8:
-          return vsetvlmax_e8mf2();
+          return __riscv_vsetvlmax_e8mf2();
           break;
         case VSEW::SEW_E16:
-          return vsetvlmax_e16mf2();
+          return __riscv_vsetvlmax_e16mf2();
           break;
         case VSEW::SEW_E32:
-          return vsetvlmax_e32mf2();
+          return __riscv_vsetvlmax_e32mf2();
           break;
         default:
           return -1;
@@ -213,10 +213,10 @@
     case VLMUL::LMUL_MF4:
       switch (sew) {
         case VSEW::SEW_E8:
-          return vsetvlmax_e8mf4();
+          return __riscv_vsetvlmax_e8mf4();
           break;
         case VSEW::SEW_E16:
-          return vsetvlmax_e16mf4();
+          return __riscv_vsetvlmax_e16mf4();
           break;
         default:
           return -1;
@@ -225,7 +225,7 @@
     case VLMUL::LMUL_MF8:
       switch (sew) {
         case VSEW::SEW_E8:
-          return vsetvlmax_e8mf8();
+          return __riscv_vsetvlmax_e8mf8();
           break;
         default:
           return -1;
diff --git a/tests/vmv_test.cpp b/tests/vmv_test.cpp
index 674d1bd..52e5760 100644
--- a/tests/vmv_test.cpp
+++ b/tests/vmv_test.cpp
@@ -52,34 +52,37 @@
     if (avl > vlmax) {
       continue;
     }
-    vint8m1_t vec1 = vle8_v_i8m1(reinterpret_cast<int8_t *>(test_vector_1), vl);
-    vint8m1_t vec2 = vmv_v_v_i8m1(vec1, vl);
+    vint8m1_t vec1 =
+        __riscv_vle8_v_i8m1(reinterpret_cast<int8_t *>(test_vector_1), vl);
+    vint8m1_t vec2 = __riscv_vmv_v_v_i8m1(vec1, vl);
     int8_t *ptr_vec_2 = reinterpret_cast<int8_t *>(reference_vector_1);
-    vse8_v_i8m1(ptr_vec_2, vec2, vl);
+    __riscv_vse8_v_i8m1(ptr_vec_2, vec2, vl);
     assert_vec_elem_eq<int8_t>(vlmax, test_vector_1, reference_vector_1);
   }
 }
 
-#define DEFINE_TEST_VMV_V_V_I_INTRINSIC(_SEW_, _LMUL_)                         \
-  TEST_F(VmvTest, DISABLED_intrinsic_vmv_v_v_i##_SEW_##m##_LMUL_) {            \
-    for (int i = 0; i < AVL_COUNT; i++) {                                      \
-      int32_t avl = AVLS[i];                                                   \
-      int vlmax;                                                               \
-      int vl;                                                                  \
-      std::tie(vlmax, vl) = vector_test_setup<int##_SEW_##_t>(                 \
-          VLMUL::LMUL_M##_LMUL_, avl, {test_vector_1, reference_vector_1});    \
-      if (avl > vlmax) {                                                       \
-        continue;                                                              \
-      }                                                                        \
-      vint##_SEW_##m##_LMUL_##_t vec1 = vle##_SEW_##_v_i##_SEW_##m##_LMUL_(    \
-          reinterpret_cast<int##_SEW_##_t *>(test_vector_1), vl);              \
-      vint##_SEW_##m##_LMUL_##_t vec2 = vmv_v_v_i##_SEW_##m##_LMUL_(vec1, vl); \
-      int##_SEW_##_t *ptr_vec_2 =                                              \
-          reinterpret_cast<int##_SEW_##_t *>(reference_vector_1);              \
-      vse##_SEW_##_v_i##_SEW_##m##_LMUL_(ptr_vec_2, vec2, vl);                 \
-      assert_vec_elem_eq<int##_SEW_##_t>(vlmax, test_vector_1,                 \
-                                         reference_vector_1);                  \
-    }                                                                          \
+#define DEFINE_TEST_VMV_V_V_I_INTRINSIC(_SEW_, _LMUL_)                      \
+  TEST_F(VmvTest, DISABLED_intrinsic_vmv_v_v_i##_SEW_##m##_LMUL_) {         \
+    for (int i = 0; i < AVL_COUNT; i++) {                                   \
+      int32_t avl = AVLS[i];                                                \
+      int vlmax;                                                            \
+      int vl;                                                               \
+      std::tie(vlmax, vl) = vector_test_setup<int##_SEW_##_t>(              \
+          VLMUL::LMUL_M##_LMUL_, avl, {test_vector_1, reference_vector_1}); \
+      if (avl > vlmax) {                                                    \
+        continue;                                                           \
+      }                                                                     \
+      vint##_SEW_##m##_LMUL_##_t vec1 =                                     \
+          __riscv_vle##_SEW_##_v_i##_SEW_##m##_LMUL_(                       \
+              reinterpret_cast<int##_SEW_##_t *>(test_vector_1), vl);       \
+      vint##_SEW_##m##_LMUL_##_t vec2 =                                     \
+          __riscv_vmv_v_v_i##_SEW_##m##_LMUL_(vec1, vl);                    \
+      int##_SEW_##_t *ptr_vec_2 =                                           \
+          reinterpret_cast<int##_SEW_##_t *>(reference_vector_1);           \
+      __riscv_vse##_SEW_##_v_i##_SEW_##m##_LMUL_(ptr_vec_2, vec2, vl);      \
+      assert_vec_elem_eq<int##_SEW_##_t>(vlmax, test_vector_1,              \
+                                         reference_vector_1);               \
+    }                                                                       \
   }
 
 DEFINE_TEST_VMV_V_V_I_INTRINSIC(8, 1)