Add springbok vector unit test for vmerge
Add a simple vector unit test for vmerge operations to verify that vmerge.vim and vmerge.vvm work as expected.
Test results:
- vmerge.vim and vmerge.vvm tests passed on QEMU and Renode.
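
Both forms take the element from vs2 where the corresponding v0 mask bit
is clear, and the merge source (a 5-bit signed immediate for vmerge.vim,
the vs1 element for vmerge.vvm) where it is set. A minimal scalar sketch
of the behavior the tests check against (the function name is
illustrative only, not part of this change):

    #include <cstddef>
    #include <cstdint>

    // Illustrative scalar model, not part of the test code:
    // vd[i] = (v0 mask bit i set) ? merge_src[i] : vs2[i]
    void vmerge_ref(uint8_t* vd, const uint8_t* vs2,
                    const uint8_t* merge_src, const uint8_t* mask,
                    size_t vl) {
      for (size_t i = 0; i < vl; ++i) {
        const bool bit = (mask[i / 8] >> (i % 8)) & 1;
        vd[i] = bit ? merge_src[i] : vs2[i];
      }
    }
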
Change-Id: Id6108ed7577cdc159de2b602ec2b806a39f7e7f5
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 4214281..a1087c8 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -518,3 +518,12 @@
LINKOPTS
-Xlinker --defsym=__itcm_length__=128K
)
+
+vec_cc_test(
+ NAME
+ vmerge_test
+ SRCS
+ vmerge_test.cpp
+ LINKOPTS
+ -Xlinker --defsym=__itcm_length__=128K
+)
diff --git a/tests/vmerge_test.cpp b/tests/vmerge_test.cpp
new file mode 100644
index 0000000..a8f741f
--- /dev/null
+++ b/tests/vmerge_test.cpp
@@ -0,0 +1,111 @@
+#include <stdlib.h>
+
+#include <cstdint>
+#include <tuple>
+
+#include "pw_unit_test/framework.h"
+#include "test_v_helpers.h"
+
+// Test for vmerge.vim and vmerge.vvm instructions.
+namespace vmerge_test {
+namespace {
+
+using namespace test_v_helpers;
+
+uint8_t src_vector_1[MAXVL_BYTES];
+uint8_t src_vector_2[MAXVL_BYTES];
+uint8_t src_mask_vector[MAXVL_BYTES];
+uint8_t dest_vector[MAXVL_BYTES];
+
+class VmergeTest : public ::testing::Test {
+ protected:
+ void SetUp() override { zero_vector_registers(); }
+ void TearDown() override { zero_vector_registers(); }
+};
+
+TEST_F(VmergeTest, vmerge_vim) {
+ for (int i = 0; i < AVL_COUNT; i++) {
+ int32_t avl = AVLS[i];
+ int vlmax;
+ int vl;
+    // For non-narrowing instructions, all vectors use the same element type.
+ std::tie(vlmax, vl) = vector_test_setup<uint8_t>(
+ VLMUL::LMUL_M1, avl, {dest_vector, src_vector_2, src_mask_vector});
+ if (avl > vlmax) {
+ continue;
+ }
+
+ fill_random_vector<uint8_t>(src_vector_2, vl);
+ fill_random_vector<uint8_t>(src_mask_vector, vl);
+ const int8_t test_val = 12;
+
+ // Load vector registers
+ __asm__ volatile("vle8.v v16, (%0)" : : "r"(src_vector_2));
+ __asm__ volatile("vle8.v v0, (%0)" : : "r"(src_mask_vector));
+
+ // Run target instruction
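+    // vmerge.vim semantics: vd[i] = v0.mask[i] ? simm5 : vs2[i].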
+ __asm__ volatile("vmerge.vim v24, v16, %[IMM], v0" ::[IMM] "n"(test_val));
+
+ // Store result vector register
+ __asm__ volatile("vse8.v v24, (%0)" : : "r"(dest_vector));
+
+ // Check vector elements
+    constexpr uint32_t kShift = 3;  // log2(8): 8 mask bits per mask byte
+ for (int idx = 0; idx < vl; idx++) {
+ uint32_t mask_idx = idx >> kShift;
+ uint32_t mask_pos = idx & ~(mask_idx << kShift);
+ if (src_mask_vector[mask_idx] & (1 << mask_pos)) {
+ ASSERT_EQ(dest_vector[idx], test_val);
+ } else {
+ ASSERT_EQ(dest_vector[idx], src_vector_2[idx]);
+ }
+ }
+ }
+}
+
+TEST_F(VmergeTest, vmerge_vvm) {
+ for (int i = 0; i < AVL_COUNT; i++) {
+ int32_t avl = AVLS[i];
+ int vlmax;
+ int vl;
+    // For non-narrowing instructions, all vectors use the same element type.
+ std::tie(vlmax, vl) = vector_test_setup<uint8_t>(
+ VLMUL::LMUL_M1, avl,
+ {dest_vector, src_vector_1, src_vector_2, src_mask_vector});
+ if (avl > vlmax) {
+ continue;
+ }
+
+ fill_random_vector<uint8_t>(src_vector_1, vl);
+ fill_random_vector<uint8_t>(src_vector_2, vl);
+ fill_random_vector<uint8_t>(src_mask_vector, vl);
+
+ // Load vector registers
+ __asm__ volatile("vle8.v v8, (%0)" : : "r"(src_vector_1));
+ __asm__ volatile("vle8.v v16, (%0)" : : "r"(src_vector_2));
+ __asm__ volatile("vle8.v v0, (%0)" : : "r"(src_mask_vector));
+
+ // Run target instruction
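+    // vmerge.vvm semantics: vd[i] = v0.mask[i] ? vs1[i] : vs2[i].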
+ __asm__ volatile("vmerge.vvm v24, v16, v8, v0");
+
+ // Store result vector register
+ __asm__ volatile("vse8.v v24, (%0)" : : "r"(dest_vector));
+
+ // Check vector elements
+    constexpr uint32_t kShift = 3;  // log2(8): 8 mask bits per mask byte
+ for (int idx = 0; idx < vl; idx++) {
+ uint32_t mask_idx = idx >> kShift;
+ uint32_t mask_pos = idx & ~(mask_idx << kShift);
+ if (src_mask_vector[mask_idx] & (1 << mask_pos)) {
+ ASSERT_EQ(dest_vector[idx], src_vector_1[idx]);
+ } else {
+ ASSERT_EQ(dest_vector[idx], src_vector_2[idx]);
+ }
+ }
+ }
+}
+
+} // namespace
+} // namespace vmerge_test