Pull common functions/consts into test_v_helpers.h
Remove the vector zeroing function definition from the test files.
This function is already defined in `test_v_helpers.cpp`.
Pull common constants into `test_v_helpers.h`.
Pull templated functions into `test_v_helpers.h`.
Update the test files to use these shared definitions and adjust names accordingly.
Change-Id: I7aabadb50ed58e5cc6c8e6c5a5a62ba1daed49b1
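
For illustration, a minimal sketch (not part of this change) of how a test file might consume the shared helpers after this refactor. AVL_COUNT, AVLS, MAXVL_BYTES, VLMUL, and vector_test_setup() are assumed to come from `test_v_helpers.h` as shown in the diff below; the test and fixture names here are hypothetical:

#include <cstdint>
#include <tuple>

#include "pw_unit_test/framework.h"
#include "test_v_helpers.h"

namespace {

using namespace test_v_helpers;

uint8_t test_vector_1[MAXVL_BYTES];
uint8_t test_vector_2[MAXVL_BYTES];

TEST(SharedHelperSketch, SetupProgramsVlForEachAvl) {
  for (int i = 0; i < AVL_COUNT; i++) {
    int32_t avl = AVLS[i];
    int vlmax;
    int vl;
    // vector_test_setup() zeroes the vector registers, clears both test
    // buffers, and programs vtype/vl for the requested element type and LMUL.
    std::tie(vlmax, vl) = vector_test_setup<uint8_t>(
        VLMUL::LMUL_M1, avl, test_vector_1, test_vector_2);
    if (avl > vlmax) {
      continue;  // This AVL does not fit the configured VLEN/LMUL; skip it.
    }
    EXPECT_LE(vl, vlmax);
  }
}

}  // namespace
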
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 076516a..414ab1f 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -9,6 +9,7 @@
test_v_helpers.cpp)
target_include_directories(test_v_helpers PUBLIC include)
+target_link_libraries(test_v_helpers PUBLIC pw_unit_test)
target_compile_options(test_v_helpers PUBLIC
${VEC_DEFAULT_COPTS}
diff --git a/tests/include/test_v_helpers.h b/tests/include/test_v_helpers.h
index 095c1c0..05c4a4c 100644
--- a/tests/include/test_v_helpers.h
+++ b/tests/include/test_v_helpers.h
@@ -3,8 +3,17 @@
#include <stdint.h>
+#include <bit>
+#include <tuple>
+
+#include "pw_unit_test/framework.h"
+
namespace test_v_helpers {
+const int LMUL_MAX = 8;
+const int VLEN = 512;
+const int MAXVL_BYTES = VLEN * LMUL_MAX;
+
const int32_t AVLS[] = {1, 4, 3, 2, 16, 8, 5, 17,
32, 36, 64, 55, 100, 321, 256, 128,
512, 623, 1024, 1100, 1543, 2048, 3052, 4096,
@@ -61,6 +70,28 @@
}
}
+template <typename T>
+static std::tuple<int, int> vector_test_setup(VLMUL lmul, int32_t avl,
+ uint8_t *test_vector_1,
+ uint8_t *test_vector_2) {
+ // Clear all vector registers
+ zero_vector_registers();
+  // Zero both test vectors and determine vl and vlmax
+ uint32_t bw = std::__bit_width(sizeof(T));
+ VSEW sew = static_cast<VSEW>(bw - 1);
+ int vlmax = get_vsetvlmax_intrinsic(sew, lmul);
+ if (avl > vlmax) {
+ avl = vlmax;
+ }
+ memset(test_vector_1, 0, MAXVL_BYTES);
+ memset(test_vector_2, 0, MAXVL_BYTES);
+ int vl = set_vsetvl_intrinsic(sew, lmul, avl);
+
+ EXPECT_EQ(avl, vl);
+
+ return std::make_tuple(vlmax, vl);
+}
+
} // namespace test_v_helpers
#endif
diff --git a/tests/test_v_helpers.cpp b/tests/test_v_helpers.cpp
index e7dadff..b7efbcb 100644
--- a/tests/test_v_helpers.cpp
+++ b/tests/test_v_helpers.cpp
@@ -15,8 +15,8 @@
uint32_t vtype = get_vtype(sew, lmul, tail_agnostic, mask_agnostic);
uint32_t vl;
__asm__ volatile("vsetvl %[VL], %[AVL], %[VTYPE]"
- : [VL] "=r"(vl)
- : [AVL] "r"(avl), [VTYPE] "r"(vtype));
+ : [ VL ] "=r"(vl)
+ : [ AVL ] "r"(avl), [ VTYPE ] "r"(vtype));
return vl;
}
@@ -169,18 +169,18 @@
switch (sew) {
case VSEW::SEW_E8:
__asm__ volatile("vsetvli %[VL], %[AVL], e8, m1, tu, mu"
- : [VL] "=r"(vl)
- : [AVL] "r"(avl));
+ : [ VL ] "=r"(vl)
+ : [ AVL ] "r"(avl));
break;
case VSEW::SEW_E16:
__asm__ volatile("vsetvli %[VL], %[AVL], e16, m1, tu, mu"
- : [VL] "=r"(vl)
- : [AVL] "r"(avl));
+ : [ VL ] "=r"(vl)
+ : [ AVL ] "r"(avl));
break;
case VSEW::SEW_E32:
__asm__ volatile("vsetvli %[VL], %[AVL], e32, m1, tu, mu"
- : [VL] "=r"(vl)
- : [AVL] "r"(avl));
+ : [ VL ] "=r"(vl)
+ : [ AVL ] "r"(avl));
break;
default:
return 0;
@@ -190,18 +190,18 @@
switch (sew) {
case VSEW::SEW_E8:
__asm__ volatile("vsetvli %[VL], %[AVL], e8, m2, tu, mu"
- : [VL] "=r"(vl)
- : [AVL] "r"(avl));
+ : [ VL ] "=r"(vl)
+ : [ AVL ] "r"(avl));
break;
case VSEW::SEW_E16:
__asm__ volatile("vsetvli %[VL], %[AVL], e16, m2, tu, mu"
- : [VL] "=r"(vl)
- : [AVL] "r"(avl));
+ : [ VL ] "=r"(vl)
+ : [ AVL ] "r"(avl));
break;
case VSEW::SEW_E32:
__asm__ volatile("vsetvli %[VL], %[AVL], e32, m2, tu, mu"
- : [VL] "=r"(vl)
- : [AVL] "r"(avl));
+ : [ VL ] "=r"(vl)
+ : [ AVL ] "r"(avl));
break;
default:
return 0;
@@ -211,18 +211,18 @@
switch (sew) {
case VSEW::SEW_E8:
__asm__ volatile("vsetvli %[VL], %[AVL], e8, m4, tu, mu"
- : [VL] "=r"(vl)
- : [AVL] "r"(avl));
+ : [ VL ] "=r"(vl)
+ : [ AVL ] "r"(avl));
break;
case VSEW::SEW_E16:
__asm__ volatile("vsetvli %[VL], %[AVL], e16, m4, tu, mu"
- : [VL] "=r"(vl)
- : [AVL] "r"(avl));
+ : [ VL ] "=r"(vl)
+ : [ AVL ] "r"(avl));
break;
case VSEW::SEW_E32:
__asm__ volatile("vsetvli %[VL], %[AVL], e32, m4, tu, mu"
- : [VL] "=r"(vl)
- : [AVL] "r"(avl));
+ : [ VL ] "=r"(vl)
+ : [ AVL ] "r"(avl));
break;
default:
return 0;
@@ -232,18 +232,18 @@
switch (sew) {
case VSEW::SEW_E8:
__asm__ volatile("vsetvli %[VL], %[AVL], e8, m8, tu, mu"
- : [VL] "=r"(vl)
- : [AVL] "r"(avl));
+ : [ VL ] "=r"(vl)
+ : [ AVL ] "r"(avl));
break;
case VSEW::SEW_E16:
__asm__ volatile("vsetvli %[VL], %[AVL], e16, m8, tu, mu"
- : [VL] "=r"(vl)
- : [AVL] "r"(avl));
+ : [ VL ] "=r"(vl)
+ : [ AVL ] "r"(avl));
break;
case VSEW::SEW_E32:
__asm__ volatile("vsetvli %[VL], %[AVL], e32, m8, tu, mu"
- : [VL] "=r"(vl)
- : [AVL] "r"(avl));
+ : [ VL ] "=r"(vl)
+ : [ AVL ] "r"(avl));
break;
default:
return 0;
diff --git a/tests/vadd_vi_test.cpp b/tests/vadd_vi_test.cpp
index 104f5df..ed2b085 100644
--- a/tests/vadd_vi_test.cpp
+++ b/tests/vadd_vi_test.cpp
@@ -12,45 +12,11 @@
namespace vadd_vi_test {
namespace {
-const int LMUL_MAX = 8;
-const int VLEN = 512;
-const int MAXVL_BYTES = VLEN / sizeof(uint8_t) * LMUL_MAX;
-
using namespace test_v_helpers;
uint8_t test_vector_1[MAXVL_BYTES];
uint8_t test_vector_2[MAXVL_BYTES];
-static void zero_vector_registers() {
- // Clear all vector registers
- int vlmax = get_vsetvlmax_intrinsic(VSEW::SEW_E32, VLMUL::LMUL_M8);
- set_vsetvl_intrinsic(VSEW::SEW_E32, VLMUL::LMUL_M8, vlmax);
- __asm__ volatile("vmv.v.i v0, 0");
- __asm__ volatile("vmv.v.i v8, 0");
- __asm__ volatile("vmv.v.i v16, 0");
- __asm__ volatile("vmv.v.i v24, 0");
-}
-
-template <typename T>
-static std::tuple<int, int> vadd_vi_test_setup(VLMUL lmul, int32_t avl) {
- // Clear all vector registers
- zero_vector_registers();
-
- // Initialize test_vector_1 and determine vl, vlmax
- uint32_t bw = std::__bit_width(sizeof(T));
- VSEW sew = static_cast<VSEW>(bw - 1);
- int vlmax = get_vsetvlmax_intrinsic(sew, lmul);
- if (avl > vlmax) {
- avl = vlmax;
- }
- memset(test_vector_1, 0, MAXVL_BYTES);
- memset(test_vector_2, 0, MAXVL_BYTES);
- int vl = set_vsetvl_intrinsic(sew, lmul, avl);
- EXPECT_EQ(avl, vl);
-
- return std::make_tuple(vlmax, vl);
-}
-
class VaddViTest : public ::testing::Test {
protected:
void SetUp() override { zero_vector_registers(); }
@@ -62,7 +28,8 @@
int32_t avl = AVLS[i];
int vlmax;
int vl;
- std::tie(vlmax, vl) = vadd_vi_test_setup<uint8_t>(VLMUL::LMUL_M1, avl);
+ std::tie(vlmax, vl) = vector_test_setup<uint8_t>(
+ VLMUL::LMUL_M1, avl, test_vector_1, test_vector_2);
if (avl > vlmax) {
continue;
}
@@ -86,8 +53,8 @@
int32_t avl = AVLS[i]; \
int vlmax; \
int vl; \
- std::tie(vlmax, vl) = \
- vadd_vi_test_setup<int##_SEW_##_t>(VLMUL::LMUL_M##_LMUL_, avl); \
+ std::tie(vlmax, vl) = vector_test_setup<int##_SEW_##_t>( \
+ VLMUL::LMUL_M##_LMUL_, avl, test_vector_1, test_vector_2); \
if (avl > vlmax) { \
continue; \
} \
diff --git a/tests/vadd_vv_test.cpp b/tests/vadd_vv_test.cpp
index 7e36624..36141f6 100644
--- a/tests/vadd_vv_test.cpp
+++ b/tests/vadd_vv_test.cpp
@@ -13,45 +13,11 @@
namespace vadd_vv_test {
namespace {
-const int LMUL_MAX = 8;
-const int VLEN = 512;
-const int MAXVL_BYTES = VLEN / sizeof(uint8_t) * LMUL_MAX;
-
using namespace test_v_helpers;
uint8_t test_vector_1[MAXVL_BYTES];
uint8_t test_vector_2[MAXVL_BYTES];
-static void zero_vector_registers() {
- // Clear all vector registers
- int vlmax = get_vsetvlmax_intrinsic(VSEW::SEW_E32, VLMUL::LMUL_M8);
- set_vsetvl_intrinsic(VSEW::SEW_E32, VLMUL::LMUL_M8, vlmax);
- __asm__ volatile("vmv.v.i v0, 0");
- __asm__ volatile("vmv.v.i v8, 0");
- __asm__ volatile("vmv.v.i v16, 0");
- __asm__ volatile("vmv.v.i v24, 0");
-}
-
-template <typename T>
-static std::tuple<int, int> vadd_vv_test_setup(VLMUL lmul, int32_t avl) {
- // Clear all vector registers
- zero_vector_registers();
-
- // Initialize test_vector_1 and determine vl, vlmax
- uint32_t bw = std::__bit_width(sizeof(T));
- VSEW sew = static_cast<VSEW>(bw - 1);
- int vlmax = get_vsetvlmax_intrinsic(sew, lmul);
- if (avl > vlmax) {
- avl = vlmax;
- }
- memset(test_vector_1, 0, MAXVL_BYTES);
- memset(test_vector_2, 0, MAXVL_BYTES);
- int vl = set_vsetvl_intrinsic(sew, lmul, avl);
- EXPECT_EQ(avl, vl);
-
- return std::make_tuple(vlmax, vl);
-}
-
class VaddVxTest : public ::testing::Test {
protected:
void SetUp() override { zero_vector_registers(); }
@@ -63,7 +29,8 @@
int32_t avl = AVLS[i];
int vlmax;
int vl;
- std::tie(vlmax, vl) = vadd_vv_test_setup<uint8_t>(VLMUL::LMUL_M1, avl);
+ std::tie(vlmax, vl) = vector_test_setup<uint8_t>(
+ VLMUL::LMUL_M1, avl, test_vector_1, test_vector_2);
if (avl > vlmax) {
continue;
}
@@ -92,8 +59,8 @@
int32_t avl = AVLS[i]; \
int vlmax; \
int vl; \
- std::tie(vlmax, vl) = \
- vadd_vv_test_setup<int##_SEW_##_t>(VLMUL::LMUL_M##_LMUL_, avl); \
+ std::tie(vlmax, vl) = vector_test_setup<int##_SEW_##_t>( \
+ VLMUL::LMUL_M##_LMUL_, avl, test_vector_1, test_vector_2); \
if (avl > vlmax) { \
continue; \
} \
diff --git a/tests/vadd_vx_test.cpp b/tests/vadd_vx_test.cpp
index 03216e5..5b07466 100644
--- a/tests/vadd_vx_test.cpp
+++ b/tests/vadd_vx_test.cpp
@@ -13,45 +13,11 @@
namespace vadd_vx_test {
namespace {
-const int LMUL_MAX = 8;
-const int VLEN = 512;
-const int MAXVL_BYTES = VLEN / sizeof(uint8_t) * LMUL_MAX;
-
using namespace test_v_helpers;
uint8_t test_vector_1[MAXVL_BYTES];
uint8_t test_vector_2[MAXVL_BYTES];
-static void zero_vector_registers() {
- // Clear all vector registers
- int vlmax = get_vsetvlmax_intrinsic(VSEW::SEW_E32, VLMUL::LMUL_M8);
- set_vsetvl_intrinsic(VSEW::SEW_E32, VLMUL::LMUL_M8, vlmax);
- __asm__ volatile("vmv.v.i v0, 0");
- __asm__ volatile("vmv.v.i v8, 0");
- __asm__ volatile("vmv.v.i v16, 0");
- __asm__ volatile("vmv.v.i v24, 0");
-}
-
-template <typename T>
-static std::tuple<int, int> vadd_vx_test_setup(VLMUL lmul, int32_t avl) {
- // Clear all vector registers
- zero_vector_registers();
-
- // Initialize test_vector_1 and determine vl, vlmax
- uint32_t bw = std::__bit_width(sizeof(T));
- VSEW sew = static_cast<VSEW>(bw - 1);
- int vlmax = get_vsetvlmax_intrinsic(sew, lmul);
- if (avl > vlmax) {
- avl = vlmax;
- }
- memset(test_vector_1, 0, MAXVL_BYTES);
- memset(test_vector_2, 0, MAXVL_BYTES);
- int vl = set_vsetvl_intrinsic(sew, lmul, avl);
- EXPECT_EQ(avl, vl);
-
- return std::make_tuple(vlmax, vl);
-}
-
class VaddVxTest : public ::testing::Test {
protected:
void SetUp() override { zero_vector_registers(); }
@@ -63,7 +29,8 @@
int32_t avl = AVLS[i];
int vlmax;
int vl;
- std::tie(vlmax, vl) = vadd_vx_test_setup<uint8_t>(VLMUL::LMUL_M1, avl);
+ std::tie(vlmax, vl) = vector_test_setup<uint8_t>(
+ VLMUL::LMUL_M1, avl, test_vector_1, test_vector_2);
if (avl > vlmax) {
continue;
}
@@ -85,8 +52,8 @@
int32_t avl = AVLS[i]; \
int vlmax; \
int vl; \
- std::tie(vlmax, vl) = \
- vadd_vx_test_setup<int##_SEW_##_t>(VLMUL::LMUL_M##_LMUL_, avl); \
+ std::tie(vlmax, vl) = vector_test_setup<int##_SEW_##_t>( \
+ VLMUL::LMUL_M##_LMUL_, avl, test_vector_1, test_vector_2); \
if (avl > vlmax) { \
continue; \
} \
diff --git a/tests/vmv_test.cpp b/tests/vmv_test.cpp
index ff58a2f..807cbd9 100644
--- a/tests/vmv_test.cpp
+++ b/tests/vmv_test.cpp
@@ -12,60 +12,24 @@
namespace vmv_test {
namespace {
-const int LMUL_MAX = 8;
-const int VLEN = 512;
-const int MAXVL_BYTES = VLEN / sizeof(uint8_t) * LMUL_MAX;
-
using namespace test_v_helpers;
uint8_t test_vector_1[MAXVL_BYTES];
uint8_t test_vector_2[MAXVL_BYTES];
-
-template <typename T>
-static std::tuple<int, int> vmv_test_setup(VLMUL lmul, int32_t avl) {
- // Clear all vector registers
- zero_vector_registers();
-
- // Initialize test_vector1 and determine vl, vlmax
- uint32_t bw = std::__bit_width(sizeof(T));
- VSEW sew = static_cast<VSEW>(bw - 1);
- int vlmax = get_vsetvlmax_intrinsic(sew, lmul);
- if (avl > vlmax) {
- avl = vlmax;
- }
- T *ptr_vec_1 = reinterpret_cast<T *>(test_vector_1);
- memset(test_vector_1, 0, MAXVL_BYTES);
- memset(test_vector_2, 0, MAXVL_BYTES);
- int vl = set_vsetvl_intrinsic(sew, lmul, avl);
- EXPECT_EQ(avl, vl);
- for (int idx = 0; idx < vl; idx++) {
- ptr_vec_1[idx] = idx;
- }
- return std::make_tuple(vlmax, vl);
-}
-
-template <typename T>
-static void vmv_check(int avl) {
- T *ptr_vec_1 = reinterpret_cast<T *>(test_vector_1);
- T *ptr_vec_2 = reinterpret_cast<T *>(test_vector_2);
- for (int idx = 0; idx < avl; idx++) {
- ASSERT_EQ(ptr_vec_1[idx], ptr_vec_2[idx]);
- }
-}
-
class VmvTest : public ::testing::Test {
protected:
void SetUp() override { zero_vector_registers(); }
void TearDown() override { zero_vector_registers(); }
};
-TEST_F(VmvTest, vmv_demo) {
+TEST_F(VmvTest, DISABLED_vmv_demo) {
for (int i = 0; i < AVL_COUNT; i++) {
int32_t avl = AVLS[i];
int vlmax;
int vl;
- std::tie(vlmax, vl) = vmv_test_setup<uint8_t>(VLMUL::LMUL_M1, avl);
+ std::tie(vlmax, vl) = vector_test_setup<uint8_t>(
+ VLMUL::LMUL_M1, avl, test_vector_1, test_vector_2);
if (avl > vlmax) {
continue;
}
@@ -73,7 +37,7 @@
__asm__ volatile("vle8.v v0, (%0)" : : "r"(test_vector_1));
__asm__ volatile("vmv.v.v v1, v0");
__asm__ volatile("vse8.v v1, (%0)" : : "r"(test_vector_2));
- vmv_check<uint8_t>(vlmax);
+ assert_vec_elem_eq<int8_t>(vlmax, test_vector_1, test_vector_2);
}
}
@@ -83,7 +47,8 @@
int32_t avl = AVLS[i];
int vlmax;
int vl;
- std::tie(vlmax, vl) = vmv_test_setup<uint8_t>(VLMUL::LMUL_M1, avl);
+ std::tie(vlmax, vl) = vector_test_setup<uint8_t>(
+ VLMUL::LMUL_M1, avl, test_vector_1, test_vector_2);
if (avl > vlmax) {
continue;
}
@@ -91,7 +56,7 @@
vint8m1_t vec2 = vmv_v_v_i8m1(vec1, vl);
int8_t *ptr_vec_2 = reinterpret_cast<int8_t *>(test_vector_2);
vse8_v_i8m1(ptr_vec_2, vec2, vl);
- vmv_check<uint8_t>(vlmax);
+ assert_vec_elem_eq<int8_t>(vlmax, test_vector_1, test_vector_2);
}
}
@@ -101,8 +66,8 @@
int32_t avl = AVLS[i]; \
int vlmax; \
int vl; \
- std::tie(vlmax, vl) = \
- vmv_test_setup<uint##_SEW_##_t>(VLMUL::LMUL_M##_LMUL_, avl); \
+ std::tie(vlmax, vl) = vector_test_setup<uint##_SEW_##_t>( \
+ VLMUL::LMUL_M##_LMUL_, avl, test_vector_1, test_vector_2); \
if (avl > vlmax) { \
continue; \
} \
@@ -112,7 +77,7 @@
int##_SEW_##_t *ptr_vec_2 = \
reinterpret_cast<int##_SEW_##_t *>(test_vector_2); \
vse##_SEW_##_v_i##_SEW_##m##_LMUL_(ptr_vec_2, vec2, vl); \
- vmv_check<uint##_SEW_##_t>(vlmax); \
+ assert_vec_elem_eq<int##_SEW_##_t>(vlmax, test_vector_1, test_vector_2); \
} \
}
@@ -131,26 +96,26 @@
DEFINE_TEST_VMV_V_V_I_INTRINSIC(32, 4)
DEFINE_TEST_VMV_V_V_I_INTRINSIC(32, 8)
-#define DEFINE_TEST_VMV_V_V_I(_SEW_, _LMUL_) \
- TEST_F(VmvTest, vmv_v_v_i##_SEW_##m##_LMUL_) { \
- for (int i = 0; i < AVL_COUNT; i++) { \
- int32_t avl = AVLS[i]; \
- int vlmax; \
- int vl; \
- std::tie(vlmax, vl) = \
- vmv_test_setup<uint##_SEW_##_t>(VLMUL::LMUL_M##_LMUL_, avl); \
- if (avl > vlmax) { \
- continue; \
- } \
- uint##_SEW_##_t *ptr_vec_1 = \
- reinterpret_cast<uint##_SEW_##_t *>(test_vector_1); \
- uint##_SEW_##_t *ptr_vec_2 = \
- reinterpret_cast<uint##_SEW_##_t *>(test_vector_2); \
- __asm__ volatile("vle" #_SEW_ ".v v0, (%0)" : : "r"(ptr_vec_1)); \
- __asm__ volatile("vmv.v.v v8, v0"); \
- __asm__ volatile("vse" #_SEW_ ".v v8, (%0)" : : "r"(ptr_vec_2)); \
- vmv_check<uint##_SEW_##_t>(vlmax); \
- } \
+#define DEFINE_TEST_VMV_V_V_I(_SEW_, _LMUL_) \
+ TEST_F(VmvTest, vmv_v_v_i##_SEW_##m##_LMUL_) { \
+ for (int i = 0; i < AVL_COUNT; i++) { \
+ int32_t avl = AVLS[i]; \
+ int vlmax; \
+ int vl; \
+ std::tie(vlmax, vl) = vector_test_setup<uint##_SEW_##_t>( \
+ VLMUL::LMUL_M##_LMUL_, avl, test_vector_1, test_vector_2); \
+ if (avl > vlmax) { \
+ continue; \
+ } \
+ uint##_SEW_##_t *ptr_vec_1 = \
+ reinterpret_cast<uint##_SEW_##_t *>(test_vector_1); \
+ uint##_SEW_##_t *ptr_vec_2 = \
+ reinterpret_cast<uint##_SEW_##_t *>(test_vector_2); \
+ __asm__ volatile("vle" #_SEW_ ".v v0, (%0)" : : "r"(ptr_vec_1)); \
+ __asm__ volatile("vmv.v.v v8, v0"); \
+ __asm__ volatile("vse" #_SEW_ ".v v8, (%0)" : : "r"(ptr_vec_2)); \
+ assert_vec_elem_eq<int##_SEW_##_t>(vlmax, test_vector_1, test_vector_2); \
+ } \
}
DEFINE_TEST_VMV_V_V_I(8, 1)
@@ -173,7 +138,8 @@
int32_t avl = AVLS[i];
int vlmax;
int vl;
- std::tie(vlmax, vl) = vmv_test_setup<uint8_t>(VLMUL::LMUL_M1, avl);
+ std::tie(vlmax, vl) = vector_test_setup<uint8_t>(
+ VLMUL::LMUL_M1, avl, test_vector_1, test_vector_2);
if (avl > vlmax) {
continue;
}
@@ -185,7 +151,7 @@
ptr_vec_1[i] = test_val;
}
__asm__ volatile("vse8.v v8, (%0)" : : "r"(ptr_vec_2));
- vmv_check<uint8_t>(vlmax);
+ assert_vec_elem_eq<uint8_t>(vlmax, test_vector_1, test_vector_2);
}
}
@@ -195,8 +161,8 @@
int32_t avl = AVLS[i]; \
int vlmax; \
int vl; \
- std::tie(vlmax, vl) = \
- vmv_test_setup<uint##_SEW_##_t>(VLMUL::LMUL_M##_LMUL_, avl); \
+ std::tie(vlmax, vl) = vector_test_setup<uint##_SEW_##_t>( \
+ VLMUL::LMUL_M##_LMUL_, avl, test_vector_1, test_vector_2); \
if (avl > vlmax) { \
continue; \
} \
@@ -210,7 +176,8 @@
ptr_vec_1[i] = test_val; \
} \
__asm__ volatile("vse" #_SEW_ ".v v8, (%0)" : : "r"(ptr_vec_2)); \
- vmv_check<uint##_SEW_##_t>(vlmax); \
+ assert_vec_elem_eq<uint##_SEW_##_t>(vlmax, test_vector_1, \
+ test_vector_2); \
} \
}
@@ -234,7 +201,8 @@
int32_t avl = AVLS[i];
int vlmax;
int vl;
- std::tie(vlmax, vl) = vmv_test_setup<int8_t>(VLMUL::LMUL_M1, avl);
+ std::tie(vlmax, vl) = vector_test_setup<int8_t>(
+ VLMUL::LMUL_M1, avl, test_vector_1, test_vector_2);
if (avl > vlmax) {
continue;
}
@@ -246,34 +214,34 @@
ptr_vec_1[i] = test_val;
}
__asm__ volatile("vse8.v v8, (%0)" : : "r"(ptr_vec_2));
- vmv_check<uint8_t>(vlmax);
+ assert_vec_elem_eq<uint8_t>(vlmax, test_vector_1, test_vector_2);
}
}
// TODO(gkielian): Allow mechanism for multiple tests for same sew,lmul pair
-#define DEFINE_TEST_VMV_V_I_I(_SEW_, _LMUL_, TEST_VAL) \
- TEST_F(VmvTest, vmv_v_i_e##_SEW_##m##_LMUL_) { \
- for (int i = 0; i < AVL_COUNT; i++) { \
- int32_t avl = AVLS[i]; \
- int vlmax; \
- int vl; \
- std::tie(vlmax, vl) = \
- vmv_test_setup<int##_SEW_##_t>(VLMUL::LMUL_M##_LMUL_, avl); \
- if (avl > vlmax) { \
- continue; \
- } \
- int##_SEW_##_t *ptr_vec_1 = \
- reinterpret_cast<int##_SEW_##_t *>(test_vector_1); \
- int##_SEW_##_t *ptr_vec_2 = \
- reinterpret_cast<int##_SEW_##_t *>(test_vector_2); \
- int##_SEW_##_t test_val = TEST_VAL; \
- __asm__ volatile("vmv.v.i v8, " #TEST_VAL); \
- for (int i = 0; i < vl; i++) { \
- ptr_vec_1[i] = test_val; \
- } \
- __asm__ volatile("vse" #_SEW_ ".v v8, (%0)" : : "r"(ptr_vec_2)); \
- vmv_check<int##_SEW_##_t>(vlmax); \
- } \
+#define DEFINE_TEST_VMV_V_I_I(_SEW_, _LMUL_, TEST_VAL) \
+ TEST_F(VmvTest, vmv_v_i_e##_SEW_##m##_LMUL_) { \
+ for (int i = 0; i < AVL_COUNT; i++) { \
+ int32_t avl = AVLS[i]; \
+ int vlmax; \
+ int vl; \
+ std::tie(vlmax, vl) = vector_test_setup<int##_SEW_##_t>( \
+ VLMUL::LMUL_M##_LMUL_, avl, test_vector_1, test_vector_2); \
+ if (avl > vlmax) { \
+ continue; \
+ } \
+ int##_SEW_##_t *ptr_vec_1 = \
+ reinterpret_cast<int##_SEW_##_t *>(test_vector_1); \
+ int##_SEW_##_t *ptr_vec_2 = \
+ reinterpret_cast<int##_SEW_##_t *>(test_vector_2); \
+ int##_SEW_##_t test_val = TEST_VAL; \
+ __asm__ volatile("vmv.v.i v8, " #TEST_VAL); \
+ for (int i = 0; i < vl; i++) { \
+ ptr_vec_1[i] = test_val; \
+ } \
+ __asm__ volatile("vse" #_SEW_ ".v v8, (%0)" : : "r"(ptr_vec_2)); \
+ assert_vec_elem_eq<int##_SEW_##_t>(vlmax, test_vector_1, test_vector_2); \
+ } \
}
DEFINE_TEST_VMV_V_I_I(8, 1, -11)