tests:vle_vse: Enable fractional LMUL load/store tests

These tests pass on Renode but fail on QEMU; we need to follow up with
the QEMU team to resolve the discrepancy.

Change-Id: I403451648598f9385c41d508102db5c01f1122c4
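For reviewers unfamiliar with fractional LMUL: the mf2/mf4/mf8 settings make a vector operation use one half, one quarter, or one eighth of a single vector register, so VLMAX = (VLEN / SEW) * LMUL shrinks accordingly. A minimal sketch of the configuration these tests exercise (illustrative only, not part of this change; the VLEN = 128 figure in the comment is an assumption):

    // Setting rs1 = zero with a non-x0 destination requests VLMAX.
    // For e8/mf2 on a VLEN=128 core: VLMAX = (128 / 8) * (1/2) = 8 elements.
    #include <cstddef>

    static inline std::size_t vlmax_e8mf2() {
      std::size_t vl;
      __asm__ volatile("vsetvli %0, zero, e8, mf2, ta, mu" : "=r"(vl));
      return vl;
    }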
diff --git a/tests/include/test_v_helpers.h b/tests/include/test_v_helpers.h
index 2a56f84..7189b72 100644
--- a/tests/include/test_v_helpers.h
+++ b/tests/include/test_v_helpers.h
@@ -34,12 +34,10 @@
 };
 
 enum VLMUL {
+  LMUL_MF8 = 5,
+  LMUL_MF4 = 6,
+  LMUL_MF2 = 7,
-  /* // Fractional LMUL not supported by our intrinsic compiler
-  LMUL_MF8 = 5,
-  LMUL_MF4 = 6,
-  LMUL_MF2 = 7,
-  */
   LMUL_M1 = 0,
   LMUL_M2 = 1,
   LMUL_M4 = 2,
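Note that these enum values are not arbitrary: they follow the 3-bit vlmul encoding of the RVV 1.0 vtype CSR, where 0b000 through 0b011 select m1 through m8 and 0b101 through 0b111 select mf8 through mf2 (0b100 is reserved), which is why the fractional entries are 5, 6, and 7. A hedged sketch of how these bits sit in vtype (the vtype_bits helper below is hypothetical and not part of the test helpers):

    // RVV 1.0 vtype layout: vlmul = vtype[2:0], vsew = vtype[5:3].
    #include <cstdint>

    static inline uint32_t vtype_bits(uint32_t vsew, uint32_t vlmul) {
      return ((vsew & 0x7) << 3) | (vlmul & 0x7);
    }
    // Example: vtype_bits(/*e8=*/0, /*LMUL_MF2=*/7) == 0b000111.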
diff --git a/tests/test_v_helpers.cpp b/tests/test_v_helpers.cpp
index 32886eb..133ad07 100644
--- a/tests/test_v_helpers.cpp
+++ b/tests/test_v_helpers.cpp
@@ -85,6 +85,45 @@
           break;
       }
       break;
+    case VLMUL::LMUL_MF2:
+      switch (sew) {
+        case VSEW::SEW_E8:
+          return vsetvl_e8mf2(avl);
+          break;
+        case VSEW::SEW_E16:
+          return vsetvl_e16mf2(avl);
+          break;
+        case VSEW::SEW_E32:
+          return vsetvl_e32mf2(avl);
+          break;
+        default:
+          return -1;
+          break;
+      }
+      break;
+    case VLMUL::LMUL_MF4:
+      switch (sew) {
+        case VSEW::SEW_E8:
+          return vsetvl_e8mf4(avl);
+          break;
+        case VSEW::SEW_E16:
+          return vsetvl_e16mf4(avl);
+          break;
+        default:
+          return -1;
+          break;
+      }
+      break;
+    case VLMUL::LMUL_MF8:
+      switch (sew) {
+        case VSEW::SEW_E8:
+          return vsetvl_e8mf8(avl);
+          break;
+        default:
+          return -1;
+          break;
+      }
+      break;
     default:
       break;
   }
@@ -156,6 +195,42 @@
           break;
       }
       break;
+    case VLMUL::LMUL_MF2:
+      switch (sew) {
+        case VSEW::SEW_E8:
+          return vsetvlmax_e8mf2();
+          break;
+        case VSEW::SEW_E16:
+          return vsetvlmax_e16mf2();
+          break;
+        case VSEW::SEW_E32:
+          return vsetvlmax_e32mf2();
+          break;
+        default:
+          return -1;
+          break;
+      }
+    case VLMUL::LMUL_MF4:
+      switch (sew) {
+        case VSEW::SEW_E8:
+          return vsetvlmax_e8mf4();
+          break;
+        case VSEW::SEW_E16:
+          return vsetvlmax_e16mf4();
+          break;
+        default:
+          return -1;
+          break;
+      }
+    case VLMUL::LMUL_MF8:
+      switch (sew) {
+        case VSEW::SEW_E8:
+          return vsetvlmax_e8mf8();
+          break;
+        default:
+          return -1;
+          break;
+      }
     default:
       break;
   }
@@ -249,6 +324,54 @@
           return 0;
       }
       break;
+    case VLMUL::LMUL_MF2:
+      switch (sew) {
+        case VSEW::SEW_E8:
+          __asm__ volatile("vsetvli %[VL], %[AVL], e8, mf2, ta, mu"
+                           : [VL] "=r"(vl)
+                           : [AVL] "r"(avl));
+          break;
+        case VSEW::SEW_E16:
+          __asm__ volatile("vsetvli %[VL], %[AVL], e16, mf2, ta, mu"
+                           : [VL] "=r"(vl)
+                           : [AVL] "r"(avl));
+          break;
+        case VSEW::SEW_E32:
+          __asm__ volatile("vsetvli %[VL], %[AVL], e32, mf2, ta, mu"
+                           : [VL] "=r"(vl)
+                           : [AVL] "r"(avl));
+          break;
+        default:
+          return 0;
+      }
+      break;
+    case VLMUL::LMUL_MF4:
+      switch (sew) {
+        case VSEW::SEW_E8:
+          __asm__ volatile("vsetvli %[VL], %[AVL], e8, mf4, ta, mu"
+                           : [VL] "=r"(vl)
+                           : [AVL] "r"(avl));
+          break;
+        case VSEW::SEW_E16:
+          __asm__ volatile("vsetvli %[VL], %[AVL], e16, mf4, ta, mu"
+                           : [VL] "=r"(vl)
+                           : [AVL] "r"(avl));
+          break;
+        default:
+          return 0;
+      }
+      break;
+    case VLMUL::LMUL_MF8:
+      switch (sew) {
+        case VSEW::SEW_E8:
+          __asm__ volatile("vsetvli %[VL], %[AVL], e8, mf8, ta, mu"
+                           : [VL] "=r"(vl)
+                           : [AVL] "r"(avl));
+          break;
+        default:
+          return 0;
+      }
+      break;
     default:
       return 0;
   }
@@ -352,6 +475,54 @@
           return 0;
       }
       break;
+    case VLMUL::LMUL_MF2:
+      switch (sew) {
+        case VSEW::SEW_E8:
+          __asm__ volatile("vsetivli %[VL], %[AVL], e8, mf2, ta, mu"
+                           : [VL] "=r"(vl)
+                           : [AVL] "n"(AVL_CONST));
+          break;
+        case VSEW::SEW_E16:
+          __asm__ volatile("vsetivli %[VL], %[AVL], e16, mf2, ta, mu"
+                           : [VL] "=r"(vl)
+                           : [AVL] "n"(AVL_CONST));
+          break;
+        case VSEW::SEW_E32:
+          __asm__ volatile("vsetivli %[VL], %[AVL], e32, mf2, ta, mu"
+                           : [VL] "=r"(vl)
+                           : [AVL] "n"(AVL_CONST));
+          break;
+        default:
+          return 0;
+      }
+      break;
+    case VLMUL::LMUL_MF4:
+      switch (sew) {
+        case VSEW::SEW_E8:
+          __asm__ volatile("vsetivli %[VL], %[AVL], e8, mf4, ta, mu"
+                           : [VL] "=r"(vl)
+                           : [AVL] "n"(AVL_CONST));
+          break;
+        case VSEW::SEW_E16:
+          __asm__ volatile("vsetivli %[VL], %[AVL], e16, mf4, ta, mu"
+                           : [VL] "=r"(vl)
+                           : [AVL] "n"(AVL_CONST));
+          break;
+        default:
+          return 0;
+      }
+      break;
+    case VLMUL::LMUL_MF8:
+      switch (sew) {
+        case VSEW::SEW_E8:
+          __asm__ volatile("vsetivli %[VL], %[AVL], e8, mf8, ta, mu"
+                           : [VL] "=r"(vl)
+                           : [AVL] "n"(AVL_CONST));
+          break;
+        default:
+          return 0;
+      }
+      break;
     default:
       return 0;
   }
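The (SEW, LMUL) pairs handled above appear to be exactly those the RVV spec permits when SEW <= LMUL * ELEN, assuming ELEN = 64 on this target: mf2 reaches up to e32, mf4 up to e16, and mf8 supports only e8; every other combination returns the error value. A hedged sketch of the VLMAX these settings should yield (expected_vlmax is a hypothetical helper, useful only as a cross-check against what the vsetvli paths return):

    // For fractional LMUL, VLMAX = (VLEN / SEW) >> shift,
    // where mf2/mf4/mf8 correspond to shifts of 1/2/3.
    #include <cstdint>

    static inline int32_t expected_vlmax(int32_t vlen_bits, int32_t sew_bits,
                                         int32_t frac_shift) {
      return (vlen_bits / sew_bits) >> frac_shift;
    }
    // e.g. expected_vlmax(128, 16, 2) == 2 for e16/mf4 with VLEN = 128.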
diff --git a/tests/vle_vse_test.cpp b/tests/vle_vse_test.cpp
index 5542b29..396fdef 100644
--- a/tests/vle_vse_test.cpp
+++ b/tests/vle_vse_test.cpp
@@ -13,7 +13,7 @@
 uint8_t test_vector_1[MAXVL_BYTES];
 uint8_t test_vector_2[MAXVL_BYTES];
 
-static void vlevse_test(VSEW sew, VLMUL lmul, bool use_intrinsic) {
+static void vlevse_test(VSEW sew, VLMUL lmul, bool use_intrinsic = false) {
   for (int i = 0; i <= AVL_COUNT; ++i) {
     zero_vector_registers();
     int32_t avl = AVLS[i];
@@ -127,5 +127,26 @@
   vlevse_test(VSEW::SEW_E32, VLMUL::LMUL_M8, false);
 }
 
+TEST(VleVseTest, vle8vse8_e8mf2) { vlevse_test(VSEW::SEW_E8, VLMUL::LMUL_MF2); }
+
+TEST(VleVseTest, vle8vse8_e8mf4) { vlevse_test(VSEW::SEW_E8, VLMUL::LMUL_MF4); }
+
+TEST(VleVseTest, vle16vse16_e16mf2) {
+  vlevse_test(VSEW::SEW_E16, VLMUL::LMUL_MF2);
+}
+
+// The following tests failed on Qemu but passed with Renode.
+TEST(VleVseTest, DISABLED_vle8vse8_e8mf8) {
+  vlevse_test(VSEW::SEW_E8, VLMUL::LMUL_MF8);
+}
+
+TEST(VleVseTest, DISABLED_vle16vse16_e16mf4) {
+  vlevse_test(VSEW::SEW_E16, VLMUL::LMUL_MF4);
+}
+
+TEST(VleVseTest, DISABLED_vle32vse32_e32mf2) {
+  vlevse_test(VSEW::SEW_E32, VLMUL::LMUL_MF2);
+}
+
 } // namespace
 } // namespace vle_vse_test
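The DISABLED_ prefix is googletest's standard way of compiling a test while skipping it by default, so the failing cases stay buildable and can be forced to run when re-checking against a fixed QEMU. Assuming the test binary is named vle_vse_test (hypothetical; substitute the actual output name), something like:

    ./vle_vse_test --gtest_also_run_disabled_tests \
        --gtest_filter='VleVseTest.DISABLED_*'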