Cleanup code for clang
Set clang as the default compiler for sw/vec repo.
clang only supports RVV v0.10, which requires the vset[i]vl[i] syntax of
vset[i]vl[i] rd, avl, e[8|16|32], m[1|2|4|8], [ta|tu], [ma|mu]
Set the default {tu, mu} based on the spec if not specified
Disable e64 and fractional lmul since it may not work yet.
Change-Id: I7edc12897aff2fa2b27eb9050e276e17906f5570
diff --git a/cmake/riscv_baremetal.cmake b/cmake/riscv_baremetal.cmake
index bd67cf9..006cb3d 100644
--- a/cmake/riscv_baremetal.cmake
+++ b/cmake/riscv_baremetal.cmake
@@ -1,5 +1,5 @@
# If set to true, build for IREE using clang. If false, build for gcc.
-set(BUILD_IREE OFF CACHE BOOL "")
+set(BUILD_WITH_CLANG ON CACHE BOOL "Build the library with clang.")
set(CMAKE_SYSTEM_NAME Generic)
set(CMAKE_SYSTEM_ABI ilp32)
@@ -10,9 +10,20 @@
"-O0"
"-g3"
"-ggdb"
- "-Werror")
+ "-Werror"
+ "-ffreestanding"
+ "-ffunction-sections"
+ "-fstack-usage"
+ "-mstrict-align"
+)
-if( ${BUILD_IREE} )
+set(VEC_DEFAULT_LINKOPTS
+ -Wl,--gc-sections
+ -Wl,--print-memory-usage
+ -Wl,-Map=${PROJECT_NAME}.map
+)
+
+if(${BUILD_WITH_CLANG})
set(CMAKE_SYSTEM_PROCESSOR rv32imfv0p10)
set(RISCV_TOOLCHAIN_ROOT "$ENV{CACHE}/toolchain_iree_rv32imf/bin/" CACHE PATH "RISC-V toolchain root path")
@@ -61,12 +72,12 @@
set( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=${CMAKE_SYSTEM_PROCESSOR} -mabi=${CMAKE_SYSTEM_ABI}")
set(VEC_DEFAULT_COPTS "${VEC_DEFAULT_COPTS}"
- "-ffreestanding"
- "-ffunction-sections"
- "-fstack-usage"
- "-mstrict-align"
+ "-Wno-format"
)
-endif()
+ set(VEC_DEFAULT_LINKOPTS "${VEC_DEFAULT_LINKOPTS}"
+ "-specs=nano.specs"
+ )
+endif(${BUILD_WITH_CLANG})
# Check compiler found
if (EXISTS ${RISCV_COMPILER})
@@ -79,5 +90,3 @@
set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS}" CACHE STRING "" )
set( CMAKE_ASM_FLAGS "${CMAKE_C_FLAGS}" CACHE STRING "" )
set( CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -nostartfiles " )
-
-set( VEC_DEFAULT_LINKOPTS -specs=nano.specs -Wl,--gc-sections -Wl,--print-memory-usage -Wl,-Map=${PROJECT_NAME}.map)
diff --git a/springbok/crt0.s b/springbok/crt0.s
index 08c7490..e724a02 100644
--- a/springbok/crt0.s
+++ b/springbok/crt0.s
@@ -44,14 +44,14 @@
csrw mstatus, a0
# Set lmul=8 and clear the register file
- vsetvli t0, zero, e8, m8, ta, ma
+ vsetvli t0, zero, e8, m8, tu, mu
vmv.v.i v0, 0
vmv.v.i v8, 0
vmv.v.i v16, 0
vmv.v.i v24, 0
# Set lmul=1
- vsetvli t0, zero, e8, m1, ta, ma
+ vsetvli t0, zero, e8, m1, tu, mu
# Set vector extension to "clean"
xori a0, a0, 0x600
diff --git a/tests/test_v_helpers.cpp b/tests/test_v_helpers.cpp
index f5404e3..ed8d5f2 100644
--- a/tests/test_v_helpers.cpp
+++ b/tests/test_v_helpers.cpp
@@ -172,21 +172,21 @@
switch(sew) {
case VSEW::SEW_E8:
__asm__ volatile(
- "vsetvli %[VL], %[AVL], e8, m1"
+ "vsetvli %[VL], %[AVL], e8, m1, tu, mu"
: [VL] "=r" (vl)
: [AVL] "r" (avl)
);
break;
case VSEW::SEW_E16:
__asm__ volatile(
- "vsetvli %[VL], %[AVL], e16, m1"
+ "vsetvli %[VL], %[AVL], e16, m1, tu, mu"
: [VL] "=r" (vl)
: [AVL] "r" (avl)
);
break;
case VSEW::SEW_E32:
__asm__ volatile(
- "vsetvli %[VL], %[AVL], e32, m1"
+ "vsetvli %[VL], %[AVL], e32, m1, tu, mu"
: [VL] "=r" (vl)
: [AVL] "r" (avl)
);
@@ -199,21 +199,21 @@
switch(sew) {
case VSEW::SEW_E8:
__asm__ volatile(
- "vsetvli %[VL], %[AVL], e8, m2"
+ "vsetvli %[VL], %[AVL], e8, m2, tu, mu"
: [VL] "=r" (vl)
: [AVL] "r" (avl)
);
break;
case VSEW::SEW_E16:
__asm__ volatile(
- "vsetvli %[VL], %[AVL], e16, m2"
+ "vsetvli %[VL], %[AVL], e16, m2, tu, mu"
: [VL] "=r" (vl)
: [AVL] "r" (avl)
);
break;
case VSEW::SEW_E32:
__asm__ volatile(
- "vsetvli %[VL], %[AVL], e32, m2"
+ "vsetvli %[VL], %[AVL], e32, m2, tu, mu"
: [VL] "=r" (vl)
: [AVL] "r" (avl)
);
@@ -226,21 +226,21 @@
switch(sew) {
case VSEW::SEW_E8:
__asm__ volatile(
- "vsetvli %[VL], %[AVL], e8, m4"
+ "vsetvli %[VL], %[AVL], e8, m4, tu, mu"
: [VL] "=r" (vl)
: [AVL] "r" (avl)
);
break;
case VSEW::SEW_E16:
__asm__ volatile(
- "vsetvli %[VL], %[AVL], e16, m4"
+ "vsetvli %[VL], %[AVL], e16, m4, tu, mu"
: [VL] "=r" (vl)
: [AVL] "r" (avl)
);
break;
case VSEW::SEW_E32:
__asm__ volatile(
- "vsetvli %[VL], %[AVL], e32, m4"
+ "vsetvli %[VL], %[AVL], e32, m4, tu, mu"
: [VL] "=r" (vl)
: [AVL] "r" (avl)
);
@@ -253,21 +253,21 @@
switch(sew) {
case VSEW::SEW_E8:
__asm__ volatile(
- "vsetvli %[VL], %[AVL], e8, m8"
+ "vsetvli %[VL], %[AVL], e8, m8, tu, mu"
: [VL] "=r" (vl)
: [AVL] "r" (avl)
);
break;
case VSEW::SEW_E16:
__asm__ volatile(
- "vsetvli %[VL], %[AVL], e16, m8"
+ "vsetvli %[VL], %[AVL], e16, m8, tu, mu"
: [VL] "=r" (vl)
: [AVL] "r" (avl)
);
break;
case VSEW::SEW_E32:
__asm__ volatile(
- "vsetvli %[VL], %[AVL], e32, m8"
+ "vsetvli %[VL], %[AVL], e32, m8, tu, mu"
: [VL] "=r" (vl)
: [AVL] "r" (avl)
);
diff --git a/tests/vle_vse_test.cpp b/tests/vle_vse_test.cpp
index d80548c..5f269da 100644
--- a/tests/vle_vse_test.cpp
+++ b/tests/vle_vse_test.cpp
@@ -11,7 +11,6 @@
using namespace test_v_helpers;
const int VLEN = 512u;
-const int ELEN = 32u;
const int LMUL_MAX = 8u;
const int MAXVL_BYTES = VLEN / sizeof(uint8_t) * LMUL_MAX;
const int AVL_STEP = 4;
diff --git a/tests/vsetvl_test.cpp b/tests/vsetvl_test.cpp
index 09dafb4..b31a553 100644
--- a/tests/vsetvl_test.cpp
+++ b/tests/vsetvl_test.cpp
@@ -12,7 +12,6 @@
using namespace test_v_helpers;
const uint64_t VLEN = 512u;
-const uint64_t ELEN = 32u;
uint32_t AVLS[] = {1, 2, 3, 4, 5, 8, 16, 17,
32, 36, 55, 64, 100, 128, 256, 321,
diff --git a/vector_executive/vector_executive.c b/vector_executive/vector_executive.c
index 1a0619e..3d1f064 100644
--- a/vector_executive/vector_executive.c
+++ b/vector_executive/vector_executive.c
@@ -3,7 +3,7 @@
#include <assert.h>
#define VECLEN 8
-void main(int argc, char **argv) {
+int main(int argc, char **argv) {
LOG_INFO("Vector Executive");
LOG_INFO("Built at: " __DATE__ ", " __TIME__);
@@ -23,7 +23,7 @@
__asm__ volatile("csrw vxsat, 0");
__asm__ volatile("csrw vxrm, 0");
__asm__ volatile("li a1, 8");
- __asm__ volatile("vsetvli t0, a1, e32"); // 8 elements of 32 bits
+ __asm__ volatile("vsetvli t0, a1, e32, m1, tu, mu"); // 8 elements of 32 bits
// Load vectors with data. v0: 1,2,..,8. v1: 9,10,..,16.
uint32_t vector0[VECLEN];
@@ -45,8 +45,9 @@
__asm__ volatile("vse32.v v2, (%0)" : : "r" (vec_add));
for(int i = 0; i < VECLEN; i++) {
- LOG_INFO("Expected %lu, %lu : Actual %lu, %lu", vector0[i] * vector1[i], vector0[i] + vector1[i], vec_mul[i], vec_add[i]);
+ LOG_INFO("Expected %u, %u : Actual %u, %u", vector0[i] * vector1[i], vector0[i] + vector1[i], vec_mul[i], vec_add[i]);
assert(vector0[i] * vector1[i] == vec_mul[i]);
assert(vector0[i] + vector1[i] == vec_add[i]);
}
+ return 0;
}
diff --git a/vector_load_store_tests/CMakeLists.txt b/vector_load_store_tests/CMakeLists.txt
index 359d042..607b3d9 100644
--- a/vector_load_store_tests/CMakeLists.txt
+++ b/vector_load_store_tests/CMakeLists.txt
@@ -1,9 +1,9 @@
vec_cc_binary(
- NAME
- vector_load_store_tests
- SRCS
- vector_load_store_tests.c
+ NAME
+ vector_load_store_tests
+ SRCS
+ vector_load_store_tests.c
DEPS
springbok
vector_tests
diff --git a/vector_load_store_tests/vector_load_store_tests.c b/vector_load_store_tests/vector_load_store_tests.c
index 25d84e5..38fea5e 100644
--- a/vector_load_store_tests/vector_load_store_tests.c
+++ b/vector_load_store_tests/vector_load_store_tests.c
@@ -1,10 +1,13 @@
#include "vector_load_store_tests.h"
+// TODO(b/194689843): Re-enable e64 and mf[2|4|8] tests.
+
bool test_vector(void) {
test_vector_load_store_sanity_e8();
test_vector_load_store_sanity_e16();
test_vector_load_store_sanity_e32();
- test_vector_load_store_sanity_e64();
+ // SEW limited to E32
+ // test_vector_load_store_sanity_e64();
return true;
}
@@ -13,8 +16,8 @@
VSET(4, e8, m1);
uint32_t vl = 0;
- COPY_SCALAR_REG("t0", vl);
- LOG_INFO("vl: %lu", vl);
+ COPY_SCALAR_REG(vl);
+ LOG_INFO("vl: %u", vl);
assert(vl == 4);
volatile uint8_t INP1[] = {0xff, 0x00, 0x0f, 0xf0};
@@ -29,8 +32,8 @@
VSET(4, e16, m1);
uint32_t vl = 0;
- COPY_SCALAR_REG("t0", vl);
- LOG_INFO("vl: %lu", vl);
+ COPY_SCALAR_REG(vl);
+ LOG_INFO("vl: %u", vl);
assert(vl == 4);
volatile uint16_t INP1[] = {0xff00, 0x00ff, 0x0ff0, 0xf00f};
@@ -45,8 +48,8 @@
VSET(4, e32, m1);
uint32_t vl = 0;
- COPY_SCALAR_REG("t0", vl);
- LOG_INFO("vl: %lu", vl);
+ COPY_SCALAR_REG(vl);
+ LOG_INFO("vl: %u", vl);
assert(vl == 4);
volatile uint32_t INP1[] = {0xff0000ff, 0x00ffff00, 0x0ff00ff0, 0xf00ff00f};
@@ -61,8 +64,8 @@
VSET(4, e64, m1);
uint32_t vl = 0;
- COPY_SCALAR_REG("t0", vl);
- LOG_INFO("vl: %lu", vl);
+ COPY_SCALAR_REG(vl);
+ LOG_INFO("vl: %u", vl);
assert(vl == 4);
volatile uint64_t INP1[] = {0xff0000ffff0000ff, 0x00ffff0000ffff00, 0x0ff00ff00ff00ff0, 0xf00ff00ff00ff00f};
diff --git a/vector_tests/include/test_vector.h b/vector_tests/include/test_vector.h
index 0694ae2..abddce7 100644
--- a/vector_tests/include/test_vector.h
+++ b/vector_tests/include/test_vector.h
@@ -13,19 +13,12 @@
#define _TEST_VLENB _TEST_VLEN >> 3
-#define VSET(VLEN, VTYPE, LMUL) \
- do { \
- __asm__ volatile("vsetvli t0, %[A]," #VTYPE "," #LMUL \
- " \n" ::[A] "r"(VLEN)); \
- } while (0)
+#define VSET(VLEN, VTYPE, LMUL) \
+ __asm__ volatile("vsetvli t0, %[A]," #VTYPE "," #LMUL \
+ ", tu, mu\n" ::[A] "r"(VLEN));
-#define COPY_SCALAR_REG(REG, DST) \
- do { \
- { \
- register uint32_t tmp_reg __asm__(REG); \
- DST = tmp_reg; \
- } \
- } while (0)
+#define COPY_SCALAR_REG(DST) \
+ __asm__ volatile("sw t0, 0(%[A])\n" ::[A] "r"(&DST));
extern bool test_vector(void);
bool test_main(void);
@@ -34,4 +27,4 @@
uint32_t get_vtype_e32(uint8_t, bool, bool);
uint32_t get_vtype_e64(uint8_t, bool, bool);
-#endif
\ No newline at end of file
+#endif
diff --git a/vector_tests/test_vector.c b/vector_tests/test_vector.c
index 6c8306f..b031b76 100644
--- a/vector_tests/test_vector.c
+++ b/vector_tests/test_vector.c
@@ -34,7 +34,7 @@
return get_vtype(vsew, vlmul, tail_agnostic, mask_agnostic);
}
-bool main(void) {
+int main(void) {
LOG_INFO("Hello test_vector.c");
LOG_INFO("Built at: " __DATE__ ", " __TIME__);
@@ -54,9 +54,9 @@
uint32_t vlenb;
__asm__ volatile("csrr %0, vlenb" : "=r"(vlenb));
- LOG_INFO("VLENB: 0x%08x, VLEN: %lu", (unsigned int)vlenb, vlenb << 3);
+ LOG_INFO("VLENB: 0x%08x, VLEN: %u", (unsigned int)vlenb, vlenb << 3);
assert(test_vector());
LOG_INFO("test_main done.");
- return true;
-}
\ No newline at end of file
+ return 0;
+}
diff --git a/vector_vadd_vsub_tests/CMakeLists.txt b/vector_vadd_vsub_tests/CMakeLists.txt
index dc52f7a..26f1143 100644
--- a/vector_vadd_vsub_tests/CMakeLists.txt
+++ b/vector_vadd_vsub_tests/CMakeLists.txt
@@ -3,16 +3,16 @@
foreach(OPERAND_TYPE ${OPERAND_TYPES})
vec_cc_binary(
- NAME
- vector_vadd_vsub_${OPERAND_TYPE}_tests
- SRCS
- vector_vadd_vsub_tests.c
+ NAME
+ vector_vadd_vsub_${OPERAND_TYPE}_tests
+ SRCS
+ vector_vadd_vsub_tests.c
DEPS
springbok
vector_tests
COPTS
- -Wno-unused-value
- -D=TEST_${OPERAND_TYPE}
+ -Wno-unused-value
+ -D=TEST_${OPERAND_TYPE}
LINKOPTS
-T${LINKER_SCRIPT}
-Xlinker --defsym=__itcm_length__=128K
diff --git a/vector_vadd_vsub_tests/vector_vadd_vsub_tests.c b/vector_vadd_vsub_tests/vector_vadd_vsub_tests.c
index 30895f3..b911950 100644
--- a/vector_vadd_vsub_tests/vector_vadd_vsub_tests.c
+++ b/vector_vadd_vsub_tests/vector_vadd_vsub_tests.c
@@ -4,6 +4,7 @@
#include "vector_vadd_vsub_tests.h"
#include <springbok.h>
+// TODO(b/194689843): Re-enable e64 and mf[2|4|8] tests.
static void randomize_array(void *, size_t, size_t);
static void check_array_equality(void *, void *, size_t);
@@ -13,8 +14,8 @@
#define SETUP_TEST(VTYPE, BUFFER_SIZE, VL_DST) \
do { \
VSET(BUFFER_SIZE, VTYPE, m1); \
- COPY_SCALAR_REG("t0", VL_DST); \
- LOG_INFO(#VL_DST " = %lu", VL_DST); \
+ COPY_SCALAR_REG(VL_DST); \
+ LOG_INFO(#VL_DST " = %u", VL_DST); \
} while (0)
#define MAKE_TEST(VTYPE, OPERATION, SUBOPERATION, DATATYPE) \
@@ -76,7 +77,7 @@
__asm__ volatile("vrsub.vi v3, v1, 15"); \
break; \
default: \
- assert(("unhandled intermediate for " #OPERATION ".vi", false)); \
+ assert(("unhandled intermediate for " #OPERATION ".vi", false)); \
} \
} \
__asm__ volatile("vs" #VTYPE ".v v3, (%0)" ::"r"(output)); \
@@ -157,8 +158,8 @@
MAKE_TEST(e16, vadd, vv, int16_t);
MAKE_TEST(e32, vadd, vv, uint32_t);
MAKE_TEST(e32, vadd, vv, int32_t);
-MAKE_TEST(e64, vadd, vv, uint64_t);
-MAKE_TEST(e64, vadd, vv, int64_t);
+// MAKE_TEST(e64, vadd, vv, uint64_t);
+// MAKE_TEST(e64, vadd, vv, int64_t);
void test_vector_vadd_vv(void) {
// TODO(julianmb): test signed + unsigned
test_vector_vadd_vv_e8_uint8_t();
@@ -167,8 +168,8 @@
test_vector_vadd_vv_e16_int16_t();
test_vector_vadd_vv_e32_uint32_t();
test_vector_vadd_vv_e32_int32_t();
- test_vector_vadd_vv_e64_uint64_t();
- test_vector_vadd_vv_e64_int64_t();
+ // test_vector_vadd_vv_e64_uint64_t();
+ // test_vector_vadd_vv_e64_int64_t();
}
MAKE_TEST(e8, vsub, vv, uint8_t);
@@ -177,8 +178,8 @@
MAKE_TEST(e16, vsub, vv, int16_t);
MAKE_TEST(e32, vsub, vv, uint32_t);
MAKE_TEST(e32, vsub, vv, int32_t);
-MAKE_TEST(e64, vsub, vv, uint64_t);
-MAKE_TEST(e64, vsub, vv, int64_t);
+// MAKE_TEST(e64, vsub, vv, uint64_t);
+// MAKE_TEST(e64, vsub, vv, int64_t);
void test_vector_vsub_vv(void) {
// TODO(julianmb): test signed + unsigned
test_vector_vsub_vv_e8_uint8_t();
@@ -187,8 +188,8 @@
test_vector_vsub_vv_e16_int16_t();
test_vector_vsub_vv_e32_uint32_t();
test_vector_vsub_vv_e32_int32_t();
- test_vector_vsub_vv_e64_uint64_t();
- test_vector_vsub_vv_e64_int64_t();
+ // test_vector_vsub_vv_e64_uint64_t();
+ // test_vector_vsub_vv_e64_int64_t();
}
MAKE_TEST(e8, vadd, vx, uint8_t);
@@ -197,8 +198,8 @@
MAKE_TEST(e16, vadd, vx, int16_t);
MAKE_TEST(e32, vadd, vx, uint32_t);
MAKE_TEST(e32, vadd, vx, int32_t);
-MAKE_TEST(e64, vadd, vx, uint64_t);
-MAKE_TEST(e64, vadd, vx, int64_t);
+// MAKE_TEST(e64, vadd, vx, uint64_t);
+// MAKE_TEST(e64, vadd, vx, int64_t);
void test_vector_vadd_vx(void) {
LOG_INFO("%s", __FUNCTION__);
// TODO(julianmb): test signed + unsigned
@@ -209,7 +210,7 @@
test_vector_vadd_vx_e32_uint32_t();
test_vector_vadd_vx_e32_int32_t();
// test_vector_vadd_vx_e64_uint64_t();
- test_vector_vadd_vx_e64_int64_t();
+ // test_vector_vadd_vx_e64_int64_t();
}
MAKE_TEST(e8, vsub, vx, uint8_t);
@@ -218,8 +219,8 @@
MAKE_TEST(e16, vsub, vx, int16_t);
MAKE_TEST(e32, vsub, vx, uint32_t);
MAKE_TEST(e32, vsub, vx, int32_t);
-MAKE_TEST(e64, vsub, vx, uint64_t);
-MAKE_TEST(e64, vsub, vx, int64_t);
+// MAKE_TEST(e64, vsub, vx, uint64_t);
+// MAKE_TEST(e64, vsub, vx, int64_t);
void test_vector_vsub_vx(void) {
LOG_INFO("%s", __FUNCTION__);
// TODO(julianmb): test signed + unsigned
@@ -230,7 +231,7 @@
test_vector_vsub_vx_e32_uint32_t();
test_vector_vsub_vx_e32_int32_t();
// test_vector_vsub_vx_e64_uint64_t();
- test_vector_vsub_vx_e64_int64_t();
+ // test_vector_vsub_vx_e64_int64_t();
}
MAKE_TEST(e8, vadd, vi, uint8_t);
@@ -239,8 +240,8 @@
MAKE_TEST(e16, vadd, vi, int16_t);
MAKE_TEST(e32, vadd, vi, uint32_t);
MAKE_TEST(e32, vadd, vi, int32_t);
-MAKE_TEST(e64, vadd, vi, uint64_t);
-MAKE_TEST(e64, vadd, vi, int64_t);
+// MAKE_TEST(e64, vadd, vi, uint64_t);
+// MAKE_TEST(e64, vadd, vi, int64_t);
void test_vector_vadd_vi(void) {
LOG_INFO("%s", __FUNCTION__);
// TODO(julianmb): test signed + unsigned
@@ -250,8 +251,8 @@
test_vector_vadd_vi_e16_int16_t();
test_vector_vadd_vi_e32_uint32_t();
test_vector_vadd_vi_e32_int32_t();
- test_vector_vadd_vi_e64_uint64_t();
- test_vector_vadd_vi_e64_int64_t();
+ // test_vector_vadd_vi_e64_uint64_t();
+ // test_vector_vadd_vi_e64_int64_t();
}
MAKE_TEST(e8, vrsub, vx, uint8_t);
@@ -260,8 +261,8 @@
MAKE_TEST(e16, vrsub, vx, int16_t);
MAKE_TEST(e32, vrsub, vx, uint32_t);
MAKE_TEST(e32, vrsub, vx, int32_t);
-MAKE_TEST(e64, vrsub, vx, uint64_t);
-MAKE_TEST(e64, vrsub, vx, int64_t);
+// MAKE_TEST(e64, vrsub, vx, uint64_t);
+// MAKE_TEST(e64, vrsub, vx, int64_t);
void test_vector_vrsub_vx(void) {
LOG_INFO("%s", __FUNCTION__);
// TODO(julianmb): test signed + unsigned
@@ -272,7 +273,7 @@
test_vector_vrsub_vx_e32_uint32_t();
test_vector_vrsub_vx_e32_int32_t();
// test_vector_vrsub_vx_e64_uint64_t();
- test_vector_vrsub_vx_e64_int64_t();
+ // test_vector_vrsub_vx_e64_int64_t();
}
MAKE_TEST(e8, vrsub, vi, uint8_t);
@@ -281,8 +282,8 @@
MAKE_TEST(e16, vrsub, vi, int16_t);
MAKE_TEST(e32, vrsub, vi, uint32_t);
MAKE_TEST(e32, vrsub, vi, int32_t);
-MAKE_TEST(e64, vrsub, vi, uint64_t);
-MAKE_TEST(e64, vrsub, vi, int64_t);
+// MAKE_TEST(e64, vrsub, vi, uint64_t);
+// MAKE_TEST(e64, vrsub, vi, int64_t);
void test_vector_vrsub_vi(void) {
LOG_INFO("%s", __FUNCTION__);
// TODO(julianmb): test signed + unsigned
@@ -292,8 +293,8 @@
test_vector_vrsub_vi_e16_int16_t();
test_vector_vrsub_vi_e32_uint32_t();
test_vector_vrsub_vi_e32_int32_t();
- test_vector_vrsub_vi_e64_uint64_t();
- test_vector_vrsub_vi_e64_int64_t();
+ // test_vector_vrsub_vi_e64_uint64_t();
+ // test_vector_vrsub_vi_e64_int64_t();
}
static void randomize_array(void *array, size_t length, size_t element_size) {
diff --git a/vector_vset_tests/CMakeLists.txt b/vector_vset_tests/CMakeLists.txt
index 1fa1776..01868b9 100644
--- a/vector_vset_tests/CMakeLists.txt
+++ b/vector_vset_tests/CMakeLists.txt
@@ -1,9 +1,9 @@
vec_cc_binary(
- NAME
- vector_vset_tests
- SRCS
- vector_vset_tests.c
+ NAME
+ vector_vset_tests
+ SRCS
+ vector_vset_tests.c
DEPS
springbok
vector_tests
diff --git a/vector_vset_tests/vector_vset_tests.c b/vector_vset_tests/vector_vset_tests.c
index fd61a7a..a1c84d5 100644
--- a/vector_vset_tests/vector_vset_tests.c
+++ b/vector_vset_tests/vector_vset_tests.c
@@ -2,6 +2,8 @@
#include <string.h>
#include "vector_vset_tests.h"
+// TODO(b/194689843): Re-enable e64 and mf[2|4|8] tests.
+
uint8_t lmul_string_to_vlmul(const char *);
uint32_t construct_vtype(const char *, uint8_t, bool, bool);
void subtest_vsetvl(const char *, const char *, uint32_t, bool, bool, uint32_t,
@@ -69,9 +71,9 @@
__asm__ volatile("vsetvl t0, %[AVL], %[VTYPE]" ::[AVL] "r"(avl_vol),
[ VTYPE ] "r"(vtype));
- COPY_SCALAR_REG("t0", observed_vl);
+ COPY_SCALAR_REG(observed_vl);
if (observed_vl != expected_vl) {
- LOG_INFO("observed_vl = %lu, expected_vl = %lu, test_line = %lu ", observed_vl,
+ LOG_INFO("observed_vl = %u, expected_vl = %u, test_line = %u ", observed_vl,
expected_vl, line);
}
assert(observed_vl == expected_vl);
@@ -88,6 +90,7 @@
};
struct subtest_s subtests[] = {
+ /*
{.vtypei = "e8",
.lmul = "1/8",
.avl = 0,
@@ -390,6 +393,7 @@
.avl = (_TEST_VLENB >> 4) + 1,
.expected_vl = 0,
.line = __LINE__},
+ */
{.vtypei = "e8",
.lmul = "1",
@@ -406,11 +410,11 @@
.avl = 0,
.expected_vl = 0,
.line = __LINE__},
- {.vtypei = "e64",
- .lmul = "1",
- .avl = 0,
- .expected_vl = 0,
- .line = __LINE__},
+ // {.vtypei = "e64",
+ // .lmul = "1",
+ // .avl = 0,
+ // .expected_vl = 0,
+ // .line = __LINE__},
{.vtypei = "e8",
.lmul = "1",
.avl = 1,
@@ -426,11 +430,11 @@
.avl = 1,
.expected_vl = 1,
.line = __LINE__},
- {.vtypei = "e64",
- .lmul = "1",
- .avl = 1,
- .expected_vl = 1,
- .line = __LINE__},
+ // {.vtypei = "e64",
+ // .lmul = "1",
+ // .avl = 1,
+ // .expected_vl = 1,
+ // .line = __LINE__},
{.vtypei = "e8",
.lmul = "1",
.avl = (_TEST_VLENB >> 0) - 1,
@@ -446,11 +450,11 @@
.avl = (_TEST_VLENB >> 2) - 1,
.expected_vl = (_TEST_VLENB >> 2) - 1,
.line = __LINE__},
- {.vtypei = "e64",
- .lmul = "1",
- .avl = (_TEST_VLENB >> 3) - 1,
- .expected_vl = (_TEST_VLENB >> 3) - 1,
- .line = __LINE__},
+ // {.vtypei = "e64",
+ // .lmul = "1",
+ // .avl = (_TEST_VLENB >> 3) - 1,
+ // .expected_vl = (_TEST_VLENB >> 3) - 1,
+ // .line = __LINE__},
{.vtypei = "e8",
.lmul = "1",
.avl = (_TEST_VLENB >> 0),
@@ -466,11 +470,11 @@
.avl = (_TEST_VLENB >> 2),
.expected_vl = (_TEST_VLENB >> 2),
.line = __LINE__},
- {.vtypei = "e64",
- .lmul = "1",
- .avl = (_TEST_VLENB >> 3),
- .expected_vl = (_TEST_VLENB >> 3),
- .line = __LINE__},
+ // {.vtypei = "e64",
+ // .lmul = "1",
+ // .avl = (_TEST_VLENB >> 3),
+ // .expected_vl = (_TEST_VLENB >> 3),
+ // .line = __LINE__},
{.vtypei = "e8",
.lmul = "1",
.avl = (_TEST_VLENB >> 0) + 1,
@@ -486,11 +490,11 @@
.avl = (_TEST_VLENB >> 2) + 1,
.expected_vl = (_TEST_VLENB >> 2),
.line = __LINE__},
- {.vtypei = "e64",
- .lmul = "1",
- .avl = (_TEST_VLENB >> 3) + 1,
- .expected_vl = (_TEST_VLENB >> 3),
- .line = __LINE__},
+ // {.vtypei = "e64",
+ // .lmul = "1",
+ // .avl = (_TEST_VLENB >> 3) + 1,
+ // .expected_vl = (_TEST_VLENB >> 3),
+ // .line = __LINE__},
{.vtypei = "e8",
.lmul = "2",
@@ -507,11 +511,11 @@
.avl = 0,
.expected_vl = 0,
.line = __LINE__},
- {.vtypei = "e64",
- .lmul = "2",
- .avl = 0,
- .expected_vl = 0,
- .line = __LINE__},
+ // {.vtypei = "e64",
+ // .lmul = "2",
+ // .avl = 0,
+ // .expected_vl = 0,
+ // .line = __LINE__},
{.vtypei = "e8",
.lmul = "2",
.avl = 1,
@@ -527,11 +531,11 @@
.avl = 1,
.expected_vl = 1,
.line = __LINE__},
- {.vtypei = "e64",
- .lmul = "2",
- .avl = 1,
- .expected_vl = 1,
- .line = __LINE__},
+ // {.vtypei = "e64",
+ // .lmul = "2",
+ // .avl = 1,
+ // .expected_vl = 1,
+ // .line = __LINE__},
{.vtypei = "e8",
.lmul = "2",
.avl = (_TEST_VLENB << 1) - 1,
@@ -547,11 +551,11 @@
.avl = (_TEST_VLENB >> 1) - 1,
.expected_vl = (_TEST_VLENB >> 1) - 1,
.line = __LINE__},
- {.vtypei = "e64",
- .lmul = "2",
- .avl = (_TEST_VLENB >> 2) - 1,
- .expected_vl = (_TEST_VLENB >> 2) - 1,
- .line = __LINE__},
+ // {.vtypei = "e64",
+ // .lmul = "2",
+ // .avl = (_TEST_VLENB >> 2) - 1,
+ // .expected_vl = (_TEST_VLENB >> 2) - 1,
+ // .line = __LINE__},
{.vtypei = "e8",
.lmul = "2",
.avl = (_TEST_VLENB << 1),
@@ -567,11 +571,11 @@
.avl = (_TEST_VLENB >> 1),
.expected_vl = (_TEST_VLENB >> 1),
.line = __LINE__},
- {.vtypei = "e64",
- .lmul = "2",
- .avl = (_TEST_VLENB >> 2),
- .expected_vl = (_TEST_VLENB >> 2),
- .line = __LINE__},
+ // {.vtypei = "e64",
+ // .lmul = "2",
+ // .avl = (_TEST_VLENB >> 2),
+ // .expected_vl = (_TEST_VLENB >> 2),
+ // .line = __LINE__},
{.vtypei = "e8",
.lmul = "2",
.avl = (_TEST_VLENB << 1) + 1,
@@ -587,11 +591,11 @@
.avl = (_TEST_VLENB >> 1) + 1,
.expected_vl = (_TEST_VLENB >> 1),
.line = __LINE__},
- {.vtypei = "e64",
- .lmul = "2",
- .avl = (_TEST_VLENB >> 2) + 1,
- .expected_vl = (_TEST_VLENB >> 2),
- .line = __LINE__},
+ // {.vtypei = "e64",
+ // .lmul = "2",
+ // .avl = (_TEST_VLENB >> 2) + 1,
+ // .expected_vl = (_TEST_VLENB >> 2),
+ // .line = __LINE__},
{.vtypei = "e8",
.lmul = "4",
@@ -608,11 +612,11 @@
.avl = 0,
.expected_vl = 0,
.line = __LINE__},
- {.vtypei = "e64",
- .lmul = "4",
- .avl = 0,
- .expected_vl = 0,
- .line = __LINE__},
+ // {.vtypei = "e64",
+ // .lmul = "4",
+ // .avl = 0,
+ // .expected_vl = 0,
+ // .line = __LINE__},
{.vtypei = "e8",
.lmul = "4",
.avl = 1,
@@ -628,11 +632,11 @@
.avl = 1,
.expected_vl = 1,
.line = __LINE__},
- {.vtypei = "e64",
- .lmul = "4",
- .avl = 1,
- .expected_vl = 1,
- .line = __LINE__},
+ // {.vtypei = "e64",
+ // .lmul = "4",
+ // .avl = 1,
+ // .expected_vl = 1,
+ // .line = __LINE__},
{.vtypei = "e8",
.lmul = "4",
.avl = (_TEST_VLENB << 2) - 1,
@@ -648,11 +652,11 @@
.avl = (_TEST_VLENB >> 0) - 1,
.expected_vl = (_TEST_VLENB >> 0) - 1,
.line = __LINE__},
- {.vtypei = "e64",
- .lmul = "4",
- .avl = (_TEST_VLENB >> 1) - 1,
- .expected_vl = (_TEST_VLENB >> 1) - 1,
- .line = __LINE__},
+ // {.vtypei = "e64",
+ // .lmul = "4",
+ // .avl = (_TEST_VLENB >> 1) - 1,
+ // .expected_vl = (_TEST_VLENB >> 1) - 1,
+ // .line = __LINE__},
{.vtypei = "e8",
.lmul = "4",
.avl = (_TEST_VLENB << 2),
@@ -668,11 +672,11 @@
.avl = (_TEST_VLENB >> 0),
.expected_vl = (_TEST_VLENB >> 0),
.line = __LINE__},
- {.vtypei = "e64",
- .lmul = "4",
- .avl = (_TEST_VLENB >> 1),
- .expected_vl = (_TEST_VLENB >> 1),
- .line = __LINE__},
+ // {.vtypei = "e64",
+ // .lmul = "4",
+ // .avl = (_TEST_VLENB >> 1),
+ // .expected_vl = (_TEST_VLENB >> 1),
+ // .line = __LINE__},
{.vtypei = "e8",
.lmul = "4",
.avl = (_TEST_VLENB << 2) + 1,
@@ -688,11 +692,11 @@
.avl = (_TEST_VLENB >> 0) + 1,
.expected_vl = (_TEST_VLENB >> 0),
.line = __LINE__},
- {.vtypei = "e64",
- .lmul = "4",
- .avl = (_TEST_VLENB >> 1) + 1,
- .expected_vl = (_TEST_VLENB >> 1),
- .line = __LINE__},
+ // {.vtypei = "e64",
+ // .lmul = "4",
+ // .avl = (_TEST_VLENB >> 1) + 1,
+ // .expected_vl = (_TEST_VLENB >> 1),
+ // .line = __LINE__},
{.vtypei = "e8",
.lmul = "8",
@@ -709,11 +713,11 @@
.avl = 0,
.expected_vl = 0,
.line = __LINE__},
- {.vtypei = "e64",
- .lmul = "8",
- .avl = 0,
- .expected_vl = 0,
- .line = __LINE__},
+ // {.vtypei = "e64",
+ // .lmul = "8",
+ // .avl = 0,
+ // .expected_vl = 0,
+ // .line = __LINE__},
{.vtypei = "e8",
.lmul = "8",
.avl = 1,
@@ -729,11 +733,11 @@
.avl = 1,
.expected_vl = 1,
.line = __LINE__},
- {.vtypei = "e64",
- .lmul = "8",
- .avl = 1,
- .expected_vl = 1,
- .line = __LINE__},
+ // {.vtypei = "e64",
+ // .lmul = "8",
+ // .avl = 1,
+ // .expected_vl = 1,
+ // .line = __LINE__},
{.vtypei = "e8",
.lmul = "8",
.avl = (_TEST_VLENB << 3) - 1,
@@ -749,11 +753,11 @@
.avl = (_TEST_VLENB << 1) - 1,
.expected_vl = (_TEST_VLENB << 1) - 1,
.line = __LINE__},
- {.vtypei = "e64",
- .lmul = "8",
- .avl = (_TEST_VLENB >> 0) - 1,
- .expected_vl = (_TEST_VLENB >> 0) - 1,
- .line = __LINE__},
+ // {.vtypei = "e64",
+ // .lmul = "8",
+ // .avl = (_TEST_VLENB >> 0) - 1,
+ // .expected_vl = (_TEST_VLENB >> 0) - 1,
+ // .line = __LINE__},
{.vtypei = "e8",
.lmul = "8",
.avl = (_TEST_VLENB << 3),
@@ -769,11 +773,11 @@
.avl = (_TEST_VLENB << 1),
.expected_vl = (_TEST_VLENB << 1),
.line = __LINE__},
- {.vtypei = "e64",
- .lmul = "8",
- .avl = (_TEST_VLENB >> 0),
- .expected_vl = (_TEST_VLENB >> 0),
- .line = __LINE__},
+ // {.vtypei = "e64",
+ // .lmul = "8",
+ // .avl = (_TEST_VLENB >> 0),
+ // .expected_vl = (_TEST_VLENB >> 0),
+ // .line = __LINE__},
{.vtypei = "e8",
.lmul = "8",
.avl = (_TEST_VLENB << 3) + 1,
@@ -789,16 +793,16 @@
.avl = (_TEST_VLENB << 1) + 1,
.expected_vl = (_TEST_VLENB << 1),
.line = __LINE__},
- {.vtypei = "e64",
- .lmul = "8",
- .avl = (_TEST_VLENB >> 0) + 1,
- .expected_vl = (_TEST_VLENB >> 0),
- .line = __LINE__},
+ // {.vtypei = "e64",
+ // .lmul = "8",
+ // .avl = (_TEST_VLENB >> 0) + 1,
+ // .expected_vl = (_TEST_VLENB >> 0),
+ // .line = __LINE__},
};
uint32_t len = sizeof(subtests) / sizeof(struct subtest_s);
for (uint32_t i = 0; i < len; i++) {
- LOG_INFO("Subtest %lu", i);
+ LOG_INFO("Subtest %u", i);
struct subtest_s subtest = subtests[i];
subtest_vsetvl(subtest.vtypei, subtest.lmul, subtest.avl, false, false,
subtest.expected_vl, subtest.line);
@@ -813,216 +817,180 @@
// vsetvli rd, rs1, vtypei # rd = new vl, rs1 = AVL, vtypei = new vtype
// setting
-#define VSETVLI_TEST_HELPER(VTYPEI_VAR, VTYPEI_SYMBOL, AVL_VAR, \
- MATCH_VARIABLE) \
- do { \
- if (strequal(VTYPEI_VAR, VTYPEI_SYMBOL)) { \
- __asm__ volatile( \
- "vsetvli t0, %[AVL], " VTYPEI_SYMBOL::[AVL] "r"(AVL_VAR)); \
- MATCH_VARIABLE = true; \
- } \
- } while (0)
+#define VSETVLI_TEST_HELPER(VTYPEI_VAR, VTYPEI_SYMBOL, AVL_VAR) \
+ if (strequal(VTYPEI_VAR, VTYPEI_SYMBOL)) { \
+ __asm__ volatile( \
+ "vsetvli t0, %[AVL], " VTYPEI_SYMBOL::[AVL] "r"(AVL_VAR)); \
+ }
void subtest_vsetvli(const char *vtypei, uint32_t avl, uint32_t expected_vl) {
uint32_t observed_vl = 0;
- bool was_instruction_executed = false;
- VSETVLI_TEST_HELPER(vtypei, "e8", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e8,m1,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e8,m1,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e8,m1,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e8,m1,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e16", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e16,m1,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e16,m1,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e16,m1,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e16,m1,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e32", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e32,m1,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e32,m1,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e32,m1,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e32,m1,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e64", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,ta,ma", avl, was_instruction_executed);
+ /*
+ VSETVLI_TEST_HELPER(vtypei, "e64,m1,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e64,m1,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e64,m1,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e64,m1,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e8,mf8", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,mf8,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,mf8,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,mf8,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,mf8,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e8,mf8,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e8,mf8,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e8,mf8,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e8,mf8,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e16,mf8", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,mf8,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,mf8,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,mf8,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,mf8,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e16,mf8,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e16,mf8,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e16,mf8,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e16,mf8,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e32,mf8", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,mf8,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,mf8,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,mf8,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,mf8,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e32,mf8,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e32,mf8,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e32,mf8,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e32,mf8,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e64,mf8", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,mf8,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,mf8,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,mf8,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,mf8,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e64,mf8,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e64,mf8,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e64,mf8,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e64,mf8,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e8,mf4", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,mf4,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,mf4,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,mf4,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,mf4,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e8,mf4,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e8,mf4,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e8,mf4,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e8,mf4,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e16,mf4", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,mf4,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,mf4,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,mf4,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,mf4,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e16,mf4,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e16,mf4,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e16,mf4,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e16,mf4,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e32,mf4", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,mf4,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,mf4,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,mf4,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,mf4,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e32,mf4,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e32,mf4,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e32,mf4,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e32,mf4,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e64,mf4", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,mf4,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,mf4,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,mf4,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,mf4,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e64,mf4,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e64,mf4,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e64,mf4,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e64,mf4,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e8,mf2", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,mf2,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,mf2,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,mf2,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,mf2,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e8,mf2,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e8,mf2,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e8,mf2,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e8,mf2,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e16,mf2", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,mf2,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,mf2,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,mf2,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,mf2,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e16,mf2,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e16,mf2,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e16,mf2,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e16,mf2,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e32,mf2", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,mf2,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,mf2,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,mf2,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,mf2,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e32,mf2,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e32,mf2,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e32,mf2,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e32,mf2,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e64,mf2", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,mf2,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,mf2,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,mf2,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,mf2,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e64,mf2,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e64,mf2,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e64,mf2,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e64,mf2,ta,ma", avl);
+ */
- VSETVLI_TEST_HELPER(vtypei, "e8,m1", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,m1,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,m1,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,m1,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,m1,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e8,m1,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e8,m1,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e8,m1,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e8,m1,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e16,m1", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,m1,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,m1,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,m1,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,m1,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e16,m1,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e16,m1,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e16,m1,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e16,m1,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e32,m1", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,m1,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,m1,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,m1,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,m1,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e32,m1,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e32,m1,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e32,m1,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e32,m1,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e64,m1", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,m1,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,m1,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,m1,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,m1,ta,ma", avl, was_instruction_executed);
+ // VSETVLI_TEST_HELPER(vtypei, "e64,m1,tu,mu", avl);
+ // VSETVLI_TEST_HELPER(vtypei, "e64,m1,tu,ma", avl);
+ // VSETVLI_TEST_HELPER(vtypei, "e64,m1,ta,mu", avl);
+ // VSETVLI_TEST_HELPER(vtypei, "e64,m1,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e8,m2", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,m2,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,m2,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,m2,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,m2,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e8,m2,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e8,m2,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e8,m2,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e8,m2,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e16,m2", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,m2,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,m2,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,m2,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,m2,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e16,m2,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e16,m2,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e16,m2,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e16,m2,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e32,m2", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,m2,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,m2,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,m2,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,m2,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e32,m2,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e32,m2,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e32,m2,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e32,m2,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e64,m2", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,m2,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,m2,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,m2,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,m2,ta,ma", avl, was_instruction_executed);
+ // VSETVLI_TEST_HELPER(vtypei, "e64,m2,tu,mu", avl);
+ // VSETVLI_TEST_HELPER(vtypei, "e64,m2,tu,ma", avl);
+ // VSETVLI_TEST_HELPER(vtypei, "e64,m2,ta,mu", avl);
+ // VSETVLI_TEST_HELPER(vtypei, "e64,m2,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e8,m4", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,m4,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,m4,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,m4,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,m4,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e8,m4,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e8,m4,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e8,m4,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e8,m4,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e16,m4", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,m4,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,m4,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,m4,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,m4,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e16,m4,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e16,m4,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e16,m4,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e16,m4,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e32,m4", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,m4,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,m4,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,m4,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,m4,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e32,m4,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e32,m4,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e32,m4,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e32,m4,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e64,m4", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,m4,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,m4,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,m4,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,m4,ta,ma", avl, was_instruction_executed);
+ // VSETVLI_TEST_HELPER(vtypei, "e64,m4,tu,mu", avl);
+ // VSETVLI_TEST_HELPER(vtypei, "e64,m4,tu,ma", avl);
+ // VSETVLI_TEST_HELPER(vtypei, "e64,m4,ta,mu", avl);
+ // VSETVLI_TEST_HELPER(vtypei, "e64,m4,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e8,m8", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,m8,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,m8,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,m8,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e8,m8,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e8,m8,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e8,m8,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e8,m8,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e8,m8,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e16,m8", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,m8,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,m8,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,m8,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e16,m8,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e16,m8,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e16,m8,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e16,m8,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e16,m8,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e32,m8", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,m8,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,m8,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,m8,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e32,m8,ta,ma", avl, was_instruction_executed);
+ VSETVLI_TEST_HELPER(vtypei, "e32,m8,tu,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e32,m8,tu,ma", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e32,m8,ta,mu", avl);
+ VSETVLI_TEST_HELPER(vtypei, "e32,m8,ta,ma", avl);
- VSETVLI_TEST_HELPER(vtypei, "e64,m8", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,m8,tu,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,m8,tu,ma", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,m8,ta,mu", avl, was_instruction_executed);
- VSETVLI_TEST_HELPER(vtypei, "e64,m8,ta,ma", avl, was_instruction_executed);
+ // VSETVLI_TEST_HELPER(vtypei, "e64,m8,tu,mu", avl);
+ // VSETVLI_TEST_HELPER(vtypei, "e64,m8,tu,ma", avl);
+ // VSETVLI_TEST_HELPER(vtypei, "e64,m8,ta,mu", avl);
+ // VSETVLI_TEST_HELPER(vtypei, "e64,m8,ta,ma", avl);
- assert(was_instruction_executed);
- COPY_SCALAR_REG("t0", observed_vl);
+ COPY_SCALAR_REG(observed_vl);
if (observed_vl != expected_vl) {
- LOG_INFO("Subtest failed, observed_vl = %lu, expected_vl = %lu", observed_vl,
+ LOG_INFO("Subtest failed, observed_vl = %u, expected_vl = %u", observed_vl,
expected_vl);
}
assert(observed_vl == expected_vl);
@@ -1037,51 +1005,7 @@
};
struct subtest_s subtests[] = {
- {.vtypei = "e8", .avl = 0, .expected_vl = 0},
- {.vtypei = "e16", .avl = 0, .expected_vl = 0},
- {.vtypei = "e32", .avl = 0, .expected_vl = 0},
- {.vtypei = "e64", .avl = 0, .expected_vl = 0},
- {.vtypei = "e8", .avl = 1, .expected_vl = 1},
- {.vtypei = "e16", .avl = 1, .expected_vl = 1},
- {.vtypei = "e32", .avl = 1, .expected_vl = 1},
- {.vtypei = "e64", .avl = 1, .expected_vl = 1},
- {.vtypei = "e8",
- .avl = (_TEST_VLENB >> 0) - 1,
- .expected_vl = (_TEST_VLENB >> 0) - 1},
- {.vtypei = "e16",
- .avl = (_TEST_VLENB >> 1) - 1,
- .expected_vl = (_TEST_VLENB >> 1) - 1},
- {.vtypei = "e32",
- .avl = (_TEST_VLENB >> 2) - 1,
- .expected_vl = (_TEST_VLENB >> 2) - 1},
- {.vtypei = "e64",
- .avl = (_TEST_VLENB >> 3) - 1,
- .expected_vl = (_TEST_VLENB >> 3) - 1},
- {.vtypei = "e8",
- .avl = (_TEST_VLENB >> 0),
- .expected_vl = (_TEST_VLENB >> 0)},
- {.vtypei = "e16",
- .avl = (_TEST_VLENB >> 1),
- .expected_vl = (_TEST_VLENB >> 1)},
- {.vtypei = "e32",
- .avl = (_TEST_VLENB >> 2),
- .expected_vl = (_TEST_VLENB >> 2)},
- {.vtypei = "e64",
- .avl = (_TEST_VLENB >> 3),
- .expected_vl = (_TEST_VLENB >> 3)},
- {.vtypei = "e8",
- .avl = (_TEST_VLENB >> 0) + 1,
- .expected_vl = (_TEST_VLENB >> 0)},
- {.vtypei = "e16",
- .avl = (_TEST_VLENB >> 1) + 1,
- .expected_vl = (_TEST_VLENB >> 1)},
- {.vtypei = "e32",
- .avl = (_TEST_VLENB >> 2) + 1,
- .expected_vl = (_TEST_VLENB >> 2)},
- {.vtypei = "e64",
- .avl = (_TEST_VLENB >> 3) + 1,
- .expected_vl = (_TEST_VLENB >> 3)},
-
+ /*
{.vtypei = "e8,mf8", .avl = 0, .expected_vl = 0},
{.vtypei = "e16,mf8", .avl = 0, .expected_vl = 0},
{.vtypei = "e32,mf8", .avl = 0, .expected_vl = 0},
@@ -1180,15 +1104,16 @@
.avl = (_TEST_VLENB >> 3) + 1,
.expected_vl = (_TEST_VLENB >> 3)},
{.vtypei = "e64,mf2", .avl = (_TEST_VLENB >> 4) + 1, .expected_vl = 0},
+ */
{.vtypei = "e8,m1", .avl = 0, .expected_vl = 0},
{.vtypei = "e16,m1", .avl = 0, .expected_vl = 0},
{.vtypei = "e32,m1", .avl = 0, .expected_vl = 0},
- {.vtypei = "e64,m1", .avl = 0, .expected_vl = 0},
+ // {.vtypei = "e64,m1", .avl = 0, .expected_vl = 0},
{.vtypei = "e8,m1", .avl = 1, .expected_vl = 1},
{.vtypei = "e16,m1", .avl = 1, .expected_vl = 1},
{.vtypei = "e32,m1", .avl = 1, .expected_vl = 1},
- {.vtypei = "e64,m1", .avl = 1, .expected_vl = 1},
+ // {.vtypei = "e64,m1", .avl = 1, .expected_vl = 1},
{.vtypei = "e8,m1",
.avl = (_TEST_VLENB >> 0) - 1,
.expected_vl = (_TEST_VLENB >> 0) - 1},
@@ -1198,9 +1123,9 @@
{.vtypei = "e32,m1",
.avl = (_TEST_VLENB >> 2) - 1,
.expected_vl = (_TEST_VLENB >> 2) - 1},
- {.vtypei = "e64,m1",
- .avl = (_TEST_VLENB >> 3) - 1,
- .expected_vl = (_TEST_VLENB >> 3) - 1},
+ // {.vtypei = "e64,m1",
+ // .avl = (_TEST_VLENB >> 3) - 1,
+ // .expected_vl = (_TEST_VLENB >> 3) - 1},
{.vtypei = "e8,m1",
.avl = (_TEST_VLENB >> 0),
.expected_vl = (_TEST_VLENB >> 0)},
@@ -1210,9 +1135,9 @@
{.vtypei = "e32,m1",
.avl = (_TEST_VLENB >> 2),
.expected_vl = (_TEST_VLENB >> 2)},
- {.vtypei = "e64,m1",
- .avl = (_TEST_VLENB >> 3),
- .expected_vl = (_TEST_VLENB >> 3)},
+ // {.vtypei = "e64,m1",
+ // .avl = (_TEST_VLENB >> 3),
+ // .expected_vl = (_TEST_VLENB >> 3)},
{.vtypei = "e8,m1",
.avl = (_TEST_VLENB >> 0) + 1,
.expected_vl = (_TEST_VLENB >> 0)},
@@ -1222,18 +1147,18 @@
{.vtypei = "e32,m1",
.avl = (_TEST_VLENB >> 2) + 1,
.expected_vl = (_TEST_VLENB >> 2)},
- {.vtypei = "e64,m1",
- .avl = (_TEST_VLENB >> 3) + 1,
- .expected_vl = (_TEST_VLENB >> 3)},
+ // {.vtypei = "e64,m1",
+ // .avl = (_TEST_VLENB >> 3) + 1,
+ // .expected_vl = (_TEST_VLENB >> 3)},
{.vtypei = "e8,m2", .avl = 0, .expected_vl = 0},
{.vtypei = "e16,m2", .avl = 0, .expected_vl = 0},
{.vtypei = "e32,m2", .avl = 0, .expected_vl = 0},
- {.vtypei = "e64,m2", .avl = 0, .expected_vl = 0},
+ // {.vtypei = "e64,m2", .avl = 0, .expected_vl = 0},
{.vtypei = "e8,m2", .avl = 1, .expected_vl = 1},
{.vtypei = "e16,m2", .avl = 1, .expected_vl = 1},
{.vtypei = "e32,m2", .avl = 1, .expected_vl = 1},
- {.vtypei = "e64,m2", .avl = 1, .expected_vl = 1},
+ // {.vtypei = "e64,m2", .avl = 1, .expected_vl = 1},
{.vtypei = "e8,m2",
.avl = (_TEST_VLENB << 1) - 1,
.expected_vl = (_TEST_VLENB << 1) - 1},
@@ -1243,9 +1168,9 @@
{.vtypei = "e32,m2",
.avl = (_TEST_VLENB >> 1) - 1,
.expected_vl = (_TEST_VLENB >> 1) - 1},
- {.vtypei = "e64,m2",
- .avl = (_TEST_VLENB >> 2) - 1,
- .expected_vl = (_TEST_VLENB >> 2) - 1},
+ // {.vtypei = "e64,m2",
+ // .avl = (_TEST_VLENB >> 2) - 1,
+ // .expected_vl = (_TEST_VLENB >> 2) - 1},
{.vtypei = "e8,m2",
.avl = (_TEST_VLENB << 1),
.expected_vl = (_TEST_VLENB << 1)},
@@ -1255,9 +1180,9 @@
{.vtypei = "e32,m2",
.avl = (_TEST_VLENB >> 1),
.expected_vl = (_TEST_VLENB >> 1)},
- {.vtypei = "e64,m2",
- .avl = (_TEST_VLENB >> 2),
- .expected_vl = (_TEST_VLENB >> 2)},
+ // {.vtypei = "e64,m2",
+ // .avl = (_TEST_VLENB >> 2),
+ // .expected_vl = (_TEST_VLENB >> 2)},
{.vtypei = "e8,m2",
.avl = (_TEST_VLENB << 1) + 1,
.expected_vl = (_TEST_VLENB << 1)},
@@ -1267,18 +1192,18 @@
{.vtypei = "e32,m2",
.avl = (_TEST_VLENB >> 1) + 1,
.expected_vl = (_TEST_VLENB >> 1)},
- {.vtypei = "e64,m2",
- .avl = (_TEST_VLENB >> 2) + 1,
- .expected_vl = (_TEST_VLENB >> 2)},
+ // {.vtypei = "e64,m2",
+ // .avl = (_TEST_VLENB >> 2) + 1,
+ // .expected_vl = (_TEST_VLENB >> 2)},
{.vtypei = "e8,m4", .avl = 0, .expected_vl = 0},
{.vtypei = "e16,m4", .avl = 0, .expected_vl = 0},
{.vtypei = "e32,m4", .avl = 0, .expected_vl = 0},
- {.vtypei = "e64,m4", .avl = 0, .expected_vl = 0},
+ // {.vtypei = "e64,m4", .avl = 0, .expected_vl = 0},
{.vtypei = "e8,m4", .avl = 1, .expected_vl = 1},
{.vtypei = "e16,m4", .avl = 1, .expected_vl = 1},
{.vtypei = "e32,m4", .avl = 1, .expected_vl = 1},
- {.vtypei = "e64,m4", .avl = 1, .expected_vl = 1},
+ // {.vtypei = "e64,m4", .avl = 1, .expected_vl = 1},
{.vtypei = "e8,m4",
.avl = (_TEST_VLENB << 2) - 1,
.expected_vl = (_TEST_VLENB << 2) - 1},
@@ -1288,9 +1213,9 @@
{.vtypei = "e32,m4",
.avl = (_TEST_VLENB >> 0) - 1,
.expected_vl = (_TEST_VLENB >> 0) - 1},
- {.vtypei = "e64,m4",
- .avl = (_TEST_VLENB >> 1) - 1,
- .expected_vl = (_TEST_VLENB >> 1) - 1},
+ // {.vtypei = "e64,m4",
+ // .avl = (_TEST_VLENB >> 1) - 1,
+ // .expected_vl = (_TEST_VLENB >> 1) - 1},
{.vtypei = "e8,m4",
.avl = (_TEST_VLENB << 2),
.expected_vl = (_TEST_VLENB << 2)},
@@ -1300,9 +1225,9 @@
{.vtypei = "e32,m4",
.avl = (_TEST_VLENB >> 0),
.expected_vl = (_TEST_VLENB >> 0)},
- {.vtypei = "e64,m4",
- .avl = (_TEST_VLENB >> 1),
- .expected_vl = (_TEST_VLENB >> 1)},
+ // {.vtypei = "e64,m4",
+ // .avl = (_TEST_VLENB >> 1),
+ // .expected_vl = (_TEST_VLENB >> 1)},
{.vtypei = "e8,m4",
.avl = (_TEST_VLENB << 2) + 1,
.expected_vl = (_TEST_VLENB << 2)},
@@ -1312,18 +1237,18 @@
{.vtypei = "e32,m4",
.avl = (_TEST_VLENB >> 0) + 1,
.expected_vl = (_TEST_VLENB >> 0)},
- {.vtypei = "e64,m4",
- .avl = (_TEST_VLENB >> 1) + 1,
- .expected_vl = (_TEST_VLENB >> 1)},
+ // {.vtypei = "e64,m4",
+ // .avl = (_TEST_VLENB >> 1) + 1,
+ // .expected_vl = (_TEST_VLENB >> 1)},
{.vtypei = "e8,m8", .avl = 0, .expected_vl = 0},
{.vtypei = "e16,m8", .avl = 0, .expected_vl = 0},
{.vtypei = "e32,m8", .avl = 0, .expected_vl = 0},
- {.vtypei = "e64,m8", .avl = 0, .expected_vl = 0},
+ // {.vtypei = "e64,m8", .avl = 0, .expected_vl = 0},
{.vtypei = "e8,m8", .avl = 1, .expected_vl = 1},
{.vtypei = "e16,m8", .avl = 1, .expected_vl = 1},
{.vtypei = "e32,m8", .avl = 1, .expected_vl = 1},
- {.vtypei = "e64,m8", .avl = 1, .expected_vl = 1},
+ // {.vtypei = "e64,m8", .avl = 1, .expected_vl = 1},
{.vtypei = "e8,m8",
.avl = (_TEST_VLENB << 3) - 1,
.expected_vl = (_TEST_VLENB << 3) - 1},
@@ -1333,9 +1258,9 @@
{.vtypei = "e32,m8",
.avl = (_TEST_VLENB << 1) - 1,
.expected_vl = (_TEST_VLENB << 1) - 1},
- {.vtypei = "e64,m8",
- .avl = (_TEST_VLENB >> 0) - 1,
- .expected_vl = (_TEST_VLENB >> 0) - 1},
+ // {.vtypei = "e64,m8",
+ // .avl = (_TEST_VLENB >> 0) - 1,
+ // .expected_vl = (_TEST_VLENB >> 0) - 1},
{.vtypei = "e8,m8",
.avl = (_TEST_VLENB << 3),
.expected_vl = (_TEST_VLENB << 3)},
@@ -1345,9 +1270,9 @@
{.vtypei = "e32,m8",
.avl = (_TEST_VLENB << 1),
.expected_vl = (_TEST_VLENB << 1)},
- {.vtypei = "e64,m8",
- .avl = (_TEST_VLENB >> 0),
- .expected_vl = (_TEST_VLENB >> 0)},
+ // {.vtypei = "e64,m8",
+ // .avl = (_TEST_VLENB >> 0),
+ // .expected_vl = (_TEST_VLENB >> 0)},
{.vtypei = "e8,m8",
.avl = (_TEST_VLENB << 3) + 1,
.expected_vl = (_TEST_VLENB << 3)},
@@ -1357,16 +1282,15 @@
{.vtypei = "e32,m8",
.avl = (_TEST_VLENB << 1) + 1,
.expected_vl = (_TEST_VLENB << 1)},
- {.vtypei = "e64,m8",
- .avl = (_TEST_VLENB >> 0) + 1,
- .expected_vl = (_TEST_VLENB >> 0)},
+ // {.vtypei = "e64,m8",
+ // .avl = (_TEST_VLENB >> 0) + 1,
+ // .expected_vl = (_TEST_VLENB >> 0)},
};
uint32_t len = sizeof(subtests) / sizeof(struct subtest_s);
for (uint32_t i = 0; i < len; i++) {
- LOG_INFO("Subtest %lu", i);
+ LOG_INFO("Subtest %u", i);
struct subtest_s subtest = subtests[i];
- subtest_vsetvli(subtest.vtypei, subtest.avl, subtest.expected_vl);
char new_vtypei[32];
uint32_t vtypei_len = strlength(subtest.vtypei);
@@ -1390,44 +1314,37 @@
// setting
#define VSETIVLI_SUBTEST(AVL, VTYPEI, EXPECTED_VL) \
do { \
- LOG_INFO("Subtest %lu", subtest_count++); \
+ LOG_INFO("Subtest %u", subtest_count++); \
uint32_t observed_vl = 0; \
- __asm__ volatile("vsetivli t0, " STRING_LITERAL(AVL) ", " VTYPEI); \
- COPY_SCALAR_REG("t0", observed_vl); \
+ __asm__ volatile("vsetivli t0, " STRING_LITERAL(AVL) ", " VTYPEI \
+ ", tu, mu"); \
+ COPY_SCALAR_REG(observed_vl); \
if (observed_vl != EXPECTED_VL) { \
- LOG_INFO("Subtest observed_vl = %lu, expected_vl = %d", observed_vl, \
+ LOG_INFO("Subtest observed_vl = %u, expected_vl = %d", observed_vl, \
EXPECTED_VL); \
} \
assert(observed_vl == EXPECTED_VL); \
__asm__ volatile("vsetivli t0, " STRING_LITERAL(AVL) ", " VTYPEI \
- ",tu,mu"); \
- COPY_SCALAR_REG("t0", observed_vl); \
+ ", tu, ma"); \
+ COPY_SCALAR_REG(observed_vl); \
if (observed_vl != EXPECTED_VL) { \
- LOG_INFO("Subtest observed_vl = %lu, expected_vl = %d", observed_vl, \
+ LOG_INFO("Subtest observed_vl = %u, expected_vl = %d", observed_vl, \
EXPECTED_VL); \
} \
assert(observed_vl == EXPECTED_VL); \
__asm__ volatile("vsetivli t0, " STRING_LITERAL(AVL) ", " VTYPEI \
- ",tu,ma"); \
- COPY_SCALAR_REG("t0", observed_vl); \
+ ", ta, mu"); \
+ COPY_SCALAR_REG(observed_vl); \
if (observed_vl != EXPECTED_VL) { \
- LOG_INFO("Subtest observed_vl = %lu, expected_vl = %d", observed_vl, \
+ LOG_INFO("Subtest observed_vl = %u, expected_vl = %d", observed_vl, \
EXPECTED_VL); \
} \
assert(observed_vl == EXPECTED_VL); \
__asm__ volatile("vsetivli t0, " STRING_LITERAL(AVL) ", " VTYPEI \
- ",ta,mu"); \
- COPY_SCALAR_REG("t0", observed_vl); \
+ ", ta, ma"); \
+ COPY_SCALAR_REG(observed_vl); \
if (observed_vl != EXPECTED_VL) { \
- LOG_INFO("Subtest observed_vl = %lu, expected_vl = %d", observed_vl, \
- EXPECTED_VL); \
- } \
- assert(observed_vl == EXPECTED_VL); \
- __asm__ volatile("vsetivli t0, " STRING_LITERAL(AVL) ", " VTYPEI \
- ",ta,ma"); \
- COPY_SCALAR_REG("t0", observed_vl); \
- if (observed_vl != EXPECTED_VL) { \
- LOG_INFO("Subtest observed_vl = %lu, expected_vl = %d", observed_vl, \
+ LOG_INFO("Subtest observed_vl = %u, expected_vl = %d", observed_vl, \
EXPECTED_VL); \
} \
assert(observed_vl == EXPECTED_VL); \
@@ -1497,14 +1414,7 @@
// AVL immediate is 5 bits -> 31 is the max
uint32_t subtest_count = 0;
- VSETIVLI_SUBTEST(0, "e8", 0);
- VSETIVLI_SUBTEST(0, "e16", 0);
- VSETIVLI_SUBTEST(0, "e32", 0);
- VSETIVLI_SUBTEST(0, "e64", 0);
- VSETIVLI_SUBTEST(1, "e8", 1);
- VSETIVLI_SUBTEST(1, "e16", 1);
- VSETIVLI_SUBTEST(1, "e32", 1);
- VSETIVLI_SUBTEST(1, "e64", 1);
+ /*
VSETIVLI_SUBTEST(0, "e8,mf8", 0);
VSETIVLI_SUBTEST(0, "e16,mf8", 0);
VSETIVLI_SUBTEST(0, "e32,mf8", 0);
@@ -1529,113 +1439,105 @@
VSETIVLI_SUBTEST(1, "e16,mf2", 1);
VSETIVLI_SUBTEST(1, "e32,mf2", 1);
VSETIVLI_SUBTEST(1, "e64,mf2", 0);
+ */
VSETIVLI_SUBTEST(0, "e8,m1", 0);
VSETIVLI_SUBTEST(0, "e16,m1", 0);
VSETIVLI_SUBTEST(0, "e32,m1", 0);
- VSETIVLI_SUBTEST(0, "e64,m1", 0);
+ // VSETIVLI_SUBTEST(0, "e64,m1", 0);
VSETIVLI_SUBTEST(1, "e8,m1", 1);
VSETIVLI_SUBTEST(1, "e16,m1", 1);
VSETIVLI_SUBTEST(1, "e32,m1", 1);
- VSETIVLI_SUBTEST(1, "e64,m1", 1);
+ // VSETIVLI_SUBTEST(1, "e64,m1", 1);
VSETIVLI_SUBTEST(0, "e8,m2", 0);
VSETIVLI_SUBTEST(0, "e16,m2", 0);
VSETIVLI_SUBTEST(0, "e32,m2", 0);
- VSETIVLI_SUBTEST(0, "e64,m2", 0);
+ // VSETIVLI_SUBTEST(0, "e64,m2", 0);
VSETIVLI_SUBTEST(1, "e8,m2", 1);
VSETIVLI_SUBTEST(1, "e16,m2", 1);
VSETIVLI_SUBTEST(1, "e32,m2", 1);
- VSETIVLI_SUBTEST(1, "e64,m2", 1);
+ // VSETIVLI_SUBTEST(1, "e64,m2", 1);
VSETIVLI_SUBTEST(0, "e8,m4", 0);
VSETIVLI_SUBTEST(0, "e16,m4", 0);
VSETIVLI_SUBTEST(0, "e32,m4", 0);
- VSETIVLI_SUBTEST(0, "e64,m4", 0);
+ // VSETIVLI_SUBTEST(0, "e64,m4", 0);
VSETIVLI_SUBTEST(1, "e8,m4", 1);
VSETIVLI_SUBTEST(1, "e16,m4", 1);
VSETIVLI_SUBTEST(1, "e32,m4", 1);
- VSETIVLI_SUBTEST(1, "e64,m4", 1);
+ // VSETIVLI_SUBTEST(1, "e64,m4", 1);
VSETIVLI_SUBTEST(0, "e8,m8", 0);
VSETIVLI_SUBTEST(0, "e16,m8", 0);
VSETIVLI_SUBTEST(0, "e32,m8", 0);
- VSETIVLI_SUBTEST(0, "e64,m8", 0);
+ // VSETIVLI_SUBTEST(0, "e64,m8", 0);
VSETIVLI_SUBTEST(1, "e8,m8", 1);
VSETIVLI_SUBTEST(1, "e16,m8", 1);
VSETIVLI_SUBTEST(1, "e32,m8", 1);
- VSETIVLI_SUBTEST(1, "e64,m8", 1);
+ // VSETIVLI_SUBTEST(1, "e64,m8", 1);
#if VLENB_DIV8_SUB1 < 32
- VSETIVLI_SUBTEST(VLENB_DIV8_SUB1, "e64", VLENB_DIV8_SUB1);
- VSETIVLI_SUBTEST(VLENB_DIV8_SUB1, "e8,mf8", VLENB_DIV8_SUB1);
- VSETIVLI_SUBTEST(VLENB_DIV8_SUB1, "e16,mf4", VLENB_DIV8_SUB1);
- VSETIVLI_SUBTEST(VLENB_DIV8_SUB1, "e32,mf2", VLENB_DIV8_SUB1);
- VSETIVLI_SUBTEST(VLENB_DIV8_SUB1, "e64,m1", VLENB_DIV8_SUB1);
+ // VSETIVLI_SUBTEST(VLENB_DIV8_SUB1, "e8,mf8", VLENB_DIV8_SUB1);
+ // VSETIVLI_SUBTEST(VLENB_DIV8_SUB1, "e16,mf4", VLENB_DIV8_SUB1);
+ // VSETIVLI_SUBTEST(VLENB_DIV8_SUB1, "e32,mf2", VLENB_DIV8_SUB1);
+ // VSETIVLI_SUBTEST(VLENB_DIV8_SUB1, "e64,m1", VLENB_DIV8_SUB1);
#endif
#if VLENB_DIV8 < 32
- VSETIVLI_SUBTEST(VLENB_DIV8, "e64", VLENB_DIV8);
- VSETIVLI_SUBTEST(VLENB_DIV8, "e8,mf8", VLENB_DIV8);
- VSETIVLI_SUBTEST(VLENB_DIV8, "e16,mf4", VLENB_DIV8);
- VSETIVLI_SUBTEST(VLENB_DIV8, "e32,mf2", VLENB_DIV8);
- VSETIVLI_SUBTEST(VLENB_DIV8, "e64,m1", VLENB_DIV8);
+ // VSETIVLI_SUBTEST(VLENB_DIV8, "e8,mf8", VLENB_DIV8);
+ // VSETIVLI_SUBTEST(VLENB_DIV8, "e16,mf4", VLENB_DIV8);
+ // VSETIVLI_SUBTEST(VLENB_DIV8, "e32,mf2", VLENB_DIV8);
+ // VSETIVLI_SUBTEST(VLENB_DIV8, "e64,m1", VLENB_DIV8);
#endif
#if VLENB_DIV8_ADD1 < 32
- VSETIVLI_SUBTEST(VLENB_DIV8_ADD1, "e64", VLENB_DIV8);
- VSETIVLI_SUBTEST(VLENB_DIV8_ADD1, "e8,mf8", VLENB_DIV8);
- VSETIVLI_SUBTEST(VLENB_DIV8_ADD1, "e16,mf4", VLENB_DIV8);
- VSETIVLI_SUBTEST(VLENB_DIV8_ADD1, "e32,mf2", VLENB_DIV8);
- VSETIVLI_SUBTEST(VLENB_DIV8_ADD1, "e64,m1", VLENB_DIV8);
+ // VSETIVLI_SUBTEST(VLENB_DIV8_ADD1, "e8,mf8", VLENB_DIV8);
+ // VSETIVLI_SUBTEST(VLENB_DIV8_ADD1, "e16,mf4", VLENB_DIV8);
+ // VSETIVLI_SUBTEST(VLENB_DIV8_ADD1, "e32,mf2", VLENB_DIV8);
+ // VSETIVLI_SUBTEST(VLENB_DIV8_ADD1, "e64,m1", VLENB_DIV8);
#endif
#if VLENB_DIV4_SUB1 < 32
- VSETIVLI_SUBTEST(VLENB_DIV4_SUB1, "e32", VLENB_DIV4_SUB1);
- VSETIVLI_SUBTEST(VLENB_DIV4_SUB1, "e8,mf4", VLENB_DIV4_SUB1);
- VSETIVLI_SUBTEST(VLENB_DIV4_SUB1, "e16,mf2", VLENB_DIV4_SUB1);
+ // VSETIVLI_SUBTEST(VLENB_DIV4_SUB1, "e8,mf4", VLENB_DIV4_SUB1);
+ // VSETIVLI_SUBTEST(VLENB_DIV4_SUB1, "e16,mf2", VLENB_DIV4_SUB1);
VSETIVLI_SUBTEST(VLENB_DIV4_SUB1, "e32,m1", VLENB_DIV4_SUB1);
- VSETIVLI_SUBTEST(VLENB_DIV4_SUB1, "e64,m2", VLENB_DIV4_SUB1);
+ // VSETIVLI_SUBTEST(VLENB_DIV4_SUB1, "e64,m2", VLENB_DIV4_SUB1);
#endif
#if VLENB_DIV4 < 32
- VSETIVLI_SUBTEST(VLENB_DIV4, "e32", VLENB_DIV4);
- VSETIVLI_SUBTEST(VLENB_DIV4, "e8,mf4", VLENB_DIV4);
- VSETIVLI_SUBTEST(VLENB_DIV4, "e16,mf2", VLENB_DIV4);
+ // VSETIVLI_SUBTEST(VLENB_DIV4, "e8,mf4", VLENB_DIV4);
+ // VSETIVLI_SUBTEST(VLENB_DIV4, "e16,mf2", VLENB_DIV4);
VSETIVLI_SUBTEST(VLENB_DIV4, "e32,m1", VLENB_DIV4);
- VSETIVLI_SUBTEST(VLENB_DIV4, "e64,m2", VLENB_DIV4);
+ // VSETIVLI_SUBTEST(VLENB_DIV4, "e64,m2", VLENB_DIV4);
#endif
#if VLENB_DIV4_ADD1 < 32
- VSETIVLI_SUBTEST(VLENB_DIV4_ADD1, "e32", VLENB_DIV4);
- VSETIVLI_SUBTEST(VLENB_DIV4_ADD1, "e8,mf4", VLENB_DIV4);
- VSETIVLI_SUBTEST(VLENB_DIV4_ADD1, "e16,mf2", VLENB_DIV4);
+ // VSETIVLI_SUBTEST(VLENB_DIV4_ADD1, "e8,mf4", VLENB_DIV4);
+ // VSETIVLI_SUBTEST(VLENB_DIV4_ADD1, "e16,mf2", VLENB_DIV4);
VSETIVLI_SUBTEST(VLENB_DIV4_ADD1, "e32,m1", VLENB_DIV4);
- VSETIVLI_SUBTEST(VLENB_DIV4_ADD1, "e64,m2", VLENB_DIV4);
+ // VSETIVLI_SUBTEST(VLENB_DIV4_ADD1, "e64,m2", VLENB_DIV4);
#endif
#if VLENB_DIV2_SUB1 < 32
- VSETIVLI_SUBTEST(VLENB_DIV2_SUB1, "e16", VLENB_DIV2_SUB1);
- VSETIVLI_SUBTEST(VLENB_DIV2_SUB1, "e8,mf2", VLENB_DIV2_SUB1);
+ // VSETIVLI_SUBTEST(VLENB_DIV2_SUB1, "e8,mf2", VLENB_DIV2_SUB1);
VSETIVLI_SUBTEST(VLENB_DIV2_SUB1, "e16,m1", VLENB_DIV2_SUB1);
VSETIVLI_SUBTEST(VLENB_DIV2_SUB1, "e32,m2", VLENB_DIV2_SUB1);
- VSETIVLI_SUBTEST(VLENB_DIV2_SUB1, "e64,m4", VLENB_DIV2_SUB1);
+ // VSETIVLI_SUBTEST(VLENB_DIV2_SUB1, "e64,m4", VLENB_DIV2_SUB1);
#endif
#if VLENB_DIV2 < 32
- VSETIVLI_SUBTEST(VLENB_DIV2, "e16", VLENB_DIV2);
- VSETIVLI_SUBTEST(VLENB_DIV2, "e8,mf2", VLENB_DIV2);
+ // VSETIVLI_SUBTEST(VLENB_DIV2, "e8,mf2", VLENB_DIV2);
VSETIVLI_SUBTEST(VLENB_DIV2, "e16,m1", VLENB_DIV2);
VSETIVLI_SUBTEST(VLENB_DIV2, "e32,m2", VLENB_DIV2);
- VSETIVLI_SUBTEST(VLENB_DIV2, "e64,m4", VLENB_DIV2);
+ // VSETIVLI_SUBTEST(VLENB_DIV2, "e64,m4", VLENB_DIV2);
#endif
#if VLENB_DIV2_ADD1 < 32
- VSETIVLI_SUBTEST(VLENB_DIV2_ADD1, "e16", VLENB_DIV2);
- VSETIVLI_SUBTEST(VLENB_DIV2_ADD1, "e8,mf2", VLENB_DIV2);
+ // VSETIVLI_SUBTEST(VLENB_DIV2_ADD1, "e8,mf2", VLENB_DIV2);
VSETIVLI_SUBTEST(VLENB_DIV2_ADD1, "e16,m1", VLENB_DIV2);
VSETIVLI_SUBTEST(VLENB_DIV2_ADD1, "e32,m2", VLENB_DIV2);
- VSETIVLI_SUBTEST(VLENB_DIV2_ADD1, "e64,m4", VLENB_DIV2);
+ // VSETIVLI_SUBTEST(VLENB_DIV2_ADD1, "e64,m4", VLENB_DIV2);
#endif
#if VLENB_SUB1 < 32
- VSETIVLI_SUBTEST(VLENB_SUB1, "e8", VLENB_SUB1);
+ /*
VSETIVLI_SUBTEST(VLENB_SUB1, "e16,mf8", 0);
VSETIVLI_SUBTEST(VLENB_SUB1, "e32,mf8", 0);
VSETIVLI_SUBTEST(VLENB_SUB1, "e64,mf8", 0);
@@ -1654,26 +1556,25 @@
VSETIVLI_SUBTEST(VLENB_SUB1, "e64,mf2", 0);
VSETIVLI_SUBTEST(VLENB_SUB1, "e64,mf2", 0);
VSETIVLI_SUBTEST(VLENB_SUB1, "e64,mf2", 0);
+ */
VSETIVLI_SUBTEST(VLENB_SUB1, "e8,m1", VLENB_SUB1);
VSETIVLI_SUBTEST(VLENB_SUB1, "e16,m2", VLENB_SUB1);
VSETIVLI_SUBTEST(VLENB_SUB1, "e32,m4", VLENB_SUB1);
- VSETIVLI_SUBTEST(VLENB_SUB1, "e64,m8", VLENB_SUB1);
+ // VSETIVLI_SUBTEST(VLENB_SUB1, "e64,m8", VLENB_SUB1);
#endif
#if VLENB < 32
- VSETIVLI_SUBTEST(VLENB, "e8", VLENB);
VSETIVLI_SUBTEST(VLENB, "e8,m1", VLENB);
VSETIVLI_SUBTEST(VLENB, "e16,m2", VLENB);
VSETIVLI_SUBTEST(VLENB, "e32,m4", VLENB);
- VSETIVLI_SUBTEST(VLENB, "e64,m8", VLENB);
+ // VSETIVLI_SUBTEST(VLENB, "e64,m8", VLENB);
#endif
#if VLENB_ADD1 < 32
- VSETIVLI_SUBTEST(VLENB_ADD1, "e8", VLENB);
VSETIVLI_SUBTEST(VLENB_ADD1, "e8,m1", VLENB);
VSETIVLI_SUBTEST(VLENB_ADD1, "e16,m2", VLENB);
VSETIVLI_SUBTEST(VLENB_ADD1, "e32,m4", VLENB);
- VSETIVLI_SUBTEST(VLENB_ADD1, "e64,m8", VLENB);
+ // VSETIVLI_SUBTEST(VLENB_ADD1, "e64,m8", VLENB);
#endif
#if VLENB_MUL2_SUB1 < 32