Add vector load/store sanity tests for e8/e16/e32/e64 element widths.

Change-Id: I6d51729f6e49404be2f17aebe4b880b614d13b47
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 53ac99b..3a6f209 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -15,4 +15,4 @@
 add_subdirectory(springbok)
 add_subdirectory(hello_vec)
 add_subdirectory(vector_tests)
-#add_subdirectory(vector_load_tests)
+add_subdirectory(vector_load_tests)
diff --git a/vector_load_tests/CMakeLists.txt b/vector_load_tests/CMakeLists.txt
new file mode 100644
index 0000000..4b2820f
--- /dev/null
+++ b/vector_load_tests/CMakeLists.txt
@@ -0,0 +1,34 @@
+cmake_minimum_required(VERSION 3.13)
+
+project(vector_load_tests)
+
+set(TARGET vector_load_tests)
+set(ELF ${TARGET}.elf)
+
+add_executable(${ELF} vector_load_store_tests.c)
+
+target_include_directories(${ELF} PUBLIC include)
+
+# Re-link whenever the linker script changes.
+set_target_properties(${ELF} PROPERTIES LINK_DEPENDS "${LINKER_SCRIPT}")
+
+target_link_libraries(${ELF} PRIVATE vector_tests)
+
+# target_link_options (CMake 3.13+, hence the minimum version above) keeps
+# linker options as a list instead of a single legacy LINK_FLAGS string.
+target_link_options(${ELF} PRIVATE
+  -T${LINKER_SCRIPT}
+  -specs=nano.specs
+  -Wl,--gc-sections
+  -Wl,--print-memory-usage
+  -Wl,-Map=${PROJECT_NAME}.map)
+
+target_compile_options(${ELF} PUBLIC
+  -nostdlib
+  -ffreestanding
+  -ffunction-sections
+  -Wall
+  -Werror
+  -std=gnu11
+  -O3
+  -g)
diff --git a/vector_load_tests/include/vector_load_store_tests.h b/vector_load_tests/include/vector_load_store_tests.h
new file mode 100644
index 0000000..4b5d4a9
--- /dev/null
+++ b/vector_load_tests/include/vector_load_store_tests.h
@@ -0,0 +1,14 @@
+#ifndef VECTOR_TESTS_VECTOR_LOAD_STORE_TESTS_H_
+#define VECTOR_TESTS_VECTOR_LOAD_STORE_TESTS_H_
+
+#include "test_vector.h"
+#include <string.h>
+#include <springbok.h>
+#include <assert.h>
+
+void test_vector_load_store_sanity_e8(void);
+void test_vector_load_store_sanity_e16(void);
+void test_vector_load_store_sanity_e32(void);
+void test_vector_load_store_sanity_e64(void);
+
+#endif  // VECTOR_TESTS_VECTOR_LOAD_STORE_TESTS_H_
diff --git a/vector_load_tests/vector_load_store_tests.c b/vector_load_tests/vector_load_store_tests.c
new file mode 100644
index 0000000..25d84e5
--- /dev/null
+++ b/vector_load_tests/vector_load_store_tests.c
@@ -0,0 +1,80 @@
+#include "vector_load_store_tests.h"
+
+// Harness entry point: one load/store sanity test per element width.
+bool test_vector(void) {
+  test_vector_load_store_sanity_e8();
+  test_vector_load_store_sanity_e16();
+  test_vector_load_store_sanity_e32();
+  test_vector_load_store_sanity_e64();
+  return true;
+}
+
+void test_vector_load_store_sanity_e8(void) {
+  LOG_INFO("%s", __FUNCTION__);
+  VSET(4, e8, m1);
+
+  uint32_t vl = 0;
+  COPY_SCALAR_REG("t0", vl);
+  // Cast: "%lu" expects unsigned long; uint32_t need not be that type.
+  LOG_INFO("vl: %lu", (unsigned long)vl);
+  assert(vl == 4);
+
+  volatile uint8_t INP1[] = {0xff, 0x00, 0x0f, 0xf0};
+  volatile uint8_t OUT1[4];
+  // "memory" clobber stops the compiler from caching or reordering the
+  // array accesses around the vector load and store.
+  __asm__ volatile ("vle8.v v1, (%0)"::"r" (INP1):"memory");
+  __asm__ volatile ("vse8.v v1, (%0)"::"r" (OUT1):"memory");
+  assert(memcmp((void*)INP1,(void*)OUT1, 4 * sizeof(uint8_t)) == 0);
+}
+
+void test_vector_load_store_sanity_e16(void) {
+  LOG_INFO("%s", __FUNCTION__);
+  VSET(4, e16, m1);
+
+  uint32_t vl = 0;
+  COPY_SCALAR_REG("t0", vl);
+  LOG_INFO("vl: %lu", (unsigned long)vl);
+  assert(vl == 4);
+
+  volatile uint16_t INP1[] = {0xff00, 0x00ff, 0x0ff0, 0xf00f};
+  volatile uint16_t OUT1[4];
+  __asm__ volatile ("vle16.v v1, (%0)"::"r" (INP1):"memory");
+  __asm__ volatile ("vse16.v v1, (%0)"::"r" (OUT1):"memory");
+  assert(memcmp((void*)INP1,(void*)OUT1, 4 * sizeof(uint16_t)) == 0);
+}
+
+void test_vector_load_store_sanity_e32(void) {
+  LOG_INFO("%s", __FUNCTION__);
+  VSET(4, e32, m1);
+
+  uint32_t vl = 0;
+  COPY_SCALAR_REG("t0", vl);
+  LOG_INFO("vl: %lu", (unsigned long)vl);
+  assert(vl == 4);
+
+  volatile uint32_t INP1[] = {0xff0000ff, 0x00ffff00, 0x0ff00ff0, 0xf00ff00f};
+  volatile uint32_t OUT1[4];
+  __asm__ volatile ("vle32.v v1, (%0)"::"r" (INP1):"memory");
+  __asm__ volatile ("vse32.v v1, (%0)"::"r" (OUT1):"memory");
+  assert(memcmp((void*)INP1,(void*)OUT1, 4 * sizeof(uint32_t)) == 0);
+}
+
+void test_vector_load_store_sanity_e64(void) {
+  LOG_INFO("%s", __FUNCTION__);
+  VSET(4, e64, m1);
+
+  uint32_t vl = 0;
+  COPY_SCALAR_REG("t0", vl);
+  LOG_INFO("vl: %lu", (unsigned long)vl);
+  assert(vl == 4);
+
+  // ULL suffixes: these constants exceed 32 bits.
+  volatile uint64_t INP1[] = {0xff0000ffff0000ffULL, 0x00ffff0000ffff00ULL, 0x0ff00ff00ff00ff0ULL, 0xf00ff00ff00ff00fULL};
+  volatile uint64_t OUT1[4];
+  __asm__ volatile ("vle64.v v1, (%0)"::"r" (INP1):"memory");
+  __asm__ volatile ("vse64.v v1, (%0)"::"r" (OUT1):"memory");
+  assert(memcmp((void*)INP1,(void*)OUT1, 4 * sizeof(uint64_t)) == 0);
+}
+
+// TODO(julianmb): test mask load and mask store. vlm.v vsm.v