blob: 67f8f8678108e8d3d21979ed161af0211eafb8b1 [file] [log] [blame]
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <limits.h>
#include <riscv_vector.h>
#include <springbok.h>
#include <stdio.h>
#include <stdlib.h>
#include <bit>
#include <tuple>
#include <type_traits>
#include "pw_unit_test/framework.h"
#include "test_v_helpers.h"
namespace vmv_s_x_test {
namespace {
using namespace test_v_helpers;
uint8_t test_vector_1[MAXVL_BYTES];
uint8_t reference_vector_1[MAXVL_BYTES];
// Fixture for the vmv.s.x tests: guarantees the vector register file holds
// no stale state before and after every test case.
class VmvsxTest : public ::testing::Test {
 protected:
  // Start each case from an all-zero vector register file.
  void SetUp() override {
    zero_vector_registers();
  }
  // Leave the register file clean for whichever test runs next.
  void TearDown() override {
    zero_vector_registers();
  }
};
// Exercises vmv.s.x (move scalar integer register into vector element 0)
// for element type T across every supported LMUL / AVL combination.
//
// For each configuration the test:
//   1. zeroes both buffers and performs the appropriate vsetvl via
//      vector_test_setup (skipping configurations where avl > vlmax),
//   2. fills the reference buffer with random data and loads it into v8,
//   3. executes `vmv.s.x v8, x5` with test_value held in x5,
//   4. stores v8 to the test buffer and compares it against the reference
//      buffer with element 0 overwritten by test_value truncated to T.
template <typename T>
void vmv_s_x_test(const int32_t test_value) {
  for (int lmul = LMUL_M1; lmul <= LMUL_M8; lmul++) {
    // TODO(gkielian): increase upper bound to LMUL_MF2 after support is added
    // TODO(gkielian): skip lmul==4 (unused) on adding fractional lmul to range
    for (int32_t i = 0; i < AVL_COUNT; i++) {
      int32_t avl = AVLS[i];
      int32_t vlmax;
      int32_t vl;
      // Reset vector elements to zero and perform appropriate vsetvl.
      std::tie(vlmax, vl) = vector_test_setup<T>(
          static_cast<VLMUL>(lmul), avl, {test_vector_1, reference_vector_1});
      if (avl > vlmax) {
        continue;
      }
      // Recast test and ref vectors to the appropriate SEW.
      T *test_ptr_vec = reinterpret_cast<T *>(test_vector_1);
      T *ref_ptr_vec = reinterpret_cast<T *>(reference_vector_1);
      // Populate the reference vector with random data.
      fill_random_vector<T>(ref_ptr_vec, avl);
      // Load test_value into scalar register x5 (ABI name t0).  t0 must be
      // declared as clobbered so the compiler does not keep a live value in
      // it across this asm or between it and the vmv.s.x below.
      __asm__ volatile("lw x5, (%0)" : : "r"(&test_value) : "t0");
      // Load the reference data into v8.  The "memory" clobber orders the
      // asm after the fill_random_vector stores above.  (v8 itself cannot
      // portably be named in the clobber list; the fixture keeps vector
      // state from leaking between cases instead.)
      switch (sizeof(T)) {
        case sizeof(int8_t):
          __asm__ volatile("vle8.v v8, (%0)" : : "r"(ref_ptr_vec) : "memory");
          break;
        case sizeof(int16_t):
          __asm__ volatile("vle16.v v8, (%0)" : : "r"(ref_ptr_vec) : "memory");
          break;
        case sizeof(int32_t):
          __asm__ volatile("vle32.v v8, (%0)" : : "r"(ref_ptr_vec) : "memory");
          break;
        default:
          EXPECT_TRUE(false);
          break;
      }
      // Load value of x5 into v8[0], truncating x5 to fit the current SEW.
      __asm__ volatile("vmv.s.x v8, x5");
      // Store v8 to the test buffer.  The "memory" clobber makes the asm's
      // stores visible to the element-wise comparison below.
      switch (sizeof(T)) {
        case sizeof(int8_t):
          __asm__ volatile("vse8.v v8, (%0)" : : "r"(test_ptr_vec) : "memory");
          break;
        case sizeof(int16_t):
          __asm__ volatile("vse16.v v8, (%0)" : : "r"(test_ptr_vec) : "memory");
          break;
        case sizeof(int32_t):
          __asm__ volatile("vse32.v v8, (%0)" : : "r"(test_ptr_vec) : "memory");
          break;
        default:
          EXPECT_TRUE(false);
          break;
      }
      // Model the operation on the reference vector: only element 0 changes.
      ref_ptr_vec[0] = static_cast<T>(test_value);
      assert_vec_elem_eq<T>(vlmax, test_ptr_vec, ref_ptr_vec);
    }
  }
}
// SEW=8: boundary and zero scalar values.  The 32-bit extremes exercise the
// truncation of x5 down to an 8-bit element.
TEST_F(VmvsxTest, vmv_s_x_test_e8) {
  const int32_t kValues[] = {INT32_MIN, INT32_MAX, INT8_MIN, INT8_MAX, 0};
  for (const int32_t value : kValues) {
    vmv_s_x_test<int8_t>(value);
  }
}
// SEW=16: boundary and zero scalar values.  The 32-bit extremes exercise the
// truncation of x5 down to a 16-bit element.  INT16_MIN/INT16_MAX cover the
// 16-bit boundaries themselves, mirroring how the e8/e32 tests cover the
// boundaries of their own element widths.
TEST_F(VmvsxTest, vmv_s_x_test_e16) {
  vmv_s_x_test<int16_t>(INT32_MIN);
  vmv_s_x_test<int16_t>(INT32_MAX);
  vmv_s_x_test<int16_t>(INT16_MIN);
  vmv_s_x_test<int16_t>(INT16_MAX);
  vmv_s_x_test<int16_t>(INT8_MIN);
  vmv_s_x_test<int16_t>(INT8_MAX);
  vmv_s_x_test<int16_t>(0);
}
// SEW=32: boundary and zero scalar values.  At this width x5 fits an element
// exactly, so no truncation occurs.
TEST_F(VmvsxTest, vmv_s_x_test_e32) {
  const int32_t kValues[] = {INT32_MIN, INT32_MAX, INT8_MIN, INT8_MAX, 0};
  for (const int32_t value : kValues) {
    vmv_s_x_test<int32_t>(value);
  }
}
} // namespace
} // namespace vmv_s_x_test