/*
 * Copyright 2023 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
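
// Sanity tests for RVV unit-stride vector loads and stores: each test
// round-trips a small buffer through a vector register at a given SEW and
// compares the stored output against the input.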

#include "vector_load_store_tests.h"

#include <assert.h>
#include <stdint.h>
#include <string.h>

// TODO(b/194689843): Re-enable e64 and mf[2|4|8] tests.

bool test_vector(void) {
  test_vector_load_store_sanity_e8();
  test_vector_load_store_sanity_e16();
  test_vector_load_store_sanity_e32();
  // SEW is limited to e32 on this target (see TODO above), so the e64
  // sanity test stays disabled.
  // test_vector_load_store_sanity_e64();
  return true;
}

void test_vector_load_store_sanity_e8(void) {
  LOG_INFO("%s", __FUNCTION__);
  VSET(4, e8, m1);

  uint32_t vl = 0;
  COPY_SCALAR_REG(vl);
  LOG_INFO("vl: %u", vl);
  assert(vl == 4);

  volatile uint8_t INP1[] = {0xff, 0x00, 0x0f, 0xf0};
  volatile uint8_t OUT1[4];
| __asm__ volatile ("vle8.v v1, (%0)"::"r" (INP1)); |
| __asm__ volatile ("vse8.v v1, (%0)"::"r" (OUT1)); |
| assert(memcmp((void*)INP1,(void*)OUT1, 4 * sizeof(uint8_t)) == 0); |
}

void test_vector_load_store_sanity_e16(void) {
  LOG_INFO("%s", __FUNCTION__);
  VSET(4, e16, m1);

  uint32_t vl = 0;
  COPY_SCALAR_REG(vl);
  LOG_INFO("vl: %u", vl);
  assert(vl == 4);

  volatile uint16_t INP1[] = {0xff00, 0x00ff, 0x0ff0, 0xf00f};
  volatile uint16_t OUT1[4];
| __asm__ volatile ("vle16.v v1, (%0)"::"r" (INP1)); |
| __asm__ volatile ("vse16.v v1, (%0)"::"r" (OUT1)); |
| assert(memcmp((void*)INP1,(void*)OUT1, 4 * sizeof(uint16_t)) == 0); |
}

void test_vector_load_store_sanity_e32(void) {
  LOG_INFO("%s", __FUNCTION__);
  VSET(4, e32, m1);

  uint32_t vl = 0;
  COPY_SCALAR_REG(vl);
  LOG_INFO("vl: %u", vl);
  assert(vl == 4);

  volatile uint32_t INP1[] = {0xff0000ff, 0x00ffff00, 0x0ff00ff0, 0xf00ff00f};
  volatile uint32_t OUT1[4];
| __asm__ volatile ("vle32.v v1, (%0)"::"r" (INP1)); |
| __asm__ volatile ("vse32.v v1, (%0)"::"r" (OUT1)); |
| assert(memcmp((void*)INP1,(void*)OUT1, 4 * sizeof(uint32_t)) == 0); |
}

void test_vector_load_store_sanity_e64(void) {
  LOG_INFO("%s", __FUNCTION__);
  VSET(4, e64, m1);

  uint32_t vl = 0;
  COPY_SCALAR_REG(vl);
  LOG_INFO("vl: %u", vl);
  assert(vl == 4);

  volatile uint64_t INP1[] = {0xff0000ffff0000ff, 0x00ffff0000ffff00,
                              0x0ff00ff00ff00ff0, 0xf00ff00ff00ff00f};
  volatile uint64_t OUT1[4];
| __asm__ volatile ("vle64.v v1, (%0)"::"r" (INP1)); |
| __asm__ volatile ("vse64.v v1, (%0)"::"r" (OUT1)); |
| assert(memcmp((void*)INP1,(void*)OUT1, 4 * sizeof(uint64_t)) == 0); |
}

// TODO(julianmb): test mask load and mask store. vlm.v vsm.v
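
// A possible sketch of that test, assuming the core implements RVV 1.0
// vlm.v/vsm.v semantics (mask loads and stores always use EEW=8 and transfer
// ceil(vl / 8) bytes, regardless of the current SEW and LMUL). Not yet called
// from test_vector(); its prototype would also need to be added to
// vector_load_store_tests.h.
void test_vector_load_store_sanity_mask(void) {
  LOG_INFO("%s", __FUNCTION__);
  VSET(16, e8, m1);

  uint32_t vl = 0;
  COPY_SCALAR_REG(vl);
  LOG_INFO("vl: %u", vl);
  // vl == 16 assumes VLMAX >= 16 at e8/m1 (i.e. VLEN >= 128), which the e32
  // test above already implies.
  assert(vl == 16);

  // With vl = 16, vlm.v/vsm.v transfer ceil(16 / 8) = 2 bytes of mask bits.
  volatile uint8_t INP1[] = {0xa5, 0x5a};
  volatile uint8_t OUT1[2];
  __asm__ volatile("vlm.v v1, (%0)" ::"r"(INP1) : "memory");
  __asm__ volatile("vsm.v v1, (%0)" ::"r"(OUT1) : "memory");
  assert(memcmp((void*)INP1, (void*)OUT1, 2 * sizeof(uint8_t)) == 0);
}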