author     AdityaK <appujee@google.com>    2024-04-19 20:05:33 -0700
committer  AdityaK <appujee@google.com>    2024-04-25 16:15:00 -0700
commit     24705dbdb5c1140d2601757bde09975b6acdced2
tree       511368567705abc685be41a98eeb9fa9807df4bd
parent     785e161dd8e0e44fa79c02fad34fb3992a7c81e4
download   bionic-24705dbdb5c1140d2601757bde09975b6acdced2.tar.gz
[RISC-V] Add misaligned load store tests
The hwprobe test may be insufficient to guarantee fast (or even supported)
unaligned access. Test case based on:
https://github.com/llvm/llvm-project/issues/88029

The previous commit was reverted due to compiler errors (b/336800888); it is
not clear why the errors were not detected in pre-submit builds.

Bug: https://github.com/google/android-riscv64/issues/142
Change-Id: If1c4150701298c0f351baa9ce1870509a00c250a
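For context, the kernel report that the commit message refers to can be queried
through bionic's <sys/hwprobe.h> wrapper. The sketch below is illustrative only
and is not part of this change; the helper name misaligned_access_reported_fast
is made up here, and it assumes the standard Linux hwprobe constants
(RISCV_HWPROBE_KEY_CPUPERF_0, RISCV_HWPROBE_MISALIGNED_*). It shows what the
hwprobe answer looks like, and why the new tests still perform real misaligned
loads and stores rather than trusting that answer alone.

#if defined(__riscv)
#include <sys/hwprobe.h>

// Illustrative sketch (not part of the diff below): ask the kernel whether
// misaligned scalar accesses are reported as fast via
// RISCV_HWPROBE_KEY_CPUPERF_0. Returns false if the probe fails or the CPU
// does not report "fast".
static bool misaligned_access_reported_fast() {
  riscv_hwprobe probe = {.key = RISCV_HWPROBE_KEY_CPUPERF_0};
  if (__riscv_hwprobe(&probe, 1, 0, nullptr, 0) != 0) return false;
  return (probe.value & RISCV_HWPROBE_MISALIGNED_MASK) ==
         RISCV_HWPROBE_MISALIGNED_FAST;
}
#endif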
-rw-r--r--  tests/sys_hwprobe_test.cpp  64
1 file changed, 63 insertions, 1 deletion
diff --git a/tests/sys_hwprobe_test.cpp b/tests/sys_hwprobe_test.cpp
index 6b74e1875..fd59e1ddc 100644
--- a/tests/sys_hwprobe_test.cpp
+++ b/tests/sys_hwprobe_test.cpp
@@ -33,6 +33,68 @@
#include <sys/syscall.h>
#endif
+
+#if defined(__riscv)
+#include <riscv_vector.h>
+
+__attribute__((noinline))
+uint64_t scalar_cast(uint8_t const* p) {
+ return *(uint64_t const*)p;
+}
+
+__attribute__((noinline))
+uint64_t scalar_memcpy(uint8_t const* p) {
+ uint64_t r;
+ __builtin_memcpy(&r, p, sizeof(r));
+ return r;
+}
+
+__attribute__((noinline))
+uint64_t vector_memcpy(uint8_t* d, uint8_t const* p) {
+ __builtin_memcpy(d, p, 16);
+ return *(uint64_t const*)d;
+}
+
+__attribute__((noinline))
+uint64_t vector_ldst(uint8_t* d, uint8_t const* p) {
+ __riscv_vse8(d, __riscv_vle8_v_u8m1(p, 16), 16);
+ return *(uint64_t const*)d;
+}
+
+__attribute__((noinline))
+uint64_t vector_ldst64(uint8_t* d, uint8_t const* p) {
+ __riscv_vse64((unsigned long *)d, __riscv_vle64_v_u64m1((const unsigned long *)p, 16), 16);
+ return *(uint64_t const*)d;
+}
+
+// For testing scalar and vector unaligned accesses.
+uint64_t tmp[3] = {1,1,1};
+uint64_t dst[3] = {1,1,1};
+#endif
+
+TEST(sys_hwprobe, __riscv_hwprobe_misaligned_scalar) {
+#if defined(__riscv)
+ uint8_t* p = (uint8_t*)tmp + 1;
+ ASSERT_NE(0U, scalar_cast(p));
+ ASSERT_NE(0U, scalar_memcpy(p));
+#else
+ GTEST_SKIP() << "__riscv_hwprobe requires riscv64";
+#endif
+}
+
+TEST(sys_hwprobe, __riscv_hwprobe_misaligned_vector) {
+#if defined(__riscv)
+ uint8_t* p = (uint8_t*)tmp + 1;
+ uint8_t* d = (uint8_t*)dst + 1;
+
+ ASSERT_NE(0U, vector_ldst(d, p));
+ ASSERT_NE(0U, vector_memcpy(d, p));
+ ASSERT_NE(0U, vector_ldst64(d, p));
+#else
+ GTEST_SKIP() << "__riscv_hwprobe requires riscv64";
+#endif
+}
+
TEST(sys_hwprobe, __riscv_hwprobe) {
#if defined(__riscv) && __has_include(<sys/hwprobe.h>)
riscv_hwprobe probes[] = {{.key = RISCV_HWPROBE_KEY_IMA_EXT_0},
@@ -82,4 +144,4 @@ TEST(sys_hwprobe, __riscv_hwprobe_fail) {
#else
GTEST_SKIP() << "__riscv_hwprobe requires riscv64";
#endif
-}
+}
\ No newline at end of file