about summary refs log tree commit diff
path: root/test
diff options
context:
space:
mode:
author: Jacob Bramley <jacob.bramley@arm.com> 2020-04-30 15:35:56 +0100
committer: Jacob Bramley <jacob.bramley@arm.com> 2020-05-21 14:43:09 +0000
commit: 8667956fc31bf89ef268830db4f06fab101d6816 (patch)
tree: fff3401ba0bc2a3de14c3eb255dca676722bff1e /test
parent: 113d9199a4047b7c93809a98e3d95ed542c2f6c3 (diff)
download: vixl-8667956fc31bf89ef268830db4f06fab101d6816.tar.gz
Remove undefined casts to PrefetchOperation.
Because several tests relied on being able to cast arbitrary uint5 values to PrefetchOperation, this patch also overloads `prfm` variants to accept such values. Existing user code passing in `static_cast<PrefetchOperation>(op)` with unallocated `op`s will now fail. However, simply removing the cast should encode the desired values without using any undefined C++ behaviour. This is only possible in the Assembler; the MacroAssembler requires named prefetch operations.

Change-Id: I6b001bf0ce06d46ea7762e884238ceb1f0224b35
Diffstat (limited to 'test')
-rw-r--r-- test/aarch64/test-assembler-aarch64.cc | 99
-rw-r--r-- test/aarch64/test-disasm-aarch64.cc    | 14
2 files changed, 83 insertions(+), 30 deletions(-)
diff --git a/test/aarch64/test-assembler-aarch64.cc b/test/aarch64/test-assembler-aarch64.cc
index ae92dff6..fa5151ee 100644
--- a/test/aarch64/test-assembler-aarch64.cc
+++ b/test/aarch64/test-assembler-aarch64.cc
@@ -4113,6 +4113,28 @@ TEST(ldr_literal_custom_shared) {
}
}
+static const PrefetchOperation kPrfmOperations[] = {
+ PLDL1KEEP,
+ PLDL1STRM,
+ PLDL2KEEP,
+ PLDL2STRM,
+ PLDL3KEEP,
+ PLDL3STRM,
+
+ PLIL1KEEP,
+ PLIL1STRM,
+ PLIL2KEEP,
+ PLIL2STRM,
+ PLIL3KEEP,
+ PLIL3STRM,
+
+ PSTL1KEEP,
+ PSTL1STRM,
+ PSTL2KEEP,
+ PSTL2STRM,
+ PSTL3KEEP,
+ PSTL3STRM
+};
TEST(prfm_offset) {
SETUP();
@@ -4121,15 +4143,18 @@ TEST(prfm_offset) {
// The address used in prfm doesn't have to be valid.
__ Mov(x0, 0x0123456789abcdef);
- for (int i = 0; i < (1 << ImmPrefetchOperation_width); i++) {
+ for (int op = 0; op < (1 << ImmPrefetchOperation_width); op++) {
// Unallocated prefetch operations are ignored, so test all of them.
- PrefetchOperation op = static_cast<PrefetchOperation>(i);
+ // We have to use the Assembler directly for this.
+ ExactAssemblyScope guard(&masm, 3 * kInstructionSize);
+ __ prfm(op, MemOperand(x0));
+ __ prfm(op, MemOperand(x0, 8));
+ __ prfm(op, MemOperand(x0, 32760));
+ }
- __ Prfm(op, MemOperand(x0));
- __ Prfm(op, MemOperand(x0, 8));
- __ Prfm(op, MemOperand(x0, 32760));
+ for (PrefetchOperation op: kPrfmOperations) {
+ // Also test named operations.
__ Prfm(op, MemOperand(x0, 32768));
-
__ Prfm(op, MemOperand(x0, 1));
__ Prfm(op, MemOperand(x0, 9));
__ Prfm(op, MemOperand(x0, 255));
@@ -4167,14 +4192,21 @@ TEST(prfm_regoffset) {
__ Mov(x17, -255);
__ Mov(x18, 0xfedcba9876543210);
- for (int i = 0; i < (1 << ImmPrefetchOperation_width); i++) {
+ for (int op = 0; op < (1 << ImmPrefetchOperation_width); op++) {
// Unallocated prefetch operations are ignored, so test all of them.
- PrefetchOperation op = static_cast<PrefetchOperation>(i);
+ // We have to use the Assembler directly for this.
+ ExactAssemblyScope guard(&masm, inputs.GetCount() * kInstructionSize);
+ CPURegList loop = inputs;
+ while (!loop.IsEmpty()) {
+ __ prfm(op, MemOperand(x0, Register(loop.PopLowestIndex())));
+ }
+ }
+ for (PrefetchOperation op: kPrfmOperations) {
+ // Also test named operations.
CPURegList loop = inputs;
while (!loop.IsEmpty()) {
Register input(loop.PopLowestIndex());
- __ Prfm(op, MemOperand(x0, input));
__ Prfm(op, MemOperand(x0, input, UXTW));
__ Prfm(op, MemOperand(x0, input, UXTW, 3));
__ Prfm(op, MemOperand(x0, input, LSL));
@@ -4197,15 +4229,19 @@ TEST(prfm_literal_imm19) {
SETUP();
START();
- for (int i = 0; i < (1 << ImmPrefetchOperation_width); i++) {
+ for (int op = 0; op < (1 << ImmPrefetchOperation_width); op++) {
// Unallocated prefetch operations are ignored, so test all of them.
- PrefetchOperation op = static_cast<PrefetchOperation>(i);
-
- ExactAssemblyScope scope(&masm, 7 * kInstructionSize);
- // The address used in prfm doesn't have to be valid.
+ // We have to use the Assembler directly for this.
+ ExactAssemblyScope guard(&masm, 3 * kInstructionSize);
__ prfm(op, INT64_C(0));
__ prfm(op, 1);
__ prfm(op, -1);
+ }
+
+ for (PrefetchOperation op: kPrfmOperations) {
+ // Also test named operations.
+ ExactAssemblyScope guard(&masm, 4 * kInstructionSize);
+ // The address used in prfm doesn't have to be valid.
__ prfm(op, 1000);
__ prfm(op, -1000);
__ prfm(op, 0x3ffff);
@@ -4237,10 +4273,16 @@ TEST(prfm_literal) {
}
__ Bind(&end_of_pool_before);
- for (int i = 0; i < (1 << ImmPrefetchOperation_width); i++) {
+ for (int op = 0; op < (1 << ImmPrefetchOperation_width); op++) {
// Unallocated prefetch operations are ignored, so test all of them.
- PrefetchOperation op = static_cast<PrefetchOperation>(i);
+ // We have to use the Assembler directly for this.
+ ExactAssemblyScope guard(&masm, 2 * kInstructionSize);
+ __ prfm(op, &before);
+ __ prfm(op, &after);
+ }
+ for (PrefetchOperation op: kPrfmOperations) {
+ // Also test named operations.
ExactAssemblyScope guard(&masm, 2 * kInstructionSize);
__ prfm(op, &before);
__ prfm(op, &after);
@@ -4268,10 +4310,7 @@ TEST(prfm_wide) {
// The address used in prfm doesn't have to be valid.
__ Mov(x0, 0x0123456789abcdef);
- for (int i = 0; i < (1 << ImmPrefetchOperation_width); i++) {
- // Unallocated prefetch operations are ignored, so test all of them.
- PrefetchOperation op = static_cast<PrefetchOperation>(i);
-
+ for (PrefetchOperation op: kPrfmOperations) {
__ Prfm(op, MemOperand(x0, 0x40000));
__ Prfm(op, MemOperand(x0, -0x40001));
__ Prfm(op, MemOperand(x0, UINT64_C(0x5555555555555555)));
@@ -4319,9 +4358,25 @@ TEST(load_prfm_literal) {
}
__ Bind(&end_of_pool_before);
- for (int i = 0; i < (1 << ImmPrefetchOperation_width); i++) {
+ for (int op = 0; op < (1 << ImmPrefetchOperation_width); op++) {
// Unallocated prefetch operations are ignored, so test all of them.
- PrefetchOperation op = static_cast<PrefetchOperation>(i);
+ ExactAssemblyScope scope(&masm, 10 * kInstructionSize);
+
+ __ prfm(op, &before_x);
+ __ prfm(op, &before_w);
+ __ prfm(op, &before_sx);
+ __ prfm(op, &before_d);
+ __ prfm(op, &before_s);
+
+ __ prfm(op, &after_x);
+ __ prfm(op, &after_w);
+ __ prfm(op, &after_sx);
+ __ prfm(op, &after_d);
+ __ prfm(op, &after_s);
+ }
+
+ for (PrefetchOperation op: kPrfmOperations) {
+ // Also test named operations.
ExactAssemblyScope scope(&masm, 10 * kInstructionSize);
__ prfm(op, &before_x);
diff --git a/test/aarch64/test-disasm-aarch64.cc b/test/aarch64/test-disasm-aarch64.cc
index 065459a7..c59b8671 100644
--- a/test/aarch64/test-disasm-aarch64.cc
+++ b/test/aarch64/test-disasm-aarch64.cc
@@ -1955,11 +1955,10 @@ TEST(prfm_operations) {
const int expected_count = sizeof(expected) / sizeof(expected[0]);
VIXL_STATIC_ASSERT((1 << ImmPrefetchOperation_width) == expected_count);
- for (int i = 0; i < (1 << ImmPrefetchOperation_width); i++) {
- PrefetchOperation op = static_cast<PrefetchOperation>(i);
- COMPARE_PREFIX(prfm(op, INT64_C(0)), expected[i]);
- COMPARE_PREFIX(prfm(op, MemOperand(x0, 0)), expected[i]);
- COMPARE_PREFIX(prfm(op, MemOperand(x0, x1)), expected[i]);
+ for (int op = 0; op < (1 << ImmPrefetchOperation_width); op++) {
+ COMPARE_PREFIX(prfm(op, INT64_C(0)), expected[op]);
+ COMPARE_PREFIX(prfm(op, MemOperand(x0, 0)), expected[op]);
+ COMPARE_PREFIX(prfm(op, MemOperand(x0, x1)), expected[op]);
}
CLEANUP();
@@ -1986,9 +1985,8 @@ TEST(prfum_operations) {
const int expected_count = sizeof(expected) / sizeof(expected[0]);
VIXL_STATIC_ASSERT((1 << ImmPrefetchOperation_width) == expected_count);
- for (int i = 0; i < (1 << ImmPrefetchOperation_width); i++) {
- PrefetchOperation op = static_cast<PrefetchOperation>(i);
- COMPARE_PREFIX(prfum(op, MemOperand(x0, 0)), expected[i]);
+ for (int op = 0; op < (1 << ImmPrefetchOperation_width); op++) {
+ COMPARE_PREFIX(prfum(op, MemOperand(x0, 0)), expected[op]);
}
CLEANUP();