diff options
author | Martyn Capewell <martyn.capewell@arm.com> | 2020-06-02 16:40:09 +0100 |
---|---|---|
committer | Jacob Bramley <jacob.bramley@arm.com> | 2020-06-19 08:27:52 +0000 |
commit | 7db8210df5ceb1609e8d2fe879ec73d749e83ba6 (patch) | |
tree | c93492d61da22522ab33e96480672d45546dbc08 /test | |
parent | ae3902af7d8a8bd817ff2dbe34be30bf905099a7 (diff) | |
download | vixl-7db8210df5ceb1609e8d2fe879ec73d749e83ba6.tar.gz |
[sve] Implement fmov aliases.
Implement fmov aliases using fcpy and fdup.
Change-Id: I8fb95bac8a2c2aed0f74872d071e1ad4b30b68bd
Diffstat (limited to 'test')
-rw-r--r-- | test/aarch64/test-assembler-sve-aarch64.cc | 11
-rw-r--r-- | test/aarch64/test-disasm-sve-aarch64.cc | 27
2 files changed, 30 insertions, 8 deletions
diff --git a/test/aarch64/test-assembler-sve-aarch64.cc b/test/aarch64/test-assembler-sve-aarch64.cc index 2e647a87..6fb1092e 100644 --- a/test/aarch64/test-assembler-sve-aarch64.cc +++ b/test/aarch64/test-assembler-sve-aarch64.cc @@ -6689,7 +6689,7 @@ TEST_SVE(sve_fcpy_imm) { __ Fcpy(z6.VnS(), pg.Merging(), 6.0); __ Fcpy(z7.VnD(), pg.Merging(), Float16(7.0)); __ Fcpy(z8.VnD(), pg.Merging(), 8.0f); - __ Fcpy(z9.VnD(), pg.Merging(), -9.0); + __ Fmov(z9.VnD(), pg.Merging(), -9.0); // Unencodable immediates. __ Fcpy(z10.VnS(), pg.Merging(), 0.0); @@ -6697,6 +6697,11 @@ TEST_SVE(sve_fcpy_imm) { __ Fcpy(z12.VnD(), pg.Merging(), RawbitsToDouble(0x7ff0000012340000)); // NaN __ Fcpy(z13.VnH(), pg.Merging(), kFP64NegativeInfinity); + // Fmov alias. + __ Fmov(z14.VnS(), pg.Merging(), 0.0); + __ Fmov(z15.VnH(), pg.Merging(), Float16(42.0)); + __ Fmov(z16.VnD(), pg.Merging(), RawbitsToDouble(0x7ff0000012340000)); // NaN + __ Fmov(z17.VnH(), pg.Merging(), kFP64NegativeInfinity); END(); if (CAN_RUN()) { @@ -6781,6 +6786,10 @@ TEST_SVE(sve_fcpy_imm) { {0xe9eaebecfc00eff0, 0xf1f2fc00f5f6fc00, 0xfc00fbfcfdfefc00}; ASSERT_EQUAL_SVE(expected_z13, z13.VnD()); + ASSERT_EQUAL_SVE(z10.VnD(), z14.VnD()); + ASSERT_EQUAL_SVE(z11.VnD(), z15.VnD()); + ASSERT_EQUAL_SVE(z12.VnD(), z16.VnD()); + ASSERT_EQUAL_SVE(z13.VnD(), z17.VnD()); // clang-format on } } diff --git a/test/aarch64/test-disasm-sve-aarch64.cc b/test/aarch64/test-disasm-sve-aarch64.cc index 768f2e13..3a6ab7d5 100644 --- a/test/aarch64/test-disasm-sve-aarch64.cc +++ b/test/aarch64/test-disasm-sve-aarch64.cc @@ -3021,11 +3021,11 @@ TEST(sve_cpy_fcpy_imm) { "mov z25.d, p13/m, #-42, lsl #8"); COMPARE_PREFIX(fcpy(z20.VnH(), p11.Merging(), 29.0), - "fcpy z20.h, p11/m, #0x3d (29.0000)"); - COMPARE_PREFIX(fcpy(z20.VnS(), p11.Merging(), -31.0), - "fcpy z20.s, p11/m, #0xbf (-31.0000)"); + "fmov z20.h, p11/m, #0x3d (29.0000)"); + COMPARE_PREFIX(fmov(z20.VnS(), p11.Merging(), -31.0), + "fmov z20.s, p11/m, #0xbf (-31.0000)"); 
COMPARE_PREFIX(fcpy(z20.VnD(), p11.Merging(), 1.0), - "fcpy z20.d, p11/m, #0x70 (1.0000)"); + "fmov z20.d, p11/m, #0x70 (1.0000)"); CLEANUP(); } @@ -3090,9 +3090,10 @@ TEST(sve_int_wide_imm_unpredicated) { "uqsub z13.d, z13.d, #245, lsl #8"); COMPARE_PREFIX(fdup(z26.VnH(), Float16(-5.0f)), - "fdup z26.h, #0x94 (-5.0000)"); - COMPARE_PREFIX(fdup(z27.VnS(), -13.0f), "fdup z27.s, #0xaa (-13.0000)"); - COMPARE_PREFIX(fdup(z28.VnD(), 1.0f), "fdup z28.d, #0x70 (1.0000)"); + "fmov z26.h, #0x94 (-5.0000)"); + COMPARE_PREFIX(fdup(z27.VnS(), -13.0f), "fmov z27.s, #0xaa (-13.0000)"); + COMPARE_PREFIX(fdup(z28.VnD(), 1.0f), "fmov z28.d, #0x70 (1.0000)"); + COMPARE_PREFIX(fmov(z28.VnD(), 1.0f), "fmov z28.d, #0x70 (1.0000)"); COMPARE_PREFIX(mul(z15.VnB(), z15.VnB(), -128), "mul z15.b, z15.b, #-128"); COMPARE_PREFIX(mul(z16.VnH(), z16.VnH(), -1), "mul z16.h, z16.h, #-1"); @@ -3236,6 +3237,7 @@ TEST(sve_int_wide_imm_unpredicated_macro) { COMPARE_MACRO(Dup(z9.VnD(), 0x80000000), "mov z9.d, #0x80000000"); COMPARE_MACRO(Mov(z9.VnD(), 0x80000000), "mov z9.d, #0x80000000"); COMPARE_MACRO(Fdup(z26.VnH(), Float16(0.0)), "mov z26.h, #0"); + COMPARE_MACRO(Fdup(z26.VnH(), Float16(0.0)), "mov z26.h, #0"); COMPARE_MACRO(Fdup(z27.VnS(), 255.0f), "mov w16, #0x437f0000\n" "mov z27.s, w16"); @@ -3245,6 +3247,17 @@ TEST(sve_int_wide_imm_unpredicated_macro) { "movk x16, #0xb0f2, lsl #32\n" "movk x16, #0x4028, lsl #48\n" "mov z28.d, x16"); + COMPARE_MACRO(Fmov(z26.VnH(), Float16(0.0)), "mov z26.h, #0"); + COMPARE_MACRO(Fmov(z26.VnH(), Float16(0.0)), "mov z26.h, #0"); + COMPARE_MACRO(Fmov(z27.VnS(), 255.0f), + "mov w16, #0x437f0000\n" + "mov z27.s, w16"); + COMPARE_MACRO(Fmov(z28.VnD(), 12.3456), + "mov x16, #0xfec5\n" + "movk x16, #0x7bb2, lsl #16\n" + "movk x16, #0xb0f2, lsl #32\n" + "movk x16, #0x4028, lsl #48\n" + "mov z28.d, x16"); // Only predicated version of instruction is supported for unencodable // immediate. |