author     Jacob Bramley <jacob.bramley@arm.com>    2020-06-30 21:40:35 +0100
committer  Jacob Bramley <jacob.bramley@arm.com>    2020-07-16 10:52:42 +0100
commit     b9616b366a8ac4be13bbd729c203049cd39dca93 (patch)
tree       f14b5cfb2df5525e2641aa6815287088a06e58c3 /test
parent     8c4ceb6a3ac0d32dd562998baef0322882d295bd (diff)
download   vixl-b9616b366a8ac4be13bbd729c203049cd39dca93.tar.gz
Fix and enable CanTakeSVEMovprfx.
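
The re-enabled tests below are the negative movprfx checks: each pair emits a
movprfx whose destination is then reused illegally by the prefixed instruction
(aliased as an independent source, used with a mismatched lane size, or with
mismatched predication), and the test verifies that the pair is not treated as
a valid movprfx sequence. As a rough, hypothetical sketch of the rule being
exercised (this is not VIXL's CanTakeSVEMovprfx() implementation, and the
helper names below are invented for illustration):

    #include <initializer_list>

    // Minimal stand-ins for illustration only; VIXL's register types differ.
    struct ZReg { int code; int lane_size_in_bits; };
    struct PReg { int code; };

    // The movprfx destination may appear in the prefixed instruction only as
    // its destructive destination; if it also shows up as an independent
    // source operand, the pair is invalid (see movprfx_negative_aliasing).
    bool DestinationAliasesSource(const ZReg& movprfx_dst,
                                  std::initializer_list<ZReg> sources) {
      for (const ZReg& src : sources) {
        if (src.code == movprfx_dst.code) return true;
      }
      return false;
    }

    // A predicated movprfx must also use the same governing predicate and the
    // same lane size as the instruction it prefixes (see
    // movprfx_negative_lane_size and movprfx_negative_predication).
    bool PredicatedFormsAgree(const ZReg& movprfx_dst, const PReg& movprfx_pg,
                              const ZReg& insn_dst, const PReg& insn_pg) {
      return (movprfx_pg.code == insn_pg.code) &&
             (movprfx_dst.lane_size_in_bits == insn_dst.lane_size_in_bits);
    }
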
Change-Id: I9afc03fb9e11546b9e6caf04497339bf45b285b6
Diffstat (limited to 'test')
-rw-r--r--   test/aarch64/test-api-movprfx-aarch64.cc   1762
1 file changed, 739 insertions(+), 1023 deletions(-)
diff --git a/test/aarch64/test-api-movprfx-aarch64.cc b/test/aarch64/test-api-movprfx-aarch64.cc index e218ac41..c8d10f6c 100644 --- a/test/aarch64/test-api-movprfx-aarch64.cc +++ b/test/aarch64/test-api-movprfx-aarch64.cc @@ -71,8 +71,6 @@ static void CheckAndMaybeDisassembleMovprfxPairs(const CodeBuffer* buffer, VIXL_CHECK(!any_failures); } -// Disable some movprfx tests until CanTakeSVEMovprfx() is updated. -#if 0 TEST(movprfx_negative_aliasing) { // Test that CanTakeSVEMovprfx() checks that the movprfx destination does not // alias an input to the prefixed instruction. @@ -81,7 +79,7 @@ TEST(movprfx_negative_aliasing) { { // We have to use the Assembler directly to generate movprfx, so we need // to manually reserve space for the code we're about to emit. - static const size_t kPairCount = 69; + static const size_t kPairCount = 73; CodeBufferCheckScope guard(&assm, kPairCount * 2 * kInstructionSize); __ movprfx(z0.VnB(), p0.Merging(), z9.VnB()); @@ -96,13 +94,11 @@ TEST(movprfx_negative_aliasing) { __ movprfx(z2, z4); __ asr(z2.VnS(), p2.Merging(), z2.VnS(), z2.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z10, z18); - // __ asr(z10.VnH(), p2.Merging(), z10.VnH(), z10.VnD()); + __ movprfx(z10, z18); + __ asr(z10.VnH(), p2.Merging(), z10.VnH(), z10.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z17.VnD(), p5.Zeroing(), z20.VnD()); - // __ asr(z17.VnD(), p5.Merging(), z17.VnD(), z17.VnD()); + __ movprfx(z17.VnD(), p5.Zeroing(), z20.VnD()); + __ asr(z17.VnD(), p5.Merging(), z17.VnD(), z17.VnD()); __ movprfx(z22, z9); __ asrr(z22.VnH(), p1.Merging(), z22.VnH(), z22.VnH()); @@ -110,13 +106,11 @@ TEST(movprfx_negative_aliasing) { __ movprfx(z0.VnS(), p6.Zeroing(), z6.VnS()); __ bic(z0.VnS(), p6.Merging(), z0.VnS(), z0.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z12, z16); - // __ clasta(z12.VnD(), p5, z12.VnD(), z12.VnD()); + __ movprfx(z12, z16); + __ clasta(z12.VnD(), p5, z12.VnD(), z12.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z7, z15); - // __ clastb(z7.VnS(), p7, z7.VnS(), z7.VnS()); + __ movprfx(z7, z15); + __ clastb(z7.VnS(), p7, z7.VnS(), z7.VnS()); __ movprfx(z10, z29); __ cls(z10.VnH(), p2.Merging(), z10.VnH()); @@ -133,20 +127,17 @@ TEST(movprfx_negative_aliasing) { __ movprfx(z19.VnB(), p6.Zeroing(), z4.VnB()); __ eor(z19.VnB(), p6.Merging(), z19.VnB(), z19.VnB()); - // TODO: Enable once implemented. - // __ movprfx(z27, z2); - // __ ext(z27.VnB(), z27.VnB(), z27.VnB(), 42); + __ movprfx(z27, z2); + __ ext(z27.VnB(), z27.VnB(), z27.VnB(), 42); __ movprfx(z4.VnS(), p1.Zeroing(), z22.VnS()); __ lsl(z4.VnS(), p1.Merging(), z4.VnS(), z4.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z4, z5); - // __ lsl(z4.VnB(), p5.Merging(), z4.VnB(), z4.VnD()); + __ movprfx(z4, z5); + __ lsl(z4.VnB(), p5.Merging(), z4.VnB(), z4.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z11.VnD(), p4.Merging(), z29.VnD()); - // __ lsl(z11.VnD(), p4.Merging(), z11.VnD(), z11.VnD()); + __ movprfx(z11.VnD(), p4.Merging(), z29.VnD()); + __ lsl(z11.VnD(), p4.Merging(), z11.VnD(), z11.VnD()); __ movprfx(z12.VnD(), p6.Merging(), z3.VnD()); __ lslr(z12.VnD(), p6.Merging(), z12.VnD(), z12.VnD()); @@ -154,13 +145,11 @@ TEST(movprfx_negative_aliasing) { __ movprfx(z7, z2); __ lsr(z7.VnB(), p4.Merging(), z7.VnB(), z7.VnB()); - // TODO: Enable once implemented. 
- // __ movprfx(z25.VnH(), p6.Merging(), z28.VnH()); - // __ lsr(z25.VnH(), p6.Merging(), z25.VnH(), z25.VnD()); + __ movprfx(z25.VnH(), p6.Merging(), z28.VnH()); + __ lsr(z25.VnH(), p6.Merging(), z25.VnH(), z25.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z14.VnD(), p6.Merging(), z6.VnD()); - // __ lsr(z14.VnD(), p6.Merging(), z14.VnD(), z14.VnD()); + __ movprfx(z14.VnD(), p6.Merging(), z6.VnD()); + __ lsr(z14.VnD(), p6.Merging(), z14.VnD(), z14.VnD()); __ movprfx(z26.VnH(), p6.Zeroing(), z27.VnH()); __ lsrr(z26.VnH(), p6.Merging(), z26.VnH(), z26.VnH()); @@ -201,46 +190,44 @@ TEST(movprfx_negative_aliasing) { __ movprfx(z25.VnH(), p5.Zeroing(), z11.VnH()); __ orr(z25.VnH(), p5.Merging(), z25.VnH(), z25.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z17.VnH(), p1.Merging(), z22.VnH()); - // __ rbit(z17.VnH(), p1.Merging(), z17.VnH()); + __ movprfx(z17.VnH(), p1.Merging(), z22.VnH()); + __ rbit(z17.VnH(), p1.Merging(), z17.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z11, z25); - // __ revb(z11.VnD(), p6.Merging(), z11.VnD()); + __ movprfx(z11, z25); + __ revb(z11.VnD(), p6.Merging(), z11.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z13, z27); - // __ revh(z13.VnS(), p2.Merging(), z13.VnS()); + __ movprfx(z13, z27); + __ revh(z13.VnS(), p2.Merging(), z13.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z30.VnD(), p6.Merging(), z20.VnD()); - // __ revw(z30.VnD(), p6.Merging(), z30.VnD()); + __ movprfx(z30.VnD(), p6.Merging(), z20.VnD()); + __ revw(z30.VnD(), p6.Merging(), z30.VnD()); __ movprfx(z2.VnD(), p2.Merging(), z21.VnD()); __ sabd(z2.VnD(), p2.Merging(), z2.VnD(), z2.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z0, z7); - // __ sdiv(z0.VnD(), p0.Merging(), z0.VnD(), z0.VnD()); + __ movprfx(z0, z7); + __ sdiv(z0.VnD(), p0.Merging(), z0.VnD(), z0.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z19, z28); - // __ sdivr(z19.VnS(), p1.Merging(), z19.VnS(), z19.VnS()); + __ movprfx(z19, z28); + __ sdivr(z19.VnS(), p1.Merging(), z19.VnS(), z19.VnS()); - __ movprfx(z20, z29); - __ sdot(z20.VnS(), z20.VnB(), z18.VnB()); + __ movprfx(z5, z18); + __ sdot(z5.VnS(), z18.VnB(), z5.VnB(), 1); - // TODO: Enable once implemented. - // __ movprfx(z21, z2); - // __ sdot(z21.VnS(), z21.VnH(), z2.VnH(), 1); + __ movprfx(z15, z11); + __ sdot(z15.VnD(), z2.VnH(), z15.VnH(), 1); - __ movprfx(z2, z16); - __ sdot(z2.VnS(), z16.VnB(), z2.VnB()); + __ movprfx(z30, z13); + __ sdot(z30.VnD(), z30.VnH(), z13.VnH(), 1); - // TODO: Enable once implemented. - // __ movprfx(z5, z23); - // __ sdot(z5.VnS(), z23.VnH(), z5.VnH(), 1); + __ movprfx(z8, z9); + __ sdot(z8.VnS(), z8.VnB(), z9.VnB()); + + __ movprfx(z23, z14); + __ sdot(z23.VnS(), z14.VnB(), z23.VnB()); + + __ movprfx(z26, z5); + __ sdot(z26.VnS(), z26.VnB(), z5.VnB(), 1); __ movprfx(z14, z15); __ smax(z14.VnB(), p2.Merging(), z14.VnB(), z14.VnB()); @@ -251,9 +238,8 @@ TEST(movprfx_negative_aliasing) { __ movprfx(z22, z18); __ smulh(z22.VnB(), p2.Merging(), z22.VnB(), z22.VnB()); - // TODO: Enable once implemented. - // __ movprfx(z23.VnB(), p2.Zeroing(), z13.VnB()); - // __ splice(z23.VnB(), p2.Merging(), z23.VnB(), z23.VnB()); + __ movprfx(z8, z19); + __ splice(z8.VnD(), p2, z8.VnD(), z8.VnD()); __ movprfx(z23.VnH(), p6.Zeroing(), z2.VnH()); __ sub(z23.VnH(), p6.Merging(), z23.VnH(), z23.VnH()); @@ -264,38 +250,38 @@ TEST(movprfx_negative_aliasing) { __ movprfx(z28, z31); __ sxtb(z28.VnS(), p6.Merging(), z28.VnS()); - // TODO: Enable once implemented. 
- // __ movprfx(z14.VnD(), p6.Merging(), z17.VnD()); - // __ sxth(z14.VnD(), p6.Merging(), z14.VnD()); + __ movprfx(z14.VnD(), p6.Merging(), z17.VnD()); + __ sxth(z14.VnD(), p6.Merging(), z14.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z21.VnD(), p0.Zeroing(), z28.VnD()); - // __ sxtw(z21.VnD(), p0.Merging(), z21.VnD()); + __ movprfx(z21.VnD(), p0.Zeroing(), z28.VnD()); + __ sxtw(z21.VnD(), p0.Merging(), z21.VnD()); __ movprfx(z25, z30); __ uabd(z25.VnB(), p5.Merging(), z25.VnB(), z25.VnB()); - // TODO: Enable once implemented. - // __ movprfx(z13.VnD(), p2.Merging(), z30.VnD()); - // __ udiv(z13.VnD(), p2.Merging(), z13.VnD(), z13.VnD()); + __ movprfx(z13.VnD(), p2.Merging(), z30.VnD()); + __ udiv(z13.VnD(), p2.Merging(), z13.VnD(), z13.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z19.VnD(), p4.Zeroing(), z6.VnD()); - // __ udivr(z19.VnD(), p4.Merging(), z19.VnD(), z19.VnD()); + __ movprfx(z19.VnD(), p4.Zeroing(), z6.VnD()); + __ udivr(z19.VnD(), p4.Merging(), z19.VnD(), z19.VnD()); - __ movprfx(z11, z9); - __ udot(z11.VnS(), z11.VnB(), z9.VnB()); + __ movprfx(z1, z20); + __ udot(z1.VnS(), z18.VnB(), z1.VnB(), 1); - // TODO: Enable once implemented. - // __ movprfx(z7, z19); - // __ udot(z7.VnD(), z7.VnS(), z19.VnS(), 1); + __ movprfx(z8, z2); + __ udot(z8.VnD(), z2.VnH(), z8.VnH(), 1); - __ movprfx(z14, z24); - __ udot(z14.VnS(), z0.VnB(), z14.VnB()); + __ movprfx(z28, z10); + __ udot(z28.VnD(), z28.VnH(), z7.VnH(), 1); - // TODO: Enable once implemented. - // __ movprfx(z6, z28); - // __ udot(z6.VnD(), z28.VnS(), z6.VnS(), 1); + __ movprfx(z21, z11); + __ udot(z21.VnD(), z21.VnH(), z11.VnH()); + + __ movprfx(z1, z22); + __ udot(z1.VnD(), z10.VnH(), z1.VnH()); + + __ movprfx(z8, z23); + __ udot(z8.VnS(), z8.VnB(), z0.VnB(), 1); __ movprfx(z10.VnB(), p5.Zeroing(), z0.VnB()); __ umax(z10.VnB(), p5.Merging(), z10.VnB(), z10.VnB()); @@ -309,13 +295,11 @@ TEST(movprfx_negative_aliasing) { __ movprfx(z23, z25); __ uxtb(z23.VnS(), p7.Merging(), z23.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z14.VnS(), p3.Zeroing(), z5.VnS()); - // __ uxth(z14.VnS(), p3.Merging(), z14.VnS()); + __ movprfx(z14.VnS(), p3.Zeroing(), z5.VnS()); + __ uxth(z14.VnS(), p3.Merging(), z14.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z14, z5); - // __ uxtw(z14.VnD(), p3.Merging(), z14.VnD()); + __ movprfx(z14, z5); + __ uxtw(z14.VnD(), p3.Merging(), z14.VnD()); } assm.FinalizeCode(); @@ -330,87 +314,68 @@ TEST(movprfx_negative_aliasing_fp) { { // We have to use the Assembler directly to generate movprfx, so we need // to manually reserve space for the code we're about to emit. - static const size_t kPairCount = 74; + static const size_t kPairCount = 78; CodeBufferCheckScope guard(&assm, kPairCount * 2 * kInstructionSize); - // TODO: Enable once implemented. - // __ movprfx(z17.VnS(), p1.Zeroing(), z12.VnS()); - // __ fabd(z17.VnS(), p1.Merging(), z17.VnS(), z17.VnS()); + __ movprfx(z17.VnS(), p1.Zeroing(), z12.VnS()); + __ fabd(z17.VnS(), p1.Merging(), z17.VnS(), z17.VnS()); __ movprfx(z13, z23); __ fabs(z13.VnS(), p4.Merging(), z13.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z24.VnS(), p5.Merging(), z15.VnS()); - // __ fadd(z24.VnS(), p5.Merging(), z24.VnS(), z24.VnS()); + __ movprfx(z24.VnS(), p5.Merging(), z15.VnS()); + __ fadd(z24.VnS(), p5.Merging(), z24.VnS(), z24.VnS()); - // TODO: Enable once implemented. 
- // __ movprfx(z28.VnD(), p5.Zeroing(), z14.VnD()); - // __ fcadd(z28.VnD(), p5.Merging(), z28.VnD(), z28.VnD(), 90); + __ movprfx(z28.VnD(), p5.Zeroing(), z14.VnD()); + __ fcadd(z28.VnD(), p5.Merging(), z28.VnD(), z28.VnD(), 90); - // TODO: Enable once implemented. - // __ movprfx(z19, z28); - // __ fcmla(z19.VnH(), z19.VnH(), z11.VnH(), 2, 180); + __ movprfx(z5, z0); + __ fcmla(z5.VnH(), z0.VnH(), z5.VnH(), 2, 180); - // TODO: Enable once implemented. - // __ movprfx(z15, z26); - // __ fcmla(z15.VnH(), z26.VnH(), z15.VnH(), 2, 180); + __ movprfx(z10, z4); + __ fcmla(z10.VnS(), z8.VnS(), z10.VnS(), 1, 270); - // TODO: Enable once implemented. - // __ movprfx(z31, z0); - // __ fcmla(z31.VnS(), z31.VnS(), z0.VnS(), 1, 270); + __ movprfx(z12, z26); + __ fcmla(z12.VnH(), z12.VnH(), z3.VnH(), 2, 180); - // TODO: Enable once implemented. - // __ movprfx(z3, z18); - // __ fcmla(z3.VnS(), z18.VnS(), z3.VnS(), 1, 270); + __ movprfx(z8, z1); + __ fcmla(z8.VnS(), z8.VnS(), z1.VnS(), 1, 270); - // TODO: Enable once implemented. - // __ movprfx(z16.VnD(), p0.Merging(), z13.VnD()); - // __ fcvt(z16.VnD(), p0.Merging(), z16.VnH()); + __ movprfx(z16.VnD(), p0.Merging(), z13.VnD()); + __ fcvt(z16.VnD(), p0.Merging(), z16.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z12.VnD(), p7.Zeroing(), z13.VnD()); - // __ fcvt(z12.VnD(), p7.Merging(), z12.VnS()); + __ movprfx(z12.VnD(), p7.Zeroing(), z13.VnD()); + __ fcvt(z12.VnD(), p7.Merging(), z12.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z14, z26); - // __ fcvt(z14.VnS(), p5.Merging(), z14.VnD()); + __ movprfx(z14, z26); + __ fcvt(z14.VnS(), p5.Merging(), z14.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z26, z2); - // __ fcvt(z26.VnH(), p7.Merging(), z26.VnD()); + __ movprfx(z26, z2); + __ fcvt(z26.VnH(), p7.Merging(), z26.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z25.VnD(), p2.Merging(), z13.VnD()); - // __ fcvtzs(z25.VnD(), p2.Merging(), z25.VnH()); + __ movprfx(z25.VnD(), p2.Merging(), z13.VnD()); + __ fcvtzs(z25.VnD(), p2.Merging(), z25.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z31, z2); - // __ fcvtzs(z31.VnH(), p7.Merging(), z31.VnH()); + __ movprfx(z31, z2); + __ fcvtzs(z31.VnH(), p7.Merging(), z31.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z21.VnD(), p1.Merging(), z7.VnD()); - // __ fcvtzs(z21.VnD(), p1.Merging(), z21.VnS()); + __ movprfx(z21.VnD(), p1.Merging(), z7.VnD()); + __ fcvtzs(z21.VnD(), p1.Merging(), z21.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z5, z17); - // __ fcvtzs(z5.VnS(), p5.Merging(), z5.VnD()); + __ movprfx(z5, z17); + __ fcvtzs(z5.VnS(), p5.Merging(), z5.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z19.VnD(), p1.Zeroing(), z16.VnD()); - // __ fcvtzu(z19.VnD(), p1.Merging(), z19.VnH()); + __ movprfx(z19.VnD(), p1.Zeroing(), z16.VnD()); + __ fcvtzu(z19.VnD(), p1.Merging(), z19.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z2.VnH(), p7.Zeroing(), z28.VnH()); - // __ fcvtzu(z2.VnH(), p7.Merging(), z2.VnH()); + __ movprfx(z2.VnH(), p7.Zeroing(), z28.VnH()); + __ fcvtzu(z2.VnH(), p7.Merging(), z2.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z21.VnD(), p7.Zeroing(), z27.VnD()); - // __ fcvtzu(z21.VnD(), p7.Merging(), z21.VnS()); + __ movprfx(z21.VnD(), p7.Zeroing(), z27.VnD()); + __ fcvtzu(z21.VnD(), p7.Merging(), z21.VnS()); - // TODO: Enable once implemented. 
- // __ movprfx(z22.VnD(), p4.Zeroing(), z8.VnD()); - // __ fcvtzu(z22.VnS(), p4.Merging(), z22.VnD()); + __ movprfx(z22.VnD(), p4.Zeroing(), z8.VnD()); + __ fcvtzu(z22.VnS(), p4.Merging(), z22.VnD()); __ movprfx(z0.VnS(), p5.Merging(), z5.VnS()); __ fdiv(z0.VnS(), p5.Merging(), z0.VnS(), z0.VnS()); @@ -424,21 +389,29 @@ TEST(movprfx_negative_aliasing_fp) { __ movprfx(z2.VnS(), p5.Zeroing(), z10.VnS()); __ fmad(z2.VnS(), p5.Merging(), z14.VnS(), z2.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z24, z5); - // __ fmax(z24.VnS(), p1.Merging(), z24.VnS(), z24.VnS()); + __ movprfx(z24, z5); + __ fmax(z24.VnS(), p1.Merging(), z24.VnS(), z24.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z15.VnD(), p2.Merging(), z26.VnD()); - // __ fmaxnm(z15.VnD(), p2.Merging(), z15.VnD(), z15.VnD()); + __ movprfx(z15.VnD(), p2.Merging(), z26.VnD()); + __ fmaxnm(z15.VnD(), p2.Merging(), z15.VnD(), z15.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z20, z22); - // __ fmin(z20.VnH(), p0.Merging(), z20.VnH(), z20.VnH()); + __ movprfx(z20, z22); + __ fmin(z20.VnH(), p0.Merging(), z20.VnH(), z20.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z24.VnS(), p6.Zeroing(), z30.VnS()); - // __ fminnm(z24.VnS(), p6.Merging(), z24.VnS(), z24.VnS()); + __ movprfx(z24.VnS(), p6.Zeroing(), z30.VnS()); + __ fminnm(z24.VnS(), p6.Merging(), z24.VnS(), z24.VnS()); + + __ movprfx(z4, z24); + __ fmla(z4.VnH(), z24.VnH(), z4.VnH(), 7); + + __ movprfx(z4, z7); + __ fmla(z4.VnS(), z24.VnS(), z4.VnS(), 3); + + __ movprfx(z5, z28); + __ fmla(z5.VnD(), z28.VnD(), z5.VnD(), 1); + + __ movprfx(z24, z2); + __ fmla(z24.VnD(), z24.VnD(), z2.VnD(), 1); __ movprfx(z7, z21); __ fmla(z7.VnH(), p2.Merging(), z7.VnH(), z31.VnH()); @@ -446,21 +419,23 @@ TEST(movprfx_negative_aliasing_fp) { __ movprfx(z25.VnH(), p5.Zeroing(), z29.VnH()); __ fmla(z25.VnH(), p5.Merging(), z29.VnH(), z25.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z27, z29); - // __ fmla(z27.VnH(), z27.VnH(), z1.VnH(), 7); + __ movprfx(z31, z25); + __ fmla(z31.VnH(), z31.VnH(), z2.VnH(), 7); - // TODO: Enable once implemented. - // __ movprfx(z0, z21); - // __ fmla(z0.VnH(), z21.VnH(), z0.VnH(), 7); + __ movprfx(z15, z4); + __ fmla(z15.VnS(), z15.VnS(), z4.VnS(), 3); - // TODO: Enable once implemented. - // __ movprfx(z9, z20); - // __ fmla(z9.VnS(), z9.VnS(), z20.VnS(), 1); + __ movprfx(z7, z11); + __ fmls(z7.VnH(), z11.VnH(), z7.VnH(), 4); - // TODO: Enable once implemented. - // __ movprfx(z31, z3); - // __ fmla(z31.VnD(), z3.VnD(), z31.VnD(), 1); + __ movprfx(z3, z10); + __ fmls(z3.VnS(), z10.VnS(), z3.VnS(), 3); + + __ movprfx(z5, z16); + __ fmls(z5.VnD(), z16.VnD(), z5.VnD(), 1); + + __ movprfx(z31, z26); + __ fmls(z31.VnD(), z31.VnD(), z8.VnD(), 1); __ movprfx(z5.VnH(), p3.Merging(), z2.VnH()); __ fmls(z5.VnH(), p3.Merging(), z5.VnH(), z2.VnH()); @@ -468,21 +443,11 @@ TEST(movprfx_negative_aliasing_fp) { __ movprfx(z22.VnS(), p3.Zeroing(), z17.VnS()); __ fmls(z22.VnS(), p3.Merging(), z21.VnS(), z22.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z27, z24); - // __ fmls(z27.VnH(), z27.VnH(), z24.VnH(), 4); - - // TODO: Enable once implemented. - // __ movprfx(z1, z29); - // __ fmls(z1.VnH(), z21.VnH(), z1.VnH(), 4); - - // TODO: Enable once implemented. - // __ movprfx(z4, z12); - // __ fmls(z4.VnS(), z4.VnS(), z14.VnS(), 1); + __ movprfx(z17, z2); + __ fmls(z17.VnH(), z17.VnH(), z2.VnH(), 4); - // TODO: Enable once implemented. 
- // __ movprfx(z14, z17); - // __ fmls(z14.VnD(), z17.VnD(), z14.VnD(), 1); + __ movprfx(z28, z11); + __ fmls(z28.VnS(), z28.VnS(), z0.VnS(), 3); __ movprfx(z15.VnD(), p1.Merging(), z31.VnD()); __ fmsb(z15.VnD(), p1.Merging(), z15.VnD(), z31.VnD()); @@ -490,13 +455,11 @@ TEST(movprfx_negative_aliasing_fp) { __ movprfx(z21.VnD(), p0.Zeroing(), z5.VnD()); __ fmsb(z21.VnD(), p0.Merging(), z19.VnD(), z21.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z0.VnH(), p3.Merging(), z31.VnH()); - // __ fmul(z0.VnH(), p3.Merging(), z0.VnH(), z0.VnH()); + __ movprfx(z0.VnH(), p3.Merging(), z31.VnH()); + __ fmul(z0.VnH(), p3.Merging(), z0.VnH(), z0.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z31.VnH(), p6.Merging(), z8.VnH()); - // __ fmulx(z31.VnH(), p6.Merging(), z31.VnH(), z31.VnH()); + __ movprfx(z31.VnH(), p6.Merging(), z8.VnH()); + __ fmulx(z31.VnH(), p6.Merging(), z31.VnH(), z31.VnH()); __ movprfx(z17.VnH(), p1.Zeroing(), z10.VnH()); __ fneg(z17.VnH(), p1.Merging(), z17.VnH()); @@ -525,89 +488,68 @@ TEST(movprfx_negative_aliasing_fp) { __ movprfx(z29.VnH(), p2.Zeroing(), z24.VnH()); __ fnmsb(z29.VnH(), p2.Merging(), z24.VnH(), z29.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z7.VnH(), p6.Merging(), z23.VnH()); - // __ frecpx(z7.VnH(), p6.Merging(), z7.VnH()); + __ movprfx(z7.VnH(), p6.Merging(), z23.VnH()); + __ frecpx(z7.VnH(), p6.Merging(), z7.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z17.VnS(), p5.Zeroing(), z2.VnS()); - // __ frinta(z17.VnS(), p5.Merging(), z17.VnS()); + __ movprfx(z17.VnS(), p5.Zeroing(), z2.VnS()); + __ frinta(z17.VnS(), p5.Merging(), z17.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z0.VnS(), p2.Zeroing(), z7.VnS()); - // __ frinti(z0.VnS(), p2.Merging(), z0.VnS()); + __ movprfx(z0.VnS(), p2.Zeroing(), z7.VnS()); + __ frinti(z0.VnS(), p2.Merging(), z0.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z8.VnH(), p3.Merging(), z20.VnH()); - // __ frintm(z8.VnH(), p3.Merging(), z8.VnH()); + __ movprfx(z8.VnH(), p3.Merging(), z20.VnH()); + __ frintm(z8.VnH(), p3.Merging(), z8.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z3.VnD(), p2.Zeroing(), z20.VnD()); - // __ frintn(z3.VnD(), p2.Merging(), z3.VnD()); + __ movprfx(z3.VnD(), p2.Zeroing(), z20.VnD()); + __ frintn(z3.VnD(), p2.Merging(), z3.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z11, z3); - // __ frintp(z11.VnS(), p4.Merging(), z11.VnS()); + __ movprfx(z11, z3); + __ frintp(z11.VnS(), p4.Merging(), z11.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z23, z29); - // __ frintx(z23.VnD(), p4.Merging(), z23.VnD()); + __ movprfx(z23, z29); + __ frintx(z23.VnD(), p4.Merging(), z23.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z4.VnH(), p4.Zeroing(), z14.VnH()); - // __ frintz(z4.VnH(), p4.Merging(), z4.VnH()); + __ movprfx(z4.VnH(), p4.Zeroing(), z14.VnH()); + __ frintz(z4.VnH(), p4.Merging(), z4.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z18.VnH(), p3.Zeroing(), z0.VnH()); - // __ fscale(z18.VnH(), p3.Merging(), z18.VnH(), z18.VnH()); + __ movprfx(z18.VnH(), p3.Zeroing(), z0.VnH()); + __ fscale(z18.VnH(), p3.Merging(), z18.VnH(), z18.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z2.VnS(), p6.Zeroing(), z4.VnS()); - // __ fsqrt(z2.VnS(), p6.Merging(), z2.VnS()); + __ movprfx(z2.VnS(), p6.Zeroing(), z4.VnS()); + __ fsqrt(z2.VnS(), p6.Merging(), z2.VnS()); - // TODO: Enable once implemented. 
- // __ movprfx(z14.VnD(), p4.Zeroing(), z31.VnD()); - // __ fsub(z14.VnD(), p4.Merging(), z14.VnD(), z14.VnD()); + __ movprfx(z14.VnD(), p4.Zeroing(), z31.VnD()); + __ fsub(z14.VnD(), p4.Merging(), z14.VnD(), z14.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z31.VnH(), p2.Merging(), z6.VnH()); - // __ fsubr(z31.VnH(), p2.Merging(), z31.VnH(), z31.VnH()); + __ movprfx(z31.VnH(), p2.Merging(), z6.VnH()); + __ fsubr(z31.VnH(), p2.Merging(), z31.VnH(), z31.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z4, z30); - // __ ftmad(z4.VnH(), z4.VnH(), z4.VnH(), 2); + __ movprfx(z4, z30); + __ ftmad(z4.VnH(), z4.VnH(), z4.VnH(), 2); - // TODO: Enable once implemented. - // __ movprfx(z0.VnD(), p1.Merging(), z6.VnD()); - // __ scvtf(z0.VnD(), p1.Merging(), z0.VnS());; + __ movprfx(z25.VnD(), p6.Zeroing(), z2.VnD()); + __ scvtf(z25.VnD(), p6.Merging(), z25.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z7.VnS(), p1.Zeroing(), z31.VnS()); - // __ scvtf(z7.VnS(), p1.Merging(), z7.VnS());; + __ movprfx(z0.VnD(), p3.Merging(), z16.VnD()); + __ scvtf(z0.VnD(), p3.Merging(), z0.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z10, z30); - // __ scvtf(z10.VnH(), p7.Merging(), z10.VnS());; + __ movprfx(z19, z23); + __ scvtf(z19.VnS(), p7.Merging(), z19.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z4, z31); - // __ scvtf(z4.VnH(), p4.Merging(), z4.VnD());; + __ movprfx(z19, z4); + __ scvtf(z19.VnH(), p4.Merging(), z19.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z26.VnD(), p6.Merging(), z23.VnD()); - // __ ucvtf(z26.VnD(), p6.Merging(), z26.VnS());; + __ movprfx(z13.VnD(), p4.Zeroing(), z6.VnD()); + __ ucvtf(z13.VnD(), p4.Merging(), z13.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z31.VnD(), p3.Zeroing(), z30.VnD()); - // __ ucvtf(z31.VnD(), p3.Merging(), z31.VnD());; + __ movprfx(z6.VnH(), p0.Zeroing(), z14.VnH()); + __ ucvtf(z6.VnH(), p0.Merging(), z6.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z15, z2); - // __ ucvtf(z15.VnH(), p5.Merging(), z15.VnS());; + __ movprfx(z19.VnS(), p4.Merging(), z12.VnS()); + __ ucvtf(z19.VnH(), p4.Merging(), z19.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z14.VnD(), p1.Zeroing(), z13.VnD()); - // __ ucvtf(z14.VnH(), p1.Merging(), z14.VnD());; + __ movprfx(z0.VnD(), p5.Zeroing(), z12.VnD()); + __ ucvtf(z0.VnH(), p5.Merging(), z0.VnD()); } assm.FinalizeCode(); @@ -652,14 +594,12 @@ TEST(movprfx_negative_instructions) { // This looks like a merging unary operation, but it's actually an alias of // sel, which isn't destructive. - // TODO: Enable once implemented. - // __ movprfx(z0, z18); - // __ mov(z0.VnS(), p6.Merging(), z18.VnS()); + __ movprfx(z0, z18); + __ mov(z0.VnS(), p6.Merging(), z18.VnS()); // The merging form can take movprfx, but the zeroing form cannot. - // TODO: Enable once implemented. - // __ movprfx(z12.VnS(), p2.Merging(), z11.VnS()); - // __ mov(z12.VnS(), p2.Zeroing(), -42); + __ movprfx(z12.VnS(), p2.Merging(), z11.VnS()); + __ mov(z12.VnS(), p2.Zeroing(), -42); __ movprfx(z13, z6); __ movprfx(z13, z2); @@ -684,7 +624,7 @@ TEST(movprfx_negative_lane_size) { { // We have to use the Assembler directly to generate movprfx, so we need // to manually reserve space for the code we're about to emit. 
- static const size_t kPairCount = 64; + static const size_t kPairCount = 63; CodeBufferCheckScope guard(&assm, kPairCount * 2 * kInstructionSize); __ movprfx(z0.VnH(), p2.Zeroing(), z17.VnH()); @@ -696,24 +636,20 @@ TEST(movprfx_negative_lane_size) { __ movprfx(z25.VnS(), p4.Zeroing(), z26.VnS()); __ and_(z25.VnB(), p4.Merging(), z25.VnB(), z27.VnB()); - // TODO: Enable once implemented. - // __ movprfx(z26.VnD(), p5.Merging(), z23.VnD()); - // __ asr(z26.VnB(), p5.Merging(), z26.VnB(), 3); + __ movprfx(z26.VnD(), p5.Merging(), z23.VnD()); + __ asr(z26.VnB(), p5.Merging(), z26.VnB(), 3); __ movprfx(z25.VnS(), p7.Zeroing(), z14.VnS()); __ asr(z25.VnH(), p7.Merging(), z25.VnH(), z14.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z12.VnS(), p7.Zeroing(), z23.VnS()); - // __ asr(z12.VnH(), p7.Merging(), z12.VnH(), z23.VnD()); + __ movprfx(z12.VnS(), p7.Zeroing(), z23.VnS()); + __ asr(z12.VnH(), p7.Merging(), z12.VnH(), z23.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z3.VnH(), p4.Zeroing(), z18.VnH()); - // __ asr(z3.VnD(), p4.Merging(), z3.VnD(), z15.VnD()); + __ movprfx(z3.VnH(), p4.Zeroing(), z18.VnH()); + __ asr(z3.VnD(), p4.Merging(), z3.VnD(), z15.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z29.VnH(), p4.Merging(), z31.VnH()); - // __ asrd(z29.VnB(), p4.Merging(), z29.VnB(), 3); + __ movprfx(z29.VnH(), p4.Merging(), z31.VnH()); + __ asrd(z29.VnB(), p4.Merging(), z29.VnB(), 3); __ movprfx(z31.VnH(), p5.Zeroing(), z14.VnH()); __ asrr(z31.VnB(), p5.Merging(), z31.VnB(), z5.VnB()); @@ -736,49 +672,41 @@ TEST(movprfx_negative_lane_size) { __ movprfx(z29.VnS(), p0.Merging(), z7.VnS()); __ cpy(z29.VnD(), p0.Merging(), -42); - // TODO: Enable once implemented. - // __ movprfx(z13.VnB(), p2.Merging(), z31.VnB()); - // __ cpy(z13.VnS(), p2.Merging(), w13); + __ movprfx(z13.VnB(), p2.Merging(), z31.VnB()); + __ cpy(z13.VnS(), p2.Merging(), w13); - // TODO: Enable once implemented. - // __ movprfx(z0.VnS(), p3.Merging(), z15.VnS()); - // __ cpy(z0.VnH(), p3.Merging(), h0); + __ movprfx(z0.VnS(), p3.Merging(), z15.VnS()); + __ cpy(z0.VnH(), p3.Merging(), h0); __ movprfx(z2.VnD(), p6.Zeroing(), z26.VnD()); __ eor(z2.VnB(), p6.Merging(), z2.VnB(), z26.VnB()); - // TODO: Enable once implemented. - // __ movprfx(z7.VnS(), p7.Zeroing(), z30.VnS()); - // __ lsl(z7.VnD(), p7.Merging(), z7.VnD(), 3); + __ movprfx(z7.VnS(), p7.Zeroing(), z30.VnS()); + __ lsl(z7.VnD(), p7.Merging(), z7.VnD(), 3); __ movprfx(z11.VnH(), p3.Merging(), z23.VnH()); __ lsl(z11.VnB(), p3.Merging(), z11.VnB(), z21.VnB()); - // TODO: Enable once implemented. - // __ movprfx(z31.VnS(), p7.Zeroing(), z21.VnS()); - // __ lsl(z31.VnH(), p7.Merging(), z31.VnH(), z21.VnD()); + __ movprfx(z31.VnS(), p7.Zeroing(), z21.VnS()); + __ lsl(z31.VnH(), p7.Merging(), z31.VnH(), z21.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z26.VnH(), p0.Merging(), z0.VnH()); - // __ lsl(z26.VnD(), p0.Merging(), z26.VnD(), z24.VnD()); + __ movprfx(z26.VnH(), p0.Merging(), z0.VnH()); + __ lsl(z26.VnD(), p0.Merging(), z26.VnD(), z24.VnD()); __ movprfx(z1.VnS(), p2.Zeroing(), z6.VnS()); __ lslr(z1.VnB(), p2.Merging(), z1.VnB(), z6.VnB()); - // TODO: Enable once implemented. - // __ movprfx(z4.VnD(), p4.Zeroing(), z6.VnD()); - // __ lsr(z4.VnH(), p4.Merging(), z4.VnH(), 3); + __ movprfx(z4.VnD(), p4.Zeroing(), z6.VnD()); + __ lsr(z4.VnH(), p4.Merging(), z4.VnH(), 3); __ movprfx(z27.VnH(), p0.Zeroing(), z29.VnH()); __ lsr(z27.VnS(), p0.Merging(), z27.VnS(), z29.VnS()); - // TODO: Enable once implemented. 
- // __ movprfx(z5.VnD(), p2.Zeroing(), z16.VnD()); - // __ lsr(z5.VnH(), p2.Merging(), z5.VnH(), z2.VnD()); + __ movprfx(z5.VnD(), p2.Zeroing(), z16.VnD()); + __ lsr(z5.VnH(), p2.Merging(), z5.VnH(), z2.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z27.VnB(), p4.Zeroing(), z5.VnB()); - // __ lsr(z27.VnD(), p4.Merging(), z27.VnD(), z5.VnD()); + __ movprfx(z27.VnB(), p4.Zeroing(), z5.VnB()); + __ lsr(z27.VnD(), p4.Merging(), z27.VnD(), z5.VnD()); __ movprfx(z27.VnS(), p3.Merging(), z13.VnS()); __ lsrr(z27.VnD(), p3.Merging(), z27.VnD(), z13.VnD()); @@ -793,17 +721,14 @@ TEST(movprfx_negative_lane_size) { __ mls(z28.VnS(), p2.Merging(), z3.VnS(), z22.VnS()); // Aliases of cpy. - // TODO: Enable once implemented. - // __ movprfx(z18.VnH(), p6.Zeroing(), z25.VnH()); - // __ mov(z18.VnD(), p6.Merging(), -42); + __ movprfx(z18.VnH(), p6.Zeroing(), z25.VnH()); + __ mov(z18.VnD(), p6.Merging(), -42); - // TODO: Enable once implemented. - // __ movprfx(z22.VnD(), p2.Zeroing(), z6.VnD()); - // __ mov(z22.VnS(), p2.Merging(), w22); + __ movprfx(z22.VnD(), p2.Zeroing(), z6.VnD()); + __ mov(z22.VnS(), p2.Merging(), w22); - // TODO: Enable once implemented. - // __ movprfx(z3.VnH(), p0.Zeroing(), z13.VnH()); - // __ mov(z3.VnB(), p0.Merging(), b0); + __ movprfx(z3.VnH(), p0.Zeroing(), z13.VnH()); + __ mov(z3.VnB(), p0.Merging(), b0); __ movprfx(z31.VnS(), p7.Zeroing(), z12.VnS()); __ msb(z31.VnH(), p7.Merging(), z14.VnH(), z12.VnH()); @@ -820,32 +745,26 @@ TEST(movprfx_negative_lane_size) { __ movprfx(z9.VnH(), p3.Zeroing(), z23.VnH()); __ orr(z9.VnS(), p3.Merging(), z9.VnS(), z13.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z25.VnD(), p2.Zeroing(), z21.VnD()); - // __ rbit(z25.VnS(), p2.Merging(), z21.VnS()); + __ movprfx(z25.VnD(), p2.Zeroing(), z21.VnD()); + __ rbit(z25.VnS(), p2.Merging(), z21.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z26.VnH(), p3.Merging(), z13.VnH()); - // __ revb(z26.VnD(), p3.Merging(), z13.VnD()); + __ movprfx(z26.VnH(), p3.Merging(), z13.VnH()); + __ revb(z26.VnD(), p3.Merging(), z13.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z8.VnH(), p5.Merging(), z20.VnH()); - // __ revh(z8.VnS(), p5.Merging(), z0.VnS()); + __ movprfx(z8.VnH(), p5.Merging(), z20.VnH()); + __ revh(z8.VnS(), p5.Merging(), z0.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z22.VnH(), p6.Merging(), z15.VnH()); - // __ revw(z22.VnD(), p6.Merging(), z10.VnD()); + __ movprfx(z22.VnH(), p6.Merging(), z15.VnH()); + __ revw(z22.VnD(), p6.Merging(), z10.VnD()); __ movprfx(z1.VnD(), p3.Merging(), z15.VnD()); __ sabd(z1.VnB(), p3.Merging(), z1.VnB(), z15.VnB()); - // TODO: Enable once implemented. - // __ movprfx(z25.VnD(), p1.Zeroing(), z30.VnD()); - // __ sdiv(z25.VnS(), p1.Merging(), z25.VnS(), z30.VnS()); + __ movprfx(z25.VnD(), p1.Zeroing(), z30.VnD()); + __ sdiv(z25.VnS(), p1.Merging(), z25.VnS(), z30.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z19.VnS(), p3.Zeroing(), z11.VnS()); - // __ sdivr(z19.VnD(), p3.Merging(), z19.VnD(), z24.VnD()); + __ movprfx(z19.VnS(), p3.Zeroing(), z11.VnS()); + __ sdivr(z19.VnD(), p3.Merging(), z19.VnD(), z24.VnD()); __ movprfx(z12.VnH(), p2.Merging(), z2.VnH()); __ smax(z12.VnS(), p2.Merging(), z12.VnS(), z24.VnS()); @@ -856,10 +775,6 @@ TEST(movprfx_negative_lane_size) { __ movprfx(z13.VnS(), p5.Merging(), z22.VnS()); __ smulh(z13.VnB(), p5.Merging(), z13.VnB(), z27.VnB()); - // TODO: Enable once implemented. 
- // __ movprfx(z9.VnB(), p0.Merging(), z19.VnB()); - // __ splice(z9.VnH(), p0.Merging(), z9.VnH(), z19.VnH()); - __ movprfx(z11.VnH(), p5.Zeroing(), z25.VnH()); __ sub(z11.VnB(), p5.Merging(), z11.VnB(), z7.VnB()); @@ -869,24 +784,20 @@ TEST(movprfx_negative_lane_size) { __ movprfx(z26.VnH(), p5.Merging(), z1.VnH()); __ sxtb(z26.VnS(), p5.Merging(), z17.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z11.VnB(), p7.Zeroing(), z26.VnB()); - // __ sxth(z11.VnS(), p7.Merging(), z26.VnS()); + __ movprfx(z11.VnB(), p7.Zeroing(), z26.VnB()); + __ sxth(z11.VnS(), p7.Merging(), z26.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z1.VnS(), p2.Merging(), z21.VnS()); - // __ sxtw(z1.VnD(), p2.Merging(), z21.VnD()); + __ movprfx(z1.VnS(), p2.Merging(), z21.VnS()); + __ sxtw(z1.VnD(), p2.Merging(), z21.VnD()); __ movprfx(z4.VnS(), p6.Zeroing(), z6.VnS()); __ uabd(z4.VnH(), p6.Merging(), z4.VnH(), z6.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z26.VnB(), p2.Zeroing(), z11.VnB()); - // __ udiv(z26.VnD(), p2.Merging(), z26.VnD(), z11.VnD()); + __ movprfx(z26.VnB(), p2.Zeroing(), z11.VnB()); + __ udiv(z26.VnD(), p2.Merging(), z26.VnD(), z11.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z19.VnB(), p5.Merging(), z6.VnB()); - // __ udivr(z19.VnS(), p5.Merging(), z19.VnS(), z9.VnS()); + __ movprfx(z19.VnB(), p5.Merging(), z6.VnB()); + __ udivr(z19.VnS(), p5.Merging(), z19.VnS(), z9.VnS()); __ movprfx(z16.VnB(), p4.Merging(), z6.VnB()); __ umax(z16.VnH(), p4.Merging(), z16.VnH(), z6.VnH()); @@ -900,13 +811,11 @@ TEST(movprfx_negative_lane_size) { __ movprfx(z29.VnB(), p4.Merging(), z2.VnB()); __ uxtb(z29.VnS(), p4.Merging(), z31.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z27.VnH(), p5.Merging(), z21.VnH()); - // __ uxth(z27.VnD(), p5.Merging(), z1.VnD()); + __ movprfx(z27.VnH(), p5.Merging(), z21.VnH()); + __ uxth(z27.VnD(), p5.Merging(), z1.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z29.VnB(), p2.Merging(), z7.VnB()); - // __ uxtw(z29.VnD(), p2.Merging(), z7.VnD()); + __ movprfx(z29.VnB(), p2.Merging(), z7.VnB()); + __ uxtw(z29.VnD(), p2.Merging(), z7.VnD()); } assm.FinalizeCode(); @@ -924,76 +833,59 @@ TEST(movprfx_negative_lane_size_fp) { static const size_t kPairCount = 64; CodeBufferCheckScope guard(&assm, kPairCount * 2 * kInstructionSize); - // TODO: Enable once implemented. - // __ movprfx(z29.VnD(), p5.Zeroing(), z8.VnD()); - // __ fabd(z29.VnS(), p5.Merging(), z29.VnS(), z26.VnS()); + __ movprfx(z29.VnD(), p5.Zeroing(), z8.VnD()); + __ fabd(z29.VnS(), p5.Merging(), z29.VnS(), z26.VnS()); __ movprfx(z9.VnB(), p0.Zeroing(), z1.VnB()); __ fabs(z9.VnS(), p0.Merging(), z15.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z24.VnD(), p0.Zeroing(), z8.VnD()); - // __ fadd(z24.VnH(), p0.Merging(), z24.VnH(), 0.5); + __ movprfx(z24.VnD(), p0.Zeroing(), z8.VnD()); + __ fadd(z24.VnH(), p0.Merging(), z24.VnH(), 0.5); - // TODO: Enable once implemented. - // __ movprfx(z24.VnB(), p1.Zeroing(), z27.VnB()); - // __ fadd(z24.VnH(), p1.Merging(), z24.VnH(), z27.VnH()); + __ movprfx(z24.VnB(), p1.Zeroing(), z27.VnB()); + __ fadd(z24.VnH(), p1.Merging(), z24.VnH(), z27.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z14.VnH(), p7.Merging(), z12.VnH()); - // __ fcadd(z14.VnD(), p7.Merging(), z14.VnD(), z12.VnD(), 90); + __ movprfx(z14.VnH(), p7.Merging(), z12.VnH()); + __ fcadd(z14.VnD(), p7.Merging(), z14.VnD(), z12.VnD(), 90); - // TODO: Enable once implemented. 
- // __ movprfx(z10.VnB(), p6.Merging(), z11.VnB()); - // __ fcpy(z10.VnH(), p6.Merging(), 1.25); + __ movprfx(z10.VnB(), p6.Merging(), z11.VnB()); + __ fcpy(z10.VnH(), p6.Merging(), 1.25); - // TODO: Enable once implemented. - // __ movprfx(z12.VnB(), p6.Merging(), z18.VnB()); - // __ fcvt(z12.VnD(), p6.Merging(), z18.VnH()); + __ movprfx(z12.VnB(), p6.Merging(), z18.VnB()); + __ fcvt(z12.VnD(), p6.Merging(), z18.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z18.VnH(), p7.Zeroing(), z2.VnH()); - // __ fcvt(z18.VnD(), p7.Merging(), z0.VnS()); + __ movprfx(z18.VnH(), p7.Zeroing(), z2.VnH()); + __ fcvt(z18.VnD(), p7.Merging(), z0.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z3.VnH(), p5.Merging(), z14.VnH()); - // __ fcvt(z3.VnS(), p5.Merging(), z21.VnD()); + __ movprfx(z3.VnH(), p5.Merging(), z14.VnH()); + __ fcvt(z3.VnS(), p5.Merging(), z21.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z15.VnH(), p1.Zeroing(), z12.VnH()); - // __ fcvt(z15.VnH(), p1.Merging(), z12.VnD()); + __ movprfx(z15.VnH(), p1.Zeroing(), z12.VnH()); + __ fcvt(z15.VnH(), p1.Merging(), z12.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z3.VnH(), p2.Merging(), z22.VnH()); - // __ fcvtzs(z3.VnD(), p2.Merging(), z7.VnH()); + __ movprfx(z3.VnH(), p2.Merging(), z22.VnH()); + __ fcvtzs(z3.VnD(), p2.Merging(), z7.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z17.VnS(), p3.Merging(), z14.VnS()); - // __ fcvtzs(z17.VnD(), p3.Merging(), z14.VnD()); + __ movprfx(z17.VnS(), p3.Merging(), z14.VnS()); + __ fcvtzs(z17.VnD(), p3.Merging(), z14.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z2.VnH(), p1.Zeroing(), z16.VnH()); - // __ fcvtzs(z2.VnS(), p1.Merging(), z31.VnH()); + __ movprfx(z2.VnH(), p1.Zeroing(), z16.VnH()); + __ fcvtzs(z2.VnS(), p1.Merging(), z31.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z13.VnB(), p2.Merging(), z9.VnB()); - // __ fcvtzs(z13.VnS(), p2.Merging(), z23.VnD()); + __ movprfx(z13.VnB(), p2.Merging(), z9.VnB()); + __ fcvtzs(z13.VnS(), p2.Merging(), z23.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z19.VnB(), p1.Merging(), z4.VnB()); - // __ fcvtzu(z19.VnD(), p1.Merging(), z14.VnH()); + __ movprfx(z19.VnB(), p1.Merging(), z4.VnB()); + __ fcvtzu(z19.VnD(), p1.Merging(), z14.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z29.VnS(), p2.Merging(), z19.VnS()); - // __ fcvtzu(z29.VnD(), p2.Merging(), z19.VnD()); + __ movprfx(z29.VnS(), p2.Merging(), z19.VnS()); + __ fcvtzu(z29.VnD(), p2.Merging(), z19.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z21.VnS(), p4.Zeroing(), z17.VnS()); - // __ fcvtzu(z21.VnD(), p4.Merging(), z17.VnS()); + __ movprfx(z21.VnS(), p4.Zeroing(), z17.VnS()); + __ fcvtzu(z21.VnD(), p4.Merging(), z17.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z19.VnH(), p4.Zeroing(), z30.VnH()); - // __ fcvtzu(z19.VnS(), p4.Merging(), z16.VnD()); + __ movprfx(z19.VnH(), p4.Zeroing(), z30.VnH()); + __ fcvtzu(z19.VnS(), p4.Merging(), z16.VnD()); __ movprfx(z10.VnS(), p7.Zeroing(), z27.VnS()); __ fdiv(z10.VnH(), p7.Merging(), z10.VnH(), z27.VnH()); @@ -1004,37 +896,29 @@ TEST(movprfx_negative_lane_size_fp) { __ movprfx(z22.VnB(), p0.Merging(), z27.VnB()); __ fmad(z22.VnH(), p0.Merging(), z27.VnH(), z15.VnH()); - // TODO: Enable once implemented. 
- // __ movprfx(z14.VnD(), p1.Zeroing(), z11.VnD()); - // __ fmax(z14.VnS(), p1.Merging(), z14.VnS(), 0.0); + __ movprfx(z14.VnD(), p1.Zeroing(), z11.VnD()); + __ fmax(z14.VnS(), p1.Merging(), z14.VnS(), 0.0); - // TODO: Enable once implemented. - // __ movprfx(z27.VnB(), p5.Merging(), z14.VnB()); - // __ fmax(z27.VnD(), p5.Merging(), z27.VnD(), z14.VnD()); + __ movprfx(z27.VnB(), p5.Merging(), z14.VnB()); + __ fmax(z27.VnD(), p5.Merging(), z27.VnD(), z14.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z31.VnH(), p7.Merging(), z24.VnH()); - // __ fmaxnm(z31.VnD(), p7.Merging(), z31.VnD(), 0.0); + __ movprfx(z31.VnH(), p7.Merging(), z24.VnH()); + __ fmaxnm(z31.VnD(), p7.Merging(), z31.VnD(), 0.0); - // TODO: Enable once implemented. - // __ movprfx(z11.VnD(), p7.Zeroing(), z25.VnD()); - // __ fmaxnm(z11.VnS(), p7.Merging(), z11.VnS(), z28.VnS()); + __ movprfx(z11.VnD(), p7.Zeroing(), z25.VnD()); + __ fmaxnm(z11.VnS(), p7.Merging(), z11.VnS(), z28.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z31.VnD(), p6.Merging(), z19.VnD()); - // __ fmin(z31.VnH(), p6.Merging(), z31.VnH(), 0.0); + __ movprfx(z31.VnD(), p6.Merging(), z19.VnD()); + __ fmin(z31.VnH(), p6.Merging(), z31.VnH(), 0.0); - // TODO: Enable once implemented. - // __ movprfx(z20.VnS(), p3.Zeroing(), z15.VnS()); - // __ fmin(z20.VnH(), p3.Merging(), z20.VnH(), z8.VnH()); + __ movprfx(z20.VnS(), p3.Zeroing(), z15.VnS()); + __ fmin(z20.VnH(), p3.Merging(), z20.VnH(), z8.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z6.VnS(), p0.Merging(), z30.VnS()); - // __ fminnm(z6.VnH(), p0.Merging(), z6.VnH(), 0.0); + __ movprfx(z6.VnS(), p0.Merging(), z30.VnS()); + __ fminnm(z6.VnH(), p0.Merging(), z6.VnH(), 0.0); - // TODO: Enable once implemented. - // __ movprfx(z1.VnH(), p1.Zeroing(), z14.VnH()); - // __ fminnm(z1.VnS(), p1.Merging(), z1.VnS(), z14.VnS()); + __ movprfx(z1.VnH(), p1.Zeroing(), z14.VnH()); + __ fminnm(z1.VnS(), p1.Merging(), z1.VnS(), z14.VnS()); __ movprfx(z13.VnB(), p3.Zeroing(), z21.VnB()); __ fmla(z13.VnD(), p3.Merging(), z12.VnD(), z21.VnD()); @@ -1043,27 +927,23 @@ TEST(movprfx_negative_lane_size_fp) { __ fmls(z15.VnH(), p1.Merging(), z28.VnH(), z20.VnH()); // TODO: Enable once implemented. - // __ movprfx(z18.VnB(), p4.Merging(), z8.VnB()); - // __ fmov(z18.VnD(), p4.Merging(), 0); + // __ movprfx(z19.VnD(), p3.Zeroing(), z31.VnD()); + // __ fmov(z19.VnH(), p3.Merging(), 0.0); - // TODO: Enable once implemented. - // __ movprfx(z16.VnS(), p7.Merging(), z30.VnS()); - // __ fmov(z16.VnH(), p7.Merging(), 2.5); + __ movprfx(z16.VnS(), p7.Merging(), z30.VnS()); + __ fmov(z16.VnH(), p7.Merging(), 2.5); __ movprfx(z21.VnB(), p1.Merging(), z28.VnB()); __ fmsb(z21.VnH(), p1.Merging(), z30.VnH(), z28.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z21.VnS(), p1.Zeroing(), z19.VnS()); - // __ fmul(z21.VnH(), p1.Merging(), z21.VnH(), 2.0); + __ movprfx(z21.VnS(), p1.Zeroing(), z19.VnS()); + __ fmul(z21.VnH(), p1.Merging(), z21.VnH(), 2.0); - // TODO: Enable once implemented. - // __ movprfx(z28.VnB(), p7.Zeroing(), z8.VnB()); - // __ fmul(z28.VnS(), p7.Merging(), z28.VnS(), z26.VnS()); + __ movprfx(z28.VnB(), p7.Zeroing(), z8.VnB()); + __ fmul(z28.VnS(), p7.Merging(), z28.VnS(), z26.VnS()); - // TODO: Enable once implemented. 
- // __ movprfx(z2.VnB(), p4.Merging(), z31.VnB()); - // __ fmulx(z2.VnH(), p4.Merging(), z2.VnH(), z31.VnH()); + __ movprfx(z2.VnB(), p4.Merging(), z31.VnB()); + __ fmulx(z2.VnH(), p4.Merging(), z2.VnH(), z31.VnH()); __ movprfx(z6.VnB(), p2.Zeroing(), z0.VnB()); __ fneg(z6.VnS(), p2.Merging(), z28.VnS()); @@ -1081,93 +961,71 @@ TEST(movprfx_negative_lane_size_fp) { __ fnmsb(z4.VnS(), p0.Merging(), z30.VnS(), z3.VnS()); // Note that frecpe and frecps _cannot_ take movprfx. - // TODO: Enable once implemented. - // __ movprfx(z9.VnH(), p0.Zeroing(), z21.VnH()); - // __ frecpx(z9.VnS(), p0.Merging(), z14.VnS()); + __ movprfx(z9.VnH(), p0.Zeroing(), z21.VnH()); + __ frecpx(z9.VnS(), p0.Merging(), z14.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z6.VnH(), p2.Zeroing(), z28.VnH()); - // __ frinta(z6.VnD(), p2.Merging(), z28.VnD()); + __ movprfx(z6.VnH(), p2.Zeroing(), z28.VnH()); + __ frinta(z6.VnD(), p2.Merging(), z28.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z12.VnS(), p4.Zeroing(), z7.VnS()); - // __ frinti(z12.VnH(), p4.Merging(), z7.VnH()); + __ movprfx(z12.VnS(), p4.Zeroing(), z7.VnS()); + __ frinti(z12.VnH(), p4.Merging(), z7.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z6.VnB(), p5.Merging(), z20.VnB()); - // __ frintm(z6.VnD(), p5.Merging(), z20.VnD()); + __ movprfx(z6.VnB(), p5.Merging(), z20.VnB()); + __ frintm(z6.VnD(), p5.Merging(), z20.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z7.VnB(), p6.Merging(), z19.VnB()); - // __ frintn(z7.VnH(), p6.Merging(), z11.VnH()); + __ movprfx(z7.VnB(), p6.Merging(), z19.VnB()); + __ frintn(z7.VnH(), p6.Merging(), z11.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z12.VnD(), p2.Merging(), z31.VnD()); - // __ frintp(z12.VnS(), p2.Merging(), z31.VnS()); + __ movprfx(z12.VnD(), p2.Merging(), z31.VnD()); + __ frintp(z12.VnS(), p2.Merging(), z31.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z1.VnS(), p5.Merging(), z10.VnS()); - // __ frintx(z1.VnD(), p5.Merging(), z0.VnD()); + __ movprfx(z1.VnS(), p5.Merging(), z10.VnS()); + __ frintx(z1.VnD(), p5.Merging(), z0.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z6.VnH(), p0.Merging(), z12.VnH()); - // __ frintz(z6.VnS(), p0.Merging(), z7.VnS()); + __ movprfx(z6.VnH(), p0.Merging(), z12.VnH()); + __ frintz(z6.VnS(), p0.Merging(), z7.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z8.VnH(), p2.Merging(), z6.VnH()); - // __ fscale(z8.VnD(), p2.Merging(), z8.VnD(), z6.VnD()); + __ movprfx(z8.VnH(), p2.Merging(), z6.VnH()); + __ fscale(z8.VnD(), p2.Merging(), z8.VnD(), z6.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z20.VnH(), p2.Zeroing(), z2.VnH()); - // __ fsqrt(z20.VnD(), p2.Merging(), z15.VnD()); + __ movprfx(z20.VnH(), p2.Zeroing(), z2.VnH()); + __ fsqrt(z20.VnD(), p2.Merging(), z15.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z28.VnS(), p6.Zeroing(), z19.VnS()); - // __ fsub(z28.VnD(), p6.Merging(), z28.VnD(), 1.0); + __ movprfx(z28.VnS(), p6.Zeroing(), z19.VnS()); + __ fsub(z28.VnD(), p6.Merging(), z28.VnD(), 1.0); - // TODO: Enable once implemented. - // __ movprfx(z6.VnB(), p0.Zeroing(), z12.VnB()); - // __ fsub(z6.VnD(), p0.Merging(), z6.VnD(), z20.VnD()); + __ movprfx(z6.VnB(), p0.Zeroing(), z12.VnB()); + __ fsub(z6.VnD(), p0.Merging(), z6.VnD(), z20.VnD()); - // TODO: Enable once implemented. 
- // __ movprfx(z6.VnS(), p7.Zeroing(), z11.VnS()); - // __ fsubr(z6.VnH(), p7.Merging(), z6.VnH(), 1.0); + __ movprfx(z6.VnS(), p7.Zeroing(), z11.VnS()); + __ fsubr(z6.VnH(), p7.Merging(), z6.VnH(), 1.0); - // TODO: Enable once implemented. - // __ movprfx(z28.VnB(), p3.Merging(), z10.VnB()); - // __ fsubr(z28.VnS(), p3.Merging(), z28.VnS(), z9.VnS()); + __ movprfx(z28.VnB(), p3.Merging(), z10.VnB()); + __ fsubr(z28.VnS(), p3.Merging(), z28.VnS(), z9.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z19.VnH(), p4.Merging(), z9.VnH()); - // __ scvtf(z19.VnD(), p4.Merging(), z3.VnS());; + __ movprfx(z22.VnB(), p3.Zeroing(), z14.VnB()); + __ scvtf(z22.VnD(), p3.Merging(), z24.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z4.VnS(), p4.Zeroing(), z16.VnS()); - // __ scvtf(z4.VnD(), p4.Merging(), z31.VnD());; + __ movprfx(z20.VnS(), p2.Merging(), z9.VnS()); + __ scvtf(z20.VnH(), p2.Merging(), z9.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z7.VnB(), p4.Zeroing(), z3.VnB()); - // __ scvtf(z7.VnH(), p4.Merging(), z31.VnS());; + __ movprfx(z19.VnH(), p1.Merging(), z21.VnH()); + __ scvtf(z19.VnS(), p1.Merging(), z6.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z11.VnS(), p5.Merging(), z28.VnS()); - // __ scvtf(z11.VnH(), p5.Merging(), z19.VnD());; + __ movprfx(z31.VnS(), p3.Merging(), z22.VnS()); + __ scvtf(z31.VnH(), p3.Merging(), z22.VnD()); - // TODO: Enable once implemented. - // __ movprfx(z26.VnS(), p7.Zeroing(), z13.VnS()); - // __ ucvtf(z26.VnD(), p7.Merging(), z14.VnS());; + __ movprfx(z8.VnS(), p3.Merging(), z3.VnS()); + __ ucvtf(z8.VnD(), p3.Merging(), z1.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z4.VnS(), p4.Zeroing(), z17.VnS()); - // __ ucvtf(z4.VnH(), p4.Merging(), z19.VnH());; + __ movprfx(z0.VnB(), p0.Merging(), z23.VnB()); + __ ucvtf(z0.VnH(), p0.Merging(), z12.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z26.VnD(), p7.Merging(), z9.VnD()); - // __ ucvtf(z26.VnH(), p7.Merging(), z9.VnS());; + __ movprfx(z8.VnH(), p3.Zeroing(), z4.VnH()); + __ ucvtf(z8.VnH(), p3.Merging(), z4.VnS()); - // TODO: Enable once implemented. - // __ movprfx(z18.VnH(), p4.Zeroing(), z4.VnH()); - // __ ucvtf(z18.VnH(), p4.Merging(), z23.VnD());; + __ movprfx(z20.VnH(), p2.Zeroing(), z10.VnH()); + __ ucvtf(z20.VnH(), p2.Merging(), z11.VnD()); } assm.FinalizeCode(); @@ -1182,69 +1040,56 @@ TEST(movprfx_negative_predication) { { // We have to use the Assembler directly to generate movprfx, so we need // to manually reserve space for the code we're about to emit. - static const size_t kPairCount = 51; + static const size_t kPairCount = 54; CodeBufferCheckScope guard(&assm, kPairCount * 2 * kInstructionSize); __ movprfx(z27.VnS(), p1.Zeroing(), z12.VnS()); __ add(z27.VnS(), z27.VnS(), 42); - // TODO: Enable once implemented. - // __ movprfx(z31.VnS(), p6.Zeroing(), z1.VnS()); - // __ and_(z31.VnS(), z31.VnS(), 4); + __ movprfx(z31.VnS(), p6.Zeroing(), z1.VnS()); + __ and_(z31.VnS(), z31.VnS(), 4); - // TODO: Enable once implemented. - // __ movprfx(z27.VnS(), p5.Merging(), z24.VnS()); - // __ bic(z27.VnS(), z27.VnS(), 4); + __ movprfx(z27.VnS(), p5.Merging(), z24.VnS()); + __ bic(z27.VnS(), z27.VnS(), 4); - // TODO: Enable once implemented. - // __ movprfx(z6.VnH(), p7.Merging(), z30.VnH()); - // __ clasta(z6.VnH(), p7, z6.VnH(), z14.VnH()); + __ movprfx(z6.VnH(), p7.Merging(), z30.VnH()); + __ clasta(z6.VnH(), p7, z6.VnH(), z14.VnH()); - // TODO: Enable once implemented. 
- // __ movprfx(z11.VnB(), p6.Merging(), z5.VnB()); - // __ clastb(z11.VnB(), p6, z11.VnB(), z29.VnB()); + __ movprfx(z11.VnB(), p6.Merging(), z5.VnB()); + __ clastb(z11.VnB(), p6, z11.VnB(), z29.VnB()); - // TODO: Enable once implemented. - // __ movprfx(z27.VnD(), p4.Zeroing(), z10.VnD()); - // __ decd(z27.VnD(), MUL3); + __ movprfx(z5.VnD(), p0.Merging(), z1.VnD()); + __ decd(z5.VnD(), SVE_MUL3); - // TODO: Enable once implemented. - // __ movprfx(z2.VnH(), p5.Zeroing(), z5.VnH()); - // __ dech(z2.VnH(), VL2); + __ movprfx(z11.VnH(), p7.Zeroing(), z28.VnH()); + __ dech(z11.VnH(), SVE_VL2); __ movprfx(z14.VnS(), p5.Zeroing(), z6.VnS()); __ decp(z14.VnS(), p5); - // TODO: Enable once implemented. - // __ movprfx(z31.VnS(), p2.Zeroing(), z7.VnS()); - // __ decw(z31.VnS(), ALL); + __ movprfx(z6.VnS(), p5.Merging(), z10.VnS()); + __ decw(z6.VnS(), SVE_ALL); - // TODO: Enable once implemented. - // __ movprfx(z26.VnH(), p0.Zeroing(), z14.VnH()); - // __ eon_(z26.VnH(), z26.VnH(), 4); + __ movprfx(z27.VnH(), p7.Zeroing(), z9.VnH()); + __ eon(z27.VnH(), z27.VnH(), 4); - // TODO: Enable once implemented. - // __ movprfx(z10.VnS(), p4.Zeroing(), z5.VnS()); - // __ eor_(z10.VnS(), z10.VnS(), 4); + __ movprfx(z3.VnS(), p3.Zeroing(), z2.VnS()); + __ eor(z3.VnS(), z3.VnS(), 4); - // TODO: Enable once implemented. - // __ movprfx(z30.VnB(), p2.Zeroing(), z25.VnB()); - // __ ext(z30.VnB(), z30.VnB(), z25.VnB(), 42); + __ movprfx(z30.VnB(), p2.Zeroing(), z25.VnB()); + __ ext(z30.VnB(), z30.VnB(), z25.VnB(), 42); - // TODO: Enable once implemented. - // __ movprfx(z22.VnD(), p7.Merging(), z19.VnD()); - // __ incd(z22.VnD(), MUL3); + __ movprfx(z22.VnD(), p0.Merging(), z0.VnD()); + __ incd(z22.VnD(), SVE_MUL3); - // TODO: Enable once implemented. - // __ movprfx(z8.VnH(), p5.Zeroing(), z30.VnH()); - // __ inch(z8.VnH(), VL2); + __ movprfx(z7.VnH(), p3.Merging(), z3.VnH()); + __ inch(z7.VnH(), SVE_VL2); __ movprfx(z9.VnD(), p1.Zeroing(), z28.VnD()); __ incp(z9.VnD(), p1); - // TODO: Enable once implemented. - // __ movprfx(z27.VnS(), p1.Merging(), z30.VnS()); - // __ incw(z27.VnS(), ALL); + __ movprfx(z30.VnS(), p3.Merging(), z4.VnS()); + __ incw(z30.VnS(), SVE_ALL); __ movprfx(z30.VnB(), p7.Zeroing(), z21.VnB()); __ insr(z30.VnB(), w30); @@ -1252,70 +1097,62 @@ TEST(movprfx_negative_predication) { __ movprfx(z2.VnB(), p4.Zeroing(), z26.VnB()); __ insr(z2.VnB(), b0); - // TODO: Enable once implemented. - // __ movprfx(z27.VnS(), p5.Zeroing(), z5.VnS()); - // __ mul(z27.VnS(), z27.VnS(), 42); + __ movprfx(z27.VnS(), p5.Zeroing(), z5.VnS()); + __ mul(z27.VnS(), z27.VnS(), 42); - // TODO: Enable once implemented. - // __ movprfx(z5.VnS(), p0.Merging(), z26.VnS()); - // __ orn(z5.VnS(), z5.VnS(), 4); + __ movprfx(z5.VnS(), p0.Merging(), z26.VnS()); + __ orn(z5.VnS(), z5.VnS(), 4); - // TODO: Enable once implemented. - // __ movprfx(z5.VnS(), p0.Merging(), z26.VnS()); - // __ orn(z5.VnS(), z5.VnS(), 4); + __ movprfx(z5.VnS(), p0.Merging(), z26.VnS()); + __ orn(z5.VnS(), z5.VnS(), 4); - __ movprfx(z28.VnD(), p0.Merging(), z14.VnD()); - __ sdot(z28.VnD(), z14.VnH(), z24.VnH()); + __ movprfx(z16.VnD(), p1.Merging(), z13.VnD()); + __ sdot(z16.VnD(), z11.VnH(), z7.VnH(), 1); - // TODO: Enable once implemented. - // __ movprfx(z13.VnD(), p3.Merging(), z1.VnD()); - // __ sdot(z13.VnD(), z23.VnS(), z1.VnS(), 1); + __ movprfx(z27.VnD(), p5.Merging(), z18.VnD()); + __ sdot(z27.VnD(), z18.VnH(), z0.VnH()); - // TODO: Enable once implemented. 
- // __ movprfx(z19.VnD(), p0.Zeroing(), z7.VnD()); - // __ smax(z19.VnD(), z19.VnD(), 42); + __ movprfx(z20.VnS(), p6.Merging(), z1.VnS()); + __ sdot(z20.VnS(), z10.VnB(), z1.VnB(), 1); - // TODO: Enable once implemented. - // __ movprfx(z15.VnD(), p1.Zeroing(), z7.VnD()); - // __ smin(z15.VnD(), z15.VnD(), 42); + __ movprfx(z19.VnD(), p0.Zeroing(), z7.VnD()); + __ smax(z19.VnD(), z19.VnD(), 42); - // TODO: Enable once implemented. - // __ movprfx(z5.VnB(), p6.Zeroing(), z4.VnB()); - // __ sqadd(z5.VnB(), z5.VnB(), 42); + __ movprfx(z15.VnD(), p1.Zeroing(), z7.VnD()); + __ smin(z15.VnD(), z15.VnD(), 42); - // TODO: Enable once implemented. - // __ movprfx(z19.VnD(), p7.Zeroing(), z4.VnD()); - // __ sqdecd(z19.VnD(), MUL3); + __ movprfx(z15.VnB(), p5.Merging(), z3.VnB()); + __ splice(z15.VnB(), p5, z15.VnB(), z3.VnB()); - // TODO: Enable once implemented. - // __ movprfx(z2.VnH(), p1.Zeroing(), z29.VnH()); - // __ sqdech(z2.VnH(), VL2); + __ movprfx(z5.VnB(), p6.Zeroing(), z4.VnB()); + __ sqadd(z5.VnB(), z5.VnB(), 42); + + __ movprfx(z16.VnD(), p0.Zeroing(), z18.VnD()); + __ sqdecd(z16.VnD(), SVE_MUL3); + + __ movprfx(z7.VnH(), p3.Merging(), z28.VnH()); + __ sqdech(z7.VnH(), SVE_VL2); __ movprfx(z7.VnS(), p2.Merging(), z13.VnS()); __ sqdecp(z7.VnS(), p2); - // TODO: Enable once implemented. - // __ movprfx(z0.VnS(), p2.Merging(), z2.VnS()); - // __ sqdecw(z0.VnS(), ALL); + __ movprfx(z22.VnS(), p7.Zeroing(), z20.VnS()); + __ sqdecw(z22.VnS(), SVE_ALL); - // TODO: Enable once implemented. - // __ movprfx(z30.VnD(), p5.Merging(), z11.VnD()); - // __ sqincd(z30.VnD(), MUL3); + __ movprfx(z26.VnD(), p1.Zeroing(), z0.VnD()); + __ sqincd(z26.VnD(), SVE_MUL3); - // TODO: Enable once implemented. - // __ movprfx(z11.VnH(), p3.Merging(), z23.VnH()); - // __ sqinch(z11.VnH(), VL2); + __ movprfx(z15.VnH(), p7.Zeroing(), z27.VnH()); + __ sqinch(z15.VnH(), SVE_VL2); __ movprfx(z4.VnD(), p7.Merging(), z13.VnD()); __ sqincp(z4.VnD(), p7); - // TODO: Enable once implemented. - // __ movprfx(z29.VnS(), p2.Zeroing(), z23.VnS()); - // __ sqincw(z29.VnS(), ALL); + __ movprfx(z29.VnS(), p6.Merging(), z14.VnS()); + __ sqincw(z29.VnS(), SVE_ALL); - // TODO: Enable once implemented. - // __ movprfx(z17.VnB(), p1.Merging(), z24.VnB()); - // __ sqsub(z17.VnB(), z17.VnB(), 42); + __ movprfx(z17.VnB(), p1.Merging(), z24.VnB()); + __ sqsub(z17.VnB(), z17.VnB(), 42); __ movprfx(z26.VnS(), p5.Zeroing(), z19.VnS()); __ sub(z26.VnS(), z26.VnS(), 42); @@ -1323,58 +1160,50 @@ TEST(movprfx_negative_predication) { __ movprfx(z15.VnD(), p1.Merging(), z3.VnD()); __ subr(z15.VnD(), z15.VnD(), 42); - __ movprfx(z4.VnD(), p4.Merging(), z0.VnD()); - __ udot(z4.VnD(), z0.VnH(), z12.VnH()); + __ movprfx(z4.VnD(), p2.Zeroing(), z14.VnD()); + __ udot(z4.VnD(), z15.VnH(), z7.VnH(), 1); - // TODO: Enable once implemented. - // __ movprfx(z30.VnS(), p3.Merging(), z14.VnS()); - // __ udot(z30.VnS(), z1.VnH(), z29.VnH(), 1); + __ movprfx(z29.VnD(), p4.Zeroing(), z28.VnD()); + __ udot(z29.VnD(), z2.VnH(), z17.VnH()); - // TODO: Enable once implemented. - // __ movprfx(z14.VnB(), p3.Merging(), z5.VnB()); - // __ umax(z14.VnB(), z14.VnB(), 42); + __ movprfx(z7.VnS(), p6.Merging(), z3.VnS()); + __ udot(z7.VnS(), z14.VnB(), z1.VnB(), 1); - // TODO: Enable once implemented. - // __ movprfx(z4.VnD(), p1.Zeroing(), z2.VnD()); - // __ umin(z4.VnD(), z4.VnD(), 42); + __ movprfx(z14.VnB(), p3.Merging(), z5.VnB()); + __ umax(z14.VnB(), z14.VnB(), 42); - // TODO: Enable once implemented. 
- // __ movprfx(z19.VnB(), p0.Zeroing(), z27.VnB()); - // __ uqadd(z19.VnB(), z19.VnB(), 42); + __ movprfx(z4.VnD(), p1.Zeroing(), z2.VnD()); + __ umin(z4.VnD(), z4.VnD(), 42); - // TODO: Enable once implemented. - // __ movprfx(z21.VnD(), p5.Zeroing(), z15.VnD()); - // __ uqdecd(z21.VnD(), MUL3); + __ movprfx(z19.VnB(), p0.Zeroing(), z27.VnB()); + __ uqadd(z19.VnB(), z19.VnB(), 42); - // TODO: Enable once implemented. - // __ movprfx(z30.VnH(), p1.Zeroing(), z20.VnH()); - // __ uqdech(z30.VnH(), VL2); + __ movprfx(z24.VnD(), p7.Zeroing(), z11.VnD()); + __ uqdecd(z24.VnD(), SVE_MUL3); + + __ movprfx(z24.VnH(), p4.Zeroing(), z18.VnH()); + __ uqdech(z24.VnH(), SVE_VL2); __ movprfx(z31.VnS(), p5.Zeroing(), z2.VnS()); __ uqdecp(z31.VnS(), p5); - // TODO: Enable once implemented. - // __ movprfx(z30.VnS(), p7.Merging(), z24.VnS()); - // __ uqdecw(z30.VnS(), ALL); + __ movprfx(z19.VnS(), p6.Merging(), z21.VnS()); + __ uqdecw(z19.VnS(), SVE_ALL); - // TODO: Enable once implemented. - // __ movprfx(z23.VnD(), p7.Zeroing(), z11.VnD()); - // __ uqincd(z23.VnD(), MUL3); + __ movprfx(z27.VnD(), p0.Merging(), z21.VnD()); + __ uqincd(z27.VnD(), SVE_MUL3); - // TODO: Enable once implemented. - // __ movprfx(z17.VnH(), p4.Merging(), z20.VnH()); - // __ uqinch(z17.VnH(), VL2); + __ movprfx(z13.VnH(), p4.Zeroing(), z12.VnH()); + __ uqinch(z13.VnH(), SVE_VL2); __ movprfx(z0.VnD(), p4.Zeroing(), z1.VnD()); __ uqincp(z0.VnD(), p4); - // TODO: Enable once implemented. - // __ movprfx(z24.VnS(), p4.Zeroing(), z1.VnS()); - // __ uqincw(z24.VnS(), ALL); + __ movprfx(z12.VnS(), p4.Merging(), z21.VnS()); + __ uqincw(z12.VnS(), SVE_ALL); - // TODO: Enable once implemented. - // __ movprfx(z9.VnD(), p0.Zeroing(), z16.VnD()); - // __ uqsub(z9.VnD(), z9.VnD(), 42); + __ movprfx(z9.VnD(), p0.Zeroing(), z16.VnD()); + __ uqsub(z9.VnD(), z9.VnD(), 42); } assm.FinalizeCode(); @@ -1389,43 +1218,41 @@ TEST(movprfx_negative_predication_fp) { { // We have to use the Assembler directly to generate movprfx, so we need // to manually reserve space for the code we're about to emit. - static const size_t kPairCount = 7; + static const size_t kPairCount = 9; CodeBufferCheckScope guard(&assm, kPairCount * 2 * kInstructionSize); - // TODO: Enable once implemented. - // __ movprfx(z9.VnH(), p1.Merging(), z18.VnH()); - // __ fcmla(z9.VnH(), z18.VnH(), z16.VnH(), 2, 180); + __ movprfx(z10.VnH(), p3.Zeroing(), z3.VnH()); + __ fcmla(z10.VnH(), z22.VnH(), z3.VnH(), 2, 180); - // TODO: Enable once implemented. - // __ movprfx(z10.VnS(), p6.Merging(), z15.VnS()); - // __ fcmla(z10.VnS(), z23.VnS(), z26.VnS(), 1, 270); + __ movprfx(z12.VnS(), p4.Merging(), z14.VnS()); + __ fcmla(z12.VnS(), z3.VnS(), z10.VnS(), 1, 270); - // TODO: Enable once implemented. - // __ movprfx(z10.VnH(), p6.Zeroing(), z9.VnH()); - // __ fmla(z10.VnH(), z16.VnH(), z3.VnH(), 7); + __ movprfx(z16.VnD(), p3.Zeroing(), z24.VnD()); + __ fmla(z16.VnD(), z24.VnD(), z8.VnD(), 1); - // TODO: Enable once implemented. - // __ movprfx(z6.VnS(), p1.Zeroing(), z9.VnS()); - // __ fmla(z6.VnS(), z16.VnS(), z9.VnS(), 1); + __ movprfx(z9.VnH(), p7.Zeroing(), z0.VnH()); + __ fmla(z9.VnH(), z8.VnH(), z0.VnH(), 7); - // TODO: Enable once implemented. - // __ movprfx(z10.VnH(), p7.Zeroing(), z13.VnH()); - // __ fmls(z10.VnH(), z13.VnH(), z6.VnH(), 4); + __ movprfx(z23.VnS(), p5.Merging(), z5.VnS()); + __ fmla(z23.VnS(), z7.VnS(), z5.VnS(), 3); - // TODO: Enable once implemented. 
-    // __ movprfx(z9.VnD(), p3.Merging(), z6.VnD());
-    // __ fmls(z9.VnD(), z2.VnD(), z6.VnD(), 1);
+    __ movprfx(z19.VnD(), p6.Zeroing(), z8.VnD());
+    __ fmls(z19.VnD(), z27.VnD(), z13.VnD(), 1);
+
+    __ movprfx(z25.VnH(), p7.Merging(), z24.VnH());
+    __ fmls(z25.VnH(), z24.VnH(), z4.VnH(), 4);
+
+    __ movprfx(z2.VnS(), p1.Zeroing(), z0.VnS());
+    __ fmls(z2.VnS(), z9.VnS(), z0.VnS(), 3);

     // Note that ftsmul and ftssel _cannot_ take movprfx.
-    // TODO: Enable once implemented.
-    // __ movprfx(z22.VnD(), p6.Merging(), z16.VnD());
-    // __ ftmad(z22.VnD(), z22.VnD(), z20.VnD(), 2);
+    __ movprfx(z22.VnD(), p6.Merging(), z16.VnD());
+    __ ftmad(z22.VnD(), z22.VnD(), z20.VnD(), 2);
   }

   assm.FinalizeCode();
   CheckAndMaybeDisassembleMovprfxPairs(assm.GetBuffer(), false);
 }
-#endif

 TEST(movprfx_positive) {
   Assembler assm;
@@ -1433,7 +1260,7 @@ TEST(movprfx_positive) {
   {
     // We have to use the Assembler directly to generate movprfx, so we need
     // to manually reserve space for the code we're about to emit.
-    static const size_t kPairCount = 115;
+    static const size_t kPairCount = 117;
     CodeBufferCheckScope guard(&assm, kPairCount * 2 * kInstructionSize);

     __ movprfx(z17, z28);
@@ -1448,28 +1275,23 @@ TEST(movprfx_positive) {
     __ movprfx(z8.VnS(), p3.Zeroing(), z28.VnS());
     __ and_(z8.VnS(), p3.Merging(), z8.VnS(), z31.VnS());

-    // TODO: Enable once implemented.
-    // __ movprfx(z20, z23);
-    // __ and_(z20.VnS(), z20.VnS(), 4);
+    __ movprfx(z20, z23);
+    __ and_(z20.VnS(), z20.VnS(), 4);

-    // TODO: Enable once implemented.
-    // __ movprfx(z24.VnD(), p5.Merging(), z11.VnD());
-    // __ asr(z24.VnD(), p5.Merging(), z24.VnD(), 3);
+    __ movprfx(z24.VnD(), p5.Merging(), z11.VnD());
+    __ asr(z24.VnD(), p5.Merging(), z24.VnD(), 3);

     __ movprfx(z1, z13);
     __ asr(z1.VnH(), p3.Merging(), z1.VnH(), z4.VnH());

-    // TODO: Enable once implemented.
-    // __ movprfx(z0.VnB(), p7.Zeroing(), z28.VnB());
-    // __ asr(z0.VnB(), p7.Merging(), z0.VnB(), z28.VnD());
+    __ movprfx(z0.VnB(), p7.Zeroing(), z28.VnB());
+    __ asr(z0.VnB(), p7.Merging(), z0.VnB(), z28.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z15, z5);
-    // __ asr(z15.VnD(), p3.Merging(), z15.VnD(), z5.VnD());
+    __ movprfx(z15, z5);
+    __ asr(z15.VnD(), p3.Merging(), z15.VnD(), z5.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z24.VnH(), p3.Merging(), z22.VnH());
-    // __ asrd(z24.VnH(), p3.Merging(), z24.VnH(), 3);
+    __ movprfx(z24.VnH(), p3.Merging(), z22.VnH());
+    __ asrd(z24.VnH(), p3.Merging(), z24.VnH(), 3);

     __ movprfx(z2.VnS(), p3.Zeroing(), z20.VnS());
     __ asrr(z2.VnS(), p3.Merging(), z2.VnS(), z15.VnS());
@@ -1477,17 +1299,14 @@ TEST(movprfx_positive) {
     __ movprfx(z17.VnB(), p7.Merging(), z6.VnB());
     __ bic(z17.VnB(), p7.Merging(), z17.VnB(), z25.VnB());

-    // TODO: Enable once implemented.
-    // __ movprfx(z31, z6);
-    // __ bic(z31.VnD(), z31.VnD(), 4);
+    __ movprfx(z31, z6);
+    __ bic(z31.VnD(), z31.VnD(), 4);

-    // TODO: Enable once implemented.
-    // __ movprfx(z20, z2);
-    // __ clasta(z20.VnB(), p4, z20.VnB(), z15.VnB());
+    __ movprfx(z20, z2);
+    __ clasta(z20.VnB(), p4, z20.VnB(), z15.VnB());

-    // TODO: Enable once implemented.
-    // __ movprfx(z27, z11);
-    // __ clastb(z27.VnB(), p5, z27.VnB(), z6.VnB());
+    __ movprfx(z27, z11);
+    __ clastb(z27.VnB(), p5, z27.VnB(), z6.VnB());

     __ movprfx(z3.VnS(), p7.Zeroing(), z17.VnS());
     __ cls(z3.VnS(), p7.Merging(), z0.VnS());
@@ -1504,58 +1323,47 @@ TEST(movprfx_positive) {
     __ movprfx(z5, z3);
     __ cpy(z5.VnD(), p1.Merging(), -42);

-    // TODO: Enable once implemented.
-    // __ movprfx(z0, z12);
-    // __ cpy(z0.VnB(), p1.Merging(), w0);
+    __ movprfx(z0, z12);
+    __ cpy(z0.VnB(), p1.Merging(), w0);

-    // TODO: Enable once implemented.
-    // __ movprfx(z27, z8);
-    // __ cpy(z27.VnB(), p0.Merging(), b0);
+    __ movprfx(z27, z8);
+    __ cpy(z27.VnB(), p0.Merging(), b0);

-    // TODO: Enable once implemented.
-    // __ movprfx(z18, z10);
-    // __ decd(z18.VnD(), MUL3);
+    __ movprfx(z20, z24);
+    __ decd(z20.VnD(), SVE_MUL3);

-    // TODO: Enable once implemented.
-    // __ movprfx(z0, z31);
-    // __ dech(z0.VnH(), VL2);
+    __ movprfx(z5, z28);
+    __ dech(z5.VnH(), SVE_VL2);

     __ movprfx(z7, z3);
     __ decp(z7.VnD(), p2);

-    // TODO: Enable once implemented.
-    // __ movprfx(z28, z25);
-    // __ decw(z28.VnS(), ALL);
+    __ movprfx(z4, z7);
+    __ decw(z4.VnS(), SVE_ALL);

-    // TODO: Enable once implemented.
-    // __ movprfx(z21, z10);
-    // __ eon_(z21.VnS(), z21.VnS(), 4);
+    __ movprfx(z3, z18);
+    __ eon(z3.VnS(), z3.VnS(), 4);

     __ movprfx(z4.VnD(), p0.Merging(), z10.VnD());
     __ eor(z4.VnD(), p0.Merging(), z4.VnD(), z10.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z26, z29);
-    // __ eor_(z26.VnD(), z26.VnD(), 4);
+    __ movprfx(z15, z18);
+    __ eor(z15.VnH(), z15.VnH(), 4);

-    // TODO: Enable once implemented.
-    // __ movprfx(z30, z11);
-    // __ ext(z30.VnB(), z30.VnB(), z11.VnB(), 42);
+    __ movprfx(z30, z11);
+    __ ext(z30.VnB(), z30.VnB(), z11.VnB(), 42);

-    // TODO: Enable once implemented.
-    // __ movprfx(z29, z16);
-    // __ incd(z29.VnD(), MUL3);
+    __ movprfx(z19, z28);
+    __ incd(z19.VnD(), SVE_MUL3);

-    // TODO: Enable once implemented.
-    // __ movprfx(z10, z13);
-    // __ inch(z10.VnH(), VL2);
+    __ movprfx(z13, z7);
+    __ inch(z13.VnH(), SVE_VL2);

     __ movprfx(z14, z21);
     __ incp(z14.VnD(), p1);

-    // TODO: Enable once implemented.
-    // __ movprfx(z0, z15);
-    // __ incw(z0.VnS(), ALL);
+    __ movprfx(z26, z12);
+    __ incw(z26.VnS(), SVE_ALL);

     __ movprfx(z16, z2);
     __ insr(z16.VnB(), w16);
@@ -1563,38 +1371,32 @@ TEST(movprfx_positive) {
     __ movprfx(z20, z26);
     __ insr(z20.VnB(), b0);

-    // TODO: Enable once implemented.
-    // __ movprfx(z30.VnD(), p0.Merging(), z23.VnD());
-    // __ lsl(z30.VnD(), p0.Merging(), z30.VnD(), 3);
+    __ movprfx(z30.VnD(), p0.Merging(), z23.VnD());
+    __ lsl(z30.VnD(), p0.Merging(), z30.VnD(), 3);

     __ movprfx(z28.VnS(), p2.Zeroing(), z6.VnS());
     __ lsl(z28.VnS(), p2.Merging(), z28.VnS(), z6.VnS());

-    // TODO: Enable once implemented.
-    // __ movprfx(z15.VnH(), p6.Zeroing(), z3.VnH());
-    // __ lsl(z15.VnH(), p6.Merging(), z15.VnH(), z3.VnD());
+    __ movprfx(z15.VnH(), p6.Zeroing(), z3.VnH());
+    __ lsl(z15.VnH(), p6.Merging(), z15.VnH(), z3.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z13.VnD(), p4.Zeroing(), z14.VnD());
-    // __ lsl(z13.VnD(), p4.Merging(), z13.VnD(), z25.VnD());
+    __ movprfx(z13.VnD(), p4.Zeroing(), z14.VnD());
+    __ lsl(z13.VnD(), p4.Merging(), z13.VnD(), z25.VnD());

     __ movprfx(z14, z5);
     __ lslr(z14.VnS(), p0.Merging(), z14.VnS(), z17.VnS());

-    // TODO: Enable once implemented.
-    // __ movprfx(z21, z1);
-    // __ lsr(z21.VnH(), p5.Merging(), z21.VnH(), 3);
+    __ movprfx(z21, z1);
+    __ lsr(z21.VnH(), p5.Merging(), z21.VnH(), 3);

     __ movprfx(z11.VnH(), p0.Zeroing(), z13.VnH());
     __ lsr(z11.VnH(), p0.Merging(), z11.VnH(), z9.VnH());

-    // TODO: Enable once implemented.
-    // __ movprfx(z24, z29);
-    // __ lsr(z24.VnS(), p4.Merging(), z24.VnS(), z1.VnD());
+    __ movprfx(z24, z29);
+    __ lsr(z24.VnS(), p4.Merging(), z24.VnS(), z1.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z1.VnD(), p6.Merging(), z9.VnD());
-    // __ lsr(z1.VnD(), p6.Merging(), z1.VnD(), z9.VnD());
+    __ movprfx(z1.VnD(), p6.Merging(), z9.VnD());
+    __ lsr(z1.VnD(), p6.Merging(), z1.VnD(), z9.VnD());

     __ movprfx(z22, z3);
     __ lsrr(z22.VnB(), p3.Merging(), z22.VnB(), z3.VnB());
@@ -1609,17 +1411,14 @@ TEST(movprfx_positive) {
     __ mls(z10.VnS(), p4.Merging(), z23.VnS(), z16.VnS());

     // Aliases of cpy.
-    // TODO: Enable once implemented.
-    // __ movprfx(z4.VnH(), p5.Zeroing(), z2.VnH());
-    // __ mov(z4.VnH(), p5.Merging(), -42);
+    __ movprfx(z4.VnH(), p5.Zeroing(), z2.VnH());
+    __ mov(z4.VnH(), p5.Merging(), -42);

-    // TODO: Enable once implemented.
-    // __ movprfx(z2.VnB(), p3.Zeroing(), z24.VnB());
-    // __ mov(z2.VnB(), p3.Merging(), w2);
+    __ movprfx(z2.VnB(), p3.Zeroing(), z24.VnB());
+    __ mov(z2.VnB(), p3.Merging(), w2);

-    // TODO: Enable once implemented.
-    // __ movprfx(z27, z13);
-    // __ mov(z27.VnD(), p3.Merging(), d0);
+    __ movprfx(z27, z13);
+    __ mov(z27.VnD(), p3.Merging(), d0);

     __ movprfx(z18.VnB(), p5.Zeroing(), z11.VnB());
     __ msb(z18.VnB(), p5.Merging(), z3.VnB(), z11.VnB());
@@ -1627,9 +1426,8 @@ TEST(movprfx_positive) {
     __ movprfx(z29, z16);
     __ mul(z29.VnS(), p6.Merging(), z29.VnS(), z9.VnS());

-    // TODO: Enable once implemented.
-    // __ movprfx(z21, z23);
-    // __ mul(z21.VnH(), z21.VnH(), 42);
+    __ movprfx(z21, z23);
+    __ mul(z21.VnH(), z21.VnH(), 42);

     __ movprfx(z7.VnS(), p4.Merging(), z14.VnS());
     __ neg(z7.VnS(), p4.Merging(), z14.VnS());

     __ movprfx(z8.VnD(), p4.Zeroing(), z5.VnD());
     __ not_(z8.VnD(), p4.Merging(), z5.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z14, z13);
-    // __ orn(z14.VnS(), z14.VnS(), 4);
+    __ movprfx(z14, z13);
+    __ orn(z14.VnS(), z14.VnS(), 4);

-    // TODO: Enable once implemented.
-    // __ movprfx(z14, z13);
-    // __ orn(z14.VnS(), z14.VnS(), 4);
+    __ movprfx(z14, z13);
+    __ orn(z14.VnS(), z14.VnS(), 4);

     __ movprfx(z27, z17);
     __ orr(z27.VnD(), p2.Merging(), z27.VnD(), z17.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z13.VnH(), p2.Zeroing(), z27.VnH());
-    // __ rbit(z13.VnH(), p2.Merging(), z1.VnH());
+    __ movprfx(z13.VnH(), p2.Zeroing(), z27.VnH());
+    __ rbit(z13.VnH(), p2.Merging(), z1.VnH());

-    // TODO: Enable once implemented.
-    // __ movprfx(z1, z29);
-    // __ revb(z1.VnS(), p4.Merging(), z6.VnS());
+    __ movprfx(z1, z29);
+    __ revb(z1.VnS(), p4.Merging(), z6.VnS());

-    // TODO: Enable once implemented.
-    // __ movprfx(z18.VnD(), p2.Zeroing(), z10.VnD());
-    // __ revh(z18.VnD(), p2.Merging(), z16.VnD());
+    __ movprfx(z18.VnD(), p2.Zeroing(), z10.VnD());
+    __ revh(z18.VnD(), p2.Merging(), z16.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z2.VnD(), p1.Merging(), z10.VnD());
-    // __ revw(z2.VnD(), p1.Merging(), z1.VnD());
+    __ movprfx(z2.VnD(), p1.Merging(), z10.VnD());
+    __ revw(z2.VnD(), p1.Merging(), z1.VnD());

     __ movprfx(z28.VnS(), p7.Merging(), z11.VnS());
     __ sabd(z28.VnS(), p7.Merging(), z28.VnS(), z11.VnS());

-    // TODO: Enable once implemented.
-    // __ movprfx(z22.VnS(), p0.Merging(), z20.VnS());
-    // __ sdiv(z22.VnS(), p0.Merging(), z22.VnS(), z6.VnS());
+    __ movprfx(z22.VnS(), p0.Merging(), z20.VnS());
+    __ sdiv(z22.VnS(), p0.Merging(), z22.VnS(), z6.VnS());

-    // TODO: Enable once implemented.
-    // __ movprfx(z13.VnS(), p7.Merging(), z0.VnS());
-    // __ sdivr(z13.VnS(), p7.Merging(), z13.VnS(), z2.VnS());
+    __ movprfx(z13.VnS(), p7.Merging(), z0.VnS());
+    __ sdivr(z13.VnS(), p7.Merging(), z13.VnS(), z2.VnS());

-    __ movprfx(z23, z5);
-    __ sdot(z23.VnD(), z30.VnH(), z5.VnH());
+    __ movprfx(z0, z12);
+    __ sdot(z0.VnD(), z10.VnH(), z12.VnH(), 1);

-    // TODO: Enable once implemented.
-    // __ movprfx(z19, z3);
-    // __ sdot(z19.VnS(), z14.VnH(), z3.VnH(), 1);
+    __ movprfx(z8, z15);
+    __ sdot(z8.VnS(), z15.VnB(), z12.VnB());
+
+    __ movprfx(z13, z0);
+    __ sdot(z13.VnS(), z10.VnB(), z0.VnB(), 1);

     __ movprfx(z11, z13);
     __ smax(z11.VnB(), p5.Merging(), z11.VnB(), z24.VnB());

-    // TODO: Enable once implemented.
-    // __ movprfx(z3, z17);
-    // __ smax(z3.VnD(), z3.VnD(), 42);
+    __ movprfx(z3, z17);
+    __ smax(z3.VnD(), z3.VnD(), 42);

     __ movprfx(z10, z29);
     __ smin(z10.VnD(), p4.Merging(), z10.VnD(), z29.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z13, z29);
-    // __ smin(z13.VnD(), z13.VnD(), 42);
+    __ movprfx(z13, z29);
+    __ smin(z13.VnD(), z13.VnD(), 42);

     __ movprfx(z6, z17);
     __ smulh(z6.VnS(), p7.Merging(), z6.VnS(), z31.VnS());

-    // TODO: Enable once implemented.
-    // __ movprfx(z3.VnH(), p0.Zeroing(), z13.VnH());
-    // __ splice(z3.VnH(), p0.Merging(), z3.VnH(), z12.VnH());
+    __ movprfx(z19, z20);
+    __ splice(z19.VnB(), p3, z19.VnB(), z20.VnB());

-    // TODO: Enable once implemented.
-    // __ movprfx(z0, z3);
-    // __ sqadd(z0.VnD(), z0.VnD(), 42);
+    __ movprfx(z0, z3);
+    __ sqadd(z0.VnD(), z0.VnD(), 42);

-    // TODO: Enable once implemented.
-    // __ movprfx(z21, z12);
-    // __ sqdecd(z21.VnD(), MUL3);
+    __ movprfx(z29, z5);
+    __ sqdecd(z29.VnD(), SVE_MUL3);

-    // TODO: Enable once implemented.
-    // __ movprfx(z12, z11);
-    // __ sqdech(z12.VnH(), VL2);
+    __ movprfx(z25, z11);
+    __ sqdech(z25.VnH(), SVE_VL2);

     __ movprfx(z16, z9);
     __ sqdecp(z16.VnS(), p1);

-    // TODO: Enable once implemented.
-    // __ movprfx(z14, z13);
-    // __ sqdecw(z14.VnS(), ALL);
+    __ movprfx(z8, z17);
+    __ sqdecw(z8.VnS(), SVE_ALL);

-    // TODO: Enable once implemented.
-    // __ movprfx(z30, z26);
-    // __ sqincd(z30.VnD(), MUL3);
+    __ movprfx(z4, z5);
+    __ sqincd(z4.VnD(), SVE_MUL3);

-    // TODO: Enable once implemented.
-    // __ movprfx(z7, z12);
-    // __ sqinch(z7.VnH(), VL2);
+    __ movprfx(z0, z17);
+    __ sqinch(z0.VnH(), SVE_VL2);

     __ movprfx(z7, z27);
     __ sqincp(z7.VnS(), p6);

-    // TODO: Enable once implemented.
-    // __ movprfx(z24, z27);
-    // __ sqincw(z24.VnS(), ALL);
+    __ movprfx(z10, z9);
+    __ sqincw(z10.VnS(), SVE_ALL);

-    // TODO: Enable once implemented.
-    // __ movprfx(z31, z22);
-    // __ sqsub(z31.VnB(), z31.VnB(), 42);
+    __ movprfx(z31, z22);
+    __ sqsub(z31.VnB(), z31.VnB(), 42);

     __ movprfx(z12.VnH(), p7.Zeroing(), z23.VnH());
     __ sub(z12.VnH(), p7.Merging(), z12.VnH(), z23.VnH());
@@ -1756,97 +1537,83 @@ TEST(movprfx_positive) {
     __ movprfx(z5, z3);
     __ sxtb(z5.VnD(), p6.Merging(), z20.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z11, z17);
-    // __ sxth(z11.VnD(), p6.Merging(), z25.VnD());
+    __ movprfx(z11, z17);
+    __ sxth(z11.VnD(), p6.Merging(), z25.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z26, z4);
-    // __ sxtw(z26.VnD(), p5.Merging(), z4.VnD());
+    __ movprfx(z26, z4);
+    __ sxtw(z26.VnD(), p5.Merging(), z4.VnD());

     __ movprfx(z15.VnD(), p0.Zeroing(), z8.VnD());
     __ uabd(z15.VnD(), p0.Merging(), z15.VnD(), z20.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z21, z24);
-    // __ udiv(z21.VnD(), p3.Merging(), z21.VnD(), z24.VnD());
+    __ movprfx(z21, z24);
+    __ udiv(z21.VnD(), p3.Merging(), z21.VnD(), z24.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z22, z10);
-    // __ udivr(z22.VnD(), p7.Merging(), z22.VnD(), z27.VnD());
+    __ movprfx(z22, z10);
+    __ udivr(z22.VnD(), p7.Merging(), z22.VnD(), z27.VnD());

-    __ movprfx(z19, z22);
-    __ udot(z19.VnD(), z22.VnH(), z12.VnH());
+    __ movprfx(z27, z25);
+    __ udot(z27.VnD(), z29.VnH(), z3.VnH(), 1);

-    // TODO: Enable once implemented.
-    // __ movprfx(z26, z3);
-    // __ udot(z26.VnD(), z14.VnS(), z3.VnS(), 1);
+    __ movprfx(z29, z10);
+    __ udot(z29.VnS(), z10.VnB(), z21.VnB());
+
+    __ movprfx(z18, z0);
+    __ udot(z18.VnS(), z14.VnB(), z0.VnB(), 1);

     __ movprfx(z6, z30);
     __ umax(z6.VnS(), p2.Merging(), z6.VnS(), z27.VnS());

-    // TODO: Enable once implemented.
-    // __ movprfx(z31, z17);
-    // __ umax(z31.VnD(), z31.VnD(), 42);
+    __ movprfx(z31, z17);
+    __ umax(z31.VnD(), z31.VnD(), 42);

     __ movprfx(z27.VnS(), p0.Merging(), z20.VnS());
     __ umin(z27.VnS(), p0.Merging(), z27.VnS(), z8.VnS());

-    // TODO: Enable once implemented.
-    // __ movprfx(z0, z11);
-    // __ umin(z0.VnH(), z0.VnH(), 42);
+    __ movprfx(z0, z11);
+    __ umin(z0.VnH(), z0.VnH(), 42);

     __ movprfx(z21, z17);
     __ umulh(z21.VnB(), p0.Merging(), z21.VnB(), z30.VnB());

-    // TODO: Enable once implemented.
-    // __ movprfx(z9, z24);
-    // __ uqadd(z9.VnD(), z9.VnD(), 42);
+    __ movprfx(z9, z24);
+    __ uqadd(z9.VnD(), z9.VnD(), 42);

-    // TODO: Enable once implemented.
-    // __ movprfx(z25, z28);
-    // __ uqdecd(z25.VnD(), MUL3);
+    __ movprfx(z18, z13);
+    __ uqdecd(z18.VnD(), SVE_MUL3);

-    // TODO: Enable once implemented.
-    // __ movprfx(z25, z15);
-    // __ uqdech(z25.VnH(), VL2);
+    __ movprfx(z20, z23);
+    __ uqdech(z20.VnH(), SVE_VL2);

     __ movprfx(z12, z29);
     __ uqdecp(z12.VnS(), p7);

-    // TODO: Enable once implemented.
-    // __ movprfx(z4, z9);
-    // __ uqdecw(z4.VnS(), ALL);
+    __ movprfx(z24, z25);
+    __ uqdecw(z24.VnS(), SVE_ALL);

-    // TODO: Enable once implemented.
-    // __ movprfx(z28, z18);
-    // __ uqincd(z28.VnD(), MUL3);
+    __ movprfx(z13, z1);
+    __ uqincd(z13.VnD(), SVE_MUL3);

-    // TODO: Enable once implemented.
-    // __ movprfx(z2, z26);
-    // __ uqinch(z2.VnH(), VL2);
+    __ movprfx(z5, z19);
+    __ uqinch(z5.VnH(), SVE_VL2);

     __ movprfx(z6, z25);
     __ uqincp(z6.VnS(), p5);

-    // TODO: Enable once implemented.
-    // __ movprfx(z28, z4);
-    // __ uqincw(z28.VnS(), ALL);
+    __ movprfx(z12, z14);
+    __ uqincw(z12.VnS(), SVE_ALL);

-    // TODO: Enable once implemented.
-    // __ movprfx(z13, z6);
-    // __ uqsub(z13.VnH(), z13.VnH(), 42);
+    __ movprfx(z13, z6);
+    __ uqsub(z13.VnH(), z13.VnH(), 42);

     __ movprfx(z31, z3);
     __ uxtb(z31.VnS(), p0.Merging(), z3.VnS());

-    // TODO: Enable once implemented.
-    // __ movprfx(z18.VnD(), p4.Merging(), z25.VnD());
-    // __ uxth(z18.VnD(), p4.Merging(), z25.VnD());
+    __ movprfx(z18.VnD(), p4.Merging(), z25.VnD());
+    __ uxth(z18.VnD(), p4.Merging(), z25.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z18.VnD(), p7.Merging(), z25.VnD());
-    // __ uxtw(z18.VnD(), p7.Merging(), z25.VnD());
+    __ movprfx(z18.VnD(), p7.Merging(), z25.VnD());
+    __ uxtw(z18.VnD(), p7.Merging(), z25.VnD());
   }

   assm.FinalizeCode();
@@ -1859,87 +1626,68 @@ TEST(movprfx_positive_fp) {
   {
     // We have to use the Assembler directly to generate movprfx, so we need
     // to manually reserve space for the code we're about to emit.
-    static const size_t kPairCount = 71;
+    static const size_t kPairCount = 73;
     CodeBufferCheckScope guard(&assm, kPairCount * 2 * kInstructionSize);

-    // TODO: Enable once implemented.
-    // __ movprfx(z18.VnS(), p6.Zeroing(), z20.VnS());
-    // __ fabd(z18.VnS(), p6.Merging(), z18.VnS(), z19.VnS());
+    __ movprfx(z18.VnS(), p6.Zeroing(), z20.VnS());
+    __ fabd(z18.VnS(), p6.Merging(), z18.VnS(), z19.VnS());

     __ movprfx(z28.VnD(), p4.Zeroing(), z24.VnD());
     __ fabs(z28.VnD(), p4.Merging(), z24.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z12, z8);
-    // __ fadd(z12.VnS(), p2.Merging(), z12.VnS(), 0.5);
+    __ movprfx(z12, z8);
+    __ fadd(z12.VnS(), p2.Merging(), z12.VnS(), 0.5);

-    // TODO: Enable once implemented.
-    // __ movprfx(z0.VnS(), p1.Merging(), z9.VnS());
-    // __ fadd(z0.VnS(), p1.Merging(), z0.VnS(), z9.VnS());
+    __ movprfx(z0.VnS(), p1.Merging(), z9.VnS());
+    __ fadd(z0.VnS(), p1.Merging(), z0.VnS(), z9.VnS());

-    // TODO: Enable once implemented.
-    // __ movprfx(z10.VnH(), p2.Merging(), z2.VnH());
-    // __ fcadd(z10.VnH(), p2.Merging(), z10.VnH(), z20.VnH(), 90);
+    __ movprfx(z10.VnH(), p2.Merging(), z2.VnH());
+    __ fcadd(z10.VnH(), p2.Merging(), z10.VnH(), z20.VnH(), 90);

-    // TODO: Enable once implemented.
-    // __ movprfx(z25, z19);
-    // __ fcmla(z25.VnH(), z19.VnH(), z14.VnH(), 2, 180);
+    __ movprfx(z21, z6);
+    __ fcmla(z21.VnH(), z31.VnH(), z6.VnH(), 2, 180);

-    // TODO: Enable once implemented.
-    // __ movprfx(z19, z26);
-    // __ fcmla(z19.VnS(), z5.VnS(), z27.VnS(), 1, 270);
+    __ movprfx(z16, z6);
+    __ fcmla(z16.VnS(), z11.VnS(), z6.VnS(), 1, 270);

-    // TODO: Enable once implemented.
-    // __ movprfx(z15.VnH(), p6.Merging(), z16.VnH());
-    // __ fcpy(z15.VnH(), p6.Merging(), 1.25);
+    __ movprfx(z15.VnH(), p6.Merging(), z16.VnH());
+    __ fcpy(z15.VnH(), p6.Merging(), 1.25);

-    // TODO: Enable once implemented.
-    // __ movprfx(z1, z14);
-    // __ fcvt(z1.VnD(), p2.Merging(), z4.VnH());
+    __ movprfx(z1, z14);
+    __ fcvt(z1.VnD(), p2.Merging(), z4.VnH());

-    // TODO: Enable once implemented.
-    // __ movprfx(z25.VnD(), p6.Merging(), z1.VnD());
-    // __ fcvt(z25.VnD(), p6.Merging(), z1.VnS());
+    __ movprfx(z25.VnD(), p6.Merging(), z1.VnD());
+    __ fcvt(z25.VnD(), p6.Merging(), z1.VnS());

-    // TODO: Enable once implemented.
-    // __ movprfx(z18.VnS(), p2.Merging(), z2.VnS());
-    // __ fcvt(z18.VnH(), p2.Merging(), z7.VnS());
+    __ movprfx(z18.VnS(), p2.Merging(), z2.VnS());
+    __ fcvt(z18.VnH(), p2.Merging(), z7.VnS());

-    // TODO: Enable once implemented.
-    // __ movprfx(z21.VnD(), p5.Zeroing(), z26.VnD());
-    // __ fcvt(z21.VnH(), p5.Merging(), z26.VnD());
+    __ movprfx(z21.VnD(), p5.Zeroing(), z26.VnD());
+    __ fcvt(z21.VnH(), p5.Merging(), z26.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z12.VnD(), p1.Merging(), z18.VnD());
-    // __ fcvtzs(z12.VnD(), p1.Merging(), z18.VnH());
+    __ movprfx(z12.VnD(), p1.Merging(), z18.VnD());
+    __ fcvtzs(z12.VnD(), p1.Merging(), z18.VnH());

-    // TODO: Enable once implemented.
-    // __ movprfx(z3.VnS(), p2.Merging(), z0.VnS());
-    // __ fcvtzs(z3.VnS(), p2.Merging(), z26.VnS());
+    __ movprfx(z3.VnS(), p2.Merging(), z0.VnS());
+    __ fcvtzs(z3.VnS(), p2.Merging(), z26.VnS());

-    // TODO: Enable once implemented.
-    // __ movprfx(z21.VnS(), p4.Merging(), z7.VnS());
-    // __ fcvtzs(z21.VnS(), p4.Merging(), z7.VnH());
+    __ movprfx(z21.VnS(), p4.Merging(), z7.VnS());
+    __ fcvtzs(z21.VnS(), p4.Merging(), z7.VnH());

-    // TODO: Enable once implemented.
-    // __ movprfx(z16.VnD(), p3.Zeroing(), z4.VnD());
-    // __ fcvtzs(z16.VnS(), p3.Merging(), z28.VnD());
+    __ movprfx(z16.VnD(), p3.Zeroing(), z4.VnD());
+    __ fcvtzs(z16.VnS(), p3.Merging(), z28.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z31.VnD(), p4.Merging(), z1.VnD());
-    // __ fcvtzu(z31.VnD(), p4.Merging(), z1.VnH());
+    __ movprfx(z31.VnD(), p4.Merging(), z1.VnD());
+    __ fcvtzu(z31.VnD(), p4.Merging(), z1.VnH());

-    // TODO: Enable once implemented.
-    // __ movprfx(z23.VnH(), p0.Zeroing(), z28.VnH());
-    // __ fcvtzu(z23.VnH(), p0.Merging(), z28.VnH());
+    __ movprfx(z23.VnH(), p0.Zeroing(), z28.VnH());
+    __ fcvtzu(z23.VnH(), p0.Merging(), z28.VnH());

-    // TODO: Enable once implemented.
-    // __ movprfx(z2, z12);
-    // __ fcvtzu(z2.VnD(), p3.Merging(), z28.VnS());
+    __ movprfx(z2, z12);
+    __ fcvtzu(z2.VnD(), p3.Merging(), z28.VnS());

-    // TODO: Enable once implemented.
-    // __ movprfx(z4, z7);
-    // __ fcvtzu(z4.VnS(), p7.Merging(), z16.VnD());
+    __ movprfx(z4, z7);
+    __ fcvtzu(z4.VnS(), p7.Merging(), z16.VnD());

     __ movprfx(z13.VnS(), p3.Zeroing(), z23.VnS());
     __ fdiv(z13.VnS(), p3.Merging(), z13.VnS(), z23.VnS());
@@ -1950,82 +1698,72 @@ TEST(movprfx_positive_fp) {
     __ movprfx(z31, z23);
     __ fmad(z31.VnS(), p5.Merging(), z23.VnS(), z11.VnS());

-    // TODO: Enable once implemented.
-    // __ movprfx(z14.VnH(), p7.Merging(), z21.VnH());
-    // __ fmax(z14.VnH(), p7.Merging(), z14.VnH(), 0.0);
+    __ movprfx(z14.VnH(), p7.Merging(), z21.VnH());
+    __ fmax(z14.VnH(), p7.Merging(), z14.VnH(), 0.0);

-    // TODO: Enable once implemented.
-    // __ movprfx(z17.VnS(), p4.Merging(), z9.VnS());
-    // __ fmax(z17.VnS(), p4.Merging(), z17.VnS(), z9.VnS());
+    __ movprfx(z17.VnS(), p4.Merging(), z9.VnS());
+    __ fmax(z17.VnS(), p4.Merging(), z17.VnS(), z9.VnS());

-    // TODO: Enable once implemented.
-    // __ movprfx(z1.VnS(), p3.Zeroing(), z30.VnS());
-    // __ fmaxnm(z1.VnS(), p3.Merging(), z1.VnS(), 0.0);
+    __ movprfx(z1.VnS(), p3.Zeroing(), z30.VnS());
+    __ fmaxnm(z1.VnS(), p3.Merging(), z1.VnS(), 0.0);

-    // TODO: Enable once implemented.
-    // __ movprfx(z10.VnD(), p1.Zeroing(), z17.VnD());
-    // __ fmaxnm(z10.VnD(), p1.Merging(), z10.VnD(), z17.VnD());
+    __ movprfx(z10.VnD(), p1.Zeroing(), z17.VnD());
+    __ fmaxnm(z10.VnD(), p1.Merging(), z10.VnD(), z17.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z3, z13);
-    // __ fmin(z3.VnS(), p0.Merging(), z3.VnS(), 0.0);
+    __ movprfx(z3, z13);
+    __ fmin(z3.VnS(), p0.Merging(), z3.VnS(), 0.0);

-    // TODO: Enable once implemented.
-    // __ movprfx(z15, z21);
-    // __ fmin(z15.VnS(), p4.Merging(), z15.VnS(), z21.VnS());
+    __ movprfx(z15, z21);
+    __ fmin(z15.VnS(), p4.Merging(), z15.VnS(), z21.VnS());

-    // TODO: Enable once implemented.
-    // __ movprfx(z30.VnH(), p7.Zeroing(), z25.VnH());
-    // __ fminnm(z30.VnH(), p7.Merging(), z30.VnH(), 0.0);
+    __ movprfx(z30.VnH(), p7.Zeroing(), z25.VnH());
+    __ fminnm(z30.VnH(), p7.Merging(), z30.VnH(), 0.0);

-    // TODO: Enable once implemented.
-    // __ movprfx(z31, z15);
-    // __ fminnm(z31.VnD(), p5.Merging(), z31.VnD(), z25.VnD());
+    __ movprfx(z31, z15);
+    __ fminnm(z31.VnD(), p5.Merging(), z31.VnD(), z25.VnD());
+
+    __ movprfx(z27, z28);
+    __ fmla(z27.VnD(), z28.VnD(), z12.VnD(), 1);

     __ movprfx(z26.VnH(), p6.Zeroing(), z13.VnH());
     __ fmla(z26.VnH(), p6.Merging(), z13.VnH(), z7.VnH());

-    // TODO: Enable once implemented.
-    // __ movprfx(z9, z22);
-    // __ fmla(z9.VnH(), z22.VnH(), z24.VnH(), 7);
+    __ movprfx(z26, z10);
+    __ fmla(z26.VnH(), z10.VnH(), z1.VnH(), 7);

-    // TODO: Enable once implemented.
-    // __ movprfx(z9, z22);
-    // __ fmla(z9.VnS(), z24.VnS(), z22.VnS(), 1);
+    __ movprfx(z0, z1);
+    __ fmla(z0.VnS(), z25.VnS(), z1.VnS(), 3);
+
+    __ movprfx(z7, z3);
+    __ fmls(z7.VnD(), z30.VnD(), z3.VnD(), 1);

     __ movprfx(z1, z24);
     __ fmls(z1.VnD(), p5.Merging(), z20.VnD(), z24.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z21, z17);
-    // __ fmls(z21.VnH(), z17.VnH(), z15.VnH(), 4);
+    __ movprfx(z19, z18);
+    __ fmls(z19.VnH(), z18.VnH(), z7.VnH(), 4);

-    // TODO: Enable once implemented.
-    // __ movprfx(z5, z30);
-    // __ fmls(z5.VnS(), z30.VnS(), z6.VnS(), 1);
+    __ movprfx(z0, z26);
+    __ fmls(z0.VnS(), z17.VnS(), z4.VnS(), 3);

     // TODO: Enable once implemented.
-    // __ movprfx(z8.VnS(), p3.Zeroing(), z19.VnS());
-    // __ fmov(z8.VnS(), p3.Merging(), 0);
+    // __ movprfx(z19.VnS(), p7.Zeroing(), z6.VnS());
+    // __ fmov(z19.VnS(), p7.Merging(), 0.0);

-    // TODO: Enable once implemented.
-    // __ movprfx(z21, z15);
-    // __ fmov(z21.VnH(), p7.Merging(), 2.5);
+    __ movprfx(z21, z15);
+    __ fmov(z21.VnH(), p7.Merging(), 2.5);

     __ movprfx(z23, z18);
     __ fmsb(z23.VnS(), p4.Merging(), z1.VnS(), z7.VnS());

-    // TODO: Enable once implemented.
-    // __ movprfx(z8, z28);
-    // __ fmul(z8.VnS(), p4.Merging(), z8.VnS(), 2.0);
+    __ movprfx(z8, z28);
+    __ fmul(z8.VnS(), p4.Merging(), z8.VnS(), 2.0);

-    // TODO: Enable once implemented.
-    // __ movprfx(z6.VnD(), p6.Merging(), z27.VnD());
-    // __ fmul(z6.VnD(), p6.Merging(), z6.VnD(), z27.VnD());
+    __ movprfx(z6.VnD(), p6.Merging(), z27.VnD());
+    __ fmul(z6.VnD(), p6.Merging(), z6.VnD(), z27.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z6.VnH(), p0.Merging(), z19.VnH());
-    // __ fmulx(z6.VnH(), p0.Merging(), z6.VnH(), z19.VnH());
+    __ movprfx(z6.VnH(), p0.Merging(), z19.VnH());
+    __ fmulx(z6.VnH(), p0.Merging(), z6.VnH(), z19.VnH());

     __ movprfx(z5.VnH(), p0.Merging(), z1.VnH());
     __ fneg(z5.VnH(), p0.Merging(), z1.VnH());
@@ -2043,103 +1781,81 @@ TEST(movprfx_positive_fp) {
     __ fnmsb(z9.VnD(), p2.Merging(), z7.VnD(), z23.VnD());

     // Note that frecpe and frecps _cannot_ take movprfx.
-    // TODO: Enable once implemented.
-    // __ movprfx(z12.VnH(), p1.Zeroing(), z17.VnH());
-    // __ frecpx(z12.VnH(), p1.Merging(), z4.VnH());
+    __ movprfx(z12.VnH(), p1.Zeroing(), z17.VnH());
+    __ frecpx(z12.VnH(), p1.Merging(), z4.VnH());

-    // TODO: Enable once implemented.
-    // __ movprfx(z28.VnS(), p4.Zeroing(), z27.VnS());
-    // __ frinta(z28.VnS(), p4.Merging(), z24.VnS());
+    __ movprfx(z28.VnS(), p4.Zeroing(), z27.VnS());
+    __ frinta(z28.VnS(), p4.Merging(), z24.VnS());

-    // TODO: Enable once implemented.
-    // __ movprfx(z7.VnD(), p7.Merging(), z25.VnD());
-    // __ frinti(z7.VnD(), p7.Merging(), z25.VnD());
+    __ movprfx(z7.VnD(), p7.Merging(), z25.VnD());
+    __ frinti(z7.VnD(), p7.Merging(), z25.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z10, z21);
-    // __ frintm(z10.VnD(), p5.Merging(), z26.VnD());
+    __ movprfx(z10, z21);
+    __ frintm(z10.VnD(), p5.Merging(), z26.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z25, z21);
-    // __ frintn(z25.VnH(), p4.Merging(), z1.VnH());
+    __ movprfx(z25, z21);
+    __ frintn(z25.VnH(), p4.Merging(), z1.VnH());

-    // TODO: Enable once implemented.
-    // __ movprfx(z25, z9);
-    // __ frintp(z25.VnH(), p1.Merging(), z9.VnH());
+    __ movprfx(z25, z9);
+    __ frintp(z25.VnH(), p1.Merging(), z9.VnH());

-    // TODO: Enable once implemented.
-    // __ movprfx(z30, z16);
-    // __ frintx(z30.VnS(), p1.Merging(), z16.VnS());
+    __ movprfx(z30, z16);
+    __ frintx(z30.VnS(), p1.Merging(), z16.VnS());

-    // TODO: Enable once implemented.
-    // __ movprfx(z0.VnD(), p5.Merging(), z9.VnD());
-    // __ frintz(z0.VnD(), p5.Merging(), z23.VnD());
+    __ movprfx(z0.VnD(), p5.Merging(), z9.VnD());
+    __ frintz(z0.VnD(), p5.Merging(), z23.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z11.VnD(), p7.Merging(), z2.VnD());
-    // __ fscale(z11.VnD(), p7.Merging(), z11.VnD(), z2.VnD());
+    __ movprfx(z11.VnD(), p7.Merging(), z2.VnD());
+    __ fscale(z11.VnD(), p7.Merging(), z11.VnD(), z2.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z23.VnS(), p4.Merging(), z17.VnS());
-    // __ fsqrt(z23.VnS(), p4.Merging(), z10.VnS());
+    __ movprfx(z23.VnS(), p4.Merging(), z17.VnS());
+    __ fsqrt(z23.VnS(), p4.Merging(), z10.VnS());

-    // TODO: Enable once implemented.
-    // __ movprfx(z0.VnD(), p2.Merging(), z26.VnD());
-    // __ fsub(z0.VnD(), p2.Merging(), z0.VnD(), 1.0);
+    __ movprfx(z0.VnD(), p2.Merging(), z26.VnD());
+    __ fsub(z0.VnD(), p2.Merging(), z0.VnD(), 1.0);

-    // TODO: Enable once implemented.
-    // __ movprfx(z28.VnD(), p1.Zeroing(), z16.VnD());
-    // __ fsub(z28.VnD(), p1.Merging(), z28.VnD(), z16.VnD());
+    __ movprfx(z28.VnD(), p1.Zeroing(), z16.VnD());
+    __ fsub(z28.VnD(), p1.Merging(), z28.VnD(), z16.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z22, z27);
-    // __ fsubr(z22.VnD(), p4.Merging(), z22.VnD(), 1.0);
+    __ movprfx(z22, z27);
+    __ fsubr(z22.VnD(), p4.Merging(), z22.VnD(), 1.0);

-    // TODO: Enable once implemented.
-    // __ movprfx(z4.VnS(), p2.Merging(), z26.VnS());
-    // __ fsubr(z4.VnS(), p2.Merging(), z4.VnS(), z26.VnS());
+    __ movprfx(z4.VnS(), p2.Merging(), z26.VnS());
+    __ fsubr(z4.VnS(), p2.Merging(), z4.VnS(), z26.VnS());

     // Note that ftsmul and ftssel _cannot_ take movprfx.
-    // TODO: Enable once implemented.
-    // __ movprfx(z10, z4);
-    // __ ftmad(z10.VnS(), z10.VnS(), z4.VnS(), 2);
+    __ movprfx(z10, z4);
+    __ ftmad(z10.VnS(), z10.VnS(), z4.VnS(), 2);

-    // TODO: Enable once implemented.
-    // __ movprfx(z19.VnD(), p3.Zeroing(), z3.VnD());
-    // __ scvtf(z19.VnD(), p3.Merging(), z24.VnS());;
+    __ movprfx(z2, z16);
+    __ scvtf(z2.VnD(), p1.Merging(), z16.VnS());

-    // TODO: Enable once implemented.
-    // __ movprfx(z9.VnH(), p5.Merging(), z28.VnH());
-    // __ scvtf(z9.VnH(), p5.Merging(), z26.VnH());;
+    __ movprfx(z10, z20);
+    __ scvtf(z10.VnD(), p5.Merging(), z20.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z30.VnS(), p0.Zeroing(), z11.VnS());
-    // __ scvtf(z30.VnH(), p0.Merging(), z11.VnS());;
+    __ movprfx(z29, z28);
+    __ scvtf(z29.VnS(), p0.Merging(), z31.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z6.VnD(), p5.Zeroing(), z13.VnD());
-    // __ scvtf(z6.VnH(), p5.Merging(), z12.VnD());;
+    __ movprfx(z26.VnD(), p3.Merging(), z13.VnD());
+    __ scvtf(z26.VnH(), p3.Merging(), z5.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z19.VnD(), p3.Zeroing(), z12.VnD());
-    // __ ucvtf(z19.VnD(), p3.Merging(), z12.VnS());;
+    __ movprfx(z7.VnD(), p3.Zeroing(), z26.VnD());
+    __ ucvtf(z7.VnD(), p3.Merging(), z26.VnS());

-    // TODO: Enable once implemented.
-    // __ movprfx(z18.VnD(), p5.Merging(), z12.VnD());
-    // __ ucvtf(z18.VnD(), p5.Merging(), z12.VnD());;
+    __ movprfx(z13, z17);
+    __ ucvtf(z13.VnD(), p7.Merging(), z17.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z9.VnS(), p1.Merging(), z4.VnS());
-    // __ ucvtf(z9.VnH(), p1.Merging(), z4.VnS());;
+    __ movprfx(z24.VnD(), p1.Merging(), z31.VnD());
+    __ ucvtf(z24.VnS(), p1.Merging(), z18.VnD());

-    // TODO: Enable once implemented.
-    // __ movprfx(z9, z30);
-    // __ ucvtf(z9.VnH(), p7.Merging(), z19.VnD());;
+    __ movprfx(z17.VnD(), p4.Merging(), z22.VnD());
+    __ ucvtf(z17.VnH(), p4.Merging(), z4.VnD());
   }

   assm.FinalizeCode();
   CheckAndMaybeDisassembleMovprfxPairs(assm.GetBuffer(), true);
 }
+
 }  // namespace aarch64
 }  // namespace vixl