author    Jacob Bramley <jacob.bramley@arm.com>  2020-06-24 20:26:03 +0100
committer Jacob Bramley <jacob.bramley@arm.com>  2020-06-24 20:27:00 +0100
commit    1f1ab9b9db73d908d73e0be6c19f81434ed86aaf (patch)
tree      0029c574f29b75d9be97b32deca7f1cf140d2ad4 /src
parent    15d78439305c43daf3c5b9a920c3848875554c31 (diff)
parent    cb0cfc31e7337ade417022af35017ffffeea2d3a (diff)
download  vixl-1f1ab9b9db73d908d73e0be6c19f81434ed86aaf.tar.gz
Merge branch 'master' into sve
Change-Id: If2ce450e490dd1ad4fcaba78985af84cc847986f
Diffstat (limited to 'src')
-rw-r--r--  src/aarch64/assembler-aarch64.cc       | 45
-rw-r--r--  src/aarch64/assembler-aarch64.h        | 60
-rw-r--r--  src/aarch64/macro-assembler-aarch64.h  |  2
3 files changed, 33 insertions, 74 deletions
diff --git a/src/aarch64/assembler-aarch64.cc b/src/aarch64/assembler-aarch64.cc
index 1d2629c8..77b94573 100644
--- a/src/aarch64/assembler-aarch64.cc
+++ b/src/aarch64/assembler-aarch64.cc
@@ -1141,7 +1141,13 @@ void Assembler::LoadStorePair(const CPURegister& rt,
addrmodeop = LoadStorePairPostIndexFixed;
}
}
- Emit(addrmodeop | memop);
+
+ Instr emitop = addrmodeop | memop;
+
+ // Only X registers may be specified for ldpsw.
+ VIXL_ASSERT(((emitop & LoadStorePairMask) != LDPSW_x) || rt.IsX());
+
+ Emit(emitop);
}
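
The new assertion catches a W-register misuse of ldpsw at the point of encoding: LDPSW loads two 32-bit words and sign-extends each into a 64-bit destination, so only X registers are meaningful for rt and rt2. A standalone model of that sign extension (not VIXL code):

  #include <cstdint>
  #include <cstdio>

  // LDPSW performs two 32-bit loads and sign-extends each result to 64
  // bits, which is why W destinations are rejected above.
  int64_t SignExtendWord(uint32_t word) {
    return static_cast<int64_t>(static_cast<int32_t>(word));
  }

  int main() {
    std::printf("%lld\n", (long long)SignExtendWord(0xFFFFFFFFu));  // -1
    std::printf("%lld\n", (long long)SignExtendWord(0x7FFFFFFFu));  // 2147483647
  }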
@@ -5635,7 +5641,7 @@ void Assembler::DataProcExtendedRegister(const Register& rd,
Instr Assembler::LoadStoreMemOperand(const MemOperand& addr,
- unsigned access_size,
+ unsigned access_size_in_bytes_log2,
LoadStoreScalingOption option) {
Instr base = RnSP(addr.GetBaseRegister());
int64_t offset = addr.GetOffset();
@@ -5645,21 +5651,22 @@ Instr Assembler::LoadStoreMemOperand(const MemOperand& addr,
(option == PreferUnscaledOffset) || (option == RequireUnscaledOffset);
if (prefer_unscaled && IsImmLSUnscaled(offset)) {
// Use the unscaled addressing mode.
- return base | LoadStoreUnscaledOffsetFixed |
- ImmLS(static_cast<int>(offset));
+ return base | LoadStoreUnscaledOffsetFixed | ImmLS(offset);
}
if ((option != RequireUnscaledOffset) &&
- IsImmLSScaled(offset, access_size)) {
+ IsImmLSScaled(offset, access_size_in_bytes_log2)) {
+ // We need `offset` to be positive for the shift to be well-defined.
+ // IsImmLSScaled should check this.
+ VIXL_ASSERT(offset >= 0);
// Use the scaled addressing mode.
return base | LoadStoreUnsignedOffsetFixed |
- ImmLSUnsigned(static_cast<int>(offset) >> access_size);
+ ImmLSUnsigned(offset >> access_size_in_bytes_log2);
}
if ((option != RequireScaledOffset) && IsImmLSUnscaled(offset)) {
// Use the unscaled addressing mode.
- return base | LoadStoreUnscaledOffsetFixed |
- ImmLS(static_cast<int>(offset));
+ return base | LoadStoreUnscaledOffsetFixed | ImmLS(offset);
}
}
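
Two immediate-offset forms compete here: a 9-bit signed unscaled offset (ImmLS) and a 12-bit unsigned scaled offset (ImmLSUnsigned), with the caller's LoadStoreScalingOption breaking ties. A standalone model of the two predicates, with the A64 ranges written out (not VIXL's real helpers):

  #include <cstdint>

  // 9-bit signed byte offset, used by the unscaled (ldur-style) form.
  bool IsImmLSUnscaledModel(int64_t offset) {
    return (offset >= -256) && (offset <= 255);
  }

  // 12-bit unsigned offset in units of the access size, used by the
  // scaled (unsigned-offset) form. The offset must be a non-negative
  // multiple of the access size.
  bool IsImmLSScaledModel(int64_t offset, unsigned access_size_in_bytes_log2) {
    int64_t size = int64_t{1} << access_size_in_bytes_log2;
    return (offset >= 0) && ((offset % size) == 0) && ((offset / size) <= 4095);
  }

  // For an 8-byte access (log2 == 3), `ldr x0, [x1, #32760]` is the
  // largest scaled offset (4095 * 8), while `ldr x0, [x1, #-8]` can
  // only use the unscaled form.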
@@ -5680,17 +5687,17 @@ Instr Assembler::LoadStoreMemOperand(const MemOperand& addr,
// Shifts are encoded in one bit, indicating a left shift by the memory
// access size.
- VIXL_ASSERT((shift_amount == 0) || (shift_amount == access_size));
+ VIXL_ASSERT((shift_amount == 0) || (shift_amount == access_size_in_bytes_log2));
return base | LoadStoreRegisterOffsetFixed | Rm(addr.GetRegisterOffset()) |
ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0);
}
if (addr.IsPreIndex() && IsImmLSUnscaled(offset)) {
- return base | LoadStorePreIndexFixed | ImmLS(static_cast<int>(offset));
+ return base | LoadStorePreIndexFixed | ImmLS(offset);
}
if (addr.IsPostIndex() && IsImmLSUnscaled(offset)) {
- return base | LoadStorePostIndexFixed | ImmLS(static_cast<int>(offset));
+ return base | LoadStorePostIndexFixed | ImmLS(offset);
}
// If this point is reached, the MemOperand (addr) cannot be encoded.
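
For register offsets, the shift really is a single encoded bit: either no shift, or a left shift by exactly the access size, as the assertion above enforces. Pre- and post-index writeback reuse the 9-bit signed unscaled immediate. A model of the shift rule (assuming nothing beyond the assertion):

  // Register-offset addressing encodes the shift amount in one bit, so
  // the only legal shifts are 0 and the access size itself.
  bool IsValidRegisterOffsetShift(unsigned shift_amount,
                                  unsigned access_size_in_bytes_log2) {
    return (shift_amount == 0) || (shift_amount == access_size_in_bytes_log2);
  }

  // e.g. `ldr x0, [x1, x2, lsl #3]` is encodable for an 8-byte access
  // (log2 == 3), but `lsl #2` is not.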
@@ -5827,17 +5834,17 @@ bool Assembler::IsImmFP64(double imm) {
}
-bool Assembler::IsImmLSPair(int64_t offset, unsigned access_size) {
- VIXL_ASSERT(access_size <= kQRegSizeInBytesLog2);
- return IsMultiple(offset, 1 << access_size) &&
- IsInt7(offset / (1 << access_size));
+bool Assembler::IsImmLSPair(int64_t offset, unsigned access_size_in_bytes_log2) {
+ VIXL_ASSERT(access_size_in_bytes_log2 <= kQRegSizeInBytesLog2);
+ return IsMultiple(offset, 1 << access_size_in_bytes_log2) &&
+ IsInt7(offset / (1 << access_size_in_bytes_log2));
}
-bool Assembler::IsImmLSScaled(int64_t offset, unsigned access_size) {
- VIXL_ASSERT(access_size <= kQRegSizeInBytesLog2);
- return IsMultiple(offset, 1 << access_size) &&
- IsUint12(offset / (1 << access_size));
+bool Assembler::IsImmLSScaled(int64_t offset, unsigned access_size_in_bytes_log2) {
+ VIXL_ASSERT(access_size_in_bytes_log2 <= kQRegSizeInBytesLog2);
+ return IsMultiple(offset, 1 << access_size_in_bytes_log2) &&
+ IsUint12(offset / (1 << access_size_in_bytes_log2));
}
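
Both predicates now spell out that the parameter is the log2 of the access size in bytes. IsImmLSPair's 7-bit signed scaled immediate gives concrete ranges per register width; a worked restatement (not VIXL code):

  #include <cstdint>

  // Pair offsets must be a multiple of the access size whose scaled
  // value fits in 7 signed bits, i.e. [-64, 63] after scaling.
  bool IsImmLSPairModel(int64_t offset, unsigned access_size_in_bytes_log2) {
    int64_t size = int64_t{1} << access_size_in_bytes_log2;
    return ((offset % size) == 0) && ((offset / size) >= -64) &&
           ((offset / size) <= 63);
  }

  // X-register pairs (log2 == 3): multiples of 8 in [-512, 504].
  // Q-register pairs (log2 == 4): multiples of 16 in [-1024, 1008].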
diff --git a/src/aarch64/assembler-aarch64.h b/src/aarch64/assembler-aarch64.h
index b575f454..00ac0114 100644
--- a/src/aarch64/assembler-aarch64.h
+++ b/src/aarch64/assembler-aarch64.h
@@ -1089,18 +1089,6 @@ class Assembler : public vixl::internal::AssemblerBase {
// zero [Armv8.3].
void pacdza(const Register& xd);
- // Pointer Authentication Code for Data address, using key A, with address in
- // x17 and modifier in x16 [Armv8.3].
- void pacda1716();
-
- // Pointer Authentication Code for Data address, using key A, with address in
- // LR and modifier in SP [Armv8.3].
- void pacdasp();
-
- // Pointer Authentication Code for Data address, using key A, with address in
- // LR and a modifier of zero [Armv8.3].
- void pacdaz();
-
// Pointer Authentication Code for Data address, using key B [Armv8.3].
void pacdb(const Register& xd, const Register& xn);
@@ -1108,18 +1096,6 @@ class Assembler : public vixl::internal::AssemblerBase {
// zero [Armv8.3].
void pacdzb(const Register& xd);
- // Pointer Authentication Code for Data address, using key B, with address in
- // x17 and modifier in x16 [Armv8.3].
- void pacdb1716();
-
- // Pointer Authentication Code for Data address, using key B, with address in
- // LR and modifier in SP [Armv8.3].
- void pacdbsp();
-
- // Pointer Authentication Code for Data address, using key B, with address in
- // LR and a modifier of zero [Armv8.3].
- void pacdbz();
-
// Pointer Authentication Code, using Generic key [Armv8.3].
void pacga(const Register& xd, const Register& xn, const Register& xm);
@@ -1167,36 +1143,12 @@ class Assembler : public vixl::internal::AssemblerBase {
// Authenticate Data address, using key A and a modifier of zero [Armv8.3].
void autdza(const Register& xd);
- // Authenticate Data address, using key A, with address in x17 and modifier in
- // x16 [Armv8.3].
- void autda1716();
-
- // Authenticate Data address, using key A, with address in LR and modifier in
- // SP [Armv8.3].
- void autdasp();
-
- // Authenticate Data address, using key A, with address in LR and a modifier
- // of zero [Armv8.3].
- void autdaz();
-
// Authenticate Data address, using key B [Armv8.3].
void autdb(const Register& xd, const Register& xn);
// Authenticate Data address, using key B and a modifier of zero [Armv8.3].
void autdzb(const Register& xd);
- // Authenticate Data address, using key B, with address in x17 and modifier in
- // x16 [Armv8.3].
- void autdb1716();
-
- // Authenticate Data address, using key B, with address in LR and modifier in
- // SP [Armv8.3].
- void autdbsp();
-
- // Authenticate Data address, using key B, with address in LR and a modifier
- // of zero [Armv8.3].
- void autdbz();
-
// Strip Pointer Authentication Code of Data address [Armv8.3].
void xpacd(const Register& xd);
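
These removals (together with the matching PAC ones above) drop the combined-register data-key forms. Armv8.3 defines the 1716/SP/Z hint-space variants only for the instruction keys (PACIA1716, PACIASP, PACIAZ and friends); the data keys have no such aliases, so the explicit-operand API that remains is the complete set. A usage sketch of the surviving declarations (assuming an Assembler `assm` and PAuth support):

  assm.pacda(x0, x1);  // Sign the address in x0 with key DA, modifier x1.
  assm.autda(x0, x1);  // Authenticate it; x0 is restored if the PAC matches.
  assm.pacdza(x2);     // Sign with key DA and an implicit zero modifier.
  assm.xpacd(x2);      // Strip the PAC without authenticating.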
@@ -6158,9 +6110,9 @@ class Assembler : public vixl::internal::AssemblerBase {
return TruncateToUint9(imm9) << ImmLS_offset;
}
- static Instr ImmLSPair(int64_t imm7, unsigned access_size) {
- VIXL_ASSERT(IsMultiple(imm7, 1 << access_size));
- int64_t scaled_imm7 = imm7 / (1 << access_size);
+ static Instr ImmLSPair(int64_t imm7, unsigned access_size_in_bytes_log2) {
+ VIXL_ASSERT(IsMultiple(imm7, 1 << access_size_in_bytes_log2));
+ int64_t scaled_imm7 = imm7 / (1 << access_size_in_bytes_log2);
VIXL_ASSERT(IsInt7(scaled_imm7));
return TruncateToUint7(scaled_imm7) << ImmLSPair_offset;
}
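
ImmLSPair scales the byte offset down before packing it into the 7-bit field. A standalone model of the packing, with the imm7 bit position written in directly (in VIXL it comes from ImmLSPair_offset in constants-aarch64.h):

  #include <cstdint>

  typedef uint32_t Instr;
  const int kImmLSPairBit = 15;  // imm7 occupies bits 21:15 of LDP/STP.

  Instr EncodeImmLSPairModel(int64_t imm7, unsigned access_size_in_bytes_log2) {
    // Scale the byte offset down to access-size units, then truncate
    // to 7 bits and shift into the field.
    int64_t scaled = imm7 / (int64_t{1} << access_size_in_bytes_log2);
    return static_cast<Instr>(scaled & 0x7F) << kImmLSPairBit;
  }

  // e.g. EncodeImmLSPairModel(-16, 3) encodes -2 as 0x7e in the field.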
@@ -6292,8 +6244,8 @@ class Assembler : public vixl::internal::AssemblerBase {
unsigned* n = NULL,
unsigned* imm_s = NULL,
unsigned* imm_r = NULL);
- static bool IsImmLSPair(int64_t offset, unsigned access_size);
- static bool IsImmLSScaled(int64_t offset, unsigned access_size);
+ static bool IsImmLSPair(int64_t offset, unsigned access_size_in_bytes_log2);
+ static bool IsImmLSScaled(int64_t offset, unsigned access_size_in_bytes_log2);
static bool IsImmLSUnscaled(int64_t offset);
static bool IsImmMovn(uint64_t imm, unsigned reg_size);
static bool IsImmMovz(uint64_t imm, unsigned reg_size);
@@ -7015,7 +6967,7 @@ class Assembler : public vixl::internal::AssemblerBase {
// Encode the specified MemOperand for the specified access size and scaling
// preference.
Instr LoadStoreMemOperand(const MemOperand& addr,
- unsigned access_size,
+ unsigned access_size_in_bytes_log2,
LoadStoreScalingOption option);
// Link the current (not-yet-emitted) instruction to the specified label, then
diff --git a/src/aarch64/macro-assembler-aarch64.h b/src/aarch64/macro-assembler-aarch64.h
index 7eba7187..916eb422 100644
--- a/src/aarch64/macro-assembler-aarch64.h
+++ b/src/aarch64/macro-assembler-aarch64.h
@@ -60,7 +60,7 @@
#define LSPAIR_MACRO_LIST(V) \
V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2)) \
V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
- V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)
+ V(Ldpsw, Register&, rt, rt2, LDPSW_x)
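
The list's second column is the operand type of the generated MacroAssembler method, so switching Ldpsw from CPURegister& to Register& rejects vector-register arguments at compile time, complementing the new runtime assertion in Assembler::LoadStorePair. The expansion site is not in this diff; a hypothetical sketch of the generated method's shape, with `LoadStorePairMacro` standing in for whatever helper the real macro calls:

  void Ldpsw(const Register& rt, const Register& rt2, const MemOperand& addr) {
    // Register& (rather than CPURegister&) means a V-register argument
    // no longer compiles; LDPSW_x itself only accepts X destinations.
    LoadStorePairMacro(rt, rt2, addr, LDPSW_x);
  }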
namespace vixl {
namespace aarch64 {