-rw-r--r--  src/aarch64/assembler-aarch64.cc        12
-rw-r--r--  src/aarch64/disasm-aarch64.cc            2
-rw-r--r--  src/aarch64/macro-assembler-aarch64.cc   11
-rw-r--r--  src/aarch64/operands-aarch64.cc          10
-rw-r--r--  src/aarch64/operands-aarch64.h            7
-rw-r--r--  tools/code_coverage.log                   1
6 files changed, 28 insertions, 15 deletions
diff --git a/src/aarch64/assembler-aarch64.cc b/src/aarch64/assembler-aarch64.cc
index 1304ede2..e4cacd46 100644
--- a/src/aarch64/assembler-aarch64.cc
+++ b/src/aarch64/assembler-aarch64.cc
@@ -1134,10 +1134,10 @@ void Assembler::LoadStorePair(const CPURegister& rt,
if (addr.IsImmediateOffset()) {
addrmodeop = LoadStorePairOffsetFixed;
} else {
- if (addr.IsPreIndex()) {
+ if (addr.IsImmediatePreIndex()) {
addrmodeop = LoadStorePairPreIndexFixed;
} else {
- VIXL_ASSERT(addr.IsPostIndex());
+ VIXL_ASSERT(addr.IsImmediatePostIndex());
addrmodeop = LoadStorePairPostIndexFixed;
}
}
@@ -5793,11 +5793,11 @@ Instr Assembler::LoadStoreMemOperand(const MemOperand& addr,
ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0);
}
- if (addr.IsPreIndex() && IsImmLSUnscaled(offset)) {
+ if (addr.IsImmediatePreIndex() && IsImmLSUnscaled(offset)) {
return base | LoadStorePreIndexFixed | ImmLS(offset);
}
- if (addr.IsPostIndex() && IsImmLSUnscaled(offset)) {
+ if (addr.IsImmediatePostIndex() && IsImmLSUnscaled(offset)) {
return base | LoadStorePostIndexFixed | ImmLS(offset);
}
@@ -5819,10 +5819,10 @@ void Assembler::LoadStorePAC(const Register& xt,
const MemOperand& addr,
LoadStorePACOp op) {
VIXL_ASSERT(xt.Is64Bits());
- VIXL_ASSERT(addr.IsImmediateOffset() || addr.IsPreIndex());
+ VIXL_ASSERT(addr.IsImmediateOffset() || addr.IsImmediatePreIndex());
Instr pac_op = op;
- if (addr.IsPreIndex()) {
+ if (addr.IsImmediatePreIndex()) {
pac_op |= LoadStorePACPreBit;
}
diff --git a/src/aarch64/disasm-aarch64.cc b/src/aarch64/disasm-aarch64.cc
index 72c71e76..15165b97 100644
--- a/src/aarch64/disasm-aarch64.cc
+++ b/src/aarch64/disasm-aarch64.cc
@@ -645,7 +645,7 @@ const Disassembler::FormToVisitorFnMap *Disassembler::GetFormToVisitorFnMap() {
{"usdot_asimdsame2_d", &Disassembler::VisitNEON3SameExtra},
};
return &form_to_visitor;
-}
+} // NOLINT(readability/fn_size)
Disassembler::Disassembler() {
buffer_size_ = 256;
diff --git a/src/aarch64/macro-assembler-aarch64.cc b/src/aarch64/macro-assembler-aarch64.cc
index 28b47aea..e18f846a 100644
--- a/src/aarch64/macro-assembler-aarch64.cc
+++ b/src/aarch64/macro-assembler-aarch64.cc
@@ -1912,6 +1912,9 @@ LS_MACRO_LIST(DEFINE_FUNCTION)
void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
const MemOperand& addr,
LoadStoreOp op) {
+ VIXL_ASSERT(addr.IsImmediateOffset() || addr.IsImmediatePostIndex() ||
+ addr.IsImmediatePreIndex() || addr.IsRegisterOffset());
+
// Worst case is ldr/str pre/post index:
// * 1 instruction for ldr/str
// * up to 4 instructions to materialise the constant
@@ -1932,11 +1935,11 @@ void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
Register temp = temps.AcquireSameSizeAs(addr.GetBaseRegister());
Mov(temp, addr.GetOffset());
LoadStore(rt, MemOperand(addr.GetBaseRegister(), temp), op);
- } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
+ } else if (addr.IsImmediatePostIndex() && !IsImmLSUnscaled(offset)) {
// Post-index beyond unscaled addressing range.
LoadStore(rt, MemOperand(addr.GetBaseRegister()), op);
Add(addr.GetBaseRegister(), addr.GetBaseRegister(), Operand(offset));
- } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
+ } else if (addr.IsImmediatePreIndex() && !IsImmLSUnscaled(offset)) {
// Pre-index beyond unscaled addressing range.
Add(addr.GetBaseRegister(), addr.GetBaseRegister(), Operand(offset));
LoadStore(rt, MemOperand(addr.GetBaseRegister()), op);
@@ -1984,11 +1987,11 @@ void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
Register temp = temps.AcquireSameSizeAs(base);
Add(temp, base, offset);
LoadStorePair(rt, rt2, MemOperand(temp), op);
- } else if (addr.IsPostIndex()) {
+ } else if (addr.IsImmediatePostIndex()) {
LoadStorePair(rt, rt2, MemOperand(base), op);
Add(base, base, offset);
} else {
- VIXL_ASSERT(addr.IsPreIndex());
+ VIXL_ASSERT(addr.IsImmediatePreIndex());
Add(base, base, offset);
LoadStorePair(rt, rt2, MemOperand(base), op);
}
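
[Not part of the patch: a rough sketch of the pre-index fallback path handled
above, assuming the standard MacroAssembler::Ldr and MemOperand interfaces; the
registers and offsets are hypothetical.]

#include "aarch64/macro-assembler-aarch64.h"

using namespace vixl::aarch64;

// Illustration only; not from the patch.
void PreIndexFallback(MacroAssembler* masm) {
  // Fits the signed 9-bit unscaled range (-256..255): a single
  // pre-indexed ldr is emitted.
  masm->Ldr(x0, MemOperand(x1, 248, PreIndex));
  // Out of range: LoadStoreMacro updates the base first, roughly
  //   add x1, x1, #4096
  //   ldr x0, [x1]
  masm->Ldr(x0, MemOperand(x1, 4096, PreIndex));
}
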
diff --git a/src/aarch64/operands-aarch64.cc b/src/aarch64/operands-aarch64.cc
index fa0162d9..8db129c9 100644
--- a/src/aarch64/operands-aarch64.cc
+++ b/src/aarch64/operands-aarch64.cc
@@ -360,12 +360,16 @@ bool MemOperand::IsRegisterOffset() const {
return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
}
-
bool MemOperand::IsPreIndex() const { return addrmode_ == PreIndex; }
-
-
bool MemOperand::IsPostIndex() const { return addrmode_ == PostIndex; }
+bool MemOperand::IsImmediatePreIndex() const {
+ return IsPreIndex() && regoffset_.Is(NoReg);
+}
+
+bool MemOperand::IsImmediatePostIndex() const {
+ return IsPostIndex() && regoffset_.Is(NoReg);
+}
void MemOperand::AddOffset(int64_t offset) {
VIXL_ASSERT(IsImmediateOffset());
diff --git a/src/aarch64/operands-aarch64.h b/src/aarch64/operands-aarch64.h
index b442cfd6..08ee4a61 100644
--- a/src/aarch64/operands-aarch64.h
+++ b/src/aarch64/operands-aarch64.h
@@ -434,9 +434,14 @@ class MemOperand {
bool IsImmediateOffset() const;
// True for register-offset (but not indexed) MemOperands.
bool IsRegisterOffset() const;
-
+ // True for immediate or register pre-indexed MemOperands.
bool IsPreIndex() const;
+ // True for immediate or register post-indexed MemOperands.
bool IsPostIndex() const;
+ // True for immediate pre-indexed MemOperands, [reg, #imm]!
+ bool IsImmediatePreIndex() const;
+ // True for immediate post-indexed MemOperands, [reg], #imm
+ bool IsImmediatePostIndex() const;
void AddOffset(int64_t offset);
diff --git a/tools/code_coverage.log b/tools/code_coverage.log
index fc2d2596..34f54494 100644
--- a/tools/code_coverage.log
+++ b/tools/code_coverage.log
@@ -4,3 +4,4 @@
1636647628 82.97% 97.54% 95.28%
1639684221 82.92% 97.51% 94.06%
1642688881 82.94% 97.51% 95.27%
+1646150629 82.94% 97.51% 95.36%