Diffstat (limited to 'src/mips/macro-assembler-mips.cc')
-rw-r--r--  src/mips/macro-assembler-mips.cc  |  746
1 file changed, 243 insertions(+), 503 deletions(-)
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index c3abe4fa..a28c04a8 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -1998,6 +1998,49 @@ void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
}
}
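+// Madd_s computes fd = fr + (fs * ft). MIPS32r2 provides a dedicated madd.s
+// instruction; older variants fall back to an explicit mul/add pair through
+// |scratch|. The _d and Msub variants below follow the same pattern.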
+void MacroAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft, FPURegister scratch) {
+ if (IsMipsArchVariant(kMips32r2)) {
+ madd_s(fd, fr, fs, ft);
+ } else {
+ DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ mul_s(scratch, fs, ft);
+ add_s(fd, fr, scratch);
+ }
+}
+
+void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft, FPURegister scratch) {
+ if (IsMipsArchVariant(kMips32r2)) {
+ madd_d(fd, fr, fs, ft);
+ } else {
+ DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ mul_d(scratch, fs, ft);
+ add_d(fd, fr, scratch);
+ }
+}
+
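+// Msub_s computes fd = (fs * ft) - fr; note the operand order in the
+// fallback: fr is subtracted from the product, not the other way around.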
+void MacroAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft, FPURegister scratch) {
+ if (IsMipsArchVariant(kMips32r2)) {
+ msub_s(fd, fr, fs, ft);
+ } else {
+ DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ mul_s(scratch, fs, ft);
+ sub_s(fd, scratch, fr);
+ }
+}
+
+void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft, FPURegister scratch) {
+ if (IsMipsArchVariant(kMips32r2)) {
+ msub_d(fd, fr, fs, ft);
+ } else {
+ DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ mul_d(scratch, fs, ft);
+ sub_d(fd, scratch, fr);
+ }
+}
void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
Label* nan, Condition cond, FPURegister cmp1,
@@ -2325,186 +2368,6 @@ void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
}
}
-#define __ masm->
-
-static bool ZeroHelper_d(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
- FPURegister src1, FPURegister src2, Label* equal) {
- if (src1.is(src2)) {
- __ Move(dst, src1);
- return true;
- }
-
- Label other, compare_not_equal;
- FPURegister left, right;
- if (kind == MaxMinKind::kMin) {
- left = src1;
- right = src2;
- } else {
- left = src2;
- right = src1;
- }
-
- __ BranchF64(&compare_not_equal, nullptr, ne, src1, src2);
- // Left and right hand side are equal, check for -0 vs. +0.
- __ FmoveHigh(t8, src1);
- __ Branch(&other, eq, t8, Operand(0x80000000));
- __ Move_d(dst, right);
- __ Branch(equal);
- __ bind(&other);
- __ Move_d(dst, left);
- __ Branch(equal);
- __ bind(&compare_not_equal);
- return false;
-}
-
-static bool ZeroHelper_s(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
- FPURegister src1, FPURegister src2, Label* equal) {
- if (src1.is(src2)) {
- __ Move(dst, src1);
- return true;
- }
-
- Label other, compare_not_equal;
- FPURegister left, right;
- if (kind == MaxMinKind::kMin) {
- left = src1;
- right = src2;
- } else {
- left = src2;
- right = src1;
- }
-
- __ BranchF32(&compare_not_equal, nullptr, ne, src1, src2);
- // Left and right hand side are equal, check for -0 vs. +0.
- __ FmoveLow(t8, src1);
- __ Branch(&other, eq, t8, Operand(0x80000000));
- __ Move_s(dst, right);
- __ Branch(equal);
- __ bind(&other);
- __ Move_s(dst, left);
- __ Branch(equal);
- __ bind(&compare_not_equal);
- return false;
-}
-
-#undef __
-
-void MacroAssembler::MinNaNCheck_d(FPURegister dst, FPURegister src1,
- FPURegister src2, Label* nan) {
- if (nan) {
- BranchF64(nullptr, nan, eq, src1, src2);
- }
- if (IsMipsArchVariant(kMips32r6)) {
- min_d(dst, src1, src2);
- } else {
- Label skip;
- if (!ZeroHelper_d(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
- if (dst.is(src1)) {
- BranchF64(&skip, nullptr, le, src1, src2);
- Move_d(dst, src2);
- } else if (dst.is(src2)) {
- BranchF64(&skip, nullptr, ge, src1, src2);
- Move_d(dst, src1);
- } else {
- Label right;
- BranchF64(&right, nullptr, gt, src1, src2);
- Move_d(dst, src1);
- Branch(&skip);
- bind(&right);
- Move_d(dst, src2);
- }
- }
- bind(&skip);
- }
-}
-
-void MacroAssembler::MaxNaNCheck_d(FPURegister dst, FPURegister src1,
- FPURegister src2, Label* nan) {
- if (nan) {
- BranchF64(nullptr, nan, eq, src1, src2);
- }
- if (IsMipsArchVariant(kMips32r6)) {
- max_d(dst, src1, src2);
- } else {
- Label skip;
- if (!ZeroHelper_d(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
- if (dst.is(src1)) {
- BranchF64(&skip, nullptr, ge, src1, src2);
- Move_d(dst, src2);
- } else if (dst.is(src2)) {
- BranchF64(&skip, nullptr, le, src1, src2);
- Move_d(dst, src1);
- } else {
- Label right;
- BranchF64(&right, nullptr, lt, src1, src2);
- Move_d(dst, src1);
- Branch(&skip);
- bind(&right);
- Move_d(dst, src2);
- }
- }
- bind(&skip);
- }
-}
-
-void MacroAssembler::MinNaNCheck_s(FPURegister dst, FPURegister src1,
- FPURegister src2, Label* nan) {
- if (nan) {
- BranchF32(nullptr, nan, eq, src1, src2);
- }
- if (IsMipsArchVariant(kMips32r6)) {
- min_s(dst, src1, src2);
- } else {
- Label skip;
- if (!ZeroHelper_s(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
- if (dst.is(src1)) {
- BranchF32(&skip, nullptr, le, src1, src2);
- Move_s(dst, src2);
- } else if (dst.is(src2)) {
- BranchF32(&skip, nullptr, ge, src1, src2);
- Move_s(dst, src1);
- } else {
- Label right;
- BranchF32(&right, nullptr, gt, src1, src2);
- Move_s(dst, src1);
- Branch(&skip);
- bind(&right);
- Move_s(dst, src2);
- }
- }
- bind(&skip);
- }
-}
-
-void MacroAssembler::MaxNaNCheck_s(FPURegister dst, FPURegister src1,
- FPURegister src2, Label* nan) {
- if (nan) {
- BranchF32(nullptr, nan, eq, src1, src2);
- }
- if (IsMipsArchVariant(kMips32r6)) {
- max_s(dst, src1, src2);
- } else {
- Label skip;
- if (!ZeroHelper_s(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
- if (dst.is(src1)) {
- BranchF32(&skip, nullptr, ge, src1, src2);
- Move_s(dst, src2);
- } else if (dst.is(src2)) {
- BranchF32(&skip, nullptr, le, src1, src2);
- Move_s(dst, src1);
- } else {
- Label right;
- BranchF32(&right, nullptr, lt, src1, src2);
- Move_s(dst, src1);
- Branch(&skip);
- bind(&right);
- Move_s(dst, src2);
- }
- }
- bind(&skip);
- }
-}
-
void MacroAssembler::Clz(Register rd, Register rs) {
if (IsMipsArchVariant(kLoongson)) {
DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
@@ -4022,17 +3885,16 @@ void MacroAssembler::Push(Handle<Object> handle) {
push(at);
}
-
-void MacroAssembler::DebugBreak() {
- PrepareCEntryArgs(0);
- PrepareCEntryFunction(
- ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
- CEntryStub ces(isolate(), 1);
- DCHECK(AllowThisStubCall(&ces));
- Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
+void MacroAssembler::MaybeDropFrames() {
+ // Check whether we need to drop frames to restart a function on the stack.
+ ExternalReference restart_fp =
+ ExternalReference::debug_restart_fp_address(isolate());
+ li(a1, Operand(restart_fp));
+ lw(a1, MemOperand(a1));
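+  // A non-zero restart frame pointer means a frame restart (frame dropping)
+  // was requested; tail-call the trampoline that performs it.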
+ Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET,
+ ne, a1, Operand(zero_reg));
}
-
// ---------------------------------------------------------------------------
// Exception handling.
@@ -4334,110 +4196,6 @@ void MacroAssembler::FastAllocate(Register object_size, Register result,
Addu(result, result, Operand(kHeapObjectTag));
}
-void MacroAssembler::AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- sll(scratch1, length, 1); // Length in bytes, not chars.
- addiu(scratch1, scratch1,
- kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
- And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
- // Allocate two-byte string in new space.
- Allocate(scratch1, result, scratch2, scratch3, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string
- // while observing object alignment.
- DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- DCHECK(kCharSize == 1);
- addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
- And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
- // Allocate one-byte string in new space.
- Allocate(scratch1, result, scratch2, scratch3, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
- scratch1, scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
- InitializeNewString(result,
- length,
- Heap::kConsStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
- scratch1, scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result,
- length,
- Heap::kSlicedStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
- scratch1, scratch2);
-}
-
-
void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
Label* not_unique_name) {
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
@@ -4518,77 +4276,6 @@ void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Branch(&loop, ult, current_address, Operand(end_address));
}
-void MacroAssembler::CheckFastObjectElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- Branch(fail, ls, scratch,
- Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
- Branch(fail, hi, scratch,
- Operand(Map::kMaximumBitField2FastHoleyElementValue));
-}
-
-
-void MacroAssembler::CheckFastSmiElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- Branch(fail, hi, scratch,
- Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- Register elements_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* fail,
- int elements_offset) {
- DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2,
- scratch3));
- Label smi_value, done;
-
- // Handle smi values specially.
- JumpIfSmi(value_reg, &smi_value);
-
- // Ensure that the object is a heap number
- CheckMap(value_reg,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- fail,
- DONT_DO_SMI_CHECK);
-
- // Double value, turn potential sNaN into qNan.
- DoubleRegister double_result = f0;
- DoubleRegister double_scratch = f2;
-
- ldc1(double_result, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
- Branch(USE_DELAY_SLOT, &done); // Canonicalization is one instruction.
- FPUCanonicalizeNaN(double_result, double_result);
-
- bind(&smi_value);
- Register untagged_value = scratch2;
- SmiUntag(untagged_value, value_reg);
- mtc1(untagged_value, double_scratch);
- cvt_d_w(double_result, double_scratch);
-
- bind(&done);
- Addu(scratch1, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
- elements_offset));
- Lsa(scratch1, scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
- // scratch1 is now effective address of the double element
- sdc1(double_result, MemOperand(scratch1, 0));
-}
-
void MacroAssembler::CompareMapAndBranch(Register obj,
Register scratch,
Handle<Map> map,
@@ -4870,17 +4557,15 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
}
-
-void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual) {
- Label skip_flooding;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- li(t0, Operand(last_step_action));
+void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_hook;
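+  // The isolate exposes a byte-sized flag that is non-zero while a debug
+  // hook is installed; when it reads as zero there is nothing to do.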
+ ExternalReference debug_hook_active =
+ ExternalReference::debug_hook_on_function_call_address(isolate());
+ li(t0, Operand(debug_hook_active));
lb(t0, MemOperand(t0));
- Branch(&skip_flooding, lt, t0, Operand(StepIn));
+ Branch(&skip_hook, eq, t0, Operand(zero_reg));
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -4897,7 +4582,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
}
Push(fun);
Push(fun);
- CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@@ -4911,7 +4596,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
SmiUntag(expected.reg());
}
}
- bind(&skip_flooding);
+ bind(&skip_hook);
}
@@ -4925,8 +4610,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK(function.is(a1));
DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3));
- if (call_wrapper.NeedsDebugStepCheck()) {
- FloodFunctionIfStepping(function, new_target, expected, actual);
+ if (call_wrapper.NeedsDebugHookCheck()) {
+ CheckDebugHook(function, new_target, expected, actual);
}
// Clear the new.target register if not given.
@@ -5051,32 +4736,6 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
bind(&done);
}
-
-void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
- Register scratch, Label* miss) {
- // Get the prototype or initial map from the function.
- lw(result,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // If the prototype or initial map is the hole, don't return it and
- // simply miss the cache instead. This will allow us to allocate a
- // prototype object on-demand in the runtime system.
- LoadRoot(t8, Heap::kTheHoleValueRootIndex);
- Branch(miss, eq, result, Operand(t8));
-
- // If the function does not have an initial map, we're done.
- Label done;
- GetObjectType(result, scratch, scratch);
- Branch(&done, ne, scratch, Operand(MAP_TYPE));
-
- // Get the prototype from the initial map.
- lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
-
- // All done.
- bind(&done);
-}
-
-
void MacroAssembler::GetObjectType(Register object,
Register map,
Register type_reg) {
@@ -5603,27 +5262,6 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
}
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match) {
- DCHECK(IsFastElementsKind(expected_kind));
- DCHECK(IsFastElementsKind(transitioned_kind));
-
- // Check that the function's map is the same as the expected cached map.
- lw(scratch, NativeContextMemOperand());
- lw(at, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
- Branch(no_map_match, ne, map_in_out, Operand(at));
-
- // Use the transitioned cached map.
- lw(map_in_out,
- ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
-}
-
-
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
lw(dst, NativeContextMemOperand());
lw(dst, ContextMemOperand(dst, index));
@@ -5646,7 +5284,7 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
}
void MacroAssembler::StubPrologue(StackFrame::Type type) {
- li(at, Operand(Smi::FromInt(type)));
+ li(at, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(at);
}
@@ -5661,7 +5299,7 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
nop(Assembler::CODE_AGE_MARKER_NOP);
// Load the stub address to t9 and call it,
- // GetCodeAgeAndParity() extracts the stub address from this instruction.
+ // GetCodeAge() extracts the stub address from this instruction.
li(t9,
Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
CONSTANT_SIZE);
@@ -5675,11 +5313,10 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
}
}
-
-void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
+void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- lw(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
- lw(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
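+  // The feedback vector is now reached through a Cell hanging off the
+  // function (previously via the LiteralsArray), hence the extra load.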
+ lw(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
+ lw(vector, FieldMemOperand(vector, Cell::kValueOffset));
}
@@ -5705,7 +5342,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
stack_offset -= kPointerSize;
sw(fp, MemOperand(sp, stack_offset));
stack_offset -= kPointerSize;
- li(t9, Operand(Smi::FromInt(type)));
+ li(t9, Operand(StackFrame::TypeToMarker(type)));
sw(t9, MemOperand(sp, stack_offset));
if (type == StackFrame::INTERNAL) {
DCHECK_EQ(stack_offset, kPointerSize);
@@ -5762,7 +5399,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
addiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
sw(ra, MemOperand(sp, 4 * kPointerSize));
sw(fp, MemOperand(sp, 3 * kPointerSize));
- li(at, Operand(Smi::FromInt(frame_type)));
+ li(at, Operand(StackFrame::TypeToMarker(frame_type)));
sw(at, MemOperand(sp, 2 * kPointerSize));
// Set up new frame pointer.
addiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
@@ -5862,21 +5499,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
addiu(sp, sp, 8);
}
-
-void MacroAssembler::InitializeNewString(Register string,
- Register length,
- Heap::RootListIndex map_index,
- Register scratch1,
- Register scratch2) {
- sll(scratch1, length, kSmiTagSize);
- LoadRoot(scratch2, map_index);
- sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
- li(scratch1, Operand(String::kEmptyHashField));
- sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
- sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
-}
-
-
int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_MIPS
// Running on the real platform. Use the alignment as mandated by the local
@@ -5955,14 +5577,6 @@ void MacroAssembler::UntagAndJumpIfSmi(Register dst,
SmiUntag(dst, src);
}
-
-void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
- Register src,
- Label* non_smi_case) {
- JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
- SmiUntag(dst, src);
-}
-
void MacroAssembler::JumpIfSmi(Register value,
Label* smi_label,
Register scratch,
@@ -6157,6 +5771,179 @@ void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
scratch2, failure);
}
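+// Float32Max branches to |out_of_line| when either operand is NaN; the
+// caller binds that label and resolves the NaN case with
+// Float32MaxOutOfLine. The Min and Float64 helpers below use the same
+// protocol.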
+void MacroAssembler::Float32Max(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* out_of_line) {
+ if (src1.is(src2)) {
+ Move_s(dst, src1);
+ return;
+ }
+
+ // Check if one of operands is NaN.
+ BranchF32(nullptr, out_of_line, eq, src1, src2);
+
+ if (IsMipsArchVariant(kMips32r6)) {
+ max_s(dst, src1, src2);
+ } else {
+ Label return_left, return_right, done;
+
+ BranchF32(&return_right, nullptr, lt, src1, src2);
+ BranchF32(&return_left, nullptr, lt, src2, src1);
+
+ // Operands are equal, but check for +/-0.
+ mfc1(t8, src1);
+ Branch(&return_left, eq, t8, Operand(zero_reg));
+ Branch(&return_right);
+
+ bind(&return_right);
+ if (!src2.is(dst)) {
+ Move_s(dst, src2);
+ }
+ Branch(&done);
+
+ bind(&return_left);
+ if (!src1.is(dst)) {
+ Move_s(dst, src1);
+ }
+
+ bind(&done);
+ }
+}
+
+void MacroAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
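+  // Reached only when at least one operand is NaN; the addition propagates
+  // a quiet NaN, which is the required result. The remaining OutOfLine
+  // helpers rely on the same trick.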
+ add_s(dst, src1, src2);
+}
+
+void MacroAssembler::Float32Min(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* out_of_line) {
+ if (src1.is(src2)) {
+ Move_s(dst, src1);
+ return;
+ }
+
+ // Check if one of operands is NaN.
+ BranchF32(nullptr, out_of_line, eq, src1, src2);
+
+ if (IsMipsArchVariant(kMips32r6)) {
+ min_s(dst, src1, src2);
+ } else {
+ Label return_left, return_right, done;
+
+ BranchF32(&return_left, nullptr, lt, src1, src2);
+ BranchF32(&return_right, nullptr, lt, src2, src1);
+
+ // Left equals right => check for -0.
+ mfc1(t8, src1);
+ Branch(&return_right, eq, t8, Operand(zero_reg));
+ Branch(&return_left);
+
+ bind(&return_right);
+ if (!src2.is(dst)) {
+ Move_s(dst, src2);
+ }
+ Branch(&done);
+
+ bind(&return_left);
+ if (!src1.is(dst)) {
+ Move_s(dst, src1);
+ }
+
+ bind(&done);
+ }
+}
+
+void MacroAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ add_s(dst, src1, src2);
+}
+
+void MacroAssembler::Float64Max(DoubleRegister dst, DoubleRegister src1,
+ DoubleRegister src2, Label* out_of_line) {
+ if (src1.is(src2)) {
+ Move_d(dst, src1);
+ return;
+ }
+
+ // Check if one of operands is NaN.
+ BranchF64(nullptr, out_of_line, eq, src1, src2);
+
+ if (IsMipsArchVariant(kMips32r6)) {
+ max_d(dst, src1, src2);
+ } else {
+ Label return_left, return_right, done;
+
+ BranchF64(&return_right, nullptr, lt, src1, src2);
+ BranchF64(&return_left, nullptr, lt, src2, src1);
+
+ // Left equals right => check for -0.
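+    // The sign bit of a double lives in the high word, hence Mfhc1.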
+ Mfhc1(t8, src1);
+ Branch(&return_left, eq, t8, Operand(zero_reg));
+ Branch(&return_right);
+
+ bind(&return_right);
+ if (!src2.is(dst)) {
+ Move_d(dst, src2);
+ }
+ Branch(&done);
+
+ bind(&return_left);
+ if (!src1.is(dst)) {
+ Move_d(dst, src1);
+ }
+
+ bind(&done);
+ }
+}
+
+void MacroAssembler::Float64MaxOutOfLine(DoubleRegister dst,
+ DoubleRegister src1,
+ DoubleRegister src2) {
+ add_d(dst, src1, src2);
+}
+
+void MacroAssembler::Float64Min(DoubleRegister dst, DoubleRegister src1,
+ DoubleRegister src2, Label* out_of_line) {
+ if (src1.is(src2)) {
+ Move_d(dst, src1);
+ return;
+ }
+
+ // Check if one of operands is NaN.
+ BranchF64(nullptr, out_of_line, eq, src1, src2);
+
+ if (IsMipsArchVariant(kMips32r6)) {
+ min_d(dst, src1, src2);
+ } else {
+ Label return_left, return_right, done;
+
+ BranchF64(&return_left, nullptr, lt, src1, src2);
+ BranchF64(&return_right, nullptr, lt, src2, src1);
+
+ // Left equals right => check for -0.
+ Mfhc1(t8, src1);
+ Branch(&return_right, eq, t8, Operand(zero_reg));
+ Branch(&return_left);
+
+ bind(&return_right);
+ if (!src2.is(dst)) {
+ Move_d(dst, src2);
+ }
+ Branch(&done);
+
+ bind(&return_left);
+ if (!src1.is(dst)) {
+ Move_d(dst, src1);
+ }
+
+ bind(&done);
+ }
+}
+
+void MacroAssembler::Float64MinOutOfLine(DoubleRegister dst,
+ DoubleRegister src1,
+ DoubleRegister src2) {
+ add_d(dst, src1, src2);
+}
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
Register first, Register second, Register scratch1, Register scratch2,
@@ -6172,19 +5959,6 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
}
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
- Register scratch,
- Label* failure) {
- const int kFlatOneByteStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatOneByteStringTag =
- kStringTag | kOneByteStringTag | kSeqStringTag;
- And(scratch, type, Operand(kFlatOneByteStringMask));
- Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
-}
-
-
static const int kRegisterPassedArguments = 4;
int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
@@ -6622,40 +6396,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
return no_reg;
}
-
-void MacroAssembler::JumpIfDictionaryInPrototypeChain(
- Register object,
- Register scratch0,
- Register scratch1,
- Label* found) {
- DCHECK(!scratch1.is(scratch0));
- Factory* factory = isolate()->factory();
- Register current = scratch0;
- Label loop_again, end;
-
- // Scratch contained elements pointer.
- Move(current, object);
- lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
- lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
- Branch(&end, eq, current, Operand(factory->null_value()));
-
- // Loop based on the map going up the prototype chain.
- bind(&loop_again);
- lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
- lbu(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
- STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
- STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
- Branch(found, lo, scratch1, Operand(JS_OBJECT_TYPE));
- lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
- DecodeField<Map::ElementsKindBits>(scratch1);
- Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
- lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
- Branch(&loop_again, ne, current, Operand(factory->null_value()));
-
- bind(&end);
-}
-
-
bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
Register reg5, Register reg6, Register reg7, Register reg8,
Register reg9, Register reg10) {