Diffstat (limited to 'src/arm64/code-stubs-arm64.cc')
-rw-r--r--  src/arm64/code-stubs-arm64.cc  1369
1 file changed, 30 insertions(+), 1339 deletions(-)
diff --git a/src/arm64/code-stubs-arm64.cc b/src/arm64/code-stubs-arm64.cc
index c0d700ce..082565f2 100644
--- a/src/arm64/code-stubs-arm64.cc
+++ b/src/arm64/code-stubs-arm64.cc
@@ -33,17 +33,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewArray);
}
-void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
- descriptor->Initialize(x0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
-void FastFunctionBindStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
- descriptor->Initialize(x0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
ExternalReference miss) {
// Update the static counter each time a new code stub is generated.
@@ -174,9 +163,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
// Call runtime on identical symbols since we need to throw a TypeError.
__ Cmp(right_type, SYMBOL_TYPE);
__ B(eq, slow);
- // Call runtime on identical SIMD values since we must throw a TypeError.
- __ Cmp(right_type, SIMD128_VALUE_TYPE);
- __ B(eq, slow);
} else if (cond == eq) {
__ JumpIfHeapNumber(right, &heap_number);
} else {
@@ -188,9 +174,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
// Call runtime on identical symbols since we need to throw a TypeError.
__ Cmp(right_type, SYMBOL_TYPE);
__ B(eq, slow);
- // Call runtime on identical SIMD values since we must throw a TypeError.
- __ Cmp(right_type, SIMD128_VALUE_TYPE);
- __ B(eq, slow);
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@@ -590,8 +573,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cond == eq) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(lhs, rhs);
- __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+ __ Push(cp);
+ __ Call(strict() ? isolate()->builtins()->StrictEqual()
+ : isolate()->builtins()->Equal(),
+ RelocInfo::CODE_TARGET);
+ __ Pop(cp);
}
// Turn true into 0 and false into some non-zero value.
STATIC_ASSERT(EQUAL == 0);
@@ -1147,10 +1133,10 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Fmov(fp_zero, 0.0);
// Build an entry frame (see layout below).
- int marker = type();
+ StackFrame::Type marker = type();
int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used.
__ Mov(x13, bad_frame_pointer);
- __ Mov(x12, Smi::FromInt(marker));
+ __ Mov(x12, StackFrame::TypeToMarker(marker));
__ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
__ Ldr(x10, MemOperand(x11));
@@ -1166,12 +1152,12 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Ldr(x11, MemOperand(x10));
__ Cbnz(x11, &non_outermost_js);
__ Str(fp, MemOperand(x10));
- __ Mov(x12, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ Mov(x12, StackFrame::OUTERMOST_JSENTRY_FRAME);
__ Push(x12);
__ B(&done);
__ Bind(&non_outermost_js);
// We spare one instruction by pushing xzr since the marker is 0.
- DCHECK(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
+ DCHECK(StackFrame::INNER_JSENTRY_FRAME == 0);
__ Push(xzr);
__ Bind(&done);
@@ -1253,7 +1239,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Check if the current stack frame is marked as the outermost JS frame.
Label non_outermost_js_2;
__ Pop(x10);
- __ Cmp(x10, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ Cmp(x10, StackFrame::OUTERMOST_JSENTRY_FRAME);
__ B(ne, &non_outermost_js_2);
__ Mov(x11, ExternalReference(js_entry_sp));
__ Str(xzr, MemOperand(x11));
@@ -1276,56 +1262,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver = LoadDescriptor::ReceiverRegister();
- // Ensure that the vector and slot registers won't be clobbered before
- // calling the miss handler.
- DCHECK(!AreAliased(x10, x11, LoadWithVectorDescriptor::VectorRegister(),
- LoadWithVectorDescriptor::SlotRegister()));
-
- NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10,
- x11, &miss);
-
- __ Bind(&miss);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
-}
-
-
-void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
- // Return address is in lr.
- Label miss;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register index = LoadDescriptor::NameRegister();
- Register result = x0;
- Register scratch = x10;
- DCHECK(!scratch.is(receiver) && !scratch.is(index));
- DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
- result.is(LoadWithVectorDescriptor::SlotRegister()));
-
- // StringCharAtGenerator doesn't use the result register until it's passed
- // the different miss possibilities. If it did, we would have a conflict
- // when FLAG_vector_ics is true.
- StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- RECEIVER_IS_STRING);
- char_at_generator.GenerateFast(masm);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
-
- __ Bind(&miss);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec);
@@ -1453,7 +1389,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (6) External string. Make it, offset-wise, look like a sequential string.
// Go to (4).
// (7) Short external string or not a string? If yes, bail out to runtime.
- // (8) Sliced string. Replace subject with parent. Go to (1).
+ // (8) Sliced or thin string. Replace subject with parent. Go to (1).
Label check_underlying; // (1)
Label seq_string; // (4)
@@ -1487,6 +1423,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (2) Sequential or cons? If not, go to (5).
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kThinStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ Cmp(string_representation, kExternalStringTag);
@@ -1514,10 +1451,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// before entering the exit frame.
__ SmiUntag(x1, x10);
- // The third bit determines the string encoding in string_type.
- STATIC_ASSERT(kOneByteStringTag == 0x04);
+ // The fourth bit determines the string encoding in string_type.
+ STATIC_ASSERT(kOneByteStringTag == 0x08);
STATIC_ASSERT(kTwoByteStringTag == 0x00);
- STATIC_ASSERT(kStringEncodingMask == 0x04);
+ STATIC_ASSERT(kStringEncodingMask == 0x08);
// Find the code object based on the assumptions above.
// kDataOneByteCodeOffset and kDataUC16CodeOffset are adjacent, adds an offset
@@ -1525,7 +1462,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(JSRegExp::kDataOneByteCodeOffset + kPointerSize ==
JSRegExp::kDataUC16CodeOffset);
__ Mov(x10, kPointerSize);
- // We will need the encoding later: Latin1 = 0x04
+ // We will need the encoding later: Latin1 = 0x08
// UC16 = 0x00
__ Ands(string_encoding, string_type, kStringEncodingMask);
__ CzeroX(x10, ne);
@@ -1573,10 +1510,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
// Handle UC16 encoding, two bytes make one character.
- // string_encoding: if Latin1: 0x04
+ // string_encoding: if Latin1: 0x08
// if UC16: 0x00
- STATIC_ASSERT(kStringEncodingMask == 0x04);
- __ Ubfx(string_encoding, string_encoding, 2, 1);
+ STATIC_ASSERT(kStringEncodingMask == 0x08);
+ __ Ubfx(string_encoding, string_encoding, 3, 1);
__ Eor(string_encoding, string_encoding, 1);
// string_encoding: if Latin1: 0
// if UC16: 1
@@ -1789,11 +1726,18 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
kShortExternalStringMask | kIsNotStringMask,
&runtime);
- // (8) Sliced string. Replace subject with parent.
+ // (8) Sliced or thin string. Replace subject with parent.
+ Label thin_string;
+ __ Cmp(string_representation, kThinStringTag);
+ __ B(eq, &thin_string);
__ Ldr(sliced_string_offset,
UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset));
__ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
__ B(&check_underlying); // Go to (1).
+
+ __ bind(&thin_string);
+ __ Ldr(subject, FieldMemOperand(subject, ThinString::kActualOffset));
+ __ B(&check_underlying); // Go to (1).
#endif
}
@@ -1834,9 +1778,9 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
// index : slot in feedback vector (smi)
Label initialize, done, miss, megamorphic, not_array_function;
- DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
+ DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
- DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
+ DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
// Load the cache state.
@@ -1851,7 +1795,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
// function without changing the state.
// We don't know if feedback value is a WeakCell or a Symbol, but it's
// harmless to read at this position in a symbol (see static asserts in
- // type-feedback-vector.h).
+ // feedback-vector.h).
Label check_allocation_site;
__ Ldr(feedback_value, FieldMemOperand(feedback, WeakCell::kValueOffset));
__ Cmp(function, feedback_value);
@@ -1971,212 +1915,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-// Note: feedback_vector and slot are clobbered after the call.
-static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
- Register slot) {
- __ Add(feedback_vector, feedback_vector,
- Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
- __ Add(feedback_vector, feedback_vector,
- Operand(FixedArray::kHeaderSize + kPointerSize));
- __ Ldr(slot, FieldMemOperand(feedback_vector, 0));
- __ Add(slot, slot, Operand(Smi::FromInt(1)));
- __ Str(slot, FieldMemOperand(feedback_vector, 0));
-}
-
-void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
- // x0 - number of arguments
- // x1 - function
- // x3 - slot id
- // x2 - vector
- // x4 - allocation site (loaded from vector[slot])
- Register function = x1;
- Register feedback_vector = x2;
- Register index = x3;
- Register allocation_site = x4;
- Register scratch = x5;
-
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch);
- __ Cmp(function, scratch);
- __ B(ne, miss);
-
- // Increment the call count for monomorphic function calls.
- IncrementCallCount(masm, feedback_vector, index);
-
- // Set up arguments for the array constructor stub.
- Register allocation_site_arg = feedback_vector;
- Register new_target_arg = index;
- __ Mov(allocation_site_arg, allocation_site);
- __ Mov(new_target_arg, function);
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-
-void CallICStub::Generate(MacroAssembler* masm) {
- ASM_LOCATION("CallICStub");
-
- // x0 - number of arguments
- // x1 - function
- // x3 - slot id (Smi)
- // x2 - vector
- Label extra_checks_or_miss, call, call_function, call_count_incremented;
-
- Register function = x1;
- Register feedback_vector = x2;
- Register index = x3;
-
- // The checks. First, does x1 match the recorded monomorphic target?
- __ Add(x4, feedback_vector,
- Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ Ldr(x4, FieldMemOperand(x4, FixedArray::kHeaderSize));
-
- // We don't know that we have a weak cell. We might have a private symbol
- // or an AllocationSite, but the memory is safe to examine.
- // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
- // FixedArray.
- // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
- // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
- // computed, meaning that it can't appear to be a pointer. If the low bit is
- // 0, then hash is computed, but the 0 bit prevents the field from appearing
- // to be a pointer.
- STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
- STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
- WeakCell::kValueOffset &&
- WeakCell::kValueOffset == Symbol::kHashFieldSlot);
-
- __ Ldr(x5, FieldMemOperand(x4, WeakCell::kValueOffset));
- __ Cmp(x5, function);
- __ B(ne, &extra_checks_or_miss);
-
- // The compare above could have been a SMI/SMI comparison. Guard against this
- // convincing us that we have a monomorphic JSFunction.
- __ JumpIfSmi(function, &extra_checks_or_miss);
-
- __ Bind(&call_function);
-
- // Increment the call count for monomorphic function calls.
- IncrementCallCount(masm, feedback_vector, index);
-
- __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
- tail_call_mode()),
- RelocInfo::CODE_TARGET);
-
- __ bind(&extra_checks_or_miss);
- Label uninitialized, miss, not_allocation_site;
-
- __ JumpIfRoot(x4, Heap::kmegamorphic_symbolRootIndex, &call);
-
- __ Ldr(x5, FieldMemOperand(x4, HeapObject::kMapOffset));
- __ JumpIfNotRoot(x5, Heap::kAllocationSiteMapRootIndex, &not_allocation_site);
-
- HandleArrayCase(masm, &miss);
-
- __ bind(&not_allocation_site);
-
- // The following cases attempt to handle MISS cases without going to the
- // runtime.
- if (FLAG_trace_ic) {
- __ jmp(&miss);
- }
-
- // TODO(mvstanton): the code below is effectively disabled. Investigate.
- __ JumpIfRoot(x4, Heap::kuninitialized_symbolRootIndex, &miss);
-
- // We are going megamorphic. If the feedback is a JSFunction, it is fine
- // to handle it here. More complex cases are dealt with in the runtime.
- __ AssertNotSmi(x4);
- __ JumpIfNotObjectType(x4, x5, x5, JS_FUNCTION_TYPE, &miss);
- __ Add(x4, feedback_vector,
- Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ LoadRoot(x5, Heap::kmegamorphic_symbolRootIndex);
- __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
-
- __ Bind(&call);
-
- // Increment the call count for megamorphic function calls.
- IncrementCallCount(masm, feedback_vector, index);
-
- __ Bind(&call_count_incremented);
- __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
- RelocInfo::CODE_TARGET);
-
- __ bind(&uninitialized);
-
- // We are going monomorphic, provided we actually have a JSFunction.
- __ JumpIfSmi(function, &miss);
-
- // Goto miss case if we do not have a function.
- __ JumpIfNotObjectType(function, x5, x5, JS_FUNCTION_TYPE, &miss);
-
- // Make sure the function is not the Array() function, which requires special
- // behavior on MISS.
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, x5);
- __ Cmp(function, x5);
- __ B(eq, &miss);
-
- // Make sure the function belongs to the same native context.
- __ Ldr(x4, FieldMemOperand(function, JSFunction::kContextOffset));
- __ Ldr(x4, ContextMemOperand(x4, Context::NATIVE_CONTEXT_INDEX));
- __ Ldr(x5, NativeContextMemOperand());
- __ Cmp(x4, x5);
- __ B(ne, &miss);
-
- // Store the function. Use a stub since we need a frame for allocation.
- // x2 - vector
- // x3 - slot
- // x1 - function
- // x0 - number of arguments
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- CreateWeakCellStub create_stub(masm->isolate());
- __ SmiTag(x0);
- __ Push(x0);
- __ Push(feedback_vector, index);
-
- __ Push(cp, function);
- __ CallStub(&create_stub);
- __ Pop(cp, function);
-
- __ Pop(feedback_vector, index);
- __ Pop(x0);
- __ SmiUntag(x0);
- }
-
- __ B(&call_function);
-
- // We are here because tracing is on or we encountered a MISS case we can't
- // handle here.
- __ bind(&miss);
- GenerateMiss(masm);
-
- // The runtime increments the call count in the vector for us.
- __ B(&call_count_incremented);
-}
-
-
-void CallICStub::GenerateMiss(MacroAssembler* masm) {
- ASM_LOCATION("CallICStub[Miss]");
-
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the number of arguments as Smi.
- __ SmiTag(x0);
-
- // Push the receiver and the function and feedback info.
- __ Push(x0, x1, x2, x3);
-
- // Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss);
-
- // Move result to edi and exit the internal frame.
- __ Mov(x1, x0);
-
- // Restore number of arguments.
- __ Pop(x0);
- __ SmiUntag(x0);
-}
-
-
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the receiver is a smi trigger the non-string case.
if (check_mode_ == RECEIVER_IS_UNKNOWN) {
@@ -2262,38 +2000,6 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- __ JumpIfNotSmi(code_, &slow_case_);
- __ Cmp(code_, Smi::FromInt(String::kMaxOneByteCharCode));
- __ B(hi, &slow_case_);
-
- __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- // At this point code register contains smi tagged one-byte char code.
- __ Add(result_, result_, Operand::UntagSmiAndScale(code_, kPointerSizeLog2));
- __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
- __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
- __ Bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
-
- __ Bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ Push(code_);
- __ CallRuntime(Runtime::kStringCharFromCode);
- __ Mov(result_, x0);
- call_helper.AfterCall(masm);
- __ B(&exit_);
-
- __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
-}
-
-
void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
// Inputs are in x0 (lhs) and x1 (rhs).
DCHECK_EQ(CompareICState::BOOLEAN, state());
@@ -2974,240 +2680,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-void CallICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(x2);
- CallICStub stub(isolate(), state());
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
-static void HandleArrayCases(MacroAssembler* masm, Register feedback,
- Register receiver_map, Register scratch1,
- Register scratch2, bool is_polymorphic,
- Label* miss) {
- // feedback initially contains the feedback array
- Label next_loop, prepare_next;
- Label load_smi_map, compare_map;
- Label start_polymorphic;
-
- Register cached_map = scratch1;
-
- __ Ldr(cached_map,
- FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
- __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ Cmp(receiver_map, cached_map);
- __ B(ne, &start_polymorphic);
- // found, now call handler.
- Register handler = feedback;
- __ Ldr(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
- __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
- __ Jump(feedback);
-
- Register length = scratch2;
- __ Bind(&start_polymorphic);
- __ Ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
- if (!is_polymorphic) {
- __ Cmp(length, Operand(Smi::FromInt(2)));
- __ B(eq, miss);
- }
-
- Register too_far = length;
- Register pointer_reg = feedback;
-
- // +-----+------+------+-----+-----+ ... ----+
- // | map | len | wm0 | h0 | wm1 | hN |
- // +-----+------+------+-----+-----+ ... ----+
- // 0 1 2 len-1
- // ^ ^
- // | |
- // pointer_reg too_far
- // aka feedback scratch2
- // also need receiver_map
- // use cached_map (scratch1) to look in the weak map values.
- __ Add(too_far, feedback,
- Operand::UntagSmiAndScale(length, kPointerSizeLog2));
- __ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Add(pointer_reg, feedback,
- FixedArray::OffsetOfElementAt(2) - kHeapObjectTag);
-
- __ Bind(&next_loop);
- __ Ldr(cached_map, MemOperand(pointer_reg));
- __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ Cmp(receiver_map, cached_map);
- __ B(ne, &prepare_next);
- __ Ldr(handler, MemOperand(pointer_reg, kPointerSize));
- __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
- __ Jump(handler);
-
- __ Bind(&prepare_next);
- __ Add(pointer_reg, pointer_reg, kPointerSize * 2);
- __ Cmp(pointer_reg, too_far);
- __ B(lt, &next_loop);
-
- // We exhausted our array of map handler pairs.
- __ jmp(miss);
-}
-
-
-static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
- Register receiver_map, Register feedback,
- Register vector, Register slot,
- Register scratch, Label* compare_map,
- Label* load_smi_map, Label* try_array) {
- __ JumpIfSmi(receiver, load_smi_map);
- __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ bind(compare_map);
- Register cached_map = scratch;
- // Move the weak map into the weak_cell register.
- __ Ldr(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
- __ Cmp(cached_map, receiver_map);
- __ B(ne, try_array);
-
- Register handler = feedback;
- __ Add(handler, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
- __ Ldr(handler,
- FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
- __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
- __ Jump(handler);
-}
-
-void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
- KeyedStoreICStub stub(isolate(), state());
- stub.GenerateForTrampoline(masm);
-}
-
-void KeyedStoreICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
-static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
- Register receiver_map, Register scratch1,
- Register scratch2, Label* miss) {
- // feedback initially contains the feedback array
- Label next_loop, prepare_next;
- Label start_polymorphic;
- Label transition_call;
-
- Register cached_map = scratch1;
- Register too_far = scratch2;
- Register pointer_reg = feedback;
-
- __ Ldr(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
-
- // +-----+------+------+-----+-----+-----+ ... ----+
- // | map | len | wm0 | wt0 | h0 | wm1 | hN |
- // +-----+------+------+-----+-----+ ----+ ... ----+
- // 0 1 2 len-1
- // ^ ^
- // | |
- // pointer_reg too_far
- // aka feedback scratch2
- // also need receiver_map
- // use cached_map (scratch1) to look in the weak map values.
- __ Add(too_far, feedback,
- Operand::UntagSmiAndScale(too_far, kPointerSizeLog2));
- __ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Add(pointer_reg, feedback,
- FixedArray::OffsetOfElementAt(0) - kHeapObjectTag);
-
- __ Bind(&next_loop);
- __ Ldr(cached_map, MemOperand(pointer_reg));
- __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ Cmp(receiver_map, cached_map);
- __ B(ne, &prepare_next);
- // Is it a transitioning store?
- __ Ldr(too_far, MemOperand(pointer_reg, kPointerSize));
- __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
- __ B(ne, &transition_call);
-
- __ Ldr(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
- __ Add(pointer_reg, pointer_reg, Code::kHeaderSize - kHeapObjectTag);
- __ Jump(pointer_reg);
-
- __ Bind(&transition_call);
- __ Ldr(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
- __ JumpIfSmi(too_far, miss);
-
- __ Ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
- // Load the map into the correct register.
- DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
- __ mov(feedback, too_far);
- __ Add(receiver_map, receiver_map, Code::kHeaderSize - kHeapObjectTag);
- __ Jump(receiver_map);
-
- __ Bind(&prepare_next);
- __ Add(pointer_reg, pointer_reg, kPointerSize * 3);
- __ Cmp(pointer_reg, too_far);
- __ B(lt, &next_loop);
-
- // We exhausted our array of map handler pairs.
- __ jmp(miss);
-}
-
-void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // x1
- Register key = StoreWithVectorDescriptor::NameRegister(); // x2
- Register vector = StoreWithVectorDescriptor::VectorRegister(); // x3
- Register slot = StoreWithVectorDescriptor::SlotRegister(); // x4
- DCHECK(StoreWithVectorDescriptor::ValueRegister().is(x0)); // x0
- Register feedback = x5;
- Register receiver_map = x6;
- Register scratch1 = x7;
-
- __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
- __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- __ Bind(&try_array);
- // Is it a fixed array?
- __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
-
- // We have a polymorphic element handler.
- Label try_poly_name;
- HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, x8, &miss);
-
- __ Bind(&not_array);
- // Is it generic?
- __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex,
- &try_poly_name);
- Handle<Code> megamorphic_stub =
- KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
- __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
- __ Bind(&try_poly_name);
- // We might have a name in feedback, and a fixed array in the next slot.
- __ Cmp(key, feedback);
- __ B(ne, &miss);
- // If the name comparison succeeded, we know we have a fixed array with
- // at least one map/handler pair.
- __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
- __ Ldr(feedback,
- FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, feedback, receiver_map, scratch1, x8, false, &miss);
-
- __ Bind(&miss);
- KeyedStoreIC::GenerateMiss(masm);
-
- __ Bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ jmp(&compare_map);
-}
-
-
// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
// a "Push lr" instruction, followed by a call.
static const unsigned int kProfileEntryHookCallSize =
@@ -3309,91 +2781,6 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
__ Blr(lr);
}
-
-// Probe the name dictionary in the 'elements' register.
-// Jump to the 'done' label if a property with the given name is found.
-// Jump to the 'miss' label otherwise.
-//
-// If lookup was successful 'scratch2' will be equal to elements + 4 * index.
-// 'elements' and 'name' registers are preserved on miss.
-void NameDictionaryLookupStub::GeneratePositiveLookup(
- MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register scratch1,
- Register scratch2) {
- DCHECK(!AreAliased(elements, name, scratch1, scratch2));
-
- // Assert that name contains a string.
- __ AssertName(name);
-
- // Compute the capacity mask.
- __ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset));
- __ Sub(scratch1, scratch1, 1);
-
- // Generate an unrolled loop that performs a few probes before giving up.
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted in the following and instruction.
- DCHECK(NameDictionary::GetProbeOffset(i) <
- 1 << (32 - Name::kHashFieldOffset));
- __ Add(scratch2, scratch2, Operand(
- NameDictionary::GetProbeOffset(i) << Name::kHashShift));
- }
- __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
-
- // Scale the index by multiplying by the element size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
-
- // Check if the key is identical to the name.
- UseScratchRegisterScope temps(masm);
- Register scratch3 = temps.AcquireX();
- __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
- __ Ldr(scratch3, FieldMemOperand(scratch2, kElementsStartOffset));
- __ Cmp(name, scratch3);
- __ B(eq, done);
- }
-
- // The inlined probes didn't find the entry.
- // Call the complete stub to scan the whole dictionary.
-
- CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
- spill_list.Combine(lr);
- spill_list.Remove(scratch1);
- spill_list.Remove(scratch2);
-
- __ PushCPURegList(spill_list);
-
- if (name.is(x0)) {
- DCHECK(!elements.is(x1));
- __ Mov(x1, name);
- __ Mov(x0, elements);
- } else {
- __ Mov(x0, elements);
- __ Mov(x1, name);
- }
-
- Label not_found;
- NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
- __ CallStub(&stub);
- __ Cbz(x0, &not_found);
- __ Mov(scratch2, x2); // Move entry index into scratch2.
- __ PopCPURegList(spill_list);
- __ B(done);
-
- __ Bind(&not_found);
- __ PopCPURegList(spill_list);
- __ B(miss);
-}
-
-
void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
Label* done,
@@ -3875,702 +3262,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateCase(masm, FAST_ELEMENTS);
}
-
-void FastNewObjectStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x1 : target
- // -- x3 : new target
- // -- cp : context
- // -- lr : return address
- // -----------------------------------
- __ AssertFunction(x1);
- __ AssertReceiver(x3);
-
- // Verify that the new target is a JSFunction.
- Label new_object;
- __ JumpIfNotObjectType(x3, x2, x2, JS_FUNCTION_TYPE, &new_object);
-
- // Load the initial map and verify that it's in fact a map.
- __ Ldr(x2, FieldMemOperand(x3, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(x2, &new_object);
- __ JumpIfNotObjectType(x2, x0, x0, MAP_TYPE, &new_object);
-
- // Fall back to runtime if the target differs from the new target's
- // initial map constructor.
- __ Ldr(x0, FieldMemOperand(x2, Map::kConstructorOrBackPointerOffset));
- __ CompareAndBranch(x0, x1, ne, &new_object);
-
- // Allocate the JSObject on the heap.
- Label allocate, done_allocate;
- __ Ldrb(x4, FieldMemOperand(x2, Map::kInstanceSizeOffset));
- __ Allocate(x4, x0, x5, x6, &allocate, SIZE_IN_WORDS);
- __ Bind(&done_allocate);
-
- // Initialize the JSObject fields.
- STATIC_ASSERT(JSObject::kMapOffset == 0 * kPointerSize);
- __ Str(x2, FieldMemOperand(x0, JSObject::kMapOffset));
- __ LoadRoot(x3, Heap::kEmptyFixedArrayRootIndex);
- STATIC_ASSERT(JSObject::kPropertiesOffset == 1 * kPointerSize);
- STATIC_ASSERT(JSObject::kElementsOffset == 2 * kPointerSize);
- __ Str(x3, FieldMemOperand(x0, JSObject::kPropertiesOffset));
- __ Str(x3, FieldMemOperand(x0, JSObject::kElementsOffset));
- STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ Add(x1, x0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
-
- // ----------- S t a t e -------------
- // -- x0 : result (tagged)
- // -- x1 : result fields (untagged)
- // -- x5 : result end (untagged)
- // -- x2 : initial map
- // -- cp : context
- // -- lr : return address
- // -----------------------------------
-
- // Perform in-object slack tracking if requested.
- Label slack_tracking;
- STATIC_ASSERT(Map::kNoSlackTracking == 0);
- __ LoadRoot(x6, Heap::kUndefinedValueRootIndex);
- __ Ldr(w3, FieldMemOperand(x2, Map::kBitField3Offset));
- __ TestAndBranchIfAnySet(w3, Map::ConstructionCounter::kMask,
- &slack_tracking);
- {
- // Initialize all in-object fields with undefined.
- __ InitializeFieldsWithFiller(x1, x5, x6);
- __ Ret();
- }
- __ Bind(&slack_tracking);
- {
- // Decrease generous allocation count.
- STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
- __ Sub(w3, w3, 1 << Map::ConstructionCounter::kShift);
- __ Str(w3, FieldMemOperand(x2, Map::kBitField3Offset));
-
- // Initialize the in-object fields with undefined.
- __ Ldrb(x4, FieldMemOperand(x2, Map::kUnusedPropertyFieldsOffset));
- __ Sub(x4, x5, Operand(x4, LSL, kPointerSizeLog2));
- __ InitializeFieldsWithFiller(x1, x4, x6);
-
- // Initialize the remaining (reserved) fields with one pointer filler map.
- __ LoadRoot(x6, Heap::kOnePointerFillerMapRootIndex);
- __ InitializeFieldsWithFiller(x1, x5, x6);
-
- // Check if we can finalize the instance size.
- Label finalize;
- STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
- __ TestAndBranchIfAllClear(w3, Map::ConstructionCounter::kMask, &finalize);
- __ Ret();
-
- // Finalize the instance size.
- __ Bind(&finalize);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(x0, x2);
- __ CallRuntime(Runtime::kFinalizeInstanceSize);
- __ Pop(x0);
- }
- __ Ret();
- }
-
- // Fall back to %AllocateInNewSpace.
- __ Bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- __ Mov(x4,
- Operand(x4, LSL, kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize));
- __ Push(x2, x4);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ Pop(x2);
- }
- __ Ldrb(x5, FieldMemOperand(x2, Map::kInstanceSizeOffset));
- __ Add(x5, x0, Operand(x5, LSL, kPointerSizeLog2));
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ Sub(x5, x5, kHeapObjectTag); // Subtract the tag from end.
- __ B(&done_allocate);
-
- // Fall back to %NewObject.
- __ Bind(&new_object);
- __ Push(x1, x3);
- __ TailCallRuntime(Runtime::kNewObject);
-}
-
-
-void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x1 : function
- // -- cp : context
- // -- fp : frame pointer
- // -- lr : return address
- // -----------------------------------
- __ AssertFunction(x1);
-
- // Make x2 point to the JavaScript frame.
- __ Mov(x2, fp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kFunctionOffset));
- __ Cmp(x3, x1);
- __ B(eq, &ok);
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ Bind(&ok);
- }
-
- // Check if we have rest parameters (only possible if we have an
- // arguments adaptor frame below the function frame).
- Label no_rest_parameters;
- __ Ldr(x2, MemOperand(x2, CommonFrameConstants::kCallerFPOffset));
- __ Ldr(x3, MemOperand(x2, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Cmp(x3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(ne, &no_rest_parameters);
-
- // Check if the arguments adaptor frame contains more arguments than
- // specified by the function's internal formal parameter count.
- Label rest_parameters;
- __ Ldrsw(x0, UntagSmiMemOperand(
- x2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldrsw(
- x3, FieldMemOperand(x3, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Subs(x0, x0, x3);
- __ B(gt, &rest_parameters);
-
- // Return an empty rest parameter array.
- __ Bind(&no_rest_parameters);
- {
- // ----------- S t a t e -------------
- // -- cp : context
- // -- lr : return address
- // -----------------------------------
-
- // Allocate an empty rest parameter array.
- Label allocate, done_allocate;
- __ Allocate(JSArray::kSize, x0, x1, x2, &allocate, NO_ALLOCATION_FLAGS);
- __ Bind(&done_allocate);
-
- // Setup the rest parameter array in x0.
- __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, x1);
- __ Str(x1, FieldMemOperand(x0, JSArray::kMapOffset));
- __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
- __ Str(x1, FieldMemOperand(x0, JSArray::kPropertiesOffset));
- __ Str(x1, FieldMemOperand(x0, JSArray::kElementsOffset));
- __ Mov(x1, Smi::kZero);
- __ Str(x1, FieldMemOperand(x0, JSArray::kLengthOffset));
- STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
- __ Ret();
-
- // Fall back to %AllocateInNewSpace.
- __ Bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(Smi::FromInt(JSArray::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- }
- __ B(&done_allocate);
- }
-
- __ Bind(&rest_parameters);
- {
- // Compute the pointer to the first rest parameter (skippping the receiver).
- __ Add(x2, x2, Operand(x0, LSL, kPointerSizeLog2));
- __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
-
- // ----------- S t a t e -------------
- // -- cp : context
- // -- x0 : number of rest parameters
- // -- x1 : function
- // -- x2 : pointer to first rest parameters
- // -- lr : return address
- // -----------------------------------
-
- // Allocate space for the rest parameter array plus the backing store.
- Label allocate, done_allocate;
- __ Mov(x6, JSArray::kSize + FixedArray::kHeaderSize);
- __ Add(x6, x6, Operand(x0, LSL, kPointerSizeLog2));
- __ Allocate(x6, x3, x4, x5, &allocate, NO_ALLOCATION_FLAGS);
- __ Bind(&done_allocate);
-
- // Compute arguments.length in x6.
- __ SmiTag(x6, x0);
-
- // Setup the elements array in x3.
- __ LoadRoot(x1, Heap::kFixedArrayMapRootIndex);
- __ Str(x1, FieldMemOperand(x3, FixedArray::kMapOffset));
- __ Str(x6, FieldMemOperand(x3, FixedArray::kLengthOffset));
- __ Add(x4, x3, FixedArray::kHeaderSize);
- {
- Label loop, done_loop;
- __ Add(x0, x4, Operand(x0, LSL, kPointerSizeLog2));
- __ Bind(&loop);
- __ Cmp(x4, x0);
- __ B(eq, &done_loop);
- __ Ldr(x5, MemOperand(x2, 0 * kPointerSize));
- __ Str(x5, FieldMemOperand(x4, 0 * kPointerSize));
- __ Sub(x2, x2, Operand(1 * kPointerSize));
- __ Add(x4, x4, Operand(1 * kPointerSize));
- __ B(&loop);
- __ Bind(&done_loop);
- }
-
- // Setup the rest parameter array in x0.
- __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, x1);
- __ Str(x1, FieldMemOperand(x0, JSArray::kMapOffset));
- __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
- __ Str(x1, FieldMemOperand(x0, JSArray::kPropertiesOffset));
- __ Str(x3, FieldMemOperand(x0, JSArray::kElementsOffset));
- __ Str(x6, FieldMemOperand(x0, JSArray::kLengthOffset));
- STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
- __ Ret();
-
- // Fall back to %AllocateInNewSpace (if not too big).
- Label too_big_for_new_space;
- __ Bind(&allocate);
- __ Cmp(x6, Operand(kMaxRegularHeapObjectSize));
- __ B(gt, &too_big_for_new_space);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(x0);
- __ SmiTag(x6);
- __ Push(x0, x2, x6);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ Mov(x3, x0);
- __ Pop(x2, x0);
- __ SmiUntag(x0);
- }
- __ B(&done_allocate);
-
- // Fall back to %NewRestParameter.
- __ Bind(&too_big_for_new_space);
- __ Push(x1);
- __ TailCallRuntime(Runtime::kNewRestParameter);
- }
-}
-
-
-void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x1 : function
- // -- cp : context
- // -- fp : frame pointer
- // -- lr : return address
- // -----------------------------------
- __ AssertFunction(x1);
-
- // Make x6 point to the JavaScript frame.
- __ Mov(x6, fp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ Ldr(x6, MemOperand(x6, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ Ldr(x3, MemOperand(x6, StandardFrameConstants::kFunctionOffset));
- __ Cmp(x3, x1);
- __ B(eq, &ok);
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ Bind(&ok);
- }
-
- // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
- __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldrsw(
- x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Add(x3, x6, Operand(x2, LSL, kPointerSizeLog2));
- __ Add(x3, x3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ SmiTag(x2);
-
- // x1 : function
- // x2 : number of parameters (tagged)
- // x3 : parameters pointer
- // x6 : JavaScript frame pointer
- //
- // Returns pointer to result object in x0.
-
- // Make an untagged copy of the parameter count.
- // Note: arg_count_smi is an alias of param_count_smi.
- Register function = x1;
- Register arg_count_smi = x2;
- Register param_count_smi = x2;
- Register recv_arg = x3;
- Register param_count = x7;
- __ SmiUntag(param_count, param_count_smi);
-
- // Check if the calling frame is an arguments adaptor frame.
- Register caller_fp = x11;
- Register caller_ctx = x12;
- Label runtime;
- Label adaptor_frame, try_allocate;
- __ Ldr(caller_fp, MemOperand(x6, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(
- caller_ctx,
- MemOperand(caller_fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(eq, &adaptor_frame);
-
- // No adaptor, parameter count = argument count.
-
- // x1 function function pointer
- // x2 arg_count_smi number of function arguments (smi)
- // x3 recv_arg pointer to receiver arguments
- // x4 mapped_params number of mapped params, min(params, args) (uninit)
- // x7 param_count number of function parameters
- // x11 caller_fp caller's frame pointer
- // x14 arg_count number of function arguments (uninit)
-
- Register arg_count = x14;
- Register mapped_params = x4;
- __ Mov(arg_count, param_count);
- __ Mov(mapped_params, param_count);
- __ B(&try_allocate);
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ Bind(&adaptor_frame);
- __ Ldr(arg_count_smi,
- MemOperand(caller_fp,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(arg_count, arg_count_smi);
- __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2));
- __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset);
-
- // Compute the mapped parameter count = min(param_count, arg_count)
- __ Cmp(param_count, arg_count);
- __ Csel(mapped_params, param_count, arg_count, lt);
-
- __ Bind(&try_allocate);
-
- // x0 alloc_obj pointer to allocated objects: param map, backing
- // store, arguments (uninit)
- // x1 function function pointer
- // x2 arg_count_smi number of function arguments (smi)
- // x3 recv_arg pointer to receiver arguments
- // x4 mapped_params number of mapped parameters, min(params, args)
- // x7 param_count number of function parameters
- // x10 size size of objects to allocate (uninit)
- // x14 arg_count number of function arguments
-
- // Compute the size of backing store, parameter map, and arguments object.
- // 1. Parameter map, has two extra words containing context and backing
- // store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
-
- // Calculate the parameter map size, assuming it exists.
- Register size = x10;
- __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2));
- __ Add(size, size, kParameterMapHeaderSize);
-
- // If there are no mapped parameters, set the running size total to zero.
- // Otherwise, use the parameter map size calculated earlier.
- __ Cmp(mapped_params, 0);
- __ CzeroX(size, eq);
-
- // 2. Add the size of the backing store and arguments object.
- __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2));
- __ Add(size, size, FixedArray::kHeaderSize + JSSloppyArgumentsObject::kSize);
-
- // Do the allocation of all three objects in one go. Assign this to x0, as it
- // will be returned to the caller.
- Register alloc_obj = x0;
- __ Allocate(size, alloc_obj, x11, x12, &runtime, NO_ALLOCATION_FLAGS);
-
- // Get the arguments boilerplate from the current (global) context.
-
- // x0 alloc_obj pointer to allocated objects (param map, backing
- // store, arguments)
- // x1 function function pointer
- // x2 arg_count_smi number of function arguments (smi)
- // x3 recv_arg pointer to receiver arguments
- // x4 mapped_params number of mapped parameters, min(params, args)
- // x7 param_count number of function parameters
- // x11 sloppy_args_map offset to args (or aliased args) map (uninit)
- // x14 arg_count number of function arguments
-
- Register global_ctx = x10;
- Register sloppy_args_map = x11;
- Register aliased_args_map = x10;
- __ Ldr(global_ctx, NativeContextMemOperand());
-
- __ Ldr(sloppy_args_map,
- ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ Ldr(
- aliased_args_map,
- ContextMemOperand(global_ctx, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX));
- __ Cmp(mapped_params, 0);
- __ CmovX(sloppy_args_map, aliased_args_map, ne);
-
- // Copy the JS object part.
- __ Str(sloppy_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
- __ LoadRoot(x10, Heap::kEmptyFixedArrayRootIndex);
- __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset));
- __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
-
- // Set up the callee in-object property.
- __ AssertNotSmi(function);
- __ Str(function,
- FieldMemOperand(alloc_obj, JSSloppyArgumentsObject::kCalleeOffset));
-
- // Use the length and set that as an in-object property.
- __ Str(arg_count_smi,
- FieldMemOperand(alloc_obj, JSSloppyArgumentsObject::kLengthOffset));
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, "elements" will point there, otherwise
- // it will point to the backing store.
-
- // x0 alloc_obj pointer to allocated objects (param map, backing
- // store, arguments)
- // x1 function function pointer
- // x2 arg_count_smi number of function arguments (smi)
- // x3 recv_arg pointer to receiver arguments
- // x4 mapped_params number of mapped parameters, min(params, args)
- // x5 elements pointer to parameter map or backing store (uninit)
- // x6 backing_store pointer to backing store (uninit)
- // x7 param_count number of function parameters
- // x14 arg_count number of function arguments
-
- Register elements = x5;
- __ Add(elements, alloc_obj, JSSloppyArgumentsObject::kSize);
- __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
-
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- __ Cmp(mapped_params, 0);
- // Set up backing store address, because it is needed later for filling in
- // the unmapped arguments.
- Register backing_store = x6;
- __ CmovX(backing_store, elements, eq);
- __ B(eq, &skip_parameter_map);
-
- __ LoadRoot(x10, Heap::kSloppyArgumentsElementsMapRootIndex);
- __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
- __ Add(x10, mapped_params, 2);
- __ SmiTag(x10);
- __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Str(cp, FieldMemOperand(elements,
- FixedArray::kHeaderSize + 0 * kPointerSize));
- __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2));
- __ Add(x10, x10, kParameterMapHeaderSize);
- __ Str(x10, FieldMemOperand(elements,
- FixedArray::kHeaderSize + 1 * kPointerSize));
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. Then index the context,
- // where parameters are stored in reverse order, at:
- //
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1
- //
- // The mapped parameter thus needs to get indices:
- //
- // MIN_CONTEXT_SLOTS + parameter_count - 1 ..
- // MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count
- //
- // We loop from right to left.
-
- // x0 alloc_obj pointer to allocated objects (param map, backing
- // store, arguments)
- // x1 function function pointer
- // x2 arg_count_smi number of function arguments (smi)
- // x3 recv_arg pointer to receiver arguments
- // x4 mapped_params number of mapped parameters, min(params, args)
- // x5 elements pointer to parameter map or backing store (uninit)
- // x6 backing_store pointer to backing store (uninit)
- // x7 param_count number of function parameters
- // x11 loop_count parameter loop counter (uninit)
- // x12 index parameter index (smi, uninit)
- // x13 the_hole hole value (uninit)
- // x14 arg_count number of function arguments
-
- Register loop_count = x11;
- Register index = x12;
- Register the_hole = x13;
- Label parameters_loop, parameters_test;
- __ Mov(loop_count, mapped_params);
- __ Add(index, param_count, static_cast<int>(Context::MIN_CONTEXT_SLOTS));
- __ Sub(index, index, mapped_params);
- __ SmiTag(index);
- __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
- __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2));
- __ Add(backing_store, backing_store, kParameterMapHeaderSize);
-
- __ B(&parameters_test);
-
- __ Bind(&parameters_loop);
- __ Sub(loop_count, loop_count, 1);
- __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2));
- __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag);
- __ Str(index, MemOperand(elements, x10));
- __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize);
- __ Str(the_hole, MemOperand(backing_store, x10));
- __ Add(index, index, Smi::FromInt(1));
- __ Bind(&parameters_test);
- __ Cbnz(loop_count, &parameters_loop);
-
- __ Bind(&skip_parameter_map);
- // Copy arguments header and remaining slots (if there are any.)
- __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
- __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset));
- __ Str(arg_count_smi, FieldMemOperand(backing_store,
- FixedArray::kLengthOffset));
-
- // x0 alloc_obj pointer to allocated objects (param map, backing
- // store, arguments)
- // x1 function function pointer
- // x2 arg_count_smi number of function arguments (smi)
- // x3 recv_arg pointer to receiver arguments
- // x4 mapped_params number of mapped parameters, min(params, args)
- // x6 backing_store pointer to backing store (uninit)
- // x14 arg_count number of function arguments
-
- Label arguments_loop, arguments_test;
- __ Mov(x10, mapped_params);
- __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2));
- __ B(&arguments_test);
-
- __ Bind(&arguments_loop);
- __ Sub(recv_arg, recv_arg, kPointerSize);
- __ Ldr(x11, MemOperand(recv_arg));
- __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2));
- __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
- __ Add(x10, x10, 1);
-
- __ Bind(&arguments_test);
- __ Cmp(x10, arg_count);
- __ B(lt, &arguments_loop);
-
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- __ Bind(&runtime);
- __ Push(function, recv_arg, arg_count_smi);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x1 : function
- // -- cp : context
- // -- fp : frame pointer
- // -- lr : return address
- // -----------------------------------
- __ AssertFunction(x1);
-
- // Make x2 point to the JavaScript frame.
- __ Mov(x2, fp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kFunctionOffset));
- __ Cmp(x3, x1);
- __ B(eq, &ok);
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ Bind(&ok);
- }
-
- // Check if we have an arguments adaptor frame below the function frame.
- Label arguments_adaptor, arguments_done;
- __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(x4, MemOperand(x3, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Cmp(x4, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(eq, &arguments_adaptor);
- {
- __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldrsw(x0, FieldMemOperand(
- x4, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Add(x2, x2, Operand(x0, LSL, kPointerSizeLog2));
- __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
- }
- __ B(&arguments_done);
- __ Bind(&arguments_adaptor);
- {
- __ Ldrsw(x0, UntagSmiMemOperand(
- x3, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Add(x2, x3, Operand(x0, LSL, kPointerSizeLog2));
- __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
- }
- __ Bind(&arguments_done);
-
- // ----------- S t a t e -------------
- // -- cp : context
- // -- x0 : number of rest parameters
- // -- x1 : function
- // -- x2 : pointer to first rest parameters
- // -- lr : return address
- // -----------------------------------
-
- // Allocate space for the strict arguments object plus the backing store.
- Label allocate, done_allocate;
- __ Mov(x6, JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize);
- __ Add(x6, x6, Operand(x0, LSL, kPointerSizeLog2));
- __ Allocate(x6, x3, x4, x5, &allocate, NO_ALLOCATION_FLAGS);
- __ Bind(&done_allocate);
-
- // Compute arguments.length in x6.
- __ SmiTag(x6, x0);
-
- // Setup the elements array in x3.
- __ LoadRoot(x1, Heap::kFixedArrayMapRootIndex);
- __ Str(x1, FieldMemOperand(x3, FixedArray::kMapOffset));
- __ Str(x6, FieldMemOperand(x3, FixedArray::kLengthOffset));
- __ Add(x4, x3, FixedArray::kHeaderSize);
- {
- Label loop, done_loop;
- __ Add(x0, x4, Operand(x0, LSL, kPointerSizeLog2));
- __ Bind(&loop);
- __ Cmp(x4, x0);
- __ B(eq, &done_loop);
- __ Ldr(x5, MemOperand(x2, 0 * kPointerSize));
- __ Str(x5, FieldMemOperand(x4, 0 * kPointerSize));
- __ Sub(x2, x2, Operand(1 * kPointerSize));
- __ Add(x4, x4, Operand(1 * kPointerSize));
- __ B(&loop);
- __ Bind(&done_loop);
- }
-
- // Setup the strict arguments object in x0.
- __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, x1);
- __ Str(x1, FieldMemOperand(x0, JSStrictArgumentsObject::kMapOffset));
- __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
- __ Str(x1, FieldMemOperand(x0, JSStrictArgumentsObject::kPropertiesOffset));
- __ Str(x3, FieldMemOperand(x0, JSStrictArgumentsObject::kElementsOffset));
- __ Str(x6, FieldMemOperand(x0, JSStrictArgumentsObject::kLengthOffset));
- STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
- __ Ret();
-
- // Fall back to %AllocateInNewSpace (if not too big).
- Label too_big_for_new_space;
- __ Bind(&allocate);
- __ Cmp(x6, Operand(kMaxRegularHeapObjectSize));
- __ B(gt, &too_big_for_new_space);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(x0);
- __ SmiTag(x6);
- __ Push(x0, x2, x6);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ Mov(x3, x0);
- __ Pop(x2, x0);
- __ SmiUntag(x0);
- }
- __ B(&done_allocate);
-
- // Fall back to %NewStrictArguments.
- __ Bind(&too_big_for_new_space);
- __ Push(x1);
- __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
// The number of register that CallApiFunctionAndReturn will need to save on
// the stack. The space for these registers need to be allocated in the
// ExitFrame before calling CallApiFunctionAndReturn.