author    Ben Murdoch <benm@google.com>  2010-07-22 14:51:16 +0100
committer Ben Murdoch <benm@google.com>  2010-07-22 14:51:16 +0100
commit    3bec4d28b1f388dbc06a9c4276e1a03e86c52b04 (patch)
tree      538bb9cb5e3664733f56ba3292342ccc426eb9f9 /src
parent    2794f167cd167a39859e9be5be3b05bdb5feb10a (diff)
Update V8 to r5091 as required by WebKit r63859.
Change-Id: I8e35d765e6f6c7f89eccff900e1cabe2d5dd6110
Diffstat (limited to 'src')
-rw-r--r--  src/accessors.cc | 4
-rw-r--r--  src/api.cc | 73
-rw-r--r--  src/arm/assembler-arm.cc | 108
-rw-r--r--  src/arm/assembler-arm.h | 20
-rw-r--r--  src/arm/codegen-arm.cc | 217
-rw-r--r--  src/arm/codegen-arm.h | 1
-rw-r--r--  src/arm/constants-arm.cc | 20
-rw-r--r--  src/arm/constants-arm.h | 3
-rw-r--r--  src/arm/disasm-arm.cc | 14
-rw-r--r--  src/arm/full-codegen-arm.cc | 36
-rw-r--r--  src/arm/macro-assembler-arm.cc | 82
-rw-r--r--  src/arm/macro-assembler-arm.h | 18
-rw-r--r--  src/arm/regexp-macro-assembler-arm.cc | 1
-rw-r--r--  src/arm/simulator-arm.cc | 9
-rw-r--r--  src/arm/stub-cache-arm.cc | 305
-rw-r--r--  src/bootstrapper.cc | 3
-rw-r--r--  src/builtins.cc | 2
-rw-r--r--  src/code-stubs.cc | 6
-rw-r--r--  src/codegen.cc | 4
-rw-r--r--  src/codegen.h | 29
-rwxr-xr-x  src/compiler.cc | 36
-rw-r--r--  src/contexts.cc | 22
-rw-r--r--  src/debug.cc | 118
-rw-r--r--  src/debug.h | 29
-rw-r--r--  src/factory.cc | 11
-rw-r--r--  src/factory.h | 9
-rw-r--r--  src/frames.cc | 5
-rw-r--r--  src/full-codegen.cc | 2
-rw-r--r--  src/full-codegen.h | 1
-rw-r--r--  src/globals.h | 2
-rw-r--r--  src/heap-profiler.cc | 7
-rw-r--r--  src/heap-profiler.h | 16
-rw-r--r--  src/heap.cc | 470
-rw-r--r--  src/heap.h | 24
-rw-r--r--  src/ia32/assembler-ia32.cc | 1
-rw-r--r--  src/ia32/codegen-ia32.cc | 351
-rw-r--r--  src/ia32/codegen-ia32.h | 12
-rw-r--r--  src/ia32/full-codegen-ia32.cc | 27
-rw-r--r--  src/ia32/regexp-macro-assembler-ia32.cc | 1
-rw-r--r--  src/ia32/stub-cache-ia32.cc | 131
-rw-r--r--  src/ic.cc | 5
-rw-r--r--  src/json.js | 2
-rw-r--r--  src/macros.py | 3
-rw-r--r--  src/mark-compact.cc | 10
-rw-r--r--  src/messages.cc | 11
-rw-r--r--  src/messages.h | 3
-rw-r--r--  src/messages.js | 11
-rw-r--r--  src/mips/codegen-mips.cc | 5
-rw-r--r--  src/mips/codegen-mips.h | 1
-rw-r--r--  src/objects-debug.cc | 1
-rw-r--r--  src/objects-inl.h | 34
-rw-r--r--  src/objects.cc | 46
-rw-r--r--  src/objects.h | 43
-rw-r--r--  src/parser.cc | 4
-rw-r--r--  src/platform-openbsd.cc | 6
-rw-r--r--  src/profile-generator.cc | 310
-rw-r--r--  src/profile-generator.h | 128
-rw-r--r--  src/runtime.cc | 84
-rw-r--r--  src/runtime.h | 1
-rw-r--r--  src/runtime.js | 26
-rwxr-xr-x  src/scanner.cc | 15
-rw-r--r--  src/scopeinfo.cc | 326
-rw-r--r--  src/scopeinfo.h | 119
-rw-r--r--  src/serialize.cc | 10
-rw-r--r--  src/stub-cache.cc | 2
-rw-r--r--  src/stub-cache.h | 20
-rw-r--r--  src/top.cc | 53
-rw-r--r--  src/top.h | 10
-rw-r--r--  src/v8natives.js | 110
-rw-r--r--  src/version.cc | 4
-rw-r--r--  src/vm-state-inl.h | 9
-rw-r--r--  src/vm-state.cc | 2
-rw-r--r--  src/vm-state.h | 10
-rw-r--r--  src/x64/assembler-x64.cc | 1
-rw-r--r--  src/x64/codegen-x64.cc | 9750
-rw-r--r--  src/x64/codegen-x64.h | 12
-rw-r--r--  src/x64/full-codegen-x64.cc | 26
-rw-r--r--  src/x64/macro-assembler-x64.cc | 95
-rw-r--r--  src/x64/macro-assembler-x64.h | 18
-rw-r--r--  src/x64/regexp-macro-assembler-x64.cc | 1
-rw-r--r--  src/x64/stub-cache-x64.cc | 303
81 files changed, 7658 insertions, 6202 deletions
diff --git a/src/accessors.cc b/src/accessors.cc
index e41db947..ed0bbd7a 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -549,8 +549,8 @@ Object* Accessors::FunctionGetArguments(Object* object, void*) {
if (frame->function() != *function) continue;
// If there is an arguments variable in the stack, we return that.
- int index = ScopeInfo<>::StackSlotIndex(frame->code(),
- Heap::arguments_symbol());
+ int index = function->shared()->scope_info()->
+ StackSlotIndex(Heap::arguments_symbol());
if (index >= 0) {
Handle<Object> arguments = Handle<Object>(frame->GetExpression(index));
if (!arguments->IsTheHole()) return *arguments;
diff --git a/src/api.cc b/src/api.cc
index 0f64dd45..48c64b31 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -1438,6 +1438,22 @@ v8::Handle<Value> Message::GetScriptData() const {
}
+v8::Handle<v8::StackTrace> Message::GetStackTrace() const {
+ if (IsDeadCheck("v8::Message::GetStackTrace()")) {
+ return Local<v8::StackTrace>();
+ }
+ ENTER_V8;
+ HandleScope scope;
+ i::Handle<i::JSObject> obj =
+ i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
+ i::Handle<i::Object> stackFramesObj = GetProperty(obj, "stackFrames");
+ if (!stackFramesObj->IsJSArray()) return v8::Handle<v8::StackTrace>();
+ i::Handle<i::JSArray> stackTrace =
+ i::Handle<i::JSArray>::cast(stackFramesObj);
+ return scope.Close(Utils::StackTraceToLocal(stackTrace));
+}
+
+
static i::Handle<i::Object> CallV8HeapFunction(const char* name,
i::Handle<i::Object> recv,
int argc,
@@ -1583,7 +1599,9 @@ Local<StackTrace> StackTrace::CurrentStackTrace(int frame_limit,
StackTraceOptions options) {
if (IsDeadCheck("v8::StackTrace::CurrentStackTrace()")) Local<StackTrace>();
ENTER_V8;
- return i::Top::CaptureCurrentStackTrace(frame_limit, options);
+ i::Handle<i::JSArray> stackTrace =
+ i::Top::CaptureCurrentStackTrace(frame_limit, options);
+ return Utils::StackTraceToLocal(stackTrace);
}
@@ -3782,6 +3800,17 @@ void V8::RemoveMessageListeners(MessageCallback that) {
}
+void V8::SetCaptureStackTraceForUncaughtExceptions(
+ bool capture,
+ int frame_limit,
+ StackTrace::StackTraceOptions options) {
+ i::Top::SetCaptureStackTraceForUncaughtExceptions(
+ capture,
+ frame_limit,
+ options);
+}
+
+
void V8::SetCounterFunction(CounterLookupCallback callback) {
if (IsDeadCheck("v8::V8::SetCounterFunction()")) return;
i::StatsTable::SetCounterFunction(callback);
@@ -4184,6 +4213,12 @@ void Debug::DebugBreak() {
}
+void Debug::DebugBreakForCommand(ClientData* data) {
+ if (!i::V8::IsRunning()) return;
+ i::Debugger::EnqueueDebugCommand(data);
+}
+
+
static v8::Debug::MessageHandler message_handler = NULL;
static void MessageHandlerWrapper(const v8::Debug::Message& message) {
@@ -4526,6 +4561,12 @@ Handle<String> HeapGraphNode::GetName() const {
}
+uint64_t HeapGraphNode::GetId() const {
+ IsDeadCheck("v8::HeapGraphNode::GetId");
+ return reinterpret_cast<const i::HeapEntry*>(this)->id();
+}
+
+
int HeapGraphNode::GetSelfSize() const {
IsDeadCheck("v8::HeapGraphNode::GetSelfSize");
return reinterpret_cast<const i::HeapEntry*>(this)->self_size();
@@ -4589,6 +4630,22 @@ const HeapGraphPath* HeapGraphNode::GetRetainingPath(int index) const {
}
+const HeapGraphNode* HeapSnapshotsDiff::GetAdditionsRoot() const {
+ IsDeadCheck("v8::HeapSnapshotsDiff::GetAdditionsRoot");
+ const i::HeapSnapshotsDiff* diff =
+ reinterpret_cast<const i::HeapSnapshotsDiff*>(this);
+ return reinterpret_cast<const HeapGraphNode*>(diff->additions_root());
+}
+
+
+const HeapGraphNode* HeapSnapshotsDiff::GetDeletionsRoot() const {
+ IsDeadCheck("v8::HeapSnapshotsDiff::GetDeletionsRoot");
+ const i::HeapSnapshotsDiff* diff =
+ reinterpret_cast<const i::HeapSnapshotsDiff*>(this);
+ return reinterpret_cast<const HeapGraphNode*>(diff->deletions_root());
+}
+
+
unsigned HeapSnapshot::GetUid() const {
IsDeadCheck("v8::HeapSnapshot::GetUid");
return reinterpret_cast<const i::HeapSnapshot*>(this)->uid();
@@ -4604,7 +4661,7 @@ Handle<String> HeapSnapshot::GetTitle() const {
}
-const HeapGraphNode* HeapSnapshot::GetHead() const {
+const HeapGraphNode* HeapSnapshot::GetRoot() const {
IsDeadCheck("v8::HeapSnapshot::GetHead");
const i::HeapSnapshot* snapshot =
reinterpret_cast<const i::HeapSnapshot*>(this);
@@ -4612,6 +4669,18 @@ const HeapGraphNode* HeapSnapshot::GetHead() const {
}
+const HeapSnapshotsDiff* HeapSnapshot::CompareWith(
+ const HeapSnapshot* snapshot) const {
+ IsDeadCheck("v8::HeapSnapshot::CompareWith");
+ i::HeapSnapshot* snapshot1 = const_cast<i::HeapSnapshot*>(
+ reinterpret_cast<const i::HeapSnapshot*>(this));
+ i::HeapSnapshot* snapshot2 = const_cast<i::HeapSnapshot*>(
+ reinterpret_cast<const i::HeapSnapshot*>(snapshot));
+ return reinterpret_cast<const HeapSnapshotsDiff*>(
+ snapshot1->CompareWith(snapshot2));
+}
+
+
int HeapProfiler::GetSnapshotsCount() {
IsDeadCheck("v8::HeapProfiler::GetSnapshotsCount");
return i::HeapProfiler::GetSnapshotsCount();
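
The new embedder-facing entry points in this file (Message::GetStackTrace, V8::SetCaptureStackTraceForUncaughtExceptions, HeapSnapshot::CompareWith and the HeapSnapshotsDiff accessors) combine roughly as in the sketch below. Illustrative only: it assumes a v8.h of this vintage, plus the pre-existing AddMessageListener, GetFrameCount and StackTrace::kOverview declarations, which are not part of this diff.

    #include <v8.h>
    #include <cstdio>

    // Print the stack trace attached to an uncaught-exception message.
    static void OnMessage(v8::Handle<v8::Message> message,
                          v8::Handle<v8::Value> /*data*/) {
      v8::Handle<v8::StackTrace> trace = message->GetStackTrace();
      if (!trace.IsEmpty()) {
        std::printf("%d frames captured\n", trace->GetFrameCount());
      }
    }

    void ConfigureStackTraces() {
      // Capture up to 10 frames whenever an exception goes uncaught.
      v8::V8::SetCaptureStackTraceForUncaughtExceptions(
          true, 10, v8::StackTrace::kOverview);
      v8::V8::AddMessageListener(OnMessage);
    }

    // Diff two heap snapshots: children of the additions root are objects
    // present only in 'after'; HeapGraphNode::GetId() gives a stable id.
    void DiffSnapshots(const v8::HeapSnapshot* before,
                       const v8::HeapSnapshot* after) {
      const v8::HeapSnapshotsDiff* diff = before->CompareWith(after);
      const v8::HeapGraphNode* added = diff->GetAdditionsRoot();
      const v8::HeapGraphNode* removed = diff->GetDeletionsRoot();
      std::printf("roots: %p %p\n",
                  static_cast<const void*>(added),
                  static_cast<const void*>(removed));
    }
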
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index f5ff43a6..0dc6b771 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -1801,11 +1801,119 @@ void Assembler::vstr(const DwVfpRegister src,
}
+static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
+ uint64_t i;
+ memcpy(&i, &d, 8);
+
+ *lo = i & 0xffffffff;
+ *hi = i >> 32;
+}
+
+// Only works for little-endian floating-point formats.
+// We don't support VFP on mixed-endian floating-point platforms.
+static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+
+ // VMOV can accept an immediate of the form:
+ //
+ // +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
+ //
+ // The immediate is encoded using an 8-bit quantity, comprised of two
+ // 4-bit fields. For an 8-bit immediate of the form:
+ //
+ // [abcdefgh]
+ //
+ // where a is the MSB and h is the LSB, an immediate 64-bit double can be
+ // created of the form:
+ //
+ // [aBbbbbbb,bbcdefgh,00000000,00000000,
+ // 00000000,00000000,00000000,00000000]
+ //
+ // where B = ~b.
+ //
+
+ uint32_t lo, hi;
+ DoubleAsTwoUInt32(d, &lo, &hi);
+
+ // The most obvious constraint is the long block of zeroes.
+ if ((lo != 0) || ((hi & 0xffff) != 0)) {
+ return false;
+ }
+
+ // Bits 62:55 must be all clear or all set.
+ if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
+ return false;
+ }
+
+ // Bit 63 must be NOT bit 62.
+ if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
+ return false;
+ }
+
+ // Create the encoded immediate in the form:
+ // [00000000,0000abcd,00000000,0000efgh]
+ *encoding = (hi >> 16) & 0xf; // Low nybble.
+ *encoding |= (hi >> 4) & 0x70000; // Low three bits of the high nybble.
+ *encoding |= (hi >> 12) & 0x80000; // Top bit of the high nybble.
+
+ return true;
+}
+
+
+void Assembler::vmov(const DwVfpRegister dst,
+ double imm,
+ const Condition cond) {
+ // Dd = immediate
+ // Instruction details available in ARM DDI 0406B, A8-640.
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+
+ uint32_t enc;
+ if (FitsVMOVDoubleImmediate(imm, &enc)) {
+ // The double can be encoded in the instruction.
+ emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0xB*B8 | enc);
+ } else {
+ // Synthesise the double from ARM immediates. This could be implemented
+ // using vldr from a constant pool.
+ uint32_t lo, hi;
+ DoubleAsTwoUInt32(imm, &lo, &hi);
+
+ if (lo == hi) {
+ // If the lo and hi parts of the double are equal, the literal is easier
+ // to create. This is the case with 0.0.
+ mov(ip, Operand(lo));
+ vmov(dst, ip, ip);
+ } else {
+ // Move the low part of the double into the lower of the corresponding S
+ // registers of D register dst.
+ mov(ip, Operand(lo));
+ vmov(dst.low(), ip, cond);
+
+ // Move the high part of the double into the higher of the corresponding S
+ // registers of D register dst.
+ mov(ip, Operand(hi));
+ vmov(dst.high(), ip, cond);
+ }
+ }
+}
+
+
+void Assembler::vmov(const SwVfpRegister dst,
+ const SwVfpRegister src,
+ const Condition cond) {
+ // Sd = Sm
+ // Instruction details available in ARM DDI 0406B, A8-642.
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | 0xB*B20 |
+ dst.code()*B12 | 0x5*B9 | B6 | src.code());
+}
+
+
void Assembler::vmov(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
// Dd = Dm
// Instruction details available in ARM DDI 0406B, A8-642.
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0xB*B20 |
dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
}
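
A worked, standalone check of the VMOV immediate test above: this sketch re-implements FitsVMOVDoubleImmediate outside the assembler and confirms that 1.0 encodes (imm8 = 0x70), 2.0 encodes (imm8 = 0x00), and 0.1 is rejected because its low mantissa word is non-zero.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
      uint64_t i;
      std::memcpy(&i, &d, 8);
      *lo = i & 0xffffffff;
      *hi = i >> 32;
    }

    // Same checks and bit-plucking as the assembler helper above.
    static bool FitsVMOVDoubleImmediate(double d, uint32_t* encoding) {
      uint32_t lo, hi;
      DoubleAsTwoUInt32(d, &lo, &hi);
      if ((lo != 0) || ((hi & 0xffff) != 0)) return false;     // mantissa tail
      if (((hi & 0x3fc00000) != 0) &&
          ((hi & 0x3fc00000) != 0x3fc00000)) return false;     // bits 62:55
      if (((hi ^ (hi << 1)) & 0x40000000) == 0) return false;  // need bit 63 != bit 62
      *encoding = (hi >> 16) & 0xf;       // efgh
      *encoding |= (hi >> 4) & 0x70000;   // bcd
      *encoding |= (hi >> 12) & 0x80000;  // a
      return true;
    }

    int main() {
      uint32_t enc;
      assert(FitsVMOVDoubleImmediate(1.0, &enc) && enc == 0x70000);  // imm8 0x70
      assert(FitsVMOVDoubleImmediate(2.0, &enc) && enc == 0x00000);  // imm8 0x00
      assert(!FitsVMOVDoubleImmediate(0.1, &enc));
      return 0;
    }
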
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 6a4fb23e..226fb874 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -130,6 +130,20 @@ struct DwVfpRegister {
// Supporting d0 to d15, can be later extended to d31.
bool is_valid() const { return 0 <= code_ && code_ < 16; }
bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
+ SwVfpRegister low() const {
+ SwVfpRegister reg;
+ reg.code_ = code_ * 2;
+
+ ASSERT(reg.is_valid());
+ return reg;
+ }
+ SwVfpRegister high() const {
+ SwVfpRegister reg;
+ reg.code_ = (code_ * 2) + 1;
+
+ ASSERT(reg.is_valid());
+ return reg;
+ }
int code() const {
ASSERT(is_valid());
return code_;
@@ -932,6 +946,12 @@ class Assembler : public Malloced {
const Condition cond = al);
void vmov(const DwVfpRegister dst,
+ double imm,
+ const Condition cond = al);
+ void vmov(const SwVfpRegister dst,
+ const SwVfpRegister src,
+ const Condition cond = al);
+ void vmov(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond = al);
void vmov(const DwVfpRegister dst,
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index fa6efcd3..0b2081bd 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -54,11 +54,15 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Condition cc,
bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
Label* lhs_not_nan,
Label* slow,
bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
-static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm);
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+ Register lhs,
+ Register rhs);
static void MultiplyByKnownInt(MacroAssembler* masm,
Register source,
Register destination,
@@ -1404,11 +1408,7 @@ void CodeGenerator::Comparison(Condition cc,
// Perform non-smi comparison by stub.
// CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
// We call with 0 args because there are 0 on the stack.
- if (!rhs.is(r0)) {
- __ Swap(rhs, lhs, ip);
- }
-
- CompareStub stub(cc, strict);
+ CompareStub stub(cc, strict, kBothCouldBeNaN, true, lhs, rhs);
frame_->CallStub(&stub, 0);
__ cmp(r0, Operand(0));
exit.Jump();
@@ -4343,9 +4343,7 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
__ bind(&powi);
// Load 1.0 into d0.
- __ mov(scratch2, Operand(0x3ff00000));
- __ mov(scratch1, Operand(0));
- __ vmov(d0, scratch1, scratch2);
+ __ vmov(d0, 1.0);
// Get the absolute untagged value of the exponent and use that for the
// calculation.
@@ -4405,9 +4403,7 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
AVOID_NANS_AND_INFINITIES);
// Load 1.0 into d2.
- __ mov(scratch2, Operand(0x3ff00000));
- __ mov(scratch1, Operand(0));
- __ vmov(d2, scratch1, scratch2);
+ __ vmov(d2, 1.0);
// Calculate the reciprocal of the square root. 1/sqrt(x) = sqrt(1/x).
__ vdiv(d0, d2, d0);
@@ -4764,6 +4760,24 @@ void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (typeof(arg) === 'object' || %_ClassOf(arg) === 'RegExp' ||
+ // typeof(arg) === 'function').
+ // It includes undetectable objects (as opposed to IsObject).
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Register value = frame_->PopToRegister();
+ __ tst(value, Operand(kSmiTagMask));
+ false_target()->Branch(eq);
+ // Check that this is an object.
+ __ ldr(value, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ ldrb(value, FieldMemOperand(value, Map::kInstanceTypeOffset));
+ __ cmp(value, Operand(FIRST_JS_OBJECT_TYPE));
+ cc_reg_ = ge;
+}
+
+
void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
// This generates a fast version of:
// (%_ClassOf(arg) === 'Function')
@@ -4874,12 +4888,8 @@ void CodeGenerator::GenerateRandomHeapNumber(
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- // To allocate a heap number, and ensure that it is not a smi, we
- // call the runtime function FUnaryMinus on 0, returning the double
- // -0.0. A new, distinct heap number is returned each time.
- __ mov(r0, Operand(Smi::FromInt(0)));
- __ push(r0);
- __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
+ // Allocate a heap number.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
__ mov(r4, Operand(r0));
__ bind(&heapnumber_allocated);
@@ -6976,7 +6986,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// undefined >= undefined should fail.
__ mov(r0, Operand(LESS));
}
- __ mov(pc, Operand(lr)); // Return.
+ __ Ret();
}
}
}
@@ -6990,7 +7000,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
} else {
__ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
}
- __ mov(pc, Operand(lr)); // Return.
+ __ Ret();
if (cc != eq || !never_nan_nan) {
// For less and greater we don't have to check for NaN since the result of
@@ -7022,14 +7032,14 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// value if it's a NaN.
if (cc != eq) {
// All-zero means Infinity means equal.
- __ mov(pc, Operand(lr), LeaveCC, eq); // Return equal
+ __ Ret(eq);
if (cc == le) {
__ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
} else {
__ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
}
}
- __ mov(pc, Operand(lr)); // Return.
+ __ Ret();
}
// No fall through here.
}
@@ -7040,43 +7050,50 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// See comment at call site.
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
Label* lhs_not_nan,
Label* slow,
bool strict) {
+ ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+ (lhs.is(r1) && rhs.is(r0)));
+
Label rhs_is_smi;
- __ tst(r0, Operand(kSmiTagMask));
+ __ tst(rhs, Operand(kSmiTagMask));
__ b(eq, &rhs_is_smi);
// Lhs is a Smi. Check whether the rhs is a heap number.
- __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+ __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
if (strict) {
// If rhs is not a number and lhs is a Smi then strict equality cannot
- // succeed. Return non-equal (r0 is already not zero)
- __ mov(pc, Operand(lr), LeaveCC, ne); // Return.
+ // succeed. Return non-equal.
+ // If rhs is r0 then there is already a non-zero value in it.
+ if (!rhs.is(r0)) {
+ __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
+ }
+ __ Ret(ne);
} else {
// Smi compared non-strictly with a non-Smi non-heap-number. Call
// the runtime.
__ b(ne, slow);
}
- // Lhs (r1) is a smi, rhs (r0) is a number.
+ // Lhs is a smi, rhs is a number.
if (CpuFeatures::IsSupported(VFP3)) {
- // Convert lhs to a double in d7 .
+ // Convert lhs to a double in d7.
CpuFeatures::Scope scope(VFP3);
- __ mov(r7, Operand(r1, ASR, kSmiTagSize));
- __ vmov(s15, r7);
- __ vcvt_f64_s32(d7, s15);
+ __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
// Load the double from rhs, tagged HeapNumber r0, to d6.
- __ sub(r7, r0, Operand(kHeapObjectTag));
+ __ sub(r7, rhs, Operand(kHeapObjectTag));
__ vldr(d6, r7, HeapNumber::kValueOffset);
} else {
__ push(lr);
// Convert lhs to a double in r2, r3.
- __ mov(r7, Operand(r1));
+ __ mov(r7, Operand(lhs));
ConvertToDoubleStub stub1(r3, r2, r7, r6);
__ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
// Load rhs to a double in r0, r1.
- __ Ldrd(r0, r1, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
__ pop(lr);
}
@@ -7086,34 +7103,35 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
__ bind(&rhs_is_smi);
// Rhs is a smi. Check whether the non-smi lhs is a heap number.
- __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
+ __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
if (strict) {
// If lhs is not a number and rhs is a smi then strict equality cannot
// succeed. Return non-equal.
- __ mov(r0, Operand(1), LeaveCC, ne); // Non-zero indicates not equal.
- __ mov(pc, Operand(lr), LeaveCC, ne); // Return.
+ // If lhs is r0 then there is already a non-zero value in it.
+ if (!lhs.is(r0)) {
+ __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
+ }
+ __ Ret(ne);
} else {
// Smi compared non-strictly with a non-smi non-heap-number. Call
// the runtime.
__ b(ne, slow);
}
- // Rhs (r0) is a smi, lhs (r1) is a heap number.
+ // Rhs is a smi, lhs is a heap number.
if (CpuFeatures::IsSupported(VFP3)) {
- // Convert rhs to a double in d6 .
CpuFeatures::Scope scope(VFP3);
// Load the double from lhs, tagged HeapNumber r1, to d7.
- __ sub(r7, r1, Operand(kHeapObjectTag));
+ __ sub(r7, lhs, Operand(kHeapObjectTag));
__ vldr(d7, r7, HeapNumber::kValueOffset);
- __ mov(r7, Operand(r0, ASR, kSmiTagSize));
- __ vmov(s13, r7);
- __ vcvt_f64_s32(d6, s13);
+ // Convert rhs to a double in d6.
+ __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
} else {
__ push(lr);
// Load lhs to a double in r2, r3.
- __ Ldrd(r2, r3, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
// Convert rhs to a double in r0, r1.
- __ mov(r7, Operand(r0));
+ __ mov(r7, Operand(rhs));
ConvertToDoubleStub stub2(r1, r0, r7, r6);
__ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
@@ -7167,7 +7185,7 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
} else {
__ mov(r0, Operand(LESS));
}
- __ mov(pc, Operand(lr)); // Return.
+ __ Ret();
__ bind(&neither_is_nan);
}
@@ -7188,11 +7206,11 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
__ cmp(rhs_mantissa, Operand(lhs_mantissa));
__ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
// Return non-zero if the numbers are unequal.
- __ mov(pc, Operand(lr), LeaveCC, ne);
+ __ Ret(ne);
__ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
// If exponents are equal then return 0.
- __ mov(pc, Operand(lr), LeaveCC, eq);
+ __ Ret(eq);
// Exponents are unequal. The only way we can return that the numbers
// are equal is if one is -0 and the other is 0. We already dealt
@@ -7202,11 +7220,11 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
// equal.
__ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
__ mov(r0, Operand(r4), LeaveCC, ne);
- __ mov(pc, Operand(lr), LeaveCC, ne); // Return conditionally.
+ __ Ret(ne);
// Now they are equal if and only if the lhs exponent is zero in its
// low 31 bits.
__ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
- __ mov(pc, Operand(lr));
+ __ Ret();
} else {
// Call a native function to do a comparison between two non-NaNs.
// Call C routine that may not cause GC or other trouble.
@@ -7219,7 +7237,12 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
// See comment at call site.
-static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm) {
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+ Register lhs,
+ Register rhs) {
+ ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+ (lhs.is(r1) && rhs.is(r0)));
+
// If either operand is a JSObject or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
@@ -7227,20 +7250,20 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm) {
Label first_non_object;
// Get the type of the first operand into r2 and compare it with
// FIRST_JS_OBJECT_TYPE.
- __ CompareObjectType(r0, r2, r2, FIRST_JS_OBJECT_TYPE);
+ __ CompareObjectType(rhs, r2, r2, FIRST_JS_OBJECT_TYPE);
__ b(lt, &first_non_object);
// Return non-zero (r0 is not zero)
Label return_not_equal;
__ bind(&return_not_equal);
- __ mov(pc, Operand(lr)); // Return.
+ __ Ret();
__ bind(&first_non_object);
// Check for oddballs: true, false, null, undefined.
__ cmp(r2, Operand(ODDBALL_TYPE));
__ b(eq, &return_not_equal);
- __ CompareObjectType(r1, r3, r3, FIRST_JS_OBJECT_TYPE);
+ __ CompareObjectType(lhs, r3, r3, FIRST_JS_OBJECT_TYPE);
__ b(ge, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
@@ -7259,12 +7282,17 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm) {
// See comment at call site.
static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
Label* both_loaded_as_doubles,
Label* not_heap_numbers,
Label* slow) {
- __ CompareObjectType(r0, r3, r2, HEAP_NUMBER_TYPE);
+ ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+ (lhs.is(r1) && rhs.is(r0)));
+
+ __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
__ b(ne, not_heap_numbers);
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
__ cmp(r2, r3);
__ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
@@ -7272,13 +7300,13 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
// for that.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
- __ sub(r7, r0, Operand(kHeapObjectTag));
+ __ sub(r7, rhs, Operand(kHeapObjectTag));
__ vldr(d6, r7, HeapNumber::kValueOffset);
- __ sub(r7, r1, Operand(kHeapObjectTag));
+ __ sub(r7, lhs, Operand(kHeapObjectTag));
__ vldr(d7, r7, HeapNumber::kValueOffset);
} else {
- __ Ldrd(r2, r3, FieldMemOperand(r1, HeapNumber::kValueOffset));
- __ Ldrd(r0, r1, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
}
__ jmp(both_loaded_as_doubles);
}
@@ -7286,9 +7314,14 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
// Fast negative check for symbol-to-symbol equality.
static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
Label* possible_strings,
Label* not_both_strings) {
- // r2 is object type of r0.
+ ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+ (lhs.is(r1) && rhs.is(r0)));
+
+ // r2 is object type of rhs.
// Ensure that no non-strings have the symbol bit set.
Label object_test;
ASSERT(kSymbolTag != 0);
@@ -7296,31 +7329,31 @@ static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
__ b(ne, &object_test);
__ tst(r2, Operand(kIsSymbolMask));
__ b(eq, possible_strings);
- __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
+ __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
__ b(ge, not_both_strings);
__ tst(r3, Operand(kIsSymbolMask));
__ b(eq, possible_strings);
// Both are symbols. We already checked they weren't the same pointer
// so they are not equal.
- __ mov(r0, Operand(1)); // Non-zero indicates not equal.
- __ mov(pc, Operand(lr)); // Return.
+ __ mov(r0, Operand(NOT_EQUAL));
+ __ Ret();
__ bind(&object_test);
__ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
__ b(lt, not_both_strings);
- __ CompareObjectType(r1, r2, r3, FIRST_JS_OBJECT_TYPE);
+ __ CompareObjectType(lhs, r2, r3, FIRST_JS_OBJECT_TYPE);
__ b(lt, not_both_strings);
- // If both objects are undetectable, they are equal. Otherwise, they
+ // If both objects are undetectable, they are equal. Otherwise, they
// are not equal, since they are different objects and an object is not
// equal to undefined.
- __ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
__ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
__ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
__ and_(r0, r2, Operand(r3));
__ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
__ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
- __ mov(pc, Operand(lr)); // Return.
+ __ Ret();
}
@@ -7442,10 +7475,13 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
}
-// On entry r0 (rhs) and r1 (lhs) are the values to be compared.
+// On entry lhs_ and rhs_ are the values to be compared.
// On exit r0 is 0, positive or negative to indicate the result of
// the comparison.
void CompareStub::Generate(MacroAssembler* masm) {
+ ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
+ (lhs_.is(r1) && rhs_.is(r0)));
+
Label slow; // Call builtin.
Label not_smis, both_loaded_as_doubles, lhs_not_nan;
@@ -7460,7 +7496,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// be strictly equal if the other is a HeapNumber.
ASSERT_EQ(0, kSmiTag);
ASSERT_EQ(0, Smi::FromInt(0));
- __ and_(r2, r0, Operand(r1));
+ __ and_(r2, lhs_, Operand(rhs_));
__ tst(r2, Operand(kSmiTagMask));
__ b(ne, &not_smis);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
@@ -7472,7 +7508,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// comparison. If VFP3 is supported the double values of the numbers have
// been loaded into d7 and d6. Otherwise, the double values have been loaded
// into r0, r1, r2, and r3.
- EmitSmiNonsmiComparison(masm, &lhs_not_nan, &slow, strict_);
+ EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_);
__ bind(&both_loaded_as_doubles);
// The arguments have been converted to doubles and stored in d6 and d7, if
@@ -7489,7 +7525,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ mov(r0, Operand(EQUAL), LeaveCC, eq);
__ mov(r0, Operand(LESS), LeaveCC, lt);
__ mov(r0, Operand(GREATER), LeaveCC, gt);
- __ mov(pc, Operand(lr));
+ __ Ret();
__ bind(&nan);
// If one of the sides was a NaN then the v flag is set. Load r0 with
@@ -7500,7 +7536,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
} else {
__ mov(r0, Operand(LESS));
}
- __ mov(pc, Operand(lr));
+ __ Ret();
} else {
// Checks for NaN in the doubles we have loaded. Can return the answer or
// fall through if neither is a NaN. Also binds lhs_not_nan.
@@ -7512,11 +7548,11 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&not_smis);
// At this point we know we are dealing with two different objects,
- // and neither of them is a Smi. The objects are in r0 and r1.
+ // and neither of them is a Smi. The objects are in rhs_ and lhs_.
if (strict_) {
// This returns non-equal for some object types, or falls through if it
// was not lucky.
- EmitStrictTwoHeapObjectCompare(masm);
+ EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
}
Label check_for_symbols;
@@ -7524,8 +7560,10 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Check for heap-number-heap-number comparison. Can jump to slow case,
// or load both doubles into r0, r1, r2, r3 and jump to the code that handles
// that case. If the inputs are not doubles then jumps to check_for_symbols.
- // In this case r2 will contain the type of r0. Never falls through.
+ // In this case r2 will contain the type of rhs_. Never falls through.
EmitCheckForTwoHeapNumbers(masm,
+ lhs_,
+ rhs_,
&both_loaded_as_doubles,
&check_for_symbols,
&flat_string_check);
@@ -7536,20 +7574,20 @@ void CompareStub::Generate(MacroAssembler* masm) {
if (cc_ == eq && !strict_) {
// Returns an answer for two symbols or two detectable objects.
// Otherwise jumps to string case or not both strings case.
- // Assumes that r2 is the type of r0 on entry.
- EmitCheckForSymbolsOrObjects(masm, &flat_string_check, &slow);
+ // Assumes that r2 is the type of rhs_ on entry.
+ EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
}
// Check for both being sequential ASCII strings, and inline if that is the
// case.
__ bind(&flat_string_check);
- __ JumpIfNonSmisNotBothSequentialAsciiStrings(r0, r1, r2, r3, &slow);
+ __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);
__ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- r1,
- r0,
+ lhs_,
+ rhs_,
r2,
r3,
r4,
@@ -7558,7 +7596,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&slow);
- __ Push(r1, r0);
+ __ Push(lhs_, rhs_);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript native;
if (cc_ == eq) {
@@ -10059,6 +10097,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
const char* CompareStub::GetName() {
+ ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
+ (lhs_.is(r1) && rhs_.is(r0)));
+
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
@@ -10075,6 +10116,9 @@ const char* CompareStub::GetName() {
default: cc_name = "UnknownCondition"; break;
}
+ const char* lhs_name = lhs_.is(r0) ? "_r0" : "_r1";
+ const char* rhs_name = rhs_.is(r0) ? "_r0" : "_r1";
+
const char* strict_name = "";
if (strict_ && (cc_ == eq || cc_ == ne)) {
strict_name = "_STRICT";
@@ -10091,8 +10135,10 @@ const char* CompareStub::GetName() {
}
OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "CompareStub_%s%s%s%s",
+ "CompareStub_%s%s%s%s%s%s",
cc_name,
+ lhs_name,
+ rhs_name,
strict_name,
never_nan_nan_name,
include_number_compare_name);
@@ -10104,8 +10150,11 @@ int CompareStub::MinorKey() {
// Encode the three parameters in a unique 16 bit value. To avoid duplicate
// stubs the never NaN NaN condition is only taken into account if the
// condition is equals.
- ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 13));
+ ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12));
+ ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
+ (lhs_.is(r1) && rhs_.is(r0)));
return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
+ | RegisterField::encode(lhs_.is(r0))
| StrictField::encode(strict_)
| NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
| IncludeNumberCompareField::encode(include_number_compare_);
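
With the register assignment folded into the stub key, GetName() above now also bakes the lhs/rhs layout into the reported stub name, so the two register variants show up separately in profiles. A minimal sketch of the resulting name, mirroring the format string above; the condition name "EQ" is an assumption, since the switch cases for the named conditions are elided from this hunk:

    #include <cstdio>

    int main() {
      char name[100];
      // cc_name, lhs_name, rhs_name, strict_name, then the two NaN/number
      // qualifiers (empty strings in the default case).
      std::snprintf(name, sizeof(name), "CompareStub_%s%s%s%s%s%s",
                    "EQ", "_r1", "_r0", "_STRICT", "", "");
      std::puts(name);  // CompareStub_EQ_r1_r0_STRICT
      return 0;
    }
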
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 855723d9..83685d88 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -475,6 +475,7 @@ class CodeGenerator: public AstVisitor {
void GenerateIsArray(ZoneList<Expression*>* args);
void GenerateIsRegExp(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
+ void GenerateIsSpecObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
diff --git a/src/arm/constants-arm.cc b/src/arm/constants-arm.cc
index 002e4c13..3df7b4e0 100644
--- a/src/arm/constants-arm.cc
+++ b/src/arm/constants-arm.cc
@@ -37,6 +37,26 @@ namespace arm {
namespace v8i = v8::internal;
+double Instr::DoubleImmedVmov() const {
+ // Reconstruct a double from the immediate encoded in the vmov instruction.
+ //
+ // instruction: [xxxxxxxx,xxxxabcd,xxxxxxxx,xxxxefgh]
+ // double: [aBbbbbbb,bbcdefgh,00000000,00000000,
+ // 00000000,00000000,00000000,00000000]
+ //
+ // where B = ~b. Only the high 16 bits are affected.
+ uint64_t high16;
+ high16 = (Bits(17, 16) << 4) | Bits(3, 0); // xxxxxxxx,xxcdefgh.
+ high16 |= (0xff * Bit(18)) << 6; // xxbbbbbb,bbxxxxxx.
+ high16 |= (Bit(18) ^ 1) << 14; // xBxxxxxx,xxxxxxxx.
+ high16 |= Bit(19) << 15; // axxxxxxx,xxxxxxxx.
+
+ uint64_t imm = high16 << 48;
+ double d;
+ memcpy(&d, &imm, 8);
+ return d;
+}
+
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
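
DoubleImmedVmov above is the exact inverse of the assembler-side FitsVMOVDoubleImmediate. Expressed directly in terms of the 8-bit immediate [abcdefgh] (the instruction scatters those bits over bits 19:16 and 3:0, which the Bits() calls collect), a standalone sketch:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Expand imm8 = [abcdefgh] to a double: the high half-word becomes
    // aBbbbbbb bbcdefgh with B = ~b; the low 48 bits are zero.
    static double ExpandVmovImmediate(uint32_t imm8) {
      uint32_t a = (imm8 >> 7) & 1;
      uint32_t b = (imm8 >> 6) & 1;
      uint64_t high16 = (a << 15)                // sign
                      | ((b ^ 1) << 14)          // B = ~b
                      | ((b ? 0xffu : 0u) << 6)  // b replicated eight times
                      | (imm8 & 0x3f);           // cdefgh
      uint64_t bits = high16 << 48;
      double d;
      std::memcpy(&d, &bits, 8);
      return d;
    }

    int main() {
      assert(ExpandVmovImmediate(0x70) == 1.0);  // matches the encoder sketch
      assert(ExpandVmovImmediate(0x00) == 2.0);
      assert(ExpandVmovImmediate(0xf0) == -1.0);
      return 0;
    }
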
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index fa9adbd7..2ac9a413 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -333,6 +333,9 @@ class Instr {
inline bool HasH() const { return HField() == 1; }
inline bool HasLink() const { return LinkField() == 1; }
+ // Decoding the double immediate in the vmov instruction.
+ double DoubleImmedVmov() const;
+
// Instructions are read of out a code stream. The only way to get a
// reference to an instruction is to convert a pointer. There is no way
// to allocate or create instances of class Instr.
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index a52417be..37401ed2 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -412,6 +412,12 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
PrintCondition(instr);
return 4;
}
+ case 'd': { // 'd: vmov double immediate.
+ double d = instr->DoubleImmedVmov();
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "#%g", d);
+ return 1;
+ }
case 'f': { // 'f: bitfield instructions - v7 and above.
uint32_t lsbit = instr->Bits(11, 7);
uint32_t width = instr->Bits(20, 16) + 1;
@@ -1052,7 +1058,7 @@ void Decoder::DecodeTypeVFP(Instr* instr) {
if (instr->SzField() == 0x1) {
Format(instr, "vmov.f64'cond 'Dd, 'Dm");
} else {
- Unknown(instr); // Not used by V8.
+ Format(instr, "vmov.f32'cond 'Sd, 'Sm");
}
} else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
@@ -1066,6 +1072,12 @@ void Decoder::DecodeTypeVFP(Instr* instr) {
DecodeVCMP(instr);
} else if (((instr->Opc2Field() == 0x1)) && (instr->Opc3Field() == 0x3)) {
Format(instr, "vsqrt.f64'cond 'Dd, 'Dm");
+ } else if (instr->Opc3Field() == 0x0) {
+ if (instr->SzField() == 0x1) {
+ Format(instr, "vmov.f64'cond 'Dd, 'd");
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
} else {
Unknown(instr); // Not used by V8.
}
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 080cb833..7d90ed9c 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -822,8 +822,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// the smi vs. smi case to be handled before it is called.
Label slow_case;
__ ldr(r1, MemOperand(sp, 0)); // Switch value.
- __ mov(r2, r1);
- __ orr(r2, r2, r0);
+ __ orr(r2, r1, r0);
__ tst(r2, Operand(kSmiTagMask));
__ b(ne, &slow_case);
__ cmp(r1, r0);
@@ -832,9 +831,9 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
__ b(clause->body_target()->entry_label());
__ bind(&slow_case);
- CompareStub stub(eq, true);
+ CompareStub stub(eq, true, kBothCouldBeNaN, true, r1, r0);
__ CallStub(&stub);
- __ tst(r0, r0);
+ __ cmp(r0, Operand(0));
__ b(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ b(clause->body_target()->entry_label());
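
The folded orr in the slow-case preamble above relies on V8's pointer tagging: smis carry tag bit 0 and heap objects tag bit 1, so OR-ing the two operands leaves a zero tag bit only when both are smis. A minimal host-side model of the test (tagging scheme as in 32-bit V8 of this era):

    #include <cassert>
    #include <cstdint>

    const uintptr_t kSmiTagMask = 1;  // low bit: 0 = smi, 1 = heap object

    static bool BothSmis(uintptr_t a, uintptr_t b) {
      // One orr + tst instead of mov + orr + tst: the OR of the two
      // values has a zero tag bit iff both tag bits were zero.
      return ((a | b) & kSmiTagMask) == 0;
    }

    int main() {
      uintptr_t smi_a = 42u << 1, smi_b = 7u << 1;  // smi: value << 1
      uintptr_t heap = 0x1000 | 1;                  // tagged pointer
      assert(BothSmis(smi_a, smi_b));
      assert(!BothSmis(smi_a, heap));
      return 0;
    }
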
@@ -1909,6 +1908,25 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
}
+void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ __ BranchOnSmi(r0, if_false);
+ __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
+ __ b(ge, if_true);
+ __ b(if_false);
+
+ Apply(context_, if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
@@ -2161,12 +2179,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- // To allocate a heap number, and ensure that it is not a smi, we
- // call the runtime function FUnaryMinus on 0, returning the double
- // -0.0. A new, distinct heap number is returned each time.
- __ mov(r0, Operand(Smi::FromInt(0)));
- __ push(r0);
- __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
+ // Allocate a heap number.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
__ mov(r4, Operand(r0));
__ bind(&heapnumber_allocated);
@@ -3092,7 +3106,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ jmp(if_false);
__ bind(&slow_case);
- CompareStub stub(cc, strict);
+ CompareStub stub(cc, strict, kBothCouldBeNaN, true, r1, r0);
__ CallStub(&stub);
__ cmp(r0, Operand(0));
__ b(cc, if_true);
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 2896cc96..f251b31f 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -873,88 +873,6 @@ void MacroAssembler::PopTryHandler() {
}
-Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
- JSObject* holder, Register holder_reg,
- Register scratch,
- int save_at_depth,
- Label* miss) {
- // Make sure there's no overlap between scratch and the other
- // registers.
- ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- int depth = 0;
-
- if (save_at_depth == depth) {
- str(reg, MemOperand(sp));
- }
-
- // Check the maps in the prototype chain.
- // Traverse the prototype chain from the object and do map checks.
- while (object != holder) {
- depth++;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- // Get the map of the current object.
- ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
- cmp(scratch, Operand(Handle<Map>(object->map())));
-
- // Branch on the result of the map check.
- b(ne, miss);
-
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (object->IsJSGlobalProxy()) {
- CheckAccessGlobalProxy(reg, scratch, miss);
- // Restore scratch register to be the map of the object. In the
- // new space case below, we load the prototype from the map in
- // the scratch register.
- ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
- }
-
- reg = holder_reg; // from now the object is in holder_reg
- JSObject* prototype = JSObject::cast(object->GetPrototype());
- if (Heap::InNewSpace(prototype)) {
- // The prototype is in new space; we cannot store a reference
- // to it in the code. Load it from the map.
- ldr(reg, FieldMemOperand(scratch, Map::kPrototypeOffset));
- } else {
- // The prototype is in old space; load it directly.
- mov(reg, Operand(Handle<JSObject>(prototype)));
- }
-
- if (save_at_depth == depth) {
- str(reg, MemOperand(sp));
- }
-
- // Go to the next object in the prototype chain.
- object = prototype;
- }
-
- // Check the holder map.
- ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
- cmp(scratch, Operand(Handle<Map>(object->map())));
- b(ne, miss);
-
- // Log the check depth.
- LOG(IntEvent("check-maps-depth", depth + 1));
-
- // Perform security check for access to the global object and return
- // the holder register.
- ASSERT(object == holder);
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
- if (object->IsJSGlobalProxy()) {
- CheckAccessGlobalProxy(reg, scratch, miss);
- }
- return reg;
-}
-
-
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Register scratch,
Label* miss) {
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index f1f7de7f..156e1326 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -316,24 +316,6 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Inline caching support
- // Generates code that verifies that the maps of objects in the
- // prototype chain of object hasn't changed since the code was
- // generated and branches to the miss label if any map has. If
- // necessary the function also generates code for security check
- // in case of global object holders. The scratch and holder
- // registers are always clobbered, but the object register is only
- // clobbered if it the same as the holder register. The function
- // returns a register containing the holder - either object_reg or
- // holder_reg.
- // The function can optionally (when save_at_depth !=
- // kInvalidProtoDepth) save the object at the given depth by moving
- // it to [sp].
- Register CheckMaps(JSObject* object, Register object_reg,
- JSObject* holder, Register holder_reg,
- Register scratch,
- int save_at_depth,
- Label* miss);
-
// Generate code for checking access rights - used for security checks
// on access to global objects across environments. The holder register
// is left untouched, whereas both scratch registers are clobbered.
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index e8910f48..c67c7aac 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -799,7 +799,6 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(&code_desc);
Handle<Code> code = Factory::NewCode(code_desc,
- NULL,
Code::ComputeFlags(Code::REGEXP),
masm_->CodeObject());
PROFILE(RegExpCodeCreateEvent(*code, *source));
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index f09ce003..3345e455 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -2281,7 +2281,7 @@ void Simulator::DecodeTypeVFP(Instr* instr) {
if (instr->SzField() == 0x1) {
set_d_register_from_double(vd, get_double_from_d_register(vm));
} else {
- UNREACHABLE(); // Not used by V8.
+ set_s_register_from_float(vd, get_float_from_s_register(vm));
}
} else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
@@ -2298,6 +2298,13 @@ void Simulator::DecodeTypeVFP(Instr* instr) {
double dm_value = get_double_from_d_register(vm);
double dd_value = sqrt(dm_value);
set_d_register_from_double(vd, dd_value);
+ } else if (instr->Opc3Field() == 0x0) {
+ // vmov immediate.
+ if (instr->SzField() == 0x1) {
+ set_d_register_from_double(vd, instr->DoubleImmedVmov());
+ } else {
+ UNREACHABLE(); // Not used by V8.
+ }
} else {
UNREACHABLE(); // Not used by V8.
}
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 0e649ccd..ff3007c4 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -83,6 +83,119 @@ static void ProbeTable(MacroAssembler* masm,
}
+// Helper function used to check that the dictionary doesn't contain
+// the property. This function may return false negatives, so miss_label
+// must always call a backup property check that is complete.
+// This function is safe to call if the receiver has fast properties.
+// Name must be a symbol and receiver must be a heap object.
+static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ String* name,
+ Register scratch0,
+ Register scratch1) {
+ ASSERT(name->IsSymbol());
+ __ IncrementCounter(&Counters::negative_lookups, 1, scratch0, scratch1);
+ __ IncrementCounter(&Counters::negative_lookups_miss, 1, scratch0, scratch1);
+
+ Label done;
+
+ const int kInterceptorOrAccessCheckNeededMask =
+ (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+ // Bail out if the receiver has a named interceptor or requires access checks.
+ Register map = scratch1;
+ __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ tst(scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
+ __ b(ne, miss_label);
+
+ // Check that receiver is a JSObject.
+ __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ cmp(scratch0, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(lt, miss_label);
+
+ // Load properties array.
+ Register properties = scratch0;
+ __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ // Check that the properties array is a dictionary.
+ __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
+ Register tmp = properties;
+ __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
+ __ cmp(map, tmp);
+ __ b(ne, miss_label);
+
+ // Restore the temporarily used register.
+ __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+ // Compute the capacity mask.
+ const int kCapacityOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kCapacityIndex * kPointerSize;
+
+ // Generate an unrolled loop that performs a few probes before
+ // giving up.
+ static const int kProbes = 4;
+ const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+
+ // If the names of slots 1 to kProbes - 1 for the hash value are not equal
+ // to the name and the kProbes-th slot is not used (its name is the
+ // undefined value), the hash table is guaranteed not to contain the
+ // property. This holds even if some slots represent deleted properties
+ // (their names are the null value).
+ for (int i = 0; i < kProbes; i++) {
+ // scratch0 points to properties hash.
+ // Compute the masked index: (hash + i + i * i) & mask.
+ Register index = scratch1;
+ // Capacity is smi 2^n.
+ __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
+ __ sub(index, index, Operand(1));
+ __ and_(index, index, Operand(
+ Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
+
+ Register entity_name = scratch1;
+ // Finding undefined at this index means the name is not present.
+ ASSERT_EQ(kSmiTagSize, 1);
+ Register tmp = properties;
+ __ add(tmp, properties, Operand(index, LSL, 1));
+ __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
+
+ ASSERT(!tmp.is(entity_name));
+ __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
+ __ cmp(entity_name, tmp);
+ if (i != kProbes - 1) {
+ __ b(eq, &done);
+
+ // Stop if found the property.
+ __ cmp(entity_name, Operand(Handle<String>(name)));
+ __ b(eq, miss_label);
+
+ // Check if the entry name is not a symbol.
+ __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+ __ ldrb(entity_name,
+ FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+ __ tst(entity_name, Operand(kIsSymbolMask));
+ __ b(eq, miss_label);
+
+ // Restore the properties.
+ __ ldr(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ } else {
+ // Give up probing if we still haven't found the undefined value.
+ __ b(ne, miss_label);
+ }
+ }
+ __ bind(&done);
+ __ DecrementCounter(&Counters::negative_lookups_miss, 1, scratch0, scratch1);
+}
+
+
void StubCache::GenerateProbe(MacroAssembler* masm,
Code::Flags flags,
Register receiver,
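
The unrolled probes in GenerateDictionaryNegativeLookup follow the same quadratic sequence as the runtime's StringDictionary lookup. A standalone sketch of the index arithmetic, assuming V8's usual triangular probe offset GetProbeOffset(i) = (i + i*i) / 2 (not part of this hunk) and the kEntrySize == 3 asserted above:

    #include <cstdint>
    #include <cstdio>

    // Assumed from the runtime hash table: triangular probing.
    static uint32_t GetProbeOffset(uint32_t i) { return (i + i * i) >> 1; }

    // Slot probed on iteration i of a power-of-two-capacity dictionary,
    // mirroring the masked-index computation in the stub (which does the
    // same arithmetic on smi-tagged values).
    static uint32_t ProbeElementIndex(uint32_t hash, uint32_t capacity,
                                      uint32_t i) {
      uint32_t index = (hash + GetProbeOffset(i)) & (capacity - 1);
      return index * 3;  // kEntrySize == 3: the add(index, index, LSL 1) above
    }

    int main() {
      for (uint32_t i = 0; i < 4; i++) {  // kProbes == 4
        std::printf("probe %u -> element %u\n", i,
                    ProbeElementIndex(0x5a5a, 128, i));
      }
      return 0;
    }
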
@@ -517,6 +630,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -532,6 +646,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
receiver,
scratch1,
scratch2,
+ scratch3,
holder,
lookup,
name,
@@ -543,6 +658,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
receiver,
scratch1,
scratch2,
+ scratch3,
name,
holder,
miss);
@@ -555,6 +671,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
JSObject* interceptor_holder,
LookupResult* lookup,
String* name,
@@ -596,7 +713,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register holder =
stub_compiler_->CheckPrototypes(object, receiver,
interceptor_holder, scratch1,
- scratch2, name, depth1, miss);
+ scratch2, scratch3, name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -612,7 +729,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
if (interceptor_holder != lookup->holder()) {
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
lookup->holder(), scratch1,
- scratch2, name, depth2, miss);
+ scratch2, scratch3, name, depth2, miss);
} else {
// CheckPrototypes has a side effect of fetching a 'holder'
// for API (object which is instanceof for the signature). It's
@@ -648,12 +765,13 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
String* name,
JSObject* interceptor_holder,
Label* miss_label) {
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, name,
+ scratch1, scratch2, scratch3, name,
miss_label);
// Call a runtime function to load the interceptor property.
@@ -738,36 +856,134 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
Register object_reg,
JSObject* holder,
Register holder_reg,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
String* name,
int save_at_depth,
- Label* miss,
- Register extra) {
- // Check that the maps haven't changed.
- Register result =
- masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch,
- save_at_depth, miss);
+ Label* miss) {
+ // Make sure there's no overlap between the scratch registers and the
+ // object and holder registers.
+ ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+ ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
+ && !scratch2.is(scratch1));
+
+ // Keep track of the current object in register reg.
+ Register reg = object_reg;
+ int depth = 0;
+
+ if (save_at_depth == depth) {
+ __ str(reg, MemOperand(sp));
+ }
+
+ // Check the maps in the prototype chain.
+ // Traverse the prototype chain from the object and do map checks.
+ JSObject* current = object;
+ while (current != holder) {
+ depth++;
+
+ // Only global objects and objects that do not require access
+ // checks are allowed in stubs.
+ ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+
+ JSObject* prototype = JSObject::cast(current->GetPrototype());
+ if (!current->HasFastProperties() &&
+ !current->IsJSGlobalObject() &&
+ !current->IsJSGlobalProxy()) {
+ if (!name->IsSymbol()) {
+ Object* lookup_result = Heap::LookupSymbol(name);
+ if (lookup_result->IsFailure()) {
+ set_failure(Failure::cast(lookup_result));
+ return reg;
+ } else {
+ name = String::cast(lookup_result);
+ }
+ }
+ ASSERT(current->property_dictionary()->FindEntry(name) ==
+ StringDictionary::kNotFound);
+
+ GenerateDictionaryNegativeLookup(masm(),
+ miss,
+ reg,
+ name,
+ scratch1,
+ scratch2);
+ __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ reg = holder_reg; // From now on the object is in holder_reg.
+ __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ // Get the map of the current object.
+ __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ cmp(scratch1, Operand(Handle<Map>(current->map())));
+
+ // Branch on the result of the map check.
+ __ b(ne, miss);
+
+ // Check access rights to the global object. This has to happen
+ // after the map check so that we know that the object is
+ // actually a global object.
+ if (current->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ // Restore scratch register to be the map of the object. In the
+ // new space case below, we load the prototype from the map in
+ // the scratch register.
+ __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ }
+
+ reg = holder_reg; // From now on the object is in holder_reg.
+ if (Heap::InNewSpace(prototype)) {
+ // The prototype is in new space; we cannot store a reference
+ // to it in the code. Load it from the map.
+ __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ // The prototype is in old space; load it directly.
+ __ mov(reg, Operand(Handle<JSObject>(prototype)));
+ }
+ }
+
+ if (save_at_depth == depth) {
+ __ str(reg, MemOperand(sp));
+ }
+
+ // Go to the next object in the prototype chain.
+ current = prototype;
+ }
+
+ // Check the holder map.
+ __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ cmp(scratch1, Operand(Handle<Map>(current->map())));
+ __ b(ne, miss);
+
+ // Log the check depth.
+ LOG(IntEvent("check-maps-depth", depth + 1));
+
+ // Perform security check for access to the global object and return
+ // the holder register.
+ ASSERT(current == holder);
+ ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+ if (current->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ }
// If we've skipped any global objects, it's not enough to verify
// that their maps haven't changed. We also need to check that the
// property cell for the property is still empty.
- while (object != holder) {
- if (object->IsGlobalObject()) {
+ current = object;
+ while (current != holder) {
+ if (current->IsGlobalObject()) {
Object* cell = GenerateCheckPropertyCell(masm(),
- GlobalObject::cast(object),
+ GlobalObject::cast(current),
name,
- scratch,
+ scratch1,
miss);
if (cell->IsFailure()) {
set_failure(Failure::cast(cell));
- return result;
+ return reg;
}
}
- object = JSObject::cast(object->GetPrototype());
+ current = JSObject::cast(current->GetPrototype());
}
// Return the register containing the holder.
- return result;
+ return reg;
}
@@ -776,6 +992,7 @@ void StubCompiler::GenerateLoadField(JSObject* object,
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
int index,
String* name,
Label* miss) {
@@ -785,7 +1002,8 @@ void StubCompiler::GenerateLoadField(JSObject* object,
// Check that the maps haven't changed.
Register reg =
- CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
+ CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
+ name, miss);
GenerateFastPropertyLoad(masm(), r0, reg, holder, index);
__ Ret();
}
@@ -796,6 +1014,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
Object* value,
String* name,
Label* miss) {
@@ -805,7 +1024,8 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
// Check that the maps haven't changed.
Register reg =
- CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
+ CheckPrototypes(object, receiver, holder,
+ scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
__ mov(r0, Operand(Handle<Object>(value)));
@@ -819,6 +1039,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
Register name_reg,
Register scratch1,
Register scratch2,
+ Register scratch3,
AccessorInfo* callback,
String* name,
Label* miss,
@@ -829,7 +1050,8 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
// Check that the maps haven't changed.
Register reg =
- CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
+ CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
+ name, miss);
// Push the arguments on the JS stack of the caller.
__ push(receiver); // Receiver.
@@ -854,6 +1076,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
Register name_reg,
Register scratch1,
Register scratch2,
+ Register scratch3,
String* name,
Label* miss) {
ASSERT(interceptor_holder->HasNamedInterceptor());
@@ -881,7 +1104,8 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// property from further up the prototype chain if the call fails.
// Check that the maps haven't changed.
Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, name, miss);
+ scratch1, scratch2, scratch3,
+ name, miss);
ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
// Save necessary data before invoking an interceptor.
@@ -930,6 +1154,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
lookup->holder(),
scratch1,
scratch2,
+ scratch3,
name,
miss);
}
@@ -975,7 +1200,8 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// Call the runtime system to load the interceptor.
// Check that the maps haven't changed.
Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, name, miss);
+ scratch1, scratch2, scratch3,
+ name, miss);
PushInterceptorArguments(masm(), receiver, holder_reg,
name_reg, interceptor_holder);
@@ -1053,7 +1279,7 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,
__ b(eq, &miss);
// Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, r0, holder, r1, r3, name, &miss);
+ Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss);
GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
GenerateCallFunction(masm(), object, arguments(), &miss);
@@ -1098,7 +1324,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ b(eq, &miss);
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss);
+ CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, r4, name, &miss);
if (object->IsGlobalObject()) {
__ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
@@ -1149,7 +1375,7 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
__ b(eq, &miss);
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss);
+ CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, r4, name, &miss);
if (object->IsGlobalObject()) {
__ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
@@ -1246,7 +1472,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
}
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, name,
+ CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
depth, &miss);
// Patch the receiver on the stack with the global proxy if
@@ -1270,7 +1496,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, r0);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
- r1, name, &miss);
+ r1, r4, name, &miss);
}
break;
@@ -1290,7 +1516,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::NUMBER_FUNCTION_INDEX, r0);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
- r1, name, &miss);
+ r1, r4, name, &miss);
}
break;
}
@@ -1313,7 +1539,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::BOOLEAN_FUNCTION_INDEX, r0);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
- r1, name, &miss);
+ r1, r4, name, &miss);
}
break;
}
@@ -1372,6 +1598,7 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
r1,
r3,
r4,
+ r0,
&miss);
// Move returned value, the function to call, to r1.
@@ -1418,7 +1645,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
}
// Check that the maps haven't changed.
- CheckPrototypes(object, r0, holder, r3, r1, name, &miss);
+ CheckPrototypes(object, r0, holder, r3, r1, r4, name, &miss);
// Get the value from the cell.
__ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
@@ -1642,7 +1869,7 @@ Object* LoadStubCompiler::CompileLoadNonexistent(String* name,
__ b(eq, &miss);
// Check the maps of the full prototype chain.
- CheckPrototypes(object, r0, last, r3, r1, name, &miss);
+ CheckPrototypes(object, r0, last, r3, r1, r4, name, &miss);
// If the last object in the prototype chain is a global object,
// check that the global property cell is empty.
@@ -1679,7 +1906,7 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
// -----------------------------------
Label miss;
- GenerateLoadField(object, holder, r0, r3, r1, index, name, &miss);
+ GenerateLoadField(object, holder, r0, r3, r1, r4, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1700,7 +1927,7 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name,
Label miss;
Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1,
+ bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4,
callback, name, &miss, &failure);
if (!success) return failure;
@@ -1723,7 +1950,7 @@ Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
// -----------------------------------
Label miss;
- GenerateLoadConstant(object, holder, r0, r3, r1, value, name, &miss);
+ GenerateLoadConstant(object, holder, r0, r3, r1, r4, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1751,6 +1978,7 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
r2,
r3,
r1,
+ r4,
name,
&miss);
__ bind(&miss);
@@ -1782,7 +2010,7 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
}
// Check that the map of the global has not changed.
- CheckPrototypes(object, r0, holder, r3, r4, name, &miss);
+ CheckPrototypes(object, r0, holder, r3, r4, r1, name, &miss);
// Get the value from the cell.
__ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
@@ -1823,7 +2051,7 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
__ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss);
- GenerateLoadField(receiver, holder, r1, r2, r3, index, name, &miss);
+ GenerateLoadField(receiver, holder, r1, r2, r3, r4, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1847,7 +2075,7 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
__ b(ne, &miss);
Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3,
+ bool success = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4,
callback, name, &miss, &failure);
if (!success) return failure;
@@ -1873,7 +2101,7 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
__ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss);
- GenerateLoadConstant(receiver, holder, r1, r2, r3, value, name, &miss);
+ GenerateLoadConstant(receiver, holder, r1, r2, r3, r4, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1905,6 +2133,7 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
r0,
r2,
r3,
+ r4,
name,
&miss);
__ bind(&miss);
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index bbd69eca..e1d4489d 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -812,6 +812,9 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
initial_map->set_instance_size(
initial_map->instance_size() + 5 * kPointerSize);
initial_map->set_instance_descriptors(*descriptors);
+ initial_map->set_scavenger(
+ Heap::GetScavenger(initial_map->instance_type(),
+ initial_map->instance_size()));
}
{ // -- J S O N
diff --git a/src/builtins.cc b/src/builtins.cc
index ad52ea18..3a0393ef 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -1475,7 +1475,7 @@ void Builtins::Setup(bool create_heap_objects) {
// During startup it's OK to always allocate and defer GC to later.
// This simplifies things because we don't need to retry.
AlwaysAllocateScope __scope__;
- code = Heap::CreateCode(desc, NULL, flags, masm.CodeObject());
+ code = Heap::CreateCode(desc, flags, masm.CodeObject());
if (code->IsFailure()) {
v8::internal::V8::FatalProcessOutOfMemory("CreateCode");
}
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 9d5969bb..78062b40 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -102,8 +102,7 @@ Handle<Code> CodeStub::GetCode() {
static_cast<Code::Kind>(GetCodeKind()),
InLoop(),
GetICState());
- Handle<Code> new_object =
- Factory::NewCode(desc, NULL, flags, masm.CodeObject());
+ Handle<Code> new_object = Factory::NewCode(desc, flags, masm.CodeObject());
RecordCodeGeneration(*new_object, &masm);
if (has_custom_cache()) {
@@ -140,8 +139,7 @@ Object* CodeStub::TryGetCode() {
static_cast<Code::Kind>(GetCodeKind()),
InLoop(),
GetICState());
- Object* new_object =
- Heap::CreateCode(desc, NULL, flags, masm.CodeObject());
+ Object* new_object = Heap::CreateCode(desc, flags, masm.CodeObject());
if (new_object->IsFailure()) return new_object;
code = Code::cast(new_object);
RecordCodeGeneration(code, &masm);
diff --git a/src/codegen.cc b/src/codegen.cc
index 8864c95a..84b73a4e 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -162,9 +162,7 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
// Allocate and install the code.
CodeDesc desc;
masm->GetCode(&desc);
- ZoneScopeInfo sinfo(info->scope());
- Handle<Code> code =
- Factory::NewCode(desc, &sinfo, flags, masm->CodeObject());
+ Handle<Code> code = Factory::NewCode(desc, flags, masm->CodeObject());
#ifdef ENABLE_DISASSEMBLER
bool print_code = Bootstrapper::IsActive()
diff --git a/src/codegen.h b/src/codegen.h
index 783bef00..7a4b8581 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -120,6 +120,7 @@ namespace internal {
F(IsObject, 1, 1) \
F(IsFunction, 1, 1) \
F(IsUndetectableObject, 1, 1) \
+ F(IsSpecObject, 1, 1) \
F(StringAdd, 2, 1) \
F(SubString, 3, 1) \
F(StringCompare, 2, 1) \
@@ -180,7 +181,6 @@ class CodeGeneratorScope BASE_EMBEDDED {
CodeGenerator* previous_;
};
-
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
// State of used registers in a virtual frame.
@@ -461,11 +461,15 @@ class CompareStub: public CodeStub {
CompareStub(Condition cc,
bool strict,
NaNInformation nan_info = kBothCouldBeNaN,
- bool include_number_compare = true) :
+ bool include_number_compare = true,
+ Register lhs = no_reg,
+ Register rhs = no_reg) :
cc_(cc),
strict_(strict),
never_nan_nan_(nan_info == kCantBothBeNaN),
include_number_compare_(include_number_compare),
+ lhs_(lhs),
+ rhs_(rhs),
name_(NULL) { }
void Generate(MacroAssembler* masm);
@@ -483,12 +487,19 @@ class CompareStub: public CodeStub {
// comparison code is used when the number comparison has been inlined, and
// the stub will be called if one of the operands is not a number.
bool include_number_compare_;
-
- // Encoding of the minor key CCCCCCCCCCCCCCNS.
+ // Register holding the left-hand side of the comparison if the stub gives
+ // a choice, no_reg otherwise.
+ Register lhs_;
+ // Register holding the right-hand side of the comparison if the stub gives
+ // a choice, no_reg otherwise.
+ Register rhs_;
+
+ // Encoding of the minor key CCCCCCCCCCCCRCNS.
class StrictField: public BitField<bool, 0, 1> {};
class NeverNanNanField: public BitField<bool, 1, 1> {};
class IncludeNumberCompareField: public BitField<bool, 2, 1> {};
- class ConditionField: public BitField<int, 3, 13> {};
+ class RegisterField: public BitField<bool, 3, 1> {};
+ class ConditionField: public BitField<int, 4, 12> {};
Major MajorKey() { return Compare; }
@@ -507,11 +518,17 @@ class CompareStub: public CodeStub {
#ifdef DEBUG
void Print() {
PrintF("CompareStub (cc %d), (strict %s), "
- "(never_nan_nan %s), (number_compare %s)\n",
+ "(never_nan_nan %s), (number_compare %s) ",
static_cast<int>(cc_),
strict_ ? "true" : "false",
never_nan_nan_ ? "true" : "false",
include_number_compare_ ? "included" : "not included");
+
+ if (!lhs_.is(no_reg) && !rhs_.is(no_reg)) {
+ PrintF("(lhs r%d), (rhs r%d)\n", lhs_.code(), rhs_.code());
+ } else {
+ PrintF("\n");
+ }
}
#endif
};
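Reading the new minor key right to left: bit 0 is strict (S), bit 1 never-NaN-NaN (N), bit 2 include-number-compare (C), bit 3 the new register choice (R), and bits 4 through 15 hold the condition (the leading run of C's), squeezed from 13 bits to 12 to make room. A sketch of how such a key is packed and unpacked with the BitField helpers declared above; the actual MinorKey implementations are per-architecture and not part of this hunk:

    // Illustrative fragment using the field classes from CompareStub.
    int key = StrictField::encode(strict_)
            | NeverNanNanField::encode(never_nan_nan_)
            | IncludeNumberCompareField::encode(include_number_compare_)
            | RegisterField::encode(!lhs_.is(no_reg))  // stub bound to regs?
            | ConditionField::encode(static_cast<int>(cc_));
    // Decoding reverses the packing, e.g.:
    bool has_fixed_regs = RegisterField::decode(key);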
diff --git a/src/compiler.cc b/src/compiler.cc
index ebb97435..d87d9da8 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -40,6 +40,7 @@
#include "oprofile-agent.h"
#include "rewriter.h"
#include "scopes.h"
+#include "scopeinfo.h"
namespace v8 {
namespace internal {
@@ -156,7 +157,12 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
#ifdef ENABLE_DEBUGGER_SUPPORT
Handle<Code> MakeCodeForLiveEdit(CompilationInfo* info) {
Handle<Context> context = Handle<Context>::null();
- return MakeCode(context, info);
+ Handle<Code> code = MakeCode(context, info);
+ if (!info->shared_info().is_null()) {
+ info->shared_info()->set_scope_info(
+ *SerializedScopeInfo::Create(info->scope()));
+ }
+ return code;
}
#endif
@@ -252,9 +258,11 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(bool is_global,
// Allocate function.
Handle<SharedFunctionInfo> result =
- Factory::NewSharedFunctionInfo(lit->name(),
- lit->materialized_literal_count(),
- code);
+ Factory::NewSharedFunctionInfo(
+ lit->name(),
+ lit->materialized_literal_count(),
+ code,
+ SerializedScopeInfo::Create(info.scope()));
ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
Compiler::SetFunctionInfo(result, lit, true, script);
@@ -275,9 +283,6 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(bool is_global,
}
-static StaticResource<SafeStringInputBuffer> safe_string_input_buffer;
-
-
Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
Handle<Object> script_name,
int line_offset,
@@ -306,9 +311,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
// No cache entry found. Do pre-parsing and compile the script.
ScriptDataImpl* pre_data = input_pre_data;
if (pre_data == NULL && source_length >= FLAG_min_preparse_length) {
- Access<SafeStringInputBuffer> buf(&safe_string_input_buffer);
- buf->Reset(source.location());
- pre_data = PreParse(source, buf.value(), extension);
+ pre_data = PreParse(source, NULL, extension);
}
// Create a script object describing the script to be compiled.
@@ -445,7 +448,12 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
info->script(),
code);
- // Update the shared function info with the compiled code.
+ // Update the shared function info with the compiled code and the scope info.
+ // Note that the order of the shared function initialization is
+ // important, since set_scope_info might trigger a GC, causing the ASSERT
+ // below to be invalid if the code was flushed. By setting the code
+ // object last we avoid this.
+ shared->set_scope_info(*SerializedScopeInfo::Create(info->scope()));
shared->set_code(*code);
// Set the expected number of properties for instances.
@@ -481,6 +489,8 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
bool allow_lazy = literal->AllowsLazyCompilation() &&
!LiveEditFunctionTracker::IsActive();
+ Handle<SerializedScopeInfo> scope_info(SerializedScopeInfo::Empty());
+
// Generate code
Handle<Code> code;
if (FLAG_lazy && allow_lazy) {
@@ -562,13 +572,15 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
literal->start_position(),
script,
code);
+ scope_info = SerializedScopeInfo::Create(info.scope());
}
// Create a shared function info object.
Handle<SharedFunctionInfo> result =
Factory::NewSharedFunctionInfo(literal->name(),
literal->materialized_literal_count(),
- code);
+ code,
+ scope_info);
SetFunctionInfo(result, literal, false, script);
// Set the expected number of properties for instances and return
diff --git a/src/contexts.cc b/src/contexts.cc
index 19920d22..723354fc 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -120,9 +120,10 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
// we have context-local slots
// check non-parameter locals in context
- Handle<Code> code(context->closure()->code());
+ Handle<SerializedScopeInfo> scope_info(
+ context->closure()->shared()->scope_info());
Variable::Mode mode;
- int index = ScopeInfo<>::ContextSlotIndex(*code, *name, &mode);
+ int index = scope_info->ContextSlotIndex(*name, &mode);
ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
if (index >= 0) {
// slot found
@@ -150,13 +151,11 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
}
// check parameter locals in context
- int param_index = ScopeInfo<>::ParameterIndex(*code, *name);
+ int param_index = scope_info->ParameterIndex(*name);
if (param_index >= 0) {
// slot found.
int index =
- ScopeInfo<>::ContextSlotIndex(*code,
- Heap::arguments_shadow_symbol(),
- NULL);
+ scope_info->ContextSlotIndex(Heap::arguments_shadow_symbol(), NULL);
ASSERT(index >= 0); // arguments must exist and be in the heap context
Handle<JSObject> arguments(JSObject::cast(context->get(index)));
ASSERT(arguments->HasLocalProperty(Heap::length_symbol()));
@@ -170,7 +169,7 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
// check intermediate context (holding only the function name variable)
if (follow_context_chain) {
- int index = ScopeInfo<>::FunctionContextSlotIndex(*code, *name);
+ int index = scope_info->FunctionContextSlotIndex(*name);
if (index >= 0) {
// slot found
if (FLAG_trace_contexts) {
@@ -216,18 +215,19 @@ bool Context::GlobalIfNotShadowedByEval(Handle<String> name) {
ASSERT(context->is_function_context());
// Check non-parameter locals.
- Handle<Code> code(context->closure()->code());
+ Handle<SerializedScopeInfo> scope_info(
+ context->closure()->shared()->scope_info());
Variable::Mode mode;
- int index = ScopeInfo<>::ContextSlotIndex(*code, *name, &mode);
+ int index = scope_info->ContextSlotIndex(*name, &mode);
ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
if (index >= 0) return false;
// Check parameter locals.
- int param_index = ScopeInfo<>::ParameterIndex(*code, *name);
+ int param_index = scope_info->ParameterIndex(*name);
if (param_index >= 0) return false;
// Check context only holding the function name variable.
- index = ScopeInfo<>::FunctionContextSlotIndex(*code, *name);
+ index = scope_info->FunctionContextSlotIndex(*name);
if (index >= 0) return false;
context = Context::cast(context->closure()->context());
}
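Both contexts.cc hunks replace the same idiom: slot indices used to be looked up in scope info serialized into the closure's Code object, and now come from the SerializedScopeInfo hanging off the SharedFunctionInfo. Detaching variable resolution from the code object is what lets code flushing drop compiled code freely; see the heap.cc hunk below, which removes the old ScopeInfo<>::HasHeapAllocatedLocals guard. The new lookup pattern, condensed from the hunks above:

    Handle<SerializedScopeInfo> scope_info(
        context->closure()->shared()->scope_info());
    Variable::Mode mode;
    int slot  = scope_info->ContextSlotIndex(*name, &mode);    // locals
    int param = scope_info->ParameterIndex(*name);             // parameters
    int fun   = scope_info->FunctionContextSlotIndex(*name);   // fn name var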
diff --git a/src/debug.cc b/src/debug.cc
index b8e0252a..72881351 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -759,7 +759,7 @@ bool Debug::CompileDebuggerScript(int index) {
if (caught_exception) {
Handle<Object> message = MessageHandler::MakeMessageObject(
"error_loading_debugger", NULL, Vector<Handle<Object> >::empty(),
- Handle<String>());
+ Handle<String>(), Handle<JSArray>());
MessageHandler::ReportMessage(NULL, message);
return false;
}
@@ -1882,6 +1882,7 @@ int Debugger::host_dispatch_micros_ = 100 * 1000;
DebuggerAgent* Debugger::agent_ = NULL;
LockingCommandMessageQueue Debugger::command_queue_(kQueueInitialSize);
Semaphore* Debugger::command_received_ = OS::CreateSemaphore(0);
+LockingCommandMessageQueue Debugger::event_command_queue_(kQueueInitialSize);
Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
@@ -2207,39 +2208,75 @@ void Debugger::ProcessDebugEvent(v8::DebugEvent event,
event_data,
auto_continue);
}
- // Notify registered debug event listener. This can be either a C or a
- // JavaScript function.
- if (!event_listener_.is_null()) {
- if (event_listener_->IsProxy()) {
- // C debug event listener.
- Handle<Proxy> callback_obj(Handle<Proxy>::cast(event_listener_));
- v8::Debug::EventCallback2 callback =
- FUNCTION_CAST<v8::Debug::EventCallback2>(callback_obj->proxy());
- EventDetailsImpl event_details(
- event,
- Handle<JSObject>::cast(exec_state),
- event_data,
- event_listener_data_);
- callback(event_details);
- } else {
- // JavaScript debug event listener.
- ASSERT(event_listener_->IsJSFunction());
- Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_));
-
- // Invoke the JavaScript debug event listener.
- const int argc = 4;
- Object** argv[argc] = { Handle<Object>(Smi::FromInt(event)).location(),
- exec_state.location(),
- Handle<Object>::cast(event_data).location(),
- event_listener_data_.location() };
- Handle<Object> result = Execution::TryCall(fun, Top::global(),
- argc, argv, &caught_exception);
- // Silently ignore exceptions from debug event listeners.
+ // Notify registered debug event listener. This can be either a C or
+ // a JavaScript function. Don't call event listener for v8::Break
+ // here, if it's only a debug command -- they will be processed later.
+ if ((event != v8::Break || !auto_continue) && !event_listener_.is_null()) {
+ CallEventCallback(event, exec_state, event_data, NULL);
+ }
+ // Process pending debug commands.
+ if (event == v8::Break) {
+ while (!event_command_queue_.IsEmpty()) {
+ CommandMessage command = event_command_queue_.Get();
+ if (!event_listener_.is_null()) {
+ CallEventCallback(v8::BreakForCommand,
+ exec_state,
+ event_data,
+ command.client_data());
+ }
+ command.Dispose();
}
}
}
+void Debugger::CallEventCallback(v8::DebugEvent event,
+ Handle<Object> exec_state,
+ Handle<Object> event_data,
+ v8::Debug::ClientData* client_data) {
+ if (event_listener_->IsProxy()) {
+ CallCEventCallback(event, exec_state, event_data, client_data);
+ } else {
+ CallJSEventCallback(event, exec_state, event_data);
+ }
+}
+
+
+void Debugger::CallCEventCallback(v8::DebugEvent event,
+ Handle<Object> exec_state,
+ Handle<Object> event_data,
+ v8::Debug::ClientData* client_data) {
+ Handle<Proxy> callback_obj(Handle<Proxy>::cast(event_listener_));
+ v8::Debug::EventCallback2 callback =
+ FUNCTION_CAST<v8::Debug::EventCallback2>(callback_obj->proxy());
+ EventDetailsImpl event_details(
+ event,
+ Handle<JSObject>::cast(exec_state),
+ Handle<JSObject>::cast(event_data),
+ event_listener_data_,
+ client_data);
+ callback(event_details);
+}
+
+
+void Debugger::CallJSEventCallback(v8::DebugEvent event,
+ Handle<Object> exec_state,
+ Handle<Object> event_data) {
+ ASSERT(event_listener_->IsJSFunction());
+ Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_));
+
+ // Invoke the JavaScript debug event listener.
+ const int argc = 4;
+ Object** argv[argc] = { Handle<Object>(Smi::FromInt(event)).location(),
+ exec_state.location(),
+ Handle<Object>::cast(event_data).location(),
+ event_listener_data_.location() };
+ bool caught_exception = false;
+ Execution::TryCall(fun, Top::global(), argc, argv, &caught_exception);
+ // Silently ignore exceptions from debug event listeners.
+}
+
+
Handle<Context> Debugger::GetDebugContext() {
never_unload_debugger_ = true;
EnterDebugger debugger;
@@ -2273,6 +2310,7 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
bool sendEventMessage = false;
switch (event) {
case v8::Break:
+ case v8::BreakForCommand:
sendEventMessage = !auto_continue;
break;
case v8::Exception:
@@ -2560,6 +2598,17 @@ bool Debugger::HasCommands() {
}
+void Debugger::EnqueueDebugCommand(v8::Debug::ClientData* client_data) {
+ CommandMessage message = CommandMessage::New(Vector<uint16_t>(), client_data);
+ event_command_queue_.Put(message);
+
+ // Set the debug command break flag to have the command processed.
+ if (!Debug::InDebugger()) {
+ StackGuard::DebugCommand();
+ }
+}
+
+
bool Debugger::IsDebuggerActive() {
ScopedLock with(debugger_access_);
@@ -2761,11 +2810,13 @@ v8::Debug::ClientData* MessageImpl::GetClientData() const {
EventDetailsImpl::EventDetailsImpl(DebugEvent event,
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
- Handle<Object> callback_data)
+ Handle<Object> callback_data,
+ v8::Debug::ClientData* client_data)
: event_(event),
exec_state_(exec_state),
event_data_(event_data),
- callback_data_(callback_data) {}
+ callback_data_(callback_data),
+ client_data_(client_data) {}
DebugEvent EventDetailsImpl::GetEvent() const {
@@ -2793,6 +2844,11 @@ v8::Handle<v8::Value> EventDetailsImpl::GetCallbackData() const {
}
+v8::Debug::ClientData* EventDetailsImpl::GetClientData() const {
+ return client_data_;
+}
+
+
CommandMessage::CommandMessage() : text_(Vector<uint16_t>::empty()),
client_data_(NULL) {
}
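EnqueueDebugCommand gives embedders a way to force the event listener to run with a piece of client data attached, delivered as the new v8::BreakForCommand event once a break is processed. A hedged usage sketch from the embedder's side; it assumes the public v8::Debug::DebugBreakForCommand wrapper (referenced in the debug.h comment below) is what feeds this queue:

    // Embedder-side sketch (wiring via DebugBreakForCommand assumed).
    class MyCommand : public v8::Debug::ClientData {
     public:
      explicit MyCommand(int id) : id_(id) {}
      int id() const { return id_; }
     private:
      int id_;
    };

    void Listener(const v8::Debug::EventDetails& details) {
      if (details.GetEvent() == v8::BreakForCommand) {
        MyCommand* cmd = static_cast<MyCommand*>(details.GetClientData());
        // ... react to cmd->id(); V8 disposes the command afterwards.
      }
    }

    // Elsewhere: v8::Debug::SetDebugEventListener2(Listener);
    //            v8::Debug::DebugBreakForCommand(new MyCommand(42));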
diff --git a/src/debug.h b/src/debug.h
index fb926927..7bb4a428 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -566,18 +566,21 @@ class EventDetailsImpl : public v8::Debug::EventDetails {
EventDetailsImpl(DebugEvent event,
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
- Handle<Object> callback_data);
+ Handle<Object> callback_data,
+ v8::Debug::ClientData* client_data);
virtual DebugEvent GetEvent() const;
virtual v8::Handle<v8::Object> GetExecutionState() const;
virtual v8::Handle<v8::Object> GetEventData() const;
virtual v8::Handle<v8::Context> GetEventContext() const;
virtual v8::Handle<v8::Value> GetCallbackData() const;
+ virtual v8::Debug::ClientData* GetClientData() const;
private:
DebugEvent event_; // Debug event causing the break.
- Handle<JSObject> exec_state_; // Current execution state.
- Handle<JSObject> event_data_; // Data associated with the event.
- Handle<Object> callback_data_; // User data passed with the callback when
- // it was registered.
+ Handle<JSObject> exec_state_; // Current execution state.
+ Handle<JSObject> event_data_; // Data associated with the event.
+ Handle<Object> callback_data_; // User data passed with the callback
+ // when it was registered.
+ v8::Debug::ClientData* client_data_; // Data passed to DebugBreakForCommand.
};
@@ -706,6 +709,9 @@ class Debugger {
// Check whether there are commands in the command queue.
static bool HasCommands();
+ // Enqueue a debugger command to the command queue for event listeners.
+ static void EnqueueDebugCommand(v8::Debug::ClientData* client_data = NULL);
+
static Handle<Object> Call(Handle<JSFunction> fun,
Handle<Object> data,
bool* pending_exception);
@@ -753,6 +759,17 @@ class Debugger {
static bool IsDebuggerActive();
private:
+ static void CallEventCallback(v8::DebugEvent event,
+ Handle<Object> exec_state,
+ Handle<Object> event_data,
+ v8::Debug::ClientData* client_data);
+ static void CallCEventCallback(v8::DebugEvent event,
+ Handle<Object> exec_state,
+ Handle<Object> event_data,
+ v8::Debug::ClientData* client_data);
+ static void CallJSEventCallback(v8::DebugEvent event,
+ Handle<Object> exec_state,
+ Handle<Object> event_data);
static void ListenersChanged();
static Mutex* debugger_access_; // Mutex guarding debugger variables.
@@ -775,6 +792,8 @@ class Debugger {
static LockingCommandMessageQueue command_queue_;
static Semaphore* command_received_; // Signaled for each command received.
+ static LockingCommandMessageQueue event_command_queue_;
+
friend class EnterDebugger;
};
diff --git a/src/factory.cc b/src/factory.cc
index 39e881ac..d6533838 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -277,6 +277,8 @@ Handle<Map> Factory::CopyMap(Handle<Map> src,
copy->set_inobject_properties(inobject_properties);
copy->set_unused_property_fields(inobject_properties);
copy->set_instance_size(copy->instance_size() + instance_size_delta);
+ copy->set_scavenger(Heap::GetScavenger(copy->instance_type(),
+ copy->instance_size()));
return copy;
}
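The scavenger is now cached per map and keyed on (instance_type, instance_size), so every path that changes either field must refresh it; this hunk does so for CopyMap, mirroring the bootstrapper.cc hunk earlier in the patch. The invariant, restated as a fragment of the calls used above:

    // Whenever a map's instance type or size is (re)assigned:
    map->set_instance_size(new_size);
    map->set_scavenger(Heap::GetScavenger(map->instance_type(),
                                          map->instance_size()));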
@@ -541,10 +543,9 @@ Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name,
Handle<Code> Factory::NewCode(const CodeDesc& desc,
- ZoneScopeInfo* sinfo,
Code::Flags flags,
Handle<Object> self_ref) {
- CALL_HEAP_FUNCTION(Heap::CreateCode(desc, sinfo, flags, self_ref), Code);
+ CALL_HEAP_FUNCTION(Heap::CreateCode(desc, flags, self_ref), Code);
}
@@ -680,9 +681,13 @@ Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArray> elements,
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
- Handle<String> name, int number_of_literals, Handle<Code> code) {
+ Handle<String> name,
+ int number_of_literals,
+ Handle<Code> code,
+ Handle<SerializedScopeInfo> scope_info) {
Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name);
shared->set_code(*code);
+ shared->set_scope_info(*scope_info);
int literals_array_size = number_of_literals;
// If the function contains object, regexp or array literals,
// allocate extra space for a literals array prefix containing the
diff --git a/src/factory.h b/src/factory.h
index 56deda5a..22511121 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -34,9 +34,6 @@
namespace v8 {
namespace internal {
-// Forward declarations.
-class ZoneScopeInfo;
-
// Interface for handle based allocation.
class Factory : public AllStatic {
@@ -241,7 +238,6 @@ class Factory : public AllStatic {
PretenureFlag pretenure = TENURED);
static Handle<Code> NewCode(const CodeDesc& desc,
- ZoneScopeInfo* sinfo,
Code::Flags flags,
Handle<Object> self_reference);
@@ -352,7 +348,10 @@ class Factory : public AllStatic {
}
static Handle<SharedFunctionInfo> NewSharedFunctionInfo(
- Handle<String> name, int number_of_literals, Handle<Code> code);
+ Handle<String> name,
+ int number_of_literals,
+ Handle<Code> code,
+ Handle<SerializedScopeInfo> scope_info);
static Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name);
static Handle<NumberDictionary> DictionaryAtNumberPut(
diff --git a/src/frames.cc b/src/frames.cc
index 67a20d3c..bdd5100e 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -532,8 +532,11 @@ void JavaScriptFrame::Print(StringStream* accumulator,
if (IsConstructor()) accumulator->Add("new ");
accumulator->PrintFunction(function, receiver, &code);
+ Handle<SerializedScopeInfo> scope_info(SerializedScopeInfo::Empty());
+
if (function->IsJSFunction()) {
Handle<SharedFunctionInfo> shared(JSFunction::cast(function)->shared());
+ scope_info = Handle<SerializedScopeInfo>(shared->scope_info());
Object* script_obj = shared->script();
if (script_obj->IsScript()) {
Handle<Script> script(Script::cast(script_obj));
@@ -561,7 +564,7 @@ void JavaScriptFrame::Print(StringStream* accumulator,
// Get scope information for nicer output, if possible. If code is
// NULL, or doesn't contain scope info, info will return 0 for the
// number of parameters, stack slots, or context slots.
- ScopeInfo<PreallocatedStorage> info(code);
+ ScopeInfo<PreallocatedStorage> info(*scope_info);
// Print the parameters.
int parameters_count = ComputeParametersCount();
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index b64a1790..8a8b39be 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -857,6 +857,8 @@ void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
EmitIsNonNegativeSmi(expr->arguments());
} else if (strcmp("_IsObject", *name->ToCString()) == 0) {
EmitIsObject(expr->arguments());
+ } else if (strcmp("_IsSpecObject", *name->ToCString()) == 0) {
+ EmitIsSpecObject(expr->arguments());
} else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) {
EmitIsUndetectableObject(expr->arguments());
} else if (strcmp("_IsFunction", *name->ToCString()) == 0) {
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 3d562324..b056cee9 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -402,6 +402,7 @@ class FullCodeGenerator: public AstVisitor {
void EmitIsSmi(ZoneList<Expression*>* arguments);
void EmitIsNonNegativeSmi(ZoneList<Expression*>* arguments);
void EmitIsObject(ZoneList<Expression*>* arguments);
+ void EmitIsSpecObject(ZoneList<Expression*>* arguments);
void EmitIsUndetectableObject(ZoneList<Expression*>* arguments);
void EmitIsFunction(ZoneList<Expression*>* arguments);
void EmitIsArray(ZoneList<Expression*>* arguments);
diff --git a/src/globals.h b/src/globals.h
index aea88586..6f985eb0 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -326,6 +326,7 @@ class RegExpCompiler;
class RegExpVisitor;
class Scope;
template<class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
+class SerializedScopeInfo;
class Script;
class Slot;
class Smi;
@@ -345,7 +346,6 @@ class ObjectGroup;
class TickSample;
class VirtualMemory;
class Mutex;
-class ZoneScopeInfo;
typedef bool (*WeakSlotCallback)(Object** pointer);
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index 73b9748f..92ded7b3 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -364,6 +364,7 @@ HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name) {
HeapSnapshot* result = snapshots_->NewSnapshot(name, next_snapshot_uid_++);
HeapSnapshotGenerator generator(result);
generator.GenerateSnapshot();
+ snapshots_->SnapshotGenerationFinished();
return result;
}
@@ -391,6 +392,12 @@ HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
}
+void HeapProfiler::ObjectMoveEvent(Address from, Address to) {
+ ASSERT(singleton_ != NULL);
+ singleton_->snapshots_->ObjectMoveEvent(from, to);
+}
+
+
const JSObjectsClusterTreeConfig::Key JSObjectsClusterTreeConfig::kNoKey;
const JSObjectsClusterTreeConfig::Value JSObjectsClusterTreeConfig::kNoValue;
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
index b593b992..dac488e9 100644
--- a/src/heap-profiler.h
+++ b/src/heap-profiler.h
@@ -38,7 +38,15 @@ namespace internal {
class HeapSnapshot;
class HeapSnapshotsCollection;
-#endif
+#define HEAP_PROFILE(Call) \
+ do { \
+ if (v8::internal::HeapProfiler::is_profiling()) { \
+ v8::internal::HeapProfiler::Call; \
+ } \
+ } while (false)
+#else
+#define HEAP_PROFILE(Call) ((void) 0)
+#endif // ENABLE_LOGGING_AND_PROFILING
// The HeapProfiler writes data to the log files, which can be postprocessed
// to generate .hp files for use by the GHC/Valgrind tool hp2ps.
@@ -54,6 +62,12 @@ class HeapProfiler {
static HeapSnapshot* GetSnapshot(int index);
static HeapSnapshot* FindSnapshot(unsigned uid);
+ static void ObjectMoveEvent(Address from, Address to);
+
+ static INLINE(bool is_profiling()) {
+ return singleton_ != NULL && singleton_->snapshots_->is_tracking_objects();
+ }
+
// Obsolete interface.
// Write a single heap sample to the log file.
static void WriteSample();
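The do { ... } while (false) wrapper in the HEAP_PROFILE macro above makes the expansion a single statement, and the disabled build collapses it to ((void) 0), so call sites need neither braces nor #ifdefs. For example, the macro stays well-formed even as the sole body of an unbraced branch:

    if (object_moved)
      HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address()));
    else
      CountSkippedMove();  // illustrative helper, not a real V8 function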
diff --git a/src/heap.cc b/src/heap.cc
index 1b625897..dc410273 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -799,34 +799,34 @@ class ScavengeVisitor: public ObjectVisitor {
};
-// A queue of pointers and maps of to-be-promoted objects during a
-// scavenge collection.
+// A queue of objects promoted during scavenge. Each object is accompanied
+// by its size to avoid dereferencing a map pointer for scanning.
class PromotionQueue {
public:
void Initialize(Address start_address) {
- front_ = rear_ = reinterpret_cast<HeapObject**>(start_address);
+ front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
}
bool is_empty() { return front_ <= rear_; }
- void insert(HeapObject* object, Map* map) {
- *(--rear_) = object;
- *(--rear_) = map;
+ void insert(HeapObject* target, int size) {
+ *(--rear_) = reinterpret_cast<intptr_t>(target);
+ *(--rear_) = size;
// Assert no overflow into live objects.
ASSERT(reinterpret_cast<Address>(rear_) >= Heap::new_space()->top());
}
- void remove(HeapObject** object, Map** map) {
- *object = *(--front_);
- *map = Map::cast(*(--front_));
+ void remove(HeapObject** target, int* size) {
+ *target = reinterpret_cast<HeapObject*>(*(--front_));
+ *size = static_cast<int>(*(--front_));
// Assert no underflow.
ASSERT(front_ >= rear_);
}
private:
// The front of the queue is higher in memory than the rear.
- HeapObject** front_;
- HeapObject** rear_;
+ intptr_t* front_;
+ intptr_t* rear_;
};
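The queue lives in the unused top of to-space and grows downward, storing two words per promoted object; front_ trails rear_, so entries are inserted at the low end and removed in FIFO order from the high end. A freestanding model of the rewritten structure, illustrative only:

    #include <cstdint>
    struct PromotionQueueModel {
      void Initialize(intptr_t* high_end) { front_ = rear_ = high_end; }
      bool is_empty() const { return front_ <= rear_; }
      void insert(void* target, int size) {
        *(--rear_) = reinterpret_cast<intptr_t>(target);
        *(--rear_) = size;
      }
      void remove(void** target, int* size) {
        *target = reinterpret_cast<void*>(*(--front_));
        *size = static_cast<int>(*(--front_));
      }
     private:
      intptr_t* front_;  // consumption end (higher addresses)
      intptr_t* rear_;   // insertion end (lower addresses)
    };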
@@ -1041,31 +1041,26 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
// queue is empty.
while (new_space_front < new_space_.top()) {
HeapObject* object = HeapObject::FromAddress(new_space_front);
- object->Iterate(scavenge_visitor);
- new_space_front += object->Size();
+ Map* map = object->map();
+ int size = object->SizeFromMap(map);
+ object->IterateBody(map->instance_type(), size, scavenge_visitor);
+ new_space_front += size;
}
// Promote and process all the to-be-promoted objects.
while (!promotion_queue.is_empty()) {
- HeapObject* source;
- Map* map;
- promotion_queue.remove(&source, &map);
- // Copy the from-space object to its new location (given by the
- // forwarding address) and fix its map.
- HeapObject* target = source->map_word().ToForwardingAddress();
- int size = source->SizeFromMap(map);
- CopyBlock(target->address(), source->address(), size);
- target->set_map(map);
-
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
- // Update NewSpace stats if necessary.
- RecordCopiedObject(target);
-#endif
- // Visit the newly copied object for pointers to new space.
+ HeapObject* target;
+ int size;
+ promotion_queue.remove(&target, &size);
+
+ // A promoted object might already have been partially visited
+ // during dirty-region iteration. Thus we search specifically
+ // for pointers into the from semispace instead of looking for
+ // pointers into new space.
ASSERT(!target->IsMap());
- IterateAndMarkPointersToNewSpace(target->address(),
- target->address() + size,
- &ScavengePointer);
+ IterateAndMarkPointersToFromSpace(target->address(),
+ target->address() + size,
+ &ScavengePointer);
}
// Take another spin if there are now unswept objects in new space
@@ -1077,7 +1072,7 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
-void Heap::RecordCopiedObject(HeapObject* obj) {
+static void RecordCopiedObject(HeapObject* obj) {
bool should_record = false;
#ifdef DEBUG
should_record = FLAG_heap_stats;
@@ -1086,22 +1081,24 @@ void Heap::RecordCopiedObject(HeapObject* obj) {
should_record = should_record || FLAG_log_gc;
#endif
if (should_record) {
- if (new_space_.Contains(obj)) {
- new_space_.RecordAllocation(obj);
+ if (Heap::new_space()->Contains(obj)) {
+ Heap::new_space()->RecordAllocation(obj);
} else {
- new_space_.RecordPromotion(obj);
+ Heap::new_space()->RecordPromotion(obj);
}
}
}
#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
-
-HeapObject* Heap::MigrateObject(HeapObject* source,
- HeapObject* target,
- int size) {
+// Helper function used by CopyObject to copy a source object to an
+// allocated target object and update the forwarding pointer in the source
+// object. Returns the target object.
+inline static HeapObject* MigrateObject(HeapObject* source,
+ HeapObject* target,
+ int size) {
// Copy the content of source to target.
- CopyBlock(target->address(), source->address(), size);
+ Heap::CopyBlock(target->address(), source->address(), size);
// Set the forwarding address.
source->set_map_word(MapWord::FromForwardingAddress(target));
@@ -1110,122 +1107,287 @@ HeapObject* Heap::MigrateObject(HeapObject* source,
// Update NewSpace stats if necessary.
RecordCopiedObject(target);
#endif
+ HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address()));
return target;
}
-static inline bool IsShortcutCandidate(HeapObject* object, Map* map) {
- STATIC_ASSERT(kNotStringTag != 0 && kSymbolTag != 0);
- ASSERT(object->map() == map);
- InstanceType type = map->instance_type();
- if ((type & kShortcutTypeMask) != kShortcutTypeTag) return false;
- ASSERT(object->IsString() && !object->IsSymbol());
- return ConsString::cast(object)->unchecked_second() == Heap::empty_string();
+enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
+enum SizeRestriction { SMALL, UNKNOWN_SIZE };
+
+
+template<ObjectContents object_contents, SizeRestriction size_restriction>
+static inline void EvacuateObject(Map* map,
+ HeapObject** slot,
+ HeapObject* object,
+ int object_size) {
+ ASSERT((size_restriction != SMALL) ||
+ (object_size <= Page::kMaxHeapObjectSize));
+ ASSERT(object->Size() == object_size);
+
+ if (Heap::ShouldBePromoted(object->address(), object_size)) {
+ Object* result;
+
+ if ((size_restriction != SMALL) &&
+ (object_size > Page::kMaxHeapObjectSize)) {
+ result = Heap::lo_space()->AllocateRawFixedArray(object_size);
+ } else {
+ if (object_contents == DATA_OBJECT) {
+ result = Heap::old_data_space()->AllocateRaw(object_size);
+ } else {
+ result = Heap::old_pointer_space()->AllocateRaw(object_size);
+ }
+ }
+
+ if (!result->IsFailure()) {
+ HeapObject* target = HeapObject::cast(result);
+ *slot = MigrateObject(object, target, object_size);
+
+ if (object_contents == POINTER_OBJECT) {
+ promotion_queue.insert(target, object_size);
+ }
+
+ Heap::tracer()->increment_promoted_objects_size(object_size);
+ return;
+ }
+ }
+ Object* result = Heap::new_space()->AllocateRaw(object_size);
+ ASSERT(!result->IsFailure());
+ *slot = MigrateObject(object, HeapObject::cast(result), object_size);
+ return;
}
-void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
- ASSERT(InFromSpace(object));
- MapWord first_word = object->map_word();
- ASSERT(!first_word.IsForwardingAddress());
+template<int object_size_in_words, ObjectContents object_contents>
+static inline void EvacuateObjectOfFixedSize(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ const int object_size = object_size_in_words << kPointerSizeLog2;
+ EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
+}
+
- // Optimization: Bypass flattened ConsString objects.
- if (IsShortcutCandidate(object, first_word.ToMap())) {
- object = HeapObject::cast(ConsString::cast(object)->unchecked_first());
- *p = object;
- // After patching *p we have to repeat the checks that object is in the
- // active semispace of the young generation and not already copied.
- if (!InNewSpace(object)) return;
- first_word = object->map_word();
+template<ObjectContents object_contents>
+static inline void EvacuateObjectOfFixedSize(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ int object_size = map->instance_size();
+ EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
+}
+
+
+static inline void EvacuateFixedArray(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ int object_size = FixedArray::cast(object)->FixedArraySize();
+ EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
+}
+
+
+static inline void EvacuateByteArray(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ int object_size = ByteArray::cast(object)->ByteArraySize();
+ EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
+}
+
+
+static Scavenger GetScavengerForSize(int object_size,
+ ObjectContents object_contents) {
+ ASSERT(IsAligned(object_size, kPointerSize));
+ ASSERT(object_size < Page::kMaxHeapObjectSize);
+
+ switch (object_size >> kPointerSizeLog2) {
+#define CASE(n) \
+ case n: \
+ if (object_contents == DATA_OBJECT) { \
+ return static_cast<Scavenger>( \
+ &EvacuateObjectOfFixedSize<n, DATA_OBJECT>); \
+ } else { \
+ return static_cast<Scavenger>( \
+ &EvacuateObjectOfFixedSize<n, POINTER_OBJECT>); \
+ }
+
+ CASE(1);
+ CASE(2);
+ CASE(3);
+ CASE(4);
+ CASE(5);
+ CASE(6);
+ CASE(7);
+ CASE(8);
+ CASE(9);
+ CASE(10);
+ CASE(11);
+ CASE(12);
+ CASE(13);
+ CASE(14);
+ CASE(15);
+ CASE(16);
+ default:
+ if (object_contents == DATA_OBJECT) {
+ return static_cast<Scavenger>(&EvacuateObjectOfFixedSize<DATA_OBJECT>);
+ } else {
+ return static_cast<Scavenger>(
+ &EvacuateObjectOfFixedSize<POINTER_OBJECT>);
+ }
+
+#undef CASE
+ }
+}
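Each CASE(n) stamps out a template instantiation with the word count as a compile-time constant, so the common small sizes avoid any size computation in the copy path. For reference, CASE(2) expands (modulo whitespace) to:

    case 2:
      if (object_contents == DATA_OBJECT) {
        return static_cast<Scavenger>(
            &EvacuateObjectOfFixedSize<2, DATA_OBJECT>);
      } else {
        return static_cast<Scavenger>(
            &EvacuateObjectOfFixedSize<2, POINTER_OBJECT>);
      }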
+
+
+static inline void EvacuateSeqAsciiString(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ int object_size = SeqAsciiString::cast(object)->
+ SeqAsciiStringSize(map->instance_type());
+ EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
+}
+
+
+static inline void EvacuateSeqTwoByteString(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ int object_size = SeqTwoByteString::cast(object)->
+ SeqTwoByteStringSize(map->instance_type());
+ EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
+}
+
+
+static inline bool IsShortcutCandidate(int type) {
+ return ((type & kShortcutTypeMask) == kShortcutTypeTag);
+}
+
+
+static inline void EvacuateShortcutCandidate(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ ASSERT(IsShortcutCandidate(map->instance_type()));
+
+ if (ConsString::cast(object)->unchecked_second() == Heap::empty_string()) {
+ HeapObject* first =
+ HeapObject::cast(ConsString::cast(object)->unchecked_first());
+
+ *slot = first;
+
+ if (!Heap::InNewSpace(first)) {
+ object->set_map_word(MapWord::FromForwardingAddress(first));
+ return;
+ }
+
+ MapWord first_word = first->map_word();
if (first_word.IsForwardingAddress()) {
- *p = first_word.ToForwardingAddress();
+ HeapObject* target = first_word.ToForwardingAddress();
+
+ *slot = target;
+ object->set_map_word(MapWord::FromForwardingAddress(target));
return;
}
+
+ first->map()->Scavenge(slot, first);
+ object->set_map_word(MapWord::FromForwardingAddress(*slot));
+ return;
}
- int object_size = object->SizeFromMap(first_word.ToMap());
- // We rely on live objects in new space to be at least two pointers,
- // so we can store the from-space address and map pointer of promoted
- // objects in the to space.
- ASSERT(object_size >= 2 * kPointerSize);
+ int object_size = ConsString::kSize;
+ EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
+}
- // If the object should be promoted, we try to copy it to old space.
- if (ShouldBePromoted(object->address(), object_size)) {
- Object* result;
- if (object_size > MaxObjectSizeInPagedSpace()) {
- result = lo_space_->AllocateRawFixedArray(object_size);
- if (!result->IsFailure()) {
- HeapObject* target = HeapObject::cast(result);
-
- if (object->IsFixedArray()) {
- // Save the from-space object pointer and its map pointer at the
- // top of the to space to be swept and copied later. Write the
- // forwarding address over the map word of the from-space
- // object.
- promotion_queue.insert(object, first_word.ToMap());
- object->set_map_word(MapWord::FromForwardingAddress(target));
-
- // Give the space allocated for the result a proper map by
- // treating it as a free list node (not linked into the free
- // list).
- FreeListNode* node = FreeListNode::FromAddress(target->address());
- node->set_size(object_size);
-
- *p = target;
+
+Scavenger Heap::GetScavenger(int instance_type, int instance_size) {
+ if (instance_type < FIRST_NONSTRING_TYPE) {
+ switch (instance_type & kStringRepresentationMask) {
+ case kSeqStringTag:
+ if ((instance_type & kStringEncodingMask) == kAsciiStringTag) {
+ return &EvacuateSeqAsciiString;
} else {
- // In large object space only fixed arrays might possibly contain
- // intergenerational references.
- // All other objects can be copied immediately and not revisited.
- *p = MigrateObject(object, target, object_size);
+ return &EvacuateSeqTwoByteString;
}
- tracer()->increment_promoted_objects_size(object_size);
- return;
- }
- } else {
- OldSpace* target_space = Heap::TargetSpace(object);
- ASSERT(target_space == Heap::old_pointer_space_ ||
- target_space == Heap::old_data_space_);
- result = target_space->AllocateRaw(object_size);
- if (!result->IsFailure()) {
- HeapObject* target = HeapObject::cast(result);
- if (target_space == Heap::old_pointer_space_) {
- // Save the from-space object pointer and its map pointer at the
- // top of the to space to be swept and copied later. Write the
- // forwarding address over the map word of the from-space
- // object.
- promotion_queue.insert(object, first_word.ToMap());
- object->set_map_word(MapWord::FromForwardingAddress(target));
-
- // Give the space allocated for the result a proper map by
- // treating it as a free list node (not linked into the free
- // list).
- FreeListNode* node = FreeListNode::FromAddress(target->address());
- node->set_size(object_size);
-
- *p = target;
+ case kConsStringTag:
+ if (IsShortcutCandidate(instance_type)) {
+ return &EvacuateShortcutCandidate;
} else {
- // Objects promoted to the data space can be copied immediately
- // and not revisited---we will never sweep that space for
- // pointers and the copied objects do not contain pointers to
- // new space objects.
- *p = MigrateObject(object, target, object_size);
-#ifdef DEBUG
- VerifyNonPointerSpacePointersVisitor v;
- (*p)->Iterate(&v);
-#endif
+ ASSERT(instance_size == ConsString::kSize);
+ return GetScavengerForSize(ConsString::kSize, POINTER_OBJECT);
}
- tracer()->increment_promoted_objects_size(object_size);
- return;
- }
+
+ case kExternalStringTag:
+ ASSERT(instance_size == ExternalString::kSize);
+ return GetScavengerForSize(ExternalString::kSize, DATA_OBJECT);
}
+ UNREACHABLE();
+ }
+
+ switch (instance_type) {
+ case BYTE_ARRAY_TYPE:
+ return reinterpret_cast<Scavenger>(&EvacuateByteArray);
+
+ case FIXED_ARRAY_TYPE:
+ return reinterpret_cast<Scavenger>(&EvacuateFixedArray);
+
+ case JS_OBJECT_TYPE:
+ case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case JS_VALUE_TYPE:
+ case JS_ARRAY_TYPE:
+ case JS_REGEXP_TYPE:
+ case JS_FUNCTION_TYPE:
+ case JS_GLOBAL_PROXY_TYPE:
+ case JS_GLOBAL_OBJECT_TYPE:
+ case JS_BUILTINS_OBJECT_TYPE:
+ return GetScavengerForSize(instance_size, POINTER_OBJECT);
+
+ case ODDBALL_TYPE:
+ return NULL;
+
+ case PROXY_TYPE:
+ return GetScavengerForSize(Proxy::kSize, DATA_OBJECT);
+
+ case MAP_TYPE:
+ return NULL;
+
+ case CODE_TYPE:
+ return NULL;
+
+ case JS_GLOBAL_PROPERTY_CELL_TYPE:
+ return NULL;
+
+ case HEAP_NUMBER_TYPE:
+ case FILLER_TYPE:
+ case PIXEL_ARRAY_TYPE:
+ case EXTERNAL_BYTE_ARRAY_TYPE:
+ case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+ case EXTERNAL_SHORT_ARRAY_TYPE:
+ case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+ case EXTERNAL_INT_ARRAY_TYPE:
+ case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+ case EXTERNAL_FLOAT_ARRAY_TYPE:
+ return GetScavengerForSize(instance_size, DATA_OBJECT);
+
+ case SHARED_FUNCTION_INFO_TYPE:
+ return GetScavengerForSize(SharedFunctionInfo::kAlignedSize,
+ POINTER_OBJECT);
+
+#define MAKE_STRUCT_CASE(NAME, Name, name) \
+ case NAME##_TYPE:
+ STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+ return GetScavengerForSize(instance_size, POINTER_OBJECT);
+ default:
+ UNREACHABLE();
+ return NULL;
}
- // The object should remain in new space or the old space allocation failed.
- Object* result = new_space_.AllocateRaw(object_size);
- // Failed allocation at this point is utterly unexpected.
- ASSERT(!result->IsFailure());
- *p = MigrateObject(object, HeapObject::cast(result), object_size);
+}
+
+
+void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
+ ASSERT(InFromSpace(object));
+ MapWord first_word = object->map_word();
+ ASSERT(!first_word.IsForwardingAddress());
+ Map* map = first_word.ToMap();
+ map->Scavenge(p, object);
}
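After this rewrite the slow path is a single indirect call through the scavenger cached on the map; the forwarding-address fast path lives in the inlined Heap::ScavengeObject (in heap-inl.h, outside this diff). Its expected shape, reconstructed here as a sketch rather than quoted:

    // Sketch of the inline fast path that precedes ScavengeObjectSlow.
    void ScavengeObject(HeapObject** p, HeapObject* object) {
      MapWord first_word = object->map_word();
      if (first_word.IsForwardingAddress()) {
        *p = first_word.ToForwardingAddress();  // already evacuated
        return;
      }
      ScavengeObjectSlow(p, object);  // dispatches through map->Scavenge
    }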
@@ -1243,6 +1405,8 @@ Object* Heap::AllocatePartialMap(InstanceType instance_type,
reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
+ reinterpret_cast<Map*>(result)->
+ set_scavenger(GetScavenger(instance_type, instance_size));
reinterpret_cast<Map*>(result)->set_inobject_properties(0);
reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
@@ -1259,6 +1423,7 @@ Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
Map* map = reinterpret_cast<Map*>(result);
map->set_map(meta_map());
map->set_instance_type(instance_type);
+ map->set_scavenger(GetScavenger(instance_type, instance_size));
map->set_prototype(null_value());
map->set_constructor(null_value());
map->set_instance_size(instance_size);
@@ -1891,6 +2056,7 @@ Object* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_name(name);
Code* illegal = Builtins::builtin(Builtins::Illegal);
share->set_code(illegal);
+ share->set_scope_info(SerializedScopeInfo::Empty());
Code* construct_stub = Builtins::builtin(Builtins::JSConstructStubGeneric);
share->set_construct_stub(construct_stub);
share->set_expected_nof_properties(0);
@@ -2315,14 +2481,8 @@ static void FlushCodeForFunction(SharedFunctionInfo* function_info) {
ThreadManager::IterateArchivedThreads(&threadvisitor);
if (threadvisitor.FoundCode()) return;
- // Check that there are heap allocated locals in the scopeinfo. If
- // there is, we are potentially using eval and need the scopeinfo
- // for variable resolution.
- if (ScopeInfo<>::HasHeapAllocatedLocals(function_info->code()))
- return;
-
- HandleScope scope;
// Compute the lazy compilable version of the code.
+ HandleScope scope;
function_info->set_code(*ComputeLazyCompile(function_info->length()));
}
@@ -2348,7 +2508,6 @@ void Heap::FlushCode() {
Object* Heap::CreateCode(const CodeDesc& desc,
- ZoneScopeInfo* sinfo,
Code::Flags flags,
Handle<Object> self_reference) {
// Allocate ByteArray before the Code object, so that we do not risk
@@ -2358,9 +2517,7 @@ Object* Heap::CreateCode(const CodeDesc& desc,
// Compute size
int body_size = RoundUp(desc.instr_size, kObjectAlignment);
- int sinfo_size = 0;
- if (sinfo != NULL) sinfo_size = sinfo->Serialize(NULL);
- int obj_size = Code::SizeFor(body_size, sinfo_size);
+ int obj_size = Code::SizeFor(body_size);
ASSERT(IsAligned(obj_size, Code::kCodeAlignment));
Object* result;
if (obj_size > MaxObjectSizeInPagedSpace()) {
@@ -2377,7 +2534,6 @@ Object* Heap::CreateCode(const CodeDesc& desc,
ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
code->set_instruction_size(desc.instr_size);
code->set_relocation_info(ByteArray::cast(reloc_info));
- code->set_sinfo_size(sinfo_size);
code->set_flags(flags);
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
@@ -2390,7 +2546,6 @@ Object* Heap::CreateCode(const CodeDesc& desc,
// objects. These pointers can include references to the code object itself,
// through the self_reference parameter.
code->CopyFrom(desc);
- if (sinfo != NULL) sinfo->Serialize(code); // write scope info
#ifdef DEBUG
code->Verify();
@@ -2431,9 +2586,7 @@ Object* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
- int sinfo_size = code->sinfo_size();
-
- int new_obj_size = Code::SizeFor(new_body_size, sinfo_size);
+ int new_obj_size = Code::SizeFor(new_body_size);
Address old_addr = code->address();
@@ -2460,8 +2613,6 @@ Object* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
// Copy patched rinfo.
memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
- // Copy sinfo.
- memcpy(new_code->sinfo_start(), code->sinfo_start(), code->sinfo_size());
// Relocate the copy.
ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
@@ -3657,7 +3808,7 @@ bool Heap::IteratePointersInDirtyMapsRegion(
Max(start, prev_map + Map::kPointerFieldsBeginOffset);
Address pointer_fields_end =
- Min(prev_map + Map::kCodeCacheOffset + kPointerSize, end);
+ Min(prev_map + Map::kPointerFieldsEndOffset, end);
contains_pointers_to_new_space =
IteratePointersInDirtyRegion(pointer_fields_start,
@@ -3675,10 +3826,11 @@ bool Heap::IteratePointersInDirtyMapsRegion(
if (map_aligned_end != end) {
ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
- Address pointer_fields_start = map_aligned_end + Map::kPrototypeOffset;
+ Address pointer_fields_start =
+ map_aligned_end + Map::kPointerFieldsBeginOffset;
Address pointer_fields_end =
- Min(end, map_aligned_end + Map::kCodeCacheOffset + kPointerSize);
+ Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
contains_pointers_to_new_space =
IteratePointersInDirtyRegion(pointer_fields_start,
@@ -3691,9 +3843,9 @@ bool Heap::IteratePointersInDirtyMapsRegion(
}
-void Heap::IterateAndMarkPointersToNewSpace(Address start,
- Address end,
- ObjectSlotCallback callback) {
+void Heap::IterateAndMarkPointersToFromSpace(Address start,
+ Address end,
+ ObjectSlotCallback callback) {
Address slot_address = start;
Page* page = Page::FromAddress(start);
@@ -3701,7 +3853,7 @@ void Heap::IterateAndMarkPointersToNewSpace(Address start,
while (slot_address < end) {
Object** slot = reinterpret_cast<Object**>(slot_address);
- if (Heap::InNewSpace(*slot)) {
+ if (Heap::InFromSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
callback(reinterpret_cast<HeapObject**>(slot));
if (Heap::InNewSpace(*slot)) {
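The switch to InFromSpace pairs with the promotion change above: objects arriving through the promotion queue may already hold pointers to to-space copies, and those slots must not be treated as unresolved. The nested InNewSpace check visible at the end of the hunk is what keeps the page's region dirty when a target stays in new space. The per-slot protocol, condensed as a sketch:

    if (Heap::InFromSpace(*slot)) {
      callback(reinterpret_cast<HeapObject**>(slot));  // evacuate, update slot
      if (Heap::InNewSpace(*slot)) {
        // Target was copied within new space: keep the region marked
        // dirty so this slot is rescanned on the next scavenge.
      }
    }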
diff --git a/src/heap.h b/src/heap.h
index df3ba0ea..18991b4c 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -36,8 +36,6 @@
namespace v8 {
namespace internal {
-// Forward declarations.
-class ZoneScopeInfo;
// Defines all the roots in Heap.
#define UNCONDITIONAL_STRONG_ROOT_LIST(V) \
@@ -626,7 +624,6 @@ class Heap : public AllStatic {
// object by containing this pointer.
// Please note this function does not perform a garbage collection.
static Object* CreateCode(const CodeDesc& desc,
- ZoneScopeInfo* sinfo,
Code::Flags flags,
Handle<Object> self_reference);
@@ -774,11 +771,12 @@ class Heap : public AllStatic {
DirtyRegionCallback visit_dirty_region,
ObjectSlotCallback callback);
- // Iterate pointers to new space found in memory interval from start to end.
+ // Iterate pointers to the 'from' semispace of new space found in the
+ // memory interval from start to end.
// Update dirty marks for page containing start address.
- static void IterateAndMarkPointersToNewSpace(Address start,
- Address end,
- ObjectSlotCallback callback);
+ static void IterateAndMarkPointersToFromSpace(Address start,
+ Address end,
+ ObjectSlotCallback callback);
// Iterate pointers to new space found in the memory interval from start to end.
// Return true if pointers to new space were found.
@@ -985,6 +983,8 @@ class Heap : public AllStatic {
static void RecordStats(HeapStats* stats);
+ static Scavenger GetScavenger(int instance_type, int instance_size);
+
// Copy block of memory from src to dst. Size of block should be aligned
// by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size);
@@ -1232,17 +1232,7 @@ class Heap : public AllStatic {
set_instanceof_cache_function(the_hole_value());
}
- // Helper function used by CopyObject to copy a source object to an
- // allocated target object and update the forwarding pointer in the source
- // object. Returns the target object.
- static inline HeapObject* MigrateObject(HeapObject* source,
- HeapObject* target,
- int size);
-
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
- // Record the copy of an object in the NewSpace's statistics.
- static void RecordCopiedObject(HeapObject* obj);
-
// Record statistics before and after garbage collection.
static void ReportStatisticsBeforeGC();
static void ReportStatisticsAfterGC();
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index ce2099da..e011237d 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -121,7 +121,6 @@ void CpuFeatures::Probe() {
CodeDesc desc;
assm.GetCode(&desc);
Object* code = Heap::CreateCode(desc,
- NULL,
Code::ComputeFlags(Code::STUB),
Handle<Code>::null());
if (!code->IsCode()) return;
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 0f720747..20fbfa35 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -34,12 +34,9 @@
#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
-#include "jsregexp.h"
#include "parser.h"
#include "regexp-macro-assembler.h"
-#include "regexp-stack.h"
#include "register-allocator-inl.h"
-#include "runtime.h"
#include "scopes.h"
#include "virtual-frame-inl.h"
@@ -143,7 +140,7 @@ CodeGenState::~CodeGenState() {
// -------------------------------------------------------------------------
-// CodeGenerator implementation
+// CodeGenerator implementation.
CodeGenerator::CodeGenerator(MacroAssembler* masm)
: deferred_(8),
@@ -374,12 +371,11 @@ void CodeGenerator::Generate(CompilationInfo* info) {
}
// Adjust for function-level loop nesting.
- ASSERT_EQ(info->loop_nesting(), loop_nesting_);
+ ASSERT_EQ(loop_nesting_, info->loop_nesting());
loop_nesting_ = 0;
// Code generation state must be reset.
ASSERT(state_ == NULL);
- ASSERT(loop_nesting() == 0);
ASSERT(!function_return_is_shadowed_);
function_return_.Unuse();
DeleteFrame();
@@ -646,7 +642,6 @@ void CodeGenerator::Load(Expression* expr) {
} else {
JumpTarget true_target;
JumpTarget false_target;
-
ControlDestination dest(&true_target, &false_target, true);
LoadCondition(expr, &dest, false);
@@ -784,9 +779,9 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
JumpTarget done;
bool skip_arguments = false;
if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
- // We have to skip storing into the arguments slot if it has already
- // been written to. This can happen if the a function has a local
- // variable named 'arguments'.
+ // We have to skip storing into the arguments slot if it has
+ // already been written to. This can happen if a function
+ // has a local variable named 'arguments'.
LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
Result probe = frame_->Pop();
if (probe.is_constant()) {
@@ -1434,8 +1429,8 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
} else {
unsigned_left >>= shift_amount;
}
- ASSERT(Smi::IsValid(unsigned_left)); // Converted to signed.
- answer_object = Smi::FromInt(unsigned_left); // Converted to signed.
+ ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
+ answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
break;
}
default:
@@ -1919,12 +1914,12 @@ class DeferredInlineSmiOperationReversed: public DeferredCode {
void DeferredInlineSmiOperationReversed::Generate() {
- GenericBinaryOpStub igostub(
+ GenericBinaryOpStub stub(
op_,
overwrite_mode_,
NO_SMI_CODE_IN_STUB,
TypeInfo::Combine(TypeInfo::Smi(), type_info_));
- igostub.GenerateCall(masm_, value_, src_);
+ stub.GenerateCall(masm_, value_, src_);
if (!dst_.is(eax)) __ mov(dst_, eax);
}
@@ -2424,6 +2419,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
break;
}
// Fall through if we did not find a power of 2 on the right hand side!
+ // The next case must be the default.
default: {
Result constant_operand(value);
@@ -2487,8 +2483,7 @@ void CodeGenerator::Comparison(AstNode* node,
}
ASSERT(cc == less || cc == equal || cc == greater_equal);
- // If either side is a constant of some sort, we can probably optimize the
- // comparison.
+ // If either side is a constant smi, optimize the comparison.
bool left_side_constant_smi = false;
bool left_side_constant_null = false;
bool left_side_constant_1_char_string = false;
@@ -2513,114 +2508,11 @@ void CodeGenerator::Comparison(AstNode* node,
}
if (left_side_constant_smi || right_side_constant_smi) {
- if (left_side_constant_smi && right_side_constant_smi) {
- // Trivial case, comparing two constants.
- int left_value = Smi::cast(*left_side.handle())->value();
- int right_value = Smi::cast(*right_side.handle())->value();
- switch (cc) {
- case less:
- dest->Goto(left_value < right_value);
- break;
- case equal:
- dest->Goto(left_value == right_value);
- break;
- case greater_equal:
- dest->Goto(left_value >= right_value);
- break;
- default:
- UNREACHABLE();
- }
- } else {
- // Only one side is a constant Smi.
- // If left side is a constant Smi, reverse the operands.
- // Since one side is a constant Smi, conversion order does not matter.
- if (left_side_constant_smi) {
- Result temp = left_side;
- left_side = right_side;
- right_side = temp;
- cc = ReverseCondition(cc);
- // This may re-introduce greater or less_equal as the value of cc.
- // CompareStub and the inline code both support all values of cc.
- }
- // Implement comparison against a constant Smi, inlining the case
- // where both sides are Smis.
- left_side.ToRegister();
- Register left_reg = left_side.reg();
- Handle<Object> right_val = right_side.handle();
-
- // Here we split control flow to the stub call and inlined cases
- // before finally splitting it to the control destination. We use
- // a jump target and branching to duplicate the virtual frame at
- // the first split. We manually handle the off-frame references
- // by reconstituting them on the non-fall-through path.
-
- if (left_side.is_smi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left_side.reg());
- }
- } else {
- JumpTarget is_smi;
- __ test(left_side.reg(), Immediate(kSmiTagMask));
- is_smi.Branch(zero, taken);
-
- bool is_loop_condition = (node->AsExpression() != NULL) &&
- node->AsExpression()->is_loop_condition();
- if (!is_loop_condition &&
- CpuFeatures::IsSupported(SSE2) &&
- right_val->IsSmi()) {
- // Right side is a constant smi and left side has been checked
- // not to be a smi.
- CpuFeatures::Scope use_sse2(SSE2);
- JumpTarget not_number;
- __ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
- not_number.Branch(not_equal, &left_side);
- __ movdbl(xmm1,
- FieldOperand(left_reg, HeapNumber::kValueOffset));
- int value = Smi::cast(*right_val)->value();
- if (value == 0) {
- __ xorpd(xmm0, xmm0);
- } else {
- Result temp = allocator()->Allocate();
- __ mov(temp.reg(), Immediate(value));
- __ cvtsi2sd(xmm0, Operand(temp.reg()));
- temp.Unuse();
- }
- __ ucomisd(xmm1, xmm0);
- // Jump to builtin for NaN.
- not_number.Branch(parity_even, &left_side);
- left_side.Unuse();
- dest->true_target()->Branch(DoubleCondition(cc));
- dest->false_target()->Jump();
- not_number.Bind(&left_side);
- }
-
- // Setup and call the compare stub.
- CompareStub stub(cc, strict, kCantBothBeNaN);
- Result result = frame_->CallStub(&stub, &left_side, &right_side);
- result.ToRegister();
- __ cmp(result.reg(), 0);
- result.Unuse();
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
-
- is_smi.Bind();
- }
-
- left_side = Result(left_reg);
- right_side = Result(right_val);
- // Test smi equality and comparison by signed int comparison.
- if (IsUnsafeSmi(right_side.handle())) {
- right_side.ToRegister();
- __ cmp(left_side.reg(), Operand(right_side.reg()));
- } else {
- __ cmp(Operand(left_side.reg()), Immediate(right_side.handle()));
- }
- left_side.Unuse();
- right_side.Unuse();
- dest->Split(cc);
- }
-
+ bool is_loop_condition = (node->AsExpression() != NULL) &&
+ node->AsExpression()->is_loop_condition();
+ ConstantSmiComparison(cc, strict, dest, &left_side, &right_side,
+ left_side_constant_smi, right_side_constant_smi,
+ is_loop_condition);
} else if (cc == equal &&
(left_side_constant_null || right_side_constant_null)) {
// To make null checks efficient, we check if either the left side or
@@ -2780,13 +2672,14 @@ void CodeGenerator::Comparison(AstNode* node,
}
} else {
// Neither side is a constant Smi, constant 1-char string or constant null.
- // If either side is a non-smi constant, or known to be a heap number skip
- // the smi check.
+ // If either side is a non-smi constant, or known to be a heap number,
+ // skip the smi check.
bool known_non_smi =
(left_side.is_constant() && !left_side.handle()->IsSmi()) ||
(right_side.is_constant() && !right_side.handle()->IsSmi()) ||
left_side.type_info().IsDouble() ||
right_side.type_info().IsDouble();
+
NaNInformation nan_info =
(CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
kBothCouldBeNaN :
@@ -2811,14 +2704,15 @@ void CodeGenerator::Comparison(AstNode* node,
right_side.ToRegister();
if (known_non_smi) {
- // Inline the equality check if both operands can't be a NaN. If both
- // objects are the same they are equal.
+ // Inlined equality check:
+ // If at least one of the objects is not NaN, then if the objects
+ // are identical, they are equal.
if (nan_info == kCantBothBeNaN && cc == equal) {
__ cmp(left_side.reg(), Operand(right_side.reg()));
dest->true_target()->Branch(equal);
}
- // Inline number comparison.
+ // Inlined number comparison:
if (inline_number_compare) {
GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
}
@@ -2856,7 +2750,7 @@ void CodeGenerator::Comparison(AstNode* node,
dest->true_target()->Branch(equal);
}
- // Inline number comparison.
+ // Inlined number comparison:
if (inline_number_compare) {
GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
}
@@ -2882,6 +2776,139 @@ void CodeGenerator::Comparison(AstNode* node,
}
+void CodeGenerator::ConstantSmiComparison(Condition cc,
+ bool strict,
+ ControlDestination* dest,
+ Result* left_side,
+ Result* right_side,
+ bool left_side_constant_smi,
+ bool right_side_constant_smi,
+ bool is_loop_condition) {
+ if (left_side_constant_smi && right_side_constant_smi) {
+ // Trivial case, comparing two constants.
+ int left_value = Smi::cast(*left_side->handle())->value();
+ int right_value = Smi::cast(*right_side->handle())->value();
+ switch (cc) {
+ case less:
+ dest->Goto(left_value < right_value);
+ break;
+ case equal:
+ dest->Goto(left_value == right_value);
+ break;
+ case greater_equal:
+ dest->Goto(left_value >= right_value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ // Only one side is a constant Smi.
+ // If left side is a constant Smi, reverse the operands.
+ // Since one side is a constant Smi, conversion order does not matter.
+ if (left_side_constant_smi) {
+ Result* temp = left_side;
+ left_side = right_side;
+ right_side = temp;
+ cc = ReverseCondition(cc);
+ // This may re-introduce greater or less_equal as the value of cc.
+ // CompareStub and the inline code both support all values of cc.
+ }
+ // Implement comparison against a constant Smi, inlining the case
+ // where both sides are Smis.
+ left_side->ToRegister();
+ Register left_reg = left_side->reg();
+ Handle<Object> right_val = right_side->handle();
+
+ if (left_side->is_smi()) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(left_reg);
+ }
+ // Test smi equality and comparison by signed int comparison.
+ if (IsUnsafeSmi(right_side->handle())) {
+ right_side->ToRegister();
+ __ cmp(left_reg, Operand(right_side->reg()));
+ } else {
+ __ cmp(Operand(left_reg), Immediate(right_side->handle()));
+ }
+ left_side->Unuse();
+ right_side->Unuse();
+ dest->Split(cc);
+ } else {
+ // The only remaining case is that the left side may be a non-smi.
+ JumpTarget is_smi;
+ if (cc == equal) {
+ // We can do the equality comparison before the smi check.
+ __ cmp(Operand(left_reg), Immediate(right_side->handle()));
+ dest->true_target()->Branch(equal);
+ __ test(left_reg, Immediate(kSmiTagMask));
+ dest->false_target()->Branch(zero);
+ } else {
+ // Do the smi check, then the comparison.
+ JumpTarget is_not_smi;
+ __ test(left_reg, Immediate(kSmiTagMask));
+ is_smi.Branch(zero, left_side, right_side);
+ }
+
+ // Jump or fall through to here if we are comparing a non-smi to a
+ // constant smi. If the non-smi is a heap number and this is not
+ // a loop condition, inline the floating point code.
+ if (!is_loop_condition && CpuFeatures::IsSupported(SSE2)) {
+ // Right side is a constant smi and left side has been checked
+ // not to be a smi.
+ CpuFeatures::Scope use_sse2(SSE2);
+ JumpTarget not_number;
+ __ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
+ Immediate(Factory::heap_number_map()));
+ not_number.Branch(not_equal, left_side);
+ __ movdbl(xmm1,
+ FieldOperand(left_reg, HeapNumber::kValueOffset));
+ int value = Smi::cast(*right_val)->value();
+ if (value == 0) {
+ __ xorpd(xmm0, xmm0);
+ } else {
+ Result temp = allocator()->Allocate();
+ __ mov(temp.reg(), Immediate(value));
+ __ cvtsi2sd(xmm0, Operand(temp.reg()));
+ temp.Unuse();
+ }
+ __ ucomisd(xmm1, xmm0);
+ // Jump to builtin for NaN.
+ not_number.Branch(parity_even, left_side);
+ left_side->Unuse();
+ dest->true_target()->Branch(DoubleCondition(cc));
+ dest->false_target()->Jump();
+ not_number.Bind(left_side);
+ }
+
+ // Setup and call the compare stub.
+ CompareStub stub(cc, strict, kCantBothBeNaN);
+ Result result = frame_->CallStub(&stub, left_side, right_side);
+ result.ToRegister();
+ __ test(result.reg(), Operand(result.reg()));
+ result.Unuse();
+ if (cc == equal) {
+ dest->Split(cc);
+ } else {
+ dest->true_target()->Branch(cc);
+ dest->false_target()->Jump();
+
+ // It is important for performance for this case to be at the end.
+ is_smi.Bind(left_side, right_side);
+ if (IsUnsafeSmi(right_side->handle())) {
+ right_side->ToRegister();
+ __ cmp(left_reg, Operand(right_side->reg()));
+ } else {
+ __ cmp(Operand(left_reg), Immediate(right_side->handle()));
+ }
+ left_side->Unuse();
+ right_side->Unuse();
+ dest->Split(cc);
+ }
+ }
+ }
+}
+
+
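Both branches of the new helper rely on the same smi check that test(left_reg, Immediate(kSmiTagMask)) compiles to; a value-level sketch, assuming the usual 1-bit smi tag with tag value 0:

    #include <cstdint>

    const intptr_t kSmiTagMaskSketch = 1;  // assumption: low bit is the smi tag

    inline bool IsSmiWord(intptr_t word) {
      // 'test reg, 1' followed by a branch on the zero flag.
      return (word & kSmiTagMaskSketch) == 0;
    }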
// Check that the comparison operand is a number. Jump to not_numbers jump
// target passing the left and right result if the operand is not a number.
static void CheckComparisonOperand(MacroAssembler* masm_,
@@ -2941,19 +2968,19 @@ static void LoadComparisonOperand(MacroAssembler* masm_,
// target passing the left and right result if the operand is not a number.
static void LoadComparisonOperandSSE2(MacroAssembler* masm_,
Result* operand,
- XMMRegister reg,
+ XMMRegister xmm_reg,
Result* left_side,
Result* right_side,
JumpTarget* not_numbers) {
Label done;
if (operand->type_info().IsDouble()) {
// Operand is known to be a heap number, just load it.
- __ movdbl(reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
+ __ movdbl(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
} else if (operand->type_info().IsSmi()) {
// Operand is known to be a smi. Convert it to double and keep the original
// smi.
__ SmiUntag(operand->reg());
- __ cvtsi2sd(reg, Operand(operand->reg()));
+ __ cvtsi2sd(xmm_reg, Operand(operand->reg()));
__ SmiTag(operand->reg());
} else {
// Operand type not known, check for smi or heap number.
@@ -2965,13 +2992,13 @@ static void LoadComparisonOperandSSE2(MacroAssembler* masm_,
Immediate(Factory::heap_number_map()));
not_numbers->Branch(not_equal, left_side, right_side, taken);
}
- __ movdbl(reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
+ __ movdbl(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&smi);
// Convert smi to double and keep the original smi.
__ SmiUntag(operand->reg());
- __ cvtsi2sd(reg, Operand(operand->reg()));
+ __ cvtsi2sd(xmm_reg, Operand(operand->reg()));
__ SmiTag(operand->reg());
__ jmp(&done);
}
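The SmiUntag / cvtsi2sd / SmiTag sequence can round-trip the register because (un)tagging on ia32 is just a one-bit shift; a value-level sketch, assuming the 1-bit smi tag of this era:

    #include <cstdint>

    int32_t SmiUntagSketch(int32_t tagged) { return tagged >> 1; }  // sar reg, 1
    double  SmiToDoubleSketch(int32_t raw) { return static_cast<double>(raw); }
    int32_t SmiTagSketch(int32_t value)    { return value << 1; }   // add reg, reg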
@@ -3568,8 +3595,10 @@ void CodeGenerator::GenerateReturnSequence(Result* return_value) {
return_value->ToRegister(eax);
// Add a label for checking the size of the code used for returning.
+#ifdef DEBUG
Label check_exit_codesize;
masm_->bind(&check_exit_codesize);
+#endif
// Leave the frame and return popping the arguments and the
// receiver.
@@ -3690,7 +3719,6 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
}
}
-
// The last instruction emitted was a jump, either to the default
// clause or the break target, or else to a case body from the loop
// that compiles the tests.
@@ -3778,8 +3806,8 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
// Compile the test.
switch (info) {
case ALWAYS_TRUE:
- // If control flow can fall off the end of the body, jump back to
- // the top and bind the break target at the exit.
+ // If control flow can fall off the end of the body, jump back
+ // to the top and bind the break target at the exit.
if (has_valid_frame()) {
node->continue_target()->Jump();
}
@@ -3815,6 +3843,8 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
}
DecrementLoopNesting();
+ node->continue_target()->Unuse();
+ node->break_target()->Unuse();
}
@@ -3899,8 +3929,8 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
break;
case DONT_KNOW:
if (test_at_bottom) {
- // If we have chosen to recompile the test at the bottom, then
- // it is the continue target.
+ // If we have chosen to recompile the test at the bottom,
+ // then it is the continue target.
if (node->continue_target()->is_linked()) {
node->continue_target()->Bind();
}
@@ -4016,6 +4046,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
loop.Bind();
}
+
// Compile the test with the body as the true target and preferred
// fall-through and with the break target as the false target.
ControlDestination dest(&body, node->break_target(), true);
@@ -4125,8 +4156,8 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
break;
}
- // The break target may be already bound (by the condition), or
- // there may not be a valid frame. Bind it only if needed.
+ // The break target may be already bound (by the condition), or there
+ // may not be a valid frame. Bind it only if needed.
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
}
@@ -6406,6 +6437,27 @@ void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
+ // typeof(arg) === 'function').
+ // It includes undetectable objects (as opposed to IsObject).
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ __ test(value.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(equal);
+
+ // Check that this is an object.
+ frame_->Spill(value.reg());
+ __ CmpObjectType(value.reg(), FIRST_JS_OBJECT_TYPE, value.reg());
+ value.Unuse();
+ destination()->Split(above_equal);
+}
+
+
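A hedged sketch of the predicate those instructions implement: not a smi, and instance type at least FIRST_JS_OBJECT_TYPE, so functions and undetectable objects pass. The types below are self-contained stand-ins, not V8 API:

    // Hypothetical stand-ins modelling the checks the generated code performs.
    enum InstanceTypeSketch {
      SOME_STRING_TYPE_SKETCH = 5,
      FIRST_JS_OBJECT_TYPE_SKETCH = 10,
      JS_FUNCTION_TYPE_SKETCH = 12
    };

    struct HeapValueSketch {
      bool is_smi;
      InstanceTypeSketch type;  // only meaningful when !is_smi
    };

    bool IsSpecObjectSketch(const HeapValueSketch& v) {
      if (v.is_smi) return false;                    // 'test eax, kSmiTagMask'
      return v.type >= FIRST_JS_OBJECT_TYPE_SKETCH;  // 'CmpObjectType; j above_equal'
    }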
void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
// This generates a fast version of:
// (%_ClassOf(arg) === 'Function')
@@ -6678,11 +6730,8 @@ void CodeGenerator::GenerateRandomHeapNumber(
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- // To allocate a heap number, and ensure that it is not a smi, we
- // call the runtime function FUnaryMinus on 0, returning the double
- // -0.0. A new, distinct heap number is returned each time.
- __ push(Immediate(Smi::FromInt(0)));
- __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
+ // Allocate a heap number.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
__ mov(edi, eax);
__ bind(&heapnumber_allocated);
@@ -11638,6 +11687,8 @@ static int NegativeComparisonResult(Condition cc) {
void CompareStub::Generate(MacroAssembler* masm) {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+
Label check_unequal_objects, done;
// NOTICE! This code is only reached after a smi-fast-case check, so
@@ -12531,8 +12582,10 @@ int CompareStub::MinorKey() {
// Encode the three parameters in a unique 16-bit value. To avoid duplicate
// stubs, the never-NaN-NaN condition is only taken into account if the
// condition is equals.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 13));
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
return ConditionField::encode(static_cast<unsigned>(cc_))
+ | RegisterField::encode(false) // lhs_ and rhs_ are not used
| StrictField::encode(strict_)
| NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
| IncludeNumberCompareField::encode(include_number_compare_);
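The encode() calls pack the stub parameters with V8's BitField-style helper; a self-contained sketch of the pattern, with field positions chosen to match the new 12-bit condition assert (the exact layout is an assumption):

    #include <cstdint>

    template <class T, int shift, int size>
    struct BitFieldSketch {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t packed) {
        return static_cast<T>((packed & kMask) >> shift);
      }
    };

    // Assumed packing for the 16-bit minor key above.
    typedef BitFieldSketch<unsigned, 0, 12> ConditionFieldSketch;
    typedef BitFieldSketch<bool, 12, 1>     RegisterFieldSketch;
    typedef BitFieldSketch<bool, 13, 1>     StrictFieldSketch;
    typedef BitFieldSketch<bool, 14, 1>     NeverNanNanFieldSketch;
    typedef BitFieldSketch<bool, 15, 1>     IncludeNumberCompareFieldSketch;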
@@ -12542,6 +12595,8 @@ int CompareStub::MinorKey() {
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
const char* CompareStub::GetName() {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index a432c13f..24f9957f 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -560,6 +560,17 @@ class CodeGenerator: public AstVisitor {
Condition cc,
bool strict,
ControlDestination* destination);
+
+ // If at least one of the sides is a constant smi, generate optimized code.
+ void ConstantSmiComparison(Condition cc,
+ bool strict,
+ ControlDestination* destination,
+ Result* left_side,
+ Result* right_side,
+ bool left_side_constant_smi,
+ bool right_side_constant_smi,
+ bool is_loop_condition);
+
void GenerateInlineNumberComparison(Result* left_side,
Result* right_side,
Condition cc,
@@ -621,6 +632,7 @@ class CodeGenerator: public AstVisitor {
void GenerateIsArray(ZoneList<Expression*>* args);
void GenerateIsRegExp(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
+ void GenerateIsSpecObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 2ca11057..b2ff1fd9 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -1985,6 +1985,26 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
}
+void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(equal, if_false);
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ebx);
+ __ j(above_equal, if_true);
+ __ jmp(if_false);
+
+ Apply(context_, if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
@@ -2242,11 +2262,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- // To allocate a heap number, and ensure that it is not a smi, we
- // call the runtime function FUnaryMinus on 0, returning the double
- // -0.0. A new, distinct heap number is returned each time.
- __ push(Immediate(Smi::FromInt(0)));
- __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
+ // Allocate a heap number.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
__ mov(edi, eax);
__ bind(&heapnumber_allocated);
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index b0de8275..a7930fb1 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -872,7 +872,6 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(&code_desc);
Handle<Code> code = Factory::NewCode(code_desc,
- NULL,
Code::ComputeFlags(Code::REGEXP),
masm_->CodeObject());
PROFILE(RegExpCodeCreateEvent(*code, *source));
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 26361d10..e81fbc7b 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -111,7 +111,7 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
Register receiver,
String* name,
Register r0,
- Register extra) {
+ Register r1) {
ASSERT(name->IsSymbol());
__ IncrementCounter(&Counters::negative_lookups, 1);
__ IncrementCounter(&Counters::negative_lookups_miss, 1);
@@ -121,11 +121,13 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
const int kInterceptorOrAccessCheckNeededMask =
(1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
// Bail out if the receiver has a named interceptor or requires access checks.
- __ test(FieldOperand(r0, Map::kBitFieldOffset),
- Immediate(kInterceptorOrAccessCheckNeededMask));
+ __ test_b(FieldOperand(r0, Map::kBitFieldOffset),
+ kInterceptorOrAccessCheckNeededMask);
__ j(not_zero, miss_label, not_taken);
+ // Check that the receiver is a JSObject.
__ CmpInstanceType(r0, FIRST_JS_OBJECT_TYPE);
__ j(below, miss_label, not_taken);
@@ -158,10 +160,7 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
for (int i = 0; i < kProbes; i++) {
// r0 points to properties hash.
// Compute the masked index: (hash + i + i * i) & mask.
- if (extra.is(no_reg)) {
- __ push(receiver);
- }
- Register index = extra.is(no_reg) ? receiver : extra;
+ Register index = r1;
// Capacity is smi 2^n.
__ mov(index, FieldOperand(properties, kCapacityOffset));
__ dec(index);
@@ -173,16 +172,12 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
ASSERT(StringDictionary::kEntrySize == 3);
__ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
- Register entity_name = extra.is(no_reg) ? properties : extra;
+ Register entity_name = r1;
// Having undefined at this place means the name is not contained.
ASSERT_EQ(kSmiTagSize, 1);
__ mov(entity_name, Operand(properties, index, times_half_pointer_size,
kElementsStartOffset - kHeapObjectTag));
__ cmp(entity_name, Factory::undefined_value());
- if (extra.is(no_reg)) {
- // 'receiver' shares a register with 'entity_name'.
- __ pop(receiver);
- }
if (i != kProbes - 1) {
__ j(equal, &done, taken);
@@ -190,10 +185,11 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
__ cmp(entity_name, Handle<String>(name));
__ j(equal, miss_label, not_taken);
- if (extra.is(no_reg)) {
- // Restore the properties if their register was occupied by the name.
- __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
- }
+ // Check if the entry name is not a symbol.
+ __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
+ kIsSymbolMask);
+ __ j(zero, miss_label, not_taken);
} else {
// Give up probing if we still have not found the undefined value.
__ j(not_equal, miss_label, not_taken);
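The probe arithmetic spelled out: the hash is mixed with a quadratic term, masked by the power-of-two capacity, then scaled by the entry size of 3 via the lea. A standalone sketch of that index computation:

    #include <cstdint>

    uint32_t NegativeLookupProbeIndex(uint32_t hash, uint32_t i,
                                      uint32_t capacity) {
      // Comment in the source: (hash + i + i * i) & mask; capacity is 2^n.
      uint32_t probe = (hash + i + i * i) & (capacity - 1);
      // StringDictionary::kEntrySize == 3:
      // '__ lea(index, Operand(index, index, times_2, 0))' computes index * 3.
      return probe * 3;
    }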
@@ -525,6 +521,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -541,6 +538,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
receiver,
scratch1,
scratch2,
+ scratch3,
holder,
lookup,
name,
@@ -552,6 +550,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
receiver,
scratch1,
scratch2,
+ scratch3,
name,
holder,
miss);
@@ -564,6 +563,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
JSObject* interceptor_holder,
LookupResult* lookup,
String* name,
@@ -603,7 +603,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register holder =
stub_compiler_->CheckPrototypes(object, receiver,
interceptor_holder, scratch1,
- scratch2, name, depth1, miss);
+ scratch2, scratch3, name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -619,7 +619,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
if (interceptor_holder != lookup->holder()) {
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
lookup->holder(), scratch1,
- scratch2, name, depth2, miss);
+ scratch2, scratch3, name, depth2, miss);
} else {
// CheckPrototypes has a side effect of fetching a 'holder'
// for API (object which is instanceof for the signature). It's
@@ -655,12 +655,13 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
String* name,
JSObject* interceptor_holder,
Label* miss_label) {
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, name,
+ scratch1, scratch2, scratch3, name,
miss_label);
__ EnterInternalFrame();
@@ -862,14 +863,15 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
Register object_reg,
JSObject* holder,
Register holder_reg,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
String* name,
int save_at_depth,
- Label* miss,
- Register extra) {
+ Label* miss) {
// Make sure there's no overlap between holder and object registers.
- ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));
- ASSERT(!extra.is(object_reg) && !extra.is(holder_reg) && !extra.is(scratch));
+ ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+ ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
+ && !scratch2.is(scratch1));
// Keep track of the current object in register reg.
Register reg = object_reg;
JSObject* current = object;
@@ -909,31 +911,31 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
miss,
reg,
name,
- scratch,
- extra);
- __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+ scratch1,
+ scratch2);
+ __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // from now on the object is in holder_reg
- __ mov(reg, FieldOperand(scratch, Map::kPrototypeOffset));
+ __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
} else if (Heap::InNewSpace(prototype)) {
// Get the map of the current object.
- __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- __ cmp(Operand(scratch), Immediate(Handle<Map>(current->map())));
+ __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ __ cmp(Operand(scratch1), Immediate(Handle<Map>(current->map())));
// Branch on the result of the map check.
__ j(not_equal, miss, not_taken);
// Check access rights to the global object. This has to happen
// after the map check so that we know that the object is
// actually a global object.
if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch, miss);
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
// Restore scratch register to be the map of the object.
// We load the prototype from the map in the scratch register.
- __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+ __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
}
// The prototype is in new space; we cannot store a reference
// to it in the code. Load it from the map.
reg = holder_reg; // from now on the object is in holder_reg
- __ mov(reg, FieldOperand(scratch, Map::kPrototypeOffset));
+ __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
} else {
// Check the map of the current object.
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
@@ -944,7 +946,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
// after the map check so that we know that the object is
// actually a global object.
if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch, miss);
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
}
// The prototype is in old space; load it directly.
reg = holder_reg; // from now on the object is in holder_reg
@@ -971,7 +973,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
if (holder->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch, miss);
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
};
// If we've skipped any global objects, it's not enough to verify
@@ -981,7 +983,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
object,
holder,
name,
- scratch,
+ scratch1,
miss);
if (result->IsFailure()) set_failure(Failure::cast(result));
@@ -995,6 +997,7 @@ void StubCompiler::GenerateLoadField(JSObject* object,
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
int index,
String* name,
Label* miss) {
@@ -1005,7 +1008,7 @@ void StubCompiler::GenerateLoadField(JSObject* object,
// Check the prototype chain.
Register reg =
CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, name, miss);
+ scratch1, scratch2, scratch3, name, miss);
// Get the value from the properties.
GenerateFastPropertyLoad(masm(), eax, reg, holder, index);
@@ -1019,6 +1022,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
Register name_reg,
Register scratch1,
Register scratch2,
+ Register scratch3,
AccessorInfo* callback,
String* name,
Label* miss,
@@ -1030,7 +1034,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
// Check that the maps haven't changed.
Register reg =
CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, name, miss);
+ scratch1, scratch2, scratch3, name, miss);
Handle<AccessorInfo> callback_handle(callback);
@@ -1094,6 +1098,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
Object* value,
String* name,
Label* miss) {
@@ -1104,7 +1109,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
// Check that the maps haven't changed.
Register reg =
CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, name, miss);
+ scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
__ mov(eax, Handle<Object>(value));
@@ -1119,6 +1124,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
Register name_reg,
Register scratch1,
Register scratch2,
+ Register scratch3,
String* name,
Label* miss) {
ASSERT(interceptor_holder->HasNamedInterceptor());
@@ -1147,7 +1153,8 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// property from further up the prototype chain if the call fails.
// Check that the maps haven't changed.
Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, name, miss);
+ scratch1, scratch2, scratch3,
+ name, miss);
ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
// Save necessary data before invoking an interceptor.
@@ -1195,6 +1202,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
lookup->holder(),
scratch1,
scratch2,
+ scratch3,
name,
miss);
}
@@ -1235,7 +1243,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// Check that the maps haven't changed.
Register holder_reg =
CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, name, miss);
+ scratch1, scratch2, scratch3, name, miss);
__ pop(scratch2); // save old return address
PushInterceptorArguments(masm(), receiver, holder_reg,
name_reg, interceptor_holder);
@@ -1310,8 +1318,8 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,
__ j(zero, &miss, not_taken);
// Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, edx, holder, ebx, eax,
- name, &miss, edi);
+ Register reg = CheckPrototypes(object, edx, holder, ebx, eax, edi,
+ name, &miss);
GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
@@ -1373,7 +1381,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
CheckPrototypes(JSObject::cast(object), edx,
holder, ebx,
- eax, name, &miss, edi);
+ eax, edi, name, &miss);
if (argc == 0) {
// Noop, return the length.
@@ -1519,7 +1527,7 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
__ j(zero, &miss);
CheckPrototypes(JSObject::cast(object), edx,
holder, ebx,
- eax, name, &miss, edi);
+ eax, edi, name, &miss);
// Get the elements array of the object.
__ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
@@ -1594,7 +1602,7 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
Context::STRING_FUNCTION_INDEX,
eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, name, &miss, edi);
+ ebx, edx, edi, name, &miss);
Register receiver = ebx;
Register index = edi;
@@ -1659,7 +1667,7 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
Context::STRING_FUNCTION_INDEX,
eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, name, &miss, edi);
+ ebx, edx, edi, name, &miss);
Register receiver = eax;
Register index = edi;
@@ -1764,7 +1772,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), edx, holder,
- ebx, eax, name, depth, &miss, edi);
+ ebx, eax, edi, name, depth, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -1787,7 +1795,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, name, &miss, edi);
+ ebx, edx, edi, name, &miss);
}
break;
@@ -1807,7 +1815,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::NUMBER_FUNCTION_INDEX, eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, name, &miss, edi);
+ ebx, edx, edi, name, &miss);
}
break;
}
@@ -1828,7 +1836,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::BOOLEAN_FUNCTION_INDEX, eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, name, &miss, edi);
+ ebx, edx, edi, name, &miss);
}
break;
}
@@ -1888,6 +1896,7 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
edx,
ebx,
edi,
+ eax,
&miss);
// Restore receiver.
@@ -1950,7 +1959,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
}
// Check that the maps haven't changed.
- CheckPrototypes(object, edx, holder, ebx, eax, name, &miss, edi);
+ CheckPrototypes(object, edx, holder, ebx, eax, edi, name, &miss);
// Get the value from the cell.
__ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
@@ -2226,7 +2235,7 @@ Object* LoadStubCompiler::CompileLoadNonexistent(String* name,
// Check the maps of the full prototype chain. Also check that
// global property cells up to (but not including) the last object
// in the prototype chain are empty.
- CheckPrototypes(object, eax, last, ebx, edx, name, &miss);
+ CheckPrototypes(object, eax, last, ebx, edx, edi, name, &miss);
// If the last object in the prototype chain is a global object,
// check that the global property cell is empty.
@@ -2263,7 +2272,7 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
// -----------------------------------
Label miss;
- GenerateLoadField(object, holder, eax, ebx, edx, index, name, &miss);
+ GenerateLoadField(object, holder, eax, ebx, edx, edi, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2284,7 +2293,7 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name,
Label miss;
Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
+ bool success = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx, edi,
callback, name, &miss, &failure);
if (!success) return failure;
@@ -2307,7 +2316,7 @@ Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
// -----------------------------------
Label miss;
- GenerateLoadConstant(object, holder, eax, ebx, edx, value, name, &miss);
+ GenerateLoadConstant(object, holder, eax, ebx, edx, edi, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2338,6 +2347,7 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
ecx,
edx,
ebx,
+ edi,
name,
&miss);
@@ -2370,7 +2380,7 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
}
// Check that the maps haven't changed.
- CheckPrototypes(object, eax, holder, ebx, edx, name, &miss, edi);
+ CheckPrototypes(object, eax, holder, ebx, edx, edi, name, &miss);
// Get the value from the cell.
__ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
@@ -2415,7 +2425,7 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss, not_taken);
- GenerateLoadField(receiver, holder, edx, ebx, ecx, index, name, &miss);
+ GenerateLoadField(receiver, holder, edx, ebx, ecx, edi, index, name, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_field, 1);
@@ -2444,7 +2454,7 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
__ j(not_equal, &miss, not_taken);
Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(receiver, holder, edx, eax, ebx, ecx,
+ bool success = GenerateLoadCallback(receiver, holder, edx, eax, ebx, ecx, edi,
callback, name, &miss, &failure);
if (!success) return failure;
@@ -2474,7 +2484,7 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss, not_taken);
- GenerateLoadConstant(receiver, holder, edx, ebx, ecx,
+ GenerateLoadConstant(receiver, holder, edx, ebx, ecx, edi,
value, name, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_constant_function, 1);
@@ -2510,6 +2520,7 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
eax,
ecx,
ebx,
+ edi,
name,
&miss);
__ bind(&miss);
diff --git a/src/ic.cc b/src/ic.cc
index 12332f9f..9bb18f7e 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -525,17 +525,12 @@ void CallICBase::UpdateCaches(LookupResult* lookup,
// Bail out if we didn't find a result.
if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
-#ifndef V8_TARGET_ARCH_IA32
- // Normal objects only implemented for IA32 by now.
- if (HasNormalObjectsInPrototypeChain(lookup, *object)) return;
-#else
if (lookup->holder() != *object &&
HasNormalObjectsInPrototypeChain(lookup, object->GetPrototype())) {
// Suppress optimization for prototype chains with slow properties objects
// in the middle.
return;
}
-#endif
// Compute the number of arguments.
int argc = target()->arguments_count();
diff --git a/src/json.js b/src/json.js
index cdb10be1..e7ec6100 100644
--- a/src/json.js
+++ b/src/json.js
@@ -29,7 +29,7 @@ var $JSON = global.JSON;
function ParseJSONUnfiltered(text) {
var s = $String(text);
- var f = %CompileString(text, true);
+ var f = %CompileString(s, true);
return f();
}
diff --git a/src/macros.py b/src/macros.py
index b4be15bf..643a2851 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -115,7 +115,8 @@ macro FLOOR(arg) = $floor(arg);
# Macro for ECMAScript 5 queries of the type:
# "Type(O) is object."
# This is the same as being either a function or an object in V8 terminology.
-macro IS_SPEC_OBJECT_OR_NULL(arg) = (%_IsObject(arg) || %_IsFunction(arg));
+# In addition, this also includes undetectable objects.
+macro IS_SPEC_OBJECT(arg) = (%_IsSpecObject(arg));
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 95afb4ab..d9b0222a 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "execution.h"
+#include "heap-profiler.h"
#include "global-handles.h"
#include "ic-inl.h"
#include "mark-compact.h"
@@ -425,8 +426,10 @@ void MarkCompactCollector::MarkMapContents(Map* map) {
// Since the descriptor array has been marked already, it is fine
// that one of these fields contains a pointer to it.
MarkingVisitor visitor; // Has no state or contents.
- visitor.VisitPointers(HeapObject::RawField(map, Map::kPrototypeOffset),
- HeapObject::RawField(map, Map::kSize));
+ visitor.VisitPointers(HeapObject::RawField(map,
+ Map::kPointerFieldsBeginOffset),
+ HeapObject::RawField(map,
+ Map::kPointerFieldsEndOffset));
}
@@ -2216,6 +2219,7 @@ int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
if (copied_to->IsJSFunction()) {
PROFILE(FunctionMoveEvent(old_addr, new_addr));
}
+ HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
return obj_size;
}
@@ -2262,6 +2266,7 @@ int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
// Notify the logger that compiled code has moved.
PROFILE(CodeMoveEvent(old_addr, new_addr));
}
+ HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
return obj_size;
}
@@ -2306,6 +2311,7 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
if (copied_to->IsJSFunction()) {
PROFILE(FunctionMoveEvent(old_addr, new_addr));
}
+ HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
return obj_size;
}
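HEAP_PROFILE presumably gates its call the same way the PROFILE macro on the neighbouring lines does; a sketch of the assumed shape (the real definition lives in heap-profiler.h):

    // Assumed shape only; not the real macro.
    #define HEAP_PROFILE_SKETCH(Call)                        \
      do {                                                   \
        if (v8::internal::HeapProfiler::is_profiling()) {    \
          v8::internal::HeapProfiler::Call;                  \
        }                                                    \
      } while (false)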
diff --git a/src/messages.cc b/src/messages.cc
index 7cb1d202..ec91cc87 100644
--- a/src/messages.cc
+++ b/src/messages.cc
@@ -66,7 +66,8 @@ Handle<Object> MessageHandler::MakeMessageObject(
const char* type,
MessageLocation* loc,
Vector< Handle<Object> > args,
- Handle<String> stack_trace) {
+ Handle<String> stack_trace,
+ Handle<JSArray> stack_frames) {
// Build error message object
v8::HandleScope scope; // Instantiate a closeable HandleScope for EscapeFrom.
Handle<Object> type_str = Factory::LookupAsciiSymbol(type);
@@ -90,13 +91,17 @@ Handle<Object> MessageHandler::MakeMessageObject(
Handle<Object> stack_trace_val = stack_trace.is_null()
? Factory::undefined_value()
: Handle<Object>::cast(stack_trace);
- const int argc = 6;
+ Handle<Object> stack_frames_val = stack_frames.is_null()
+ ? Factory::undefined_value()
+ : Handle<Object>::cast(stack_frames);
+ const int argc = 7;
Object** argv[argc] = { type_str.location(),
array.location(),
start_handle.location(),
end_handle.location(),
script.location(),
- stack_trace_val.location() };
+ stack_trace_val.location(),
+ stack_frames_val.location() };
// Setup a catch handler to catch exceptions in creating the message. This
// handler is non-verbose to avoid calling MakeMessage recursively in case of
diff --git a/src/messages.h b/src/messages.h
index 80ce8eb9..440bde87 100644
--- a/src/messages.h
+++ b/src/messages.h
@@ -96,7 +96,8 @@ class MessageHandler {
static Handle<Object> MakeMessageObject(const char* type,
MessageLocation* loc,
Vector< Handle<Object> > args,
- Handle<String> stack_trace);
+ Handle<String> stack_trace,
+ Handle<JSArray> stack_frames);
// Report a formatted message (needs JS allocation).
static void ReportMessage(MessageLocation* loc, Handle<Object> message);
diff --git a/src/messages.js b/src/messages.js
index 99ba4546..b0f8aa16 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -181,7 +181,6 @@ function FormatMessage(message) {
// RangeError
invalid_array_length: "Invalid array length",
stack_overflow: "Maximum call stack size exceeded",
- apply_overflow: "Function.prototype.apply cannot support %0 arguments",
// SyntaxError
unable_to_parse: "Parse error",
duplicate_regexp_flag: "Duplicate RegExp flag %0",
@@ -601,18 +600,22 @@ function GetPositionInLine(message) {
}
-function ErrorMessage(type, args, startPos, endPos, script, stackTrace) {
+function ErrorMessage(type, args, startPos, endPos, script, stackTrace,
+ stackFrames) {
this.startPos = startPos;
this.endPos = endPos;
this.type = type;
this.args = args;
this.script = script;
this.stackTrace = stackTrace;
+ this.stackFrames = stackFrames;
}
-function MakeMessage(type, args, startPos, endPos, script, stackTrace) {
- return new ErrorMessage(type, args, startPos, endPos, script, stackTrace);
+function MakeMessage(type, args, startPos, endPos, script, stackTrace,
+ stackFrames) {
+ return new ErrorMessage(type, args, startPos, endPos, script, stackTrace,
+ stackFrames);
}
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index f8b88d7a..79801f07 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -907,6 +907,11 @@ void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
index 0f0a7462..3ad94e86 100644
--- a/src/mips/codegen-mips.h
+++ b/src/mips/codegen-mips.h
@@ -355,6 +355,7 @@ class CodeGenerator: public AstVisitor {
void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
+ void GenerateIsSpecObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
void GenerateStringAdd(ZoneList<Expression*>* args);
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 0b5ff993..d340e4b5 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -789,6 +789,7 @@ void SharedFunctionInfo::SharedFunctionInfoVerify() {
CHECK(IsSharedFunctionInfo());
VerifyObjectField(kNameOffset);
VerifyObjectField(kCodeOffset);
+ VerifyObjectField(kScopeInfoOffset);
VerifyObjectField(kInstanceClassNameOffset);
VerifyObjectField(kFunctionDataOffset);
VerifyObjectField(kScriptOffset);
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 0e455508..101096d6 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -2060,6 +2060,21 @@ void ExternalFloatArray::set(int index, float value) {
ptr[index] = value;
}
+inline Scavenger Map::scavenger() {
+ Scavenger callback = reinterpret_cast<Scavenger>(
+ READ_INTPTR_FIELD(this, kScavengerCallbackOffset));
+
+ ASSERT(callback == Heap::GetScavenger(instance_type(),
+ instance_size()));
+
+ return callback;
+}
+
+inline void Map::set_scavenger(Scavenger callback) {
+ WRITE_INTPTR_FIELD(this,
+ kScavengerCallbackOffset,
+ reinterpret_cast<intptr_t>(callback));
+}
int Map::instance_size() {
return READ_BYTE_FIELD(this, kInstanceSizeOffset) << kPointerSizeLog2;
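Two things are worth noting about this cache: the callback is stored with WRITE_INTPTR_FIELD, so the GC's pointer visitors never mistake it for a tagged heap pointer, and scavenging becomes one indirect call per object instead of a switch over instance types. A self-contained model of the dispatch idea (the real wiring uses the Map::Scavenge inline added later in this patch):

    // Self-contained model of per-map scavenger dispatch; not V8 code.
    struct MapSketch;
    struct ObjSketch;
    typedef void (*ScavengerSketch)(MapSketch* map, ObjSketch** slot,
                                    ObjSketch* obj);
    struct MapSketch { ScavengerSketch scavenger; };  // cached at map creation
    struct ObjSketch  { MapSketch* map; };

    void ScavengeSlotSketch(ObjSketch** slot) {
      ObjSketch* object = *slot;
      // One indirect call replaces a switch over instance types.
      object->map->scavenger(object->map, slot, object);
    }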
@@ -2632,6 +2647,19 @@ void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
}
+SerializedScopeInfo* SharedFunctionInfo::scope_info() {
+ return reinterpret_cast<SerializedScopeInfo*>(
+ READ_FIELD(this, kScopeInfoOffset));
+}
+
+
+void SharedFunctionInfo::set_scope_info(SerializedScopeInfo* value,
+ WriteBarrierMode mode) {
+ WRITE_FIELD(this, kScopeInfoOffset, reinterpret_cast<Object*>(value));
+ CONDITIONAL_WRITE_BARRIER(this, kScopeInfoOffset, mode);
+}
+
+
bool SharedFunctionInfo::is_compiled() {
// TODO(1242782): Create a code kind for uncompiled code.
return code()->kind() != Code::STUB;
@@ -2808,7 +2836,6 @@ JSValue* JSValue::cast(Object* obj) {
INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
-INT_ACCESSORS(Code, sinfo_size, kSInfoSizeOffset)
byte* Code::instruction_start() {
@@ -2852,11 +2879,6 @@ bool Code::contains(byte* pc) {
}
-byte* Code::sinfo_start() {
- return FIELD_ADDR(this, kHeaderSize + body_size());
-}
-
-
ACCESSORS(JSArray, length, Object, kLengthOffset)
diff --git a/src/objects.cc b/src/objects.cc
index e79a5505..8f668fb3 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -2190,6 +2190,8 @@ Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
int new_instance_size = map()->instance_size() - instance_size_delta;
new_map->set_inobject_properties(0);
new_map->set_instance_size(new_instance_size);
+ new_map->set_scavenger(Heap::GetScavenger(new_map->instance_type(),
+ new_map->instance_size()));
Heap::CreateFillerObjectAt(this->address() + new_instance_size,
instance_size_delta);
}
@@ -5033,7 +5035,7 @@ void Map::ClearNonLiveTransitions(Object* real_prototype) {
void Map::MapIterateBody(ObjectVisitor* v) {
// Assumes all Object* members are contiguously allocated!
- IteratePointers(v, kPrototypeOffset, kCodeCacheOffset + kPointerSize);
+ IteratePointers(v, kPointerFieldsBeginOffset, kPointerFieldsEndOffset);
}
@@ -5325,8 +5327,6 @@ void Code::CodeIterateBody(ObjectVisitor* v) {
for (; !it.done(); it.next()) {
it.rinfo()->Visit(v);
}
-
- ScopeInfo<>::IterateScopeInfo(this, v);
}
@@ -7338,6 +7338,46 @@ int HashTable<Shape, Key>::FindEntry(Key key) {
}
+// Find entry for key otherwise return kNotFound.
+int StringDictionary::FindEntry(String* key) {
+ if (!key->IsSymbol()) {
+ return HashTable<StringDictionaryShape, String*>::FindEntry(key);
+ }
+
+ // Optimized for a symbol key. Knowledge of the key type allows:
+ // 1. Move the check whether the key is a symbol out of the loop.
+ // 2. Avoid comparing hash codes in symbol-to-symbol comparisons.
+ // 3. Detect a case when a dictionary key is not a symbol but the key is.
+ //    On a positive match the dictionary key may be replaced by the symbol
+ //    with minimal performance penalty. This gives further lookups in code
+ //    stubs a chance to hit (and a significant performance boost for a
+ //    certain style of code).
+
+ // EnsureCapacity will guarantee the hash table is never full.
+ uint32_t capacity = Capacity();
+ uint32_t entry = FirstProbe(key->Hash(), capacity);
+ uint32_t count = 1;
+
+ while (true) {
+ int index = EntryToIndex(entry);
+ Object* element = get(index);
+ if (element->IsUndefined()) break; // Empty entry.
+ if (key == element) return entry;
+ if (!element->IsSymbol() &&
+ !element->IsNull() &&
+ String::cast(element)->Equals(key)) {
+ // Replace a non-symbol key by the equivalent symbol for faster further
+ // lookups.
+ set(index, key);
+ return entry;
+ }
+ ASSERT(element->IsNull() || !String::cast(element)->Equals(key));
+ entry = NextProbe(entry, count++, capacity);
+ }
+ return kNotFound;
+}
+
+
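FirstProbe and NextProbe are, in this era, mask-based quadratic probing over a power-of-two table (the signatures below are assumed): the cumulative step sizes 1, 2, 3, ... land on triangular-number offsets from the first probe, which visits every slot when the size is a power of two:

    #include <cstdint>

    inline uint32_t FirstProbeSketch(uint32_t hash, uint32_t size) {
      return hash & (size - 1);  // size is 2^n
    }

    inline uint32_t NextProbeSketch(uint32_t last, uint32_t number,
                                    uint32_t size) {
      // Offsets from the first probe are 1, 3, 6, 10, ... (quadratic probing).
      return (last + number) & (size - 1);
    }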
template<typename Shape, typename Key>
Object* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
int capacity = Capacity();
diff --git a/src/objects.h b/src/objects.h
index 4a7dee6a..2b646113 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -2012,7 +2012,7 @@ class HashTable: public FixedArray {
static const int kMaxCapacity =
(FixedArray::kMaxLength - kElementsStartOffset) / kEntrySize;
- // Find entry for key otherwise return -1.
+ // Find entry for key otherwise return kNotFound.
int FindEntry(Key key);
protected:
@@ -2294,6 +2294,10 @@ class StringDictionary: public Dictionary<StringDictionaryShape, String*> {
// For transforming properties of a JSObject.
Object* TransformPropertiesToFastFor(JSObject* obj,
int unused_property_fields);
+
+ // Find entry for key otherwise return kNotFound. Optimized version of
+ // HashTable::FindEntry.
+ int FindEntry(String* key);
};
@@ -2744,10 +2748,6 @@ class Code: public HeapObject {
inline int relocation_size();
- // [sinfo_size]: Size of scope information.
- inline int sinfo_size();
- inline void set_sinfo_size(int value);
-
// [flags]: Various code flags.
inline Flags flags();
inline void set_flags(Flags flags);
@@ -2816,9 +2816,6 @@ class Code: public HeapObject {
// Returns true if pc is inside this object's instructions.
inline bool contains(byte* pc);
- // Returns the address of the scope information.
- inline byte* sinfo_start();
-
// Relocate the code by delta bytes. Called to signal that this code
// object has been moved by delta bytes.
void Relocate(intptr_t delta);
@@ -2826,12 +2823,10 @@ class Code: public HeapObject {
// Migrate code described by desc.
void CopyFrom(const CodeDesc& desc);
- // Returns the object size for a given body and sinfo size (Used for
- // allocation).
- static int SizeFor(int body_size, int sinfo_size) {
+ // Returns the object size for a given body (used for allocation).
+ static int SizeFor(int body_size) {
ASSERT_SIZE_TAG_ALIGNED(body_size);
- ASSERT_SIZE_TAG_ALIGNED(sinfo_size);
- return RoundUp(kHeaderSize + body_size + sinfo_size, kCodeAlignment);
+ return RoundUp(kHeaderSize + body_size, kCodeAlignment);
}
// Calculate the size of the code object to report for log events. This takes
@@ -2851,7 +2846,7 @@ class Code: public HeapObject {
static inline Code* cast(Object* obj);
// Dispatched behavior.
- int CodeSize() { return SizeFor(body_size(), sinfo_size()); }
+ int CodeSize() { return SizeFor(body_size()); }
void CodeIterateBody(ObjectVisitor* v);
#ifdef DEBUG
void CodePrint();
@@ -2865,8 +2860,7 @@ class Code: public HeapObject {
// Layout description.
static const int kInstructionSizeOffset = HeapObject::kHeaderSize;
static const int kRelocationInfoOffset = kInstructionSizeOffset + kIntSize;
- static const int kSInfoSizeOffset = kRelocationInfoOffset + kPointerSize;
- static const int kFlagsOffset = kSInfoSizeOffset + kIntSize;
+ static const int kFlagsOffset = kRelocationInfoOffset + kPointerSize;
static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize;
// Add padding to align the instruction start following right after
// the Code object header.
@@ -2899,6 +2893,7 @@ class Code: public HeapObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
};
+typedef void (*Scavenger)(Map* map, HeapObject** slot, HeapObject* object);
// All heap objects have a Map that describes their structure.
// A Map contains information about:
@@ -3100,6 +3095,13 @@ class Map: public HeapObject {
void MapVerify();
#endif
+ inline Scavenger scavenger();
+ inline void set_scavenger(Scavenger callback);
+
+ inline void Scavenge(HeapObject** slot, HeapObject* obj) {
+ scavenger()(this, slot, obj);
+ }
+
static const int kMaxPreAllocatedPropertyFields = 255;
// Layout description.
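Aside: the Scavenger typedef and Map::Scavenge above replace a per-object switch on instance type in the GC's hot copy loop with one indirect call through a function pointer cached on the Map. A minimal sketch of that dispatch pattern, with illustrative names only (MapS and EvacuateGeneric are not V8 declarations):

    struct HeapObjectS;  // opaque in this sketch
    struct MapS;
    typedef void (*ScavengerFn)(MapS* map, HeapObjectS** slot, HeapObjectS* obj);

    struct MapS {
      // Selected once, when the map is created or normalized, from the
      // object's instance type and size (cf. Heap::GetScavenger).
      ScavengerFn scavenger;
    };

    void EvacuateGeneric(MapS*, HeapObjectS** slot, HeapObjectS* obj) {
      // ...copy 'obj' to to-space and update '*slot' with the new address...
    }

    // The scavenge loop then dispatches without switching on instance type:
    inline void Scavenge(MapS* map, HeapObjectS** slot, HeapObjectS* obj) {
      map->scavenger(map, slot, obj);
    }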
@@ -3110,7 +3112,8 @@ class Map: public HeapObject {
static const int kInstanceDescriptorsOffset =
kConstructorOffset + kPointerSize;
static const int kCodeCacheOffset = kInstanceDescriptorsOffset + kPointerSize;
- static const int kPadStart = kCodeCacheOffset + kPointerSize;
+ static const int kScavengerCallbackOffset = kCodeCacheOffset + kPointerSize;
+ static const int kPadStart = kScavengerCallbackOffset + kPointerSize;
static const int kSize = MAP_POINTER_ALIGN(kPadStart);
// Layout of pointer fields. Heap iteration code relies on them
@@ -3273,6 +3276,9 @@ class SharedFunctionInfo: public HeapObject {
// [code]: Function code.
DECL_ACCESSORS(code, Code)
+ // [scope_info]: Scope info.
+ DECL_ACCESSORS(scope_info, SerializedScopeInfo)
+
// [construct stub]: Code stub for constructing instances of this function.
DECL_ACCESSORS(construct_stub, Code)
@@ -3426,7 +3432,8 @@ class SharedFunctionInfo: public HeapObject {
// Pointer fields.
static const int kNameOffset = HeapObject::kHeaderSize;
static const int kCodeOffset = kNameOffset + kPointerSize;
- static const int kConstructStubOffset = kCodeOffset + kPointerSize;
+ static const int kScopeInfoOffset = kCodeOffset + kPointerSize;
+ static const int kConstructStubOffset = kScopeInfoOffset + kPointerSize;
static const int kInstanceClassNameOffset =
kConstructStubOffset + kPointerSize;
static const int kFunctionDataOffset =
diff --git a/src/parser.cc b/src/parser.cc
index fb58cfa3..dd5f9bd0 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -36,6 +36,7 @@
#include "parser.h"
#include "platform.h"
#include "runtime.h"
+#include "scopeinfo.h"
#include "scopes.h"
#include "string-stream.h"
@@ -1969,7 +1970,8 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
Handle<Code> code = Handle<Code>(fun->shared()->code());
Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
Handle<SharedFunctionInfo> shared =
- Factory::NewSharedFunctionInfo(name, literals, code);
+ Factory::NewSharedFunctionInfo(name, literals, code,
+ Handle<SerializedScopeInfo>(fun->shared()->scope_info()));
shared->set_construct_stub(*construct_stub);
// Copy the function data to the shared function info.
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index e3ae867e..58ff1540 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -83,6 +83,12 @@ void OS::Setup() {
}
+void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
+ __asm__ __volatile__("" : : : "memory");
+ *ptr = value;
+}
+
+
uint64_t OS::CpuFeaturesImpliedByPlatform() {
return 0; // OpenBSD runs on anything.
}
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 57ff6610..5315bfbd 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -181,8 +181,6 @@ void ProfileNode::Print(int indent) {
}
-namespace {
-
class DeleteNodesCallback {
public:
void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }
@@ -194,8 +192,6 @@ class DeleteNodesCallback {
void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
};
-} // namespace
-
ProfileTree::ProfileTree()
: root_entry_(Logger::FUNCTION_TAG,
@@ -240,8 +236,6 @@ void ProfileTree::AddPathFromStart(const Vector<CodeEntry*>& path) {
}
-namespace {
-
struct NodesPair {
NodesPair(ProfileNode* src, ProfileNode* dst)
: src(src), dst(dst) { }
@@ -294,8 +288,6 @@ class FilteredCloneCallback {
int security_token_id_;
};
-} // namespace
-
void ProfileTree::FilteredClone(ProfileTree* src, int security_token_id) {
ms_to_ticks_scale_ = src->ms_to_ticks_scale_;
FilteredCloneCallback cb(root_, security_token_id);
@@ -309,8 +301,6 @@ void ProfileTree::SetTickRatePerMs(double ticks_per_ms) {
}
-namespace {
-
class Position {
public:
explicit Position(ProfileNode* node)
@@ -328,8 +318,6 @@ class Position {
int child_idx_;
};
-} // namespace
-
// Non-recursive implementation of a depth-first post-order tree traversal.
template <typename Callback>
@@ -355,8 +343,6 @@ void ProfileTree::TraverseDepthFirst(Callback* callback) {
}
-namespace {
-
class CalculateTotalTicksCallback {
public:
void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }
@@ -370,8 +356,6 @@ class CalculateTotalTicksCallback {
}
};
-} // namespace
-
void ProfileTree::CalculateTotalTicks() {
CalculateTotalTicksCallback cb;
@@ -877,6 +861,11 @@ void HeapEntry::SetAutoIndexReference(HeapEntry* entry) {
}
+void HeapEntry::SetUnidirAutoIndexReference(HeapEntry* entry) {
+ children_.Add(new HeapGraphEdge(next_auto_index_++, this, entry));
+}
+
+
int HeapEntry::TotalSize() {
return total_size_ != kUnknownSize ? total_size_ : CalculateTotalSize();
}
@@ -888,12 +877,12 @@ int HeapEntry::NonSharedTotalSize() {
}
-int HeapEntry::CalculateTotalSize() {
- snapshot_->ClearPaint();
+template<class Visitor>
+void HeapEntry::ApplyAndPaintAllReachable(Visitor* visitor) {
List<HeapEntry*> list(10);
list.Add(this);
- total_size_ = self_size_;
this->PaintReachable();
+ visitor->Apply(this);
while (!list.is_empty()) {
HeapEntry* entry = list.RemoveLast();
const int children_count = entry->children_.length();
@@ -902,15 +891,48 @@ int HeapEntry::CalculateTotalSize() {
if (!child->painted_reachable()) {
list.Add(child);
child->PaintReachable();
- total_size_ += child->self_size_;
+ visitor->Apply(child);
}
}
}
- return total_size_;
}
-namespace {
+class NullClass {
+ public:
+ void Apply(HeapEntry* entry) { }
+};
+
+void HeapEntry::PaintAllReachable() {
+ NullClass null;
+ ApplyAndPaintAllReachable(&null);
+}
+
+
+class TotalSizeCalculator {
+ public:
+ TotalSizeCalculator()
+ : total_size_(0) {
+ }
+
+ int total_size() const { return total_size_; }
+
+ void Apply(HeapEntry* entry) {
+ total_size_ += entry->self_size();
+ }
+
+ private:
+ int total_size_;
+};
+
+int HeapEntry::CalculateTotalSize() {
+ snapshot_->ClearPaint();
+ TotalSizeCalculator calc;
+ ApplyAndPaintAllReachable(&calc);
+ total_size_ = calc.total_size();
+ return total_size_;
+}
+
class NonSharedSizeCalculator {
public:
@@ -930,41 +952,26 @@ class NonSharedSizeCalculator {
int non_shared_total_size_;
};
-} // namespace
-
int HeapEntry::CalculateNonSharedTotalSize() {
// To calculate non-shared total size, first we paint all reachable
// nodes in one color, then we paint all nodes reachable from other
// nodes with a different color. Then we consider only nodes painted
- // with the first color for caclulating the total size.
+ // with the first color for calculating the total size.
snapshot_->ClearPaint();
- List<HeapEntry*> list(10);
- list.Add(this);
- this->PaintReachable();
- while (!list.is_empty()) {
- HeapEntry* entry = list.RemoveLast();
- const int children_count = entry->children_.length();
- for (int i = 0; i < children_count; ++i) {
- HeapEntry* child = entry->children_[i]->to();
- if (!child->painted_reachable()) {
- list.Add(child);
- child->PaintReachable();
- }
- }
- }
+ PaintAllReachable();
- List<HeapEntry*> list2(10);
+ List<HeapEntry*> list(10);
if (this != snapshot_->root()) {
- list2.Add(snapshot_->root());
+ list.Add(snapshot_->root());
snapshot_->root()->PaintReachableFromOthers();
}
- while (!list2.is_empty()) {
- HeapEntry* entry = list2.RemoveLast();
+ while (!list.is_empty()) {
+ HeapEntry* entry = list.RemoveLast();
const int children_count = entry->children_.length();
for (int i = 0; i < children_count; ++i) {
HeapEntry* child = entry->children_[i]->to();
if (child != this && child->not_painted_reachable_from_others()) {
- list2.Add(child);
+ list.Add(child);
child->PaintReachableFromOthers();
}
}
@@ -972,7 +979,8 @@ int HeapEntry::CalculateNonSharedTotalSize() {
NonSharedSizeCalculator calculator;
snapshot_->IterateEntries(&calculator);
- return calculator.non_shared_total_size();
+ non_shared_total_size_ = calculator.non_shared_total_size();
+ return non_shared_total_size_;
}
@@ -1078,7 +1086,8 @@ void HeapEntry::CutEdges() {
void HeapEntry::Print(int max_depth, int indent) {
- OS::Print("%6d %6d %6d ", self_size_, TotalSize(), NonSharedTotalSize());
+ OS::Print("%6d %6d %6d [%ld] ",
+ self_size_, TotalSize(), NonSharedTotalSize(), id_);
if (type_ != STRING) {
OS::Print("%s %.40s\n", TypeAsString(), name_);
} else {
@@ -1244,7 +1253,13 @@ HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
: collection_(collection),
title_(title),
uid_(uid),
- root_(this) {
+ root_(this),
+ sorted_entries_(NULL) {
+}
+
+
+HeapSnapshot::~HeapSnapshot() {
+ delete sorted_entries_;
}
@@ -1355,6 +1370,7 @@ HeapEntry* HeapSnapshot::AddEntry(HeapObject* object,
HeapEntry* entry = new HeapEntry(this,
type,
name,
+ collection_->GetObjectId(object->address()),
GetObjectSize(object),
GetObjectSecurityToken(object));
entries_.Pair(object, entry);
@@ -1381,8 +1397,6 @@ HeapEntry* HeapSnapshot::AddEntry(HeapObject* object,
}
-namespace {
-
class EdgesCutter {
public:
explicit EdgesCutter(int global_security_token)
@@ -1400,8 +1414,6 @@ class EdgesCutter {
const int global_security_token_;
};
-} // namespace
-
void HeapSnapshot::CutObjectsFromForeignSecurityContexts() {
EdgesCutter cutter(GetGlobalSecurityToken());
entries_.Apply(&cutter);
@@ -1454,13 +1466,129 @@ int HeapSnapshot::CalculateNetworkSize(JSObject* obj) {
}
+class EntriesCollector {
+ public:
+ explicit EntriesCollector(List<HeapEntry*>* list) : list_(list) { }
+ void Apply(HeapEntry* entry) {
+ list_->Add(entry);
+ }
+ private:
+ List<HeapEntry*>* list_;
+};
+
+template<class T>
+static int SortByIds(const T* entry1_ptr,
+ const T* entry2_ptr) {
+ if ((*entry1_ptr)->id() == (*entry2_ptr)->id()) return 0;
+ return (*entry1_ptr)->id() < (*entry2_ptr)->id() ? -1 : 1;
+}
+
+List<HeapEntry*>* HeapSnapshot::GetSortedEntriesList() {
+ if (sorted_entries_ != NULL) return sorted_entries_;
+ sorted_entries_ = new List<HeapEntry*>(entries_.capacity());
+ EntriesCollector collector(sorted_entries_);
+ entries_.Apply(&collector);
+ sorted_entries_->Sort(SortByIds);
+ return sorted_entries_;
+}
+
+
+HeapSnapshotsDiff* HeapSnapshot::CompareWith(HeapSnapshot* snapshot) {
+ return collection_->CompareSnapshots(this, snapshot);
+}
+
+
void HeapSnapshot::Print(int max_depth) {
root_.Print(max_depth, 0);
}
+HeapObjectsMap::HeapObjectsMap()
+ : initial_fill_mode_(true),
+ next_id_(1),
+ entries_map_(AddressesMatch),
+ entries_(new List<EntryInfo>()) { }
+
+
+HeapObjectsMap::~HeapObjectsMap() {
+ delete entries_;
+}
+
+
+void HeapObjectsMap::SnapshotGenerationFinished() {
+ initial_fill_mode_ = false;
+ RemoveDeadEntries();
+}
+
+
+uint64_t HeapObjectsMap::FindObject(Address addr) {
+ if (!initial_fill_mode_) {
+ uint64_t existing = FindEntry(addr);
+ if (existing != 0) return existing;
+ }
+ uint64_t id = next_id_++;
+ AddEntry(addr, id);
+ return id;
+}
+
+
+void HeapObjectsMap::MoveObject(Address from, Address to) {
+ if (from == to) return;
+ HashMap::Entry* entry = entries_map_.Lookup(from, AddressHash(from), false);
+ if (entry != NULL) {
+ void* value = entry->value;
+ entries_map_.Remove(from, AddressHash(from));
+ entry = entries_map_.Lookup(to, AddressHash(to), true);
+    // There may already be an entry at the new location; that is OK, as the
+    // GC can overwrite dead objects with live objects being moved.
+ entry->value = value;
+ }
+}
+
+
+void HeapObjectsMap::AddEntry(Address addr, uint64_t id) {
+ HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), true);
+ ASSERT(entry->value == NULL);
+ entry->value = reinterpret_cast<void*>(entries_->length());
+ entries_->Add(EntryInfo(id));
+}
+
+
+uint64_t HeapObjectsMap::FindEntry(Address addr) {
+ HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), false);
+ if (entry != NULL) {
+ int entry_index =
+ static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
+ EntryInfo& entry_info = entries_->at(entry_index);
+ entry_info.accessed = true;
+ return entry_info.id;
+ } else {
+ return 0;
+ }
+}
+
+
+void HeapObjectsMap::RemoveDeadEntries() {
+ List<EntryInfo>* new_entries = new List<EntryInfo>();
+ for (HashMap::Entry* entry = entries_map_.Start();
+ entry != NULL;
+ entry = entries_map_.Next(entry)) {
+ int entry_index =
+ static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
+ EntryInfo& entry_info = entries_->at(entry_index);
+ if (entry_info.accessed) {
+ entry->value = reinterpret_cast<void*>(new_entries->length());
+ new_entries->Add(EntryInfo(entry_info.id, false));
+ }
+ }
+ delete entries_;
+ entries_ = new_entries;
+}
+
+
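Aside: HeapObjectsMap above assigns a stable uint64 id to each heap address and keeps it consistent as the GC moves objects. A condensed model of the same bookkeeping, using std::unordered_map in place of V8's HashMap (names illustrative):

    #include <cstdint>
    #include <unordered_map>

    class ObjectIds {
     public:
      // Returns the existing id for 'addr', or assigns the next one.
      uint64_t FindOrAddObject(uintptr_t addr) {
        auto it = ids_.find(addr);
        if (it != ids_.end()) return it->second;
        return ids_[addr] = next_id_++;
      }
      // Called from the GC when an object moves; the id follows the object.
      void MoveObject(uintptr_t from, uintptr_t to) {
        auto it = ids_.find(from);
        if (it == ids_.end()) return;
        uint64_t id = it->second;
        ids_.erase(it);
        ids_[to] = id;  // may overwrite the id of a dead object, which is fine
      }
     private:
      uint64_t next_id_ = 1;
      std::unordered_map<uintptr_t, uint64_t> ids_;
    };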
HeapSnapshotsCollection::HeapSnapshotsCollection()
- : snapshots_uids_(HeapSnapshotsMatch),
+ : is_tracking_objects_(false),
+ snapshots_uids_(HeapSnapshotsMatch),
token_enumerator_(new TokenEnumerator()) {
}
@@ -1478,6 +1606,7 @@ HeapSnapshotsCollection::~HeapSnapshotsCollection() {
HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(const char* name,
unsigned uid) {
+  is_tracking_objects_ = true;  // Start watching for heap object moves.
HeapSnapshot* snapshot = new HeapSnapshot(this, name, uid);
snapshots_.Add(snapshot);
HashMap::Entry* entry =
@@ -1498,6 +1627,13 @@ HeapSnapshot* HeapSnapshotsCollection::GetSnapshot(unsigned uid) {
}
+HeapSnapshotsDiff* HeapSnapshotsCollection::CompareSnapshots(
+ HeapSnapshot* snapshot1,
+ HeapSnapshot* snapshot2) {
+ return comparator_.Compare(snapshot1, snapshot2);
+}
+
+
HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot)
: snapshot_(snapshot) {
}
@@ -1555,13 +1691,13 @@ void HeapSnapshotGenerator::ExtractClosureReferences(JSObject* js_obj,
JSFunction* func = JSFunction::cast(js_obj);
Context* context = func->context();
ZoneScope zscope(DELETE_ON_EXIT);
- ScopeInfo<ZoneListAllocationPolicy> scope_info(
- context->closure()->shared()->code());
- int locals_number = scope_info.NumberOfLocals();
+ SerializedScopeInfo* serialized_scope_info =
+ context->closure()->shared()->scope_info();
+ ScopeInfo<ZoneListAllocationPolicy> zone_scope_info(serialized_scope_info);
+ int locals_number = zone_scope_info.NumberOfLocals();
for (int i = 0; i < locals_number; ++i) {
- String* local_name = *scope_info.LocalName(i);
- int idx = ScopeInfo<>::ContextSlotIndex(
- context->closure()->shared()->code(), local_name, NULL);
+ String* local_name = *zone_scope_info.LocalName(i);
+ int idx = serialized_scope_info->ContextSlotIndex(local_name, NULL);
if (idx >= 0 && idx < context->length()) {
snapshot_->SetClosureReference(entry, local_name, context->get(idx));
}
@@ -1630,6 +1766,64 @@ void HeapSnapshotGenerator::ExtractElementReferences(JSObject* js_obj,
}
}
+
+static void DeleteHeapSnapshotsDiff(HeapSnapshotsDiff** diff_ptr) {
+ delete *diff_ptr;
+}
+
+HeapSnapshotsComparator::~HeapSnapshotsComparator() {
+ diffs_.Iterate(DeleteHeapSnapshotsDiff);
+}
+
+
+HeapSnapshotsDiff* HeapSnapshotsComparator::Compare(HeapSnapshot* snapshot1,
+ HeapSnapshot* snapshot2) {
+ HeapSnapshotsDiff* diff = new HeapSnapshotsDiff(snapshot1, snapshot2);
+ diffs_.Add(diff);
+ List<HeapEntry*>* entries1 = snapshot1->GetSortedEntriesList();
+ List<HeapEntry*>* entries2 = snapshot2->GetSortedEntriesList();
+ int i = 0, j = 0;
+ List<HeapEntry*> added_entries, deleted_entries;
+ while (i < entries1->length() && j < entries2->length()) {
+ uint64_t id1 = entries1->at(i)->id();
+ uint64_t id2 = entries2->at(j)->id();
+ if (id1 == id2) {
+ i++;
+ j++;
+ } else if (id1 < id2) {
+ HeapEntry* entry = entries1->at(i++);
+ deleted_entries.Add(entry);
+ } else {
+ HeapEntry* entry = entries2->at(j++);
+ added_entries.Add(entry);
+ }
+ }
+ while (i < entries1->length()) {
+ HeapEntry* entry = entries1->at(i++);
+ deleted_entries.Add(entry);
+ }
+ while (j < entries2->length()) {
+ HeapEntry* entry = entries2->at(j++);
+ added_entries.Add(entry);
+ }
+
+ snapshot1->ClearPaint();
+ snapshot1->root()->PaintAllReachable();
+ for (int i = 0; i < deleted_entries.length(); ++i) {
+ HeapEntry* entry = deleted_entries[i];
+ if (entry->painted_reachable())
+ diff->AddDeletedEntry(entry);
+ }
+ snapshot2->ClearPaint();
+ snapshot2->root()->PaintAllReachable();
+ for (int i = 0; i < added_entries.length(); ++i) {
+ HeapEntry* entry = added_entries[i];
+ if (entry->painted_reachable())
+ diff->AddAddedEntry(entry);
+ }
+ return diff;
+}
+
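Aside: Compare above is a textbook merge of two id-sorted entry lists: ids present only in the first snapshot are deletions, ids present only in the second are additions. The same loop in isolation, over plain vectors instead of V8 lists:

    #include <cstdint>
    #include <vector>

    // Both inputs must be sorted ascending by id (cf. GetSortedEntriesList).
    void DiffSortedIds(const std::vector<uint64_t>& s1,
                       const std::vector<uint64_t>& s2,
                       std::vector<uint64_t>* deleted,
                       std::vector<uint64_t>* added) {
      size_t i = 0, j = 0;
      while (i < s1.size() && j < s2.size()) {
        if (s1[i] == s2[j]) { ++i; ++j; }              // survived both snapshots
        else if (s1[i] < s2[j]) deleted->push_back(s1[i++]);
        else added->push_back(s2[j++]);
      }
      while (i < s1.size()) deleted->push_back(s1[i++]);
      while (j < s2.size()) added->push_back(s2[j++]);
    }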
} } // namespace v8::internal
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/src/profile-generator.h b/src/profile-generator.h
index 4e423c8d..cd2bd0b6 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -74,7 +74,7 @@ class StringsStorage {
reinterpret_cast<char*>(key2)) == 0;
}
- // String::Hash -> const char*
+  // Mapping from String::Hash values to const char* strings.
HashMap names_;
DISALLOW_COPY_AND_ASSIGN(StringsStorage);
@@ -156,7 +156,7 @@ class ProfileNode {
CodeEntry* entry_;
unsigned total_ticks_;
unsigned self_ticks_;
- // CodeEntry* -> ProfileNode*
+  // Mapping from CodeEntry* to ProfileNode*.
HashMap children_;
List<ProfileNode*> children_list_;
@@ -312,11 +312,12 @@ class CpuProfilesCollection {
}
StringsStorage function_and_resource_names_;
- // args_count -> char*
+ // Mapping from args_count (int) to char* strings.
List<char*> args_count_names_;
List<CodeEntry*> code_entries_;
List<List<CpuProfile*>* > profiles_by_token_;
- // uid -> index
+ // Mapping from profiles' uids to indexes in the second nested list
+ // of profiles_by_token_.
HashMap profiles_uids_;
// Accessed by VM thread and profile generator thread.
@@ -482,6 +483,7 @@ class HeapEntry {
visited_(false),
type_(INTERNAL),
name_(""),
+ id_(0),
next_auto_index_(0),
self_size_(0),
security_token_id_(TokenEnumerator::kNoSecurityToken),
@@ -494,12 +496,14 @@ class HeapEntry {
HeapEntry(HeapSnapshot* snapshot,
Type type,
const char* name,
+ uint64_t id,
int self_size,
int security_token_id)
: snapshot_(snapshot),
visited_(false),
type_(type),
name_(name),
+ id_(id),
next_auto_index_(1),
self_size_(self_size),
security_token_id_(security_token_id),
@@ -514,6 +518,7 @@ class HeapEntry {
bool visited() const { return visited_; }
Type type() const { return type_; }
const char* name() const { return name_; }
+ uint64_t id() const { return id_; }
int self_size() const { return self_size_; }
int security_token_id() const { return security_token_id_; }
bool painted_reachable() { return painted_ == kPaintReachable; }
@@ -524,9 +529,13 @@ class HeapEntry {
const List<HeapGraphEdge*>* retainers() const { return &retainers_; }
const List<HeapGraphPath*>* GetRetainingPaths();
+ template<class Visitor>
+ void ApplyAndPaintAllReachable(Visitor* visitor);
+
void ClearPaint() { painted_ = kUnpainted; }
void CutEdges();
void MarkAsVisited() { visited_ = true; }
+ void PaintAllReachable();
void PaintReachable() {
ASSERT(painted_ == kUnpainted);
painted_ = kPaintReachable;
@@ -537,6 +546,7 @@ class HeapEntry {
void SetInternalReference(const char* name, HeapEntry* entry);
void SetPropertyReference(const char* name, HeapEntry* entry);
void SetAutoIndexReference(HeapEntry* entry);
+ void SetUnidirAutoIndexReference(HeapEntry* entry);
int TotalSize();
int NonSharedTotalSize();
@@ -557,6 +567,7 @@ class HeapEntry {
bool visited_;
Type type_;
const char* name_;
+ uint64_t id_;
int next_auto_index_;
int self_size_;
int security_token_id_;
@@ -607,6 +618,8 @@ class HeapEntriesMap {
HeapEntry* Map(HeapObject* object);
void Pair(HeapObject* object, HeapEntry* entry);
+ uint32_t capacity() { return entries_.capacity(); }
+
private:
INLINE(uint32_t Hash(HeapObject* object)) {
return static_cast<uint32_t>(reinterpret_cast<intptr_t>(object));
@@ -627,6 +640,7 @@ class HeapEntriesMap {
class HeapSnapshotsCollection;
+class HeapSnapshotsDiff;
// HeapSnapshot represents a single heap snapshot. It is stored in
// HeapSnapshotsCollection, which is also a factory for
@@ -638,6 +652,7 @@ class HeapSnapshot {
HeapSnapshot(HeapSnapshotsCollection* collection,
const char* title,
unsigned uid);
+ ~HeapSnapshot();
void ClearPaint();
void CutObjectsFromForeignSecurityContexts();
HeapEntry* GetEntry(Object* object);
@@ -655,6 +670,8 @@ class HeapSnapshot {
HeapEntry* root() { return &root_; }
template<class Visitor>
void IterateEntries(Visitor* visitor) { entries_.Apply(visitor); }
+ List<HeapEntry*>* GetSortedEntriesList();
+ HeapSnapshotsDiff* CompareWith(HeapSnapshot* snapshot);
void Print(int max_depth);
@@ -679,19 +696,108 @@ class HeapSnapshot {
const char* title_;
unsigned uid_;
HeapEntry root_;
- // HeapObject* -> HeapEntry*
+ // Mapping from HeapObject* pointers to HeapEntry* pointers.
HeapEntriesMap entries_;
+ // Entries sorted by id.
+ List<HeapEntry*>* sorted_entries_;
DISALLOW_COPY_AND_ASSIGN(HeapSnapshot);
};
+class HeapObjectsMap {
+ public:
+ HeapObjectsMap();
+ ~HeapObjectsMap();
+
+ void SnapshotGenerationFinished();
+ uint64_t FindObject(Address addr);
+ void MoveObject(Address from, Address to);
+
+ private:
+ struct EntryInfo {
+ explicit EntryInfo(uint64_t id) : id(id), accessed(true) { }
+ EntryInfo(uint64_t id, bool accessed) : id(id), accessed(accessed) { }
+ uint64_t id;
+ bool accessed;
+ };
+
+ void AddEntry(Address addr, uint64_t id);
+ uint64_t FindEntry(Address addr);
+ void RemoveDeadEntries();
+
+ static bool AddressesMatch(void* key1, void* key2) {
+ return key1 == key2;
+ }
+
+ static uint32_t AddressHash(Address addr) {
+ return static_cast<int32_t>(reinterpret_cast<intptr_t>(addr));
+ }
+
+ bool initial_fill_mode_;
+ uint64_t next_id_;
+ HashMap entries_map_;
+ List<EntryInfo>* entries_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapObjectsMap);
+};
+
+
+class HeapSnapshotsDiff {
+ public:
+ HeapSnapshotsDiff(HeapSnapshot* snapshot1, HeapSnapshot* snapshot2)
+ : snapshot1_(snapshot1),
+ snapshot2_(snapshot2),
+ additions_root_(new HeapEntry(snapshot2)),
+ deletions_root_(new HeapEntry(snapshot1)) { }
+
+ ~HeapSnapshotsDiff() {
+ delete deletions_root_;
+ delete additions_root_;
+ }
+
+ void AddAddedEntry(HeapEntry* entry) {
+ additions_root_->SetUnidirAutoIndexReference(entry);
+ }
+
+ void AddDeletedEntry(HeapEntry* entry) {
+ deletions_root_->SetUnidirAutoIndexReference(entry);
+ }
+
+ const HeapEntry* additions_root() const { return additions_root_; }
+ const HeapEntry* deletions_root() const { return deletions_root_; }
+
+ private:
+ HeapSnapshot* snapshot1_;
+ HeapSnapshot* snapshot2_;
+ HeapEntry* additions_root_;
+ HeapEntry* deletions_root_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsDiff);
+};
+
+
+class HeapSnapshotsComparator {
+ public:
+ HeapSnapshotsComparator() { }
+ ~HeapSnapshotsComparator();
+ HeapSnapshotsDiff* Compare(HeapSnapshot* snapshot1, HeapSnapshot* snapshot2);
+ private:
+ List<HeapSnapshotsDiff*> diffs_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsComparator);
+};
+
+
class HeapSnapshotsCollection {
public:
HeapSnapshotsCollection();
~HeapSnapshotsCollection();
+ bool is_tracking_objects() { return is_tracking_objects_; }
+
HeapSnapshot* NewSnapshot(const char* name, unsigned uid);
+ void SnapshotGenerationFinished() { ids_.SnapshotGenerationFinished(); }
List<HeapSnapshot*>* snapshots() { return &snapshots_; }
HeapSnapshot* GetSnapshot(unsigned uid);
@@ -699,16 +805,26 @@ class HeapSnapshotsCollection {
TokenEnumerator* token_enumerator() { return token_enumerator_; }
+ uint64_t GetObjectId(Address addr) { return ids_.FindObject(addr); }
+ void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); }
+
+ HeapSnapshotsDiff* CompareSnapshots(HeapSnapshot* snapshot1,
+ HeapSnapshot* snapshot2);
+
private:
INLINE(static bool HeapSnapshotsMatch(void* key1, void* key2)) {
return key1 == key2;
}
+ bool is_tracking_objects_; // Whether tracking object moves is needed.
List<HeapSnapshot*> snapshots_;
- // uid -> HeapSnapshot*
+ // Mapping from snapshots' uids to HeapSnapshot* pointers.
HashMap snapshots_uids_;
StringsStorage names_;
TokenEnumerator* token_enumerator_;
+ // Mapping from HeapObject addresses to objects' uids.
+ HeapObjectsMap ids_;
+ HeapSnapshotsComparator comparator_;
DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsCollection);
};
diff --git a/src/runtime.cc b/src/runtime.cc
index 4a0fe7ae..fa881eb2 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -1606,9 +1606,10 @@ static Object* Runtime_SetCode(Arguments args) {
if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
return Failure::Exception();
}
- // Set the code, formal parameter count, and the length of the target
- // function.
+ // Set the code, scope info, formal parameter count,
+ // and the length of the target function.
target->set_code(fun->code());
+ target->shared()->set_scope_info(shared->scope_info());
target->shared()->set_length(shared->length());
target->shared()->set_formal_parameter_count(
shared->formal_parameter_count());
@@ -5608,6 +5609,14 @@ static Object* Runtime_NumberUnaryMinus(Arguments args) {
}
+static Object* Runtime_NumberAlloc(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 0);
+
+ return Heap::NumberFromDouble(9876543210.0);
+}
+
+
static Object* Runtime_NumberDiv(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@@ -6860,7 +6869,7 @@ static Object* Runtime_NewContext(Arguments args) {
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSFunction, function, args[0]);
- int length = ScopeInfo<>::NumberOfContextSlots(function->code());
+ int length = function->shared()->scope_info()->NumberOfContextSlots();
Object* result = Heap::AllocateFunctionContext(length, function);
if (result->IsFailure()) return result;
@@ -8480,9 +8489,10 @@ static Object* Runtime_GetFrameDetails(Arguments args) {
// Check for constructor frame.
bool constructor = it.frame()->IsConstructor();
- // Get code and read scope info from it for local variable information.
- Handle<Code> code(it.frame()->code());
- ScopeInfo<> info(*code);
+ // Get scope info and read from it for local variable information.
+ Handle<JSFunction> function(JSFunction::cast(it.frame()->function()));
+ Handle<SerializedScopeInfo> scope_info(function->shared()->scope_info());
+ ScopeInfo<> info(*scope_info);
// Get the context.
Handle<Context> context(Context::cast(it.frame()->context()));
@@ -8510,8 +8520,7 @@ static Object* Runtime_GetFrameDetails(Arguments args) {
}
ASSERT(context->is_function_context());
locals->set(i * 2 + 1,
- context->get(ScopeInfo<>::ContextSlotIndex(*code, *name,
- NULL)));
+ context->get(scope_info->ContextSlotIndex(*name, NULL)));
}
}
@@ -8651,18 +8660,17 @@ static Object* Runtime_GetFrameDetails(Arguments args) {
// Copy all the context locals into an object used to materialize a scope.
-static void CopyContextLocalsToScopeObject(Handle<Code> code,
- ScopeInfo<>& scope_info,
- Handle<Context> context,
- Handle<JSObject> scope_object) {
+static void CopyContextLocalsToScopeObject(
+ Handle<SerializedScopeInfo> serialized_scope_info,
+ ScopeInfo<>& scope_info,
+ Handle<Context> context,
+ Handle<JSObject> scope_object) {
// Fill all context locals to the context extension.
for (int i = Context::MIN_CONTEXT_SLOTS;
i < scope_info.number_of_context_slots();
i++) {
- int context_index =
- ScopeInfo<>::ContextSlotIndex(*code,
- *scope_info.context_slot_name(i),
- NULL);
+ int context_index = serialized_scope_info->ContextSlotIndex(
+ *scope_info.context_slot_name(i), NULL);
// Don't include the arguments shadow (.arguments) context variable.
if (*scope_info.context_slot_name(i) != Heap::arguments_shadow_symbol()) {
@@ -8678,8 +8686,9 @@ static void CopyContextLocalsToScopeObject(Handle<Code> code,
// frame.
static Handle<JSObject> MaterializeLocalScope(JavaScriptFrame* frame) {
Handle<JSFunction> function(JSFunction::cast(frame->function()));
- Handle<Code> code(function->code());
- ScopeInfo<> scope_info(*code);
+ Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<SerializedScopeInfo> serialized_scope_info(shared->scope_info());
+ ScopeInfo<> scope_info(*serialized_scope_info);
// Allocate and initialize a JSObject with all the arguments, stack locals
// heap locals and extension properties of the debugged function.
@@ -8702,7 +8711,7 @@ static Handle<JSObject> MaterializeLocalScope(JavaScriptFrame* frame) {
// Third fill all context locals.
Handle<Context> frame_context(Context::cast(frame->context()));
Handle<Context> function_context(frame_context->fcontext());
- CopyContextLocalsToScopeObject(code, scope_info,
+ CopyContextLocalsToScopeObject(serialized_scope_info, scope_info,
function_context, local_scope);
// Finally copy any properties from the function context extension. This will
@@ -8729,8 +8738,9 @@ static Handle<JSObject> MaterializeLocalScope(JavaScriptFrame* frame) {
static Handle<JSObject> MaterializeClosure(Handle<Context> context) {
ASSERT(context->is_function_context());
- Handle<Code> code(context->closure()->code());
- ScopeInfo<> scope_info(*code);
+ Handle<SharedFunctionInfo> shared(context->closure()->shared());
+ Handle<SerializedScopeInfo> serialized_scope_info(shared->scope_info());
+ ScopeInfo<> scope_info(*serialized_scope_info);
  // Allocate and initialize a JSObject with all the contents of this function
// closure.
@@ -8738,9 +8748,8 @@ static Handle<JSObject> MaterializeClosure(Handle<Context> context) {
// Check whether the arguments shadow object exists.
int arguments_shadow_index =
- ScopeInfo<>::ContextSlotIndex(*code,
- Heap::arguments_shadow_symbol(),
- NULL);
+ shared->scope_info()->ContextSlotIndex(Heap::arguments_shadow_symbol(),
+ NULL);
if (arguments_shadow_index >= 0) {
// In this case all the arguments are available in the arguments shadow
// object.
@@ -8754,7 +8763,8 @@ static Handle<JSObject> MaterializeClosure(Handle<Context> context) {
}
// Fill all context locals to the context extension.
- CopyContextLocalsToScopeObject(code, scope_info, context, closure_scope);
+ CopyContextLocalsToScopeObject(serialized_scope_info, scope_info,
+ context, closure_scope);
// Finally copy any properties from the function context extension. This will
// be variables introduced by eval.
@@ -8803,8 +8813,8 @@ class ScopeIterator {
// created for evaluating top level code and it is not a real local scope.
// Checking for the existence of .result seems fragile, but the scope info
// saved with the code object does not otherwise have that information.
- Handle<Code> code(function_->code());
- int index = ScopeInfo<>::StackSlotIndex(*code, Heap::result_symbol());
+ int index = function_->shared()->scope_info()->
+ StackSlotIndex(Heap::result_symbol());
at_local_ = index < 0;
} else if (context_->is_function_context()) {
at_local_ = true;
@@ -8918,8 +8928,7 @@ class ScopeIterator {
case ScopeIterator::ScopeTypeLocal: {
PrintF("Local:\n");
- Handle<Code> code(function_->code());
- ScopeInfo<> scope_info(*code);
+ ScopeInfo<> scope_info(function_->shared()->scope_info());
scope_info.Print();
if (!CurrentContext().is_null()) {
CurrentContext()->Print();
@@ -9443,7 +9452,7 @@ static Handle<Context> CopyWithContextChain(Handle<Context> context_chain,
// Runtime_DebugEvaluate.
static Handle<Object> GetArgumentsObject(JavaScriptFrame* frame,
Handle<JSFunction> function,
- Handle<Code> code,
+ Handle<SerializedScopeInfo> scope_info,
const ScopeInfo<>* sinfo,
Handle<Context> function_context) {
// Try to find the value of 'arguments' to pass as parameter. If it is not
@@ -9451,15 +9460,14 @@ static Handle<Object> GetArgumentsObject(JavaScriptFrame* frame,
// does not support eval) then create an 'arguments' object.
int index;
if (sinfo->number_of_stack_slots() > 0) {
- index = ScopeInfo<>::StackSlotIndex(*code, Heap::arguments_symbol());
+ index = scope_info->StackSlotIndex(Heap::arguments_symbol());
if (index != -1) {
return Handle<Object>(frame->GetExpression(index));
}
}
if (sinfo->number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
- index = ScopeInfo<>::ContextSlotIndex(*code, Heap::arguments_symbol(),
- NULL);
+ index = scope_info->ContextSlotIndex(Heap::arguments_symbol(), NULL);
if (index != -1) {
return Handle<Object>(function_context->get(index));
}
@@ -9510,8 +9518,8 @@ static Object* Runtime_DebugEvaluate(Arguments args) {
JavaScriptFrameIterator it(id);
JavaScriptFrame* frame = it.frame();
Handle<JSFunction> function(JSFunction::cast(frame->function()));
- Handle<Code> code(function->code());
- ScopeInfo<> sinfo(*code);
+ Handle<SerializedScopeInfo> scope_info(function->shared()->scope_info());
+ ScopeInfo<> sinfo(*scope_info);
// Traverse the saved contexts chain to find the active context for the
// selected frame.
@@ -9533,7 +9541,7 @@ static Object* Runtime_DebugEvaluate(Arguments args) {
Factory::NewFunction(Factory::empty_string(), Factory::undefined_value());
go_between->set_context(function->context());
#ifdef DEBUG
- ScopeInfo<> go_between_sinfo(go_between->shared()->code());
+ ScopeInfo<> go_between_sinfo(go_between->shared()->scope_info());
ASSERT(go_between_sinfo.number_of_parameters() == 0);
ASSERT(go_between_sinfo.number_of_context_slots() == 0);
#endif
@@ -9579,8 +9587,8 @@ static Object* Runtime_DebugEvaluate(Arguments args) {
&has_pending_exception);
if (has_pending_exception) return Failure::Exception();
- Handle<Object> arguments = GetArgumentsObject(frame, function, code, &sinfo,
- function_context);
+ Handle<Object> arguments = GetArgumentsObject(frame, function, scope_info,
+ &sinfo, function_context);
// Invoke the evaluation function and return the result.
const int argc = 2;
diff --git a/src/runtime.h b/src/runtime.h
index 5719fc89..1c9bb080 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -115,6 +115,7 @@ namespace internal {
F(NumberDiv, 2, 1) \
F(NumberMod, 2, 1) \
F(NumberUnaryMinus, 1, 1) \
+ F(NumberAlloc, 0, 1) \
\
F(StringAdd, 2, 1) \
F(StringBuilderConcat, 3, 1) \
diff --git a/src/runtime.js b/src/runtime.js
index ab6e3e9d..aca19457 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -80,7 +80,7 @@ function EQUALS(y) {
} else {
// x is not a number, boolean, null or undefined.
if (y == null) return 1; // not equal
- if (IS_SPEC_OBJECT_OR_NULL(y)) {
+ if (IS_SPEC_OBJECT(y)) {
return %_ObjectEquals(x, y) ? 0 : 1;
}
@@ -345,7 +345,7 @@ function DELETE(key) {
// ECMA-262, section 11.8.7, page 54.
function IN(x) {
- if (x == null || !IS_SPEC_OBJECT_OR_NULL(x)) {
+ if (!IS_SPEC_OBJECT(x)) {
throw %MakeTypeError('invalid_in_operator_use', [this, x]);
}
return %_IsNonNegativeSmi(this) ? %HasElement(x, this) : %HasProperty(x, %ToString(this));
@@ -363,13 +363,13 @@ function INSTANCE_OF(F) {
}
// If V is not an object, return false.
- if (IS_NULL(V) || !IS_SPEC_OBJECT_OR_NULL(V)) {
+ if (!IS_SPEC_OBJECT(V)) {
return 1;
}
// Get the prototype of F; if it is not an object, throw an error.
var O = F.prototype;
- if (IS_NULL(O) || !IS_SPEC_OBJECT_OR_NULL(O)) {
+ if (!IS_SPEC_OBJECT(O)) {
throw %MakeTypeError('instanceof_nonobject_proto', [O]);
}
@@ -431,7 +431,7 @@ function APPLY_PREPARE(args) {
// big enough, but sanity check the value to avoid overflow when
// multiplying with pointer size.
if (length > 0x800000) {
- throw %MakeRangeError('apply_overflow', [length]);
+ throw %MakeRangeError('stack_overflow', []);
}
if (!IS_FUNCTION(this)) {
@@ -450,7 +450,7 @@ function APPLY_PREPARE(args) {
function APPLY_OVERFLOW(length) {
- throw %MakeRangeError('apply_overflow', [length]);
+ throw %MakeRangeError('stack_overflow', []);
}
@@ -483,8 +483,7 @@ function ToPrimitive(x, hint) {
// Fast case check.
if (IS_STRING(x)) return x;
// Normal behavior.
- if (!IS_SPEC_OBJECT_OR_NULL(x)) return x;
- if (x == null) return x; // check for null, undefined
+ if (!IS_SPEC_OBJECT(x)) return x;
if (hint == NO_HINT) hint = (IS_DATE(x)) ? STRING_HINT : NUMBER_HINT;
return (hint == NUMBER_HINT) ? %DefaultNumber(x) : %DefaultString(x);
}
@@ -583,13 +582,10 @@ function SameValue(x, y) {
// Returns if the given x is a primitive value - not an object or a
// function.
function IsPrimitive(x) {
- if (!IS_SPEC_OBJECT_OR_NULL(x)) {
- return true;
- } else {
- // Even though the type of null is "object", null is still
- // considered a primitive value.
- return IS_NULL(x);
- }
+ // Even though the type of null is "object", null is still
+ // considered a primitive value. IS_SPEC_OBJECT handles this correctly
+ // (i.e., it will return false if x is null).
+ return !IS_SPEC_OBJECT(x);
}
diff --git a/src/scanner.cc b/src/scanner.cc
index 286f515b..ca0e2d86 100755
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -341,8 +341,7 @@ Scanner::Scanner(ParserMode pre)
void Scanner::Initialize(Handle<String> source,
ParserLanguage language) {
- safe_string_input_buffer_.Reset(source.location());
- Init(source, &safe_string_input_buffer_, 0, source->length(), language);
+ Init(source, NULL, 0, source->length(), language);
}
@@ -357,9 +356,7 @@ void Scanner::Initialize(Handle<String> source,
int start_position,
int end_position,
ParserLanguage language) {
- safe_string_input_buffer_.Reset(source.location());
- Init(source, &safe_string_input_buffer_,
- start_position, end_position, language);
+ Init(source, NULL, start_position, end_position, language);
}
@@ -368,6 +365,10 @@ void Scanner::Init(Handle<String> source,
int start_position,
int end_position,
ParserLanguage language) {
+ // Either initialize the scanner from a character stream or from a
+ // string.
+ ASSERT(source.is_null() || stream == NULL);
+
// Initialize the source buffer.
if (!source.is_null() && StringShape(*source).IsExternalTwoByte()) {
two_byte_string_buffer_.Initialize(
@@ -382,6 +383,10 @@ void Scanner::Init(Handle<String> source,
end_position);
source_ = &ascii_string_buffer_;
} else {
+ if (!source.is_null()) {
+ safe_string_input_buffer_.Reset(source.location());
+ stream = &safe_string_input_buffer_;
+ }
char_stream_buffer_.Initialize(source,
stream,
start_position,
diff --git a/src/scopeinfo.cc b/src/scopeinfo.cc
index 2091ca72..7e7f1525 100644
--- a/src/scopeinfo.cc
+++ b/src/scopeinfo.cc
@@ -148,7 +148,7 @@ ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
}
-// Encoding format in the Code object:
+// Encoding format in a FixedArray object:
//
// - function name
//
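Aside: with the sentinels gone (ReadList/WriteList below no longer emit or expect a NULL terminator), the serialized layout is fully length-prefixed. Schematically, as produced by Serialize() in this patch:

//   index 0           : function name (symbol)
//   index 1           : calls-eval flag
//   index 2           : n, the number of context slots
//   next 2*n entries  : (symbol, Variable::Mode) pairs for context slots
//   next entry        : p, the number of parameters
//   next p entries    : parameter symbols
//   next entry        : s, the number of stack slots
//   next s entries    : stack slot symbols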
@@ -204,12 +204,6 @@ static inline Object** ReadSymbol(Object** p, Handle<String>* s) {
}
-static inline Object** ReadSentinel(Object** p) {
- ASSERT(*p == NULL);
- return p + 1;
-}
-
-
template <class Allocator>
static Object** ReadList(Object** p, List<Handle<String>, Allocator >* list) {
ASSERT(list->is_empty());
@@ -220,7 +214,7 @@ static Object** ReadList(Object** p, List<Handle<String>, Allocator >* list) {
p = ReadSymbol(p, &s);
list->Add(s);
}
- return ReadSentinel(p);
+ return p;
}
@@ -239,27 +233,27 @@ static Object** ReadList(Object** p,
list->Add(s);
modes->Add(static_cast<Variable::Mode>(m));
}
- return ReadSentinel(p);
+ return p;
}
template<class Allocator>
-ScopeInfo<Allocator>::ScopeInfo(Code* code)
+ScopeInfo<Allocator>::ScopeInfo(SerializedScopeInfo* data)
: function_name_(Factory::empty_symbol()),
parameters_(4),
stack_slots_(8),
context_slots_(8),
context_modes_(8) {
- if (code == NULL || code->sinfo_size() == 0) return;
-
- Object** p0 = &Memory::Object_at(code->sinfo_start());
- Object** p = p0;
- p = ReadSymbol(p, &function_name_);
- p = ReadBool(p, &calls_eval_);
- p = ReadList<Allocator>(p, &context_slots_, &context_modes_);
- p = ReadList<Allocator>(p, &parameters_);
- p = ReadList<Allocator>(p, &stack_slots_);
- ASSERT((p - p0) * kPointerSize == code->sinfo_size());
+ if (data->length() > 0) {
+ Object** p0 = data->data_start();
+ Object** p = p0;
+ p = ReadSymbol(p, &function_name_);
+ p = ReadBool(p, &calls_eval_);
+ p = ReadList<Allocator>(p, &context_slots_, &context_modes_);
+ p = ReadList<Allocator>(p, &parameters_);
+ p = ReadList<Allocator>(p, &stack_slots_);
+ ASSERT((p - p0) == FixedArray::cast(data)->length());
+ }
}
@@ -281,12 +275,6 @@ static inline Object** WriteSymbol(Object** p, Handle<String> s) {
}
-static inline Object** WriteSentinel(Object** p) {
- *p++ = NULL;
- return p;
-}
-
-
template <class Allocator>
static Object** WriteList(Object** p, List<Handle<String>, Allocator >* list) {
const int n = list->length();
@@ -294,7 +282,7 @@ static Object** WriteList(Object** p, List<Handle<String>, Allocator >* list) {
for (int i = 0; i < n; i++) {
p = WriteSymbol(p, list->at(i));
}
- return WriteSentinel(p);
+ return p;
}
@@ -308,73 +296,99 @@ static Object** WriteList(Object** p,
p = WriteSymbol(p, list->at(i));
p = WriteInt(p, modes->at(i));
}
- return WriteSentinel(p);
+ return p;
}
template<class Allocator>
-int ScopeInfo<Allocator>::Serialize(Code* code) {
- // function name, calls eval, length & sentinel for 3 tables:
- const int extra_slots = 1 + 1 + 2 * 3;
- int size = (extra_slots +
- context_slots_.length() * 2 +
- parameters_.length() +
- stack_slots_.length()) * kPointerSize;
-
- if (code != NULL) {
- CHECK(code->sinfo_size() == size);
- Object** p0 = &Memory::Object_at(code->sinfo_start());
- Object** p = p0;
- p = WriteSymbol(p, function_name_);
- p = WriteBool(p, calls_eval_);
- p = WriteList(p, &context_slots_, &context_modes_);
- p = WriteList(p, &parameters_);
- p = WriteList(p, &stack_slots_);
- ASSERT((p - p0) * kPointerSize == size);
- }
+Handle<SerializedScopeInfo> ScopeInfo<Allocator>::Serialize() {
+  // function name, calls eval, and a length for each of the 3 tables:
+ const int extra_slots = 1 + 1 + 3;
+ int length = extra_slots +
+ context_slots_.length() * 2 +
+ parameters_.length() +
+ stack_slots_.length();
+
+ Handle<SerializedScopeInfo> data(
+ SerializedScopeInfo::cast(*Factory::NewFixedArray(length, TENURED)));
+ AssertNoAllocation nogc;
+
+ Object** p0 = data->data_start();
+ Object** p = p0;
+ p = WriteSymbol(p, function_name_);
+ p = WriteBool(p, calls_eval_);
+ p = WriteList(p, &context_slots_, &context_modes_);
+ p = WriteList(p, &parameters_);
+ p = WriteList(p, &stack_slots_);
+ ASSERT((p - p0) == length);
- return size;
+ return data;
}
template<class Allocator>
-void ScopeInfo<Allocator>::IterateScopeInfo(Code* code, ObjectVisitor* v) {
- Object** start = &Memory::Object_at(code->sinfo_start());
- Object** end = &Memory::Object_at(code->sinfo_start() + code->sinfo_size());
- v->VisitPointers(start, end);
+Handle<String> ScopeInfo<Allocator>::LocalName(int i) const {
+ // A local variable can be allocated either on the stack or in the context.
+  // Variables allocated in the context are always preceded by
+  // Context::MIN_CONTEXT_SLOTS fixed slots in the context.
+ if (i < number_of_stack_slots()) {
+ return stack_slot_name(i);
+ } else {
+ return context_slot_name(i - number_of_stack_slots() +
+ Context::MIN_CONTEXT_SLOTS);
+ }
}
-static Object** ContextEntriesAddr(Code* code) {
- ASSERT(code->sinfo_size() > 0);
- // +2 for function name and calls eval:
- return &Memory::Object_at(code->sinfo_start()) + 2;
+template<class Allocator>
+int ScopeInfo<Allocator>::NumberOfLocals() const {
+ int number_of_locals = number_of_stack_slots();
+ if (number_of_context_slots() > 0) {
+ ASSERT(number_of_context_slots() >= Context::MIN_CONTEXT_SLOTS);
+ number_of_locals += number_of_context_slots() - Context::MIN_CONTEXT_SLOTS;
+ }
+ return number_of_locals;
}
-static Object** ParameterEntriesAddr(Code* code) {
- ASSERT(code->sinfo_size() > 0);
- Object** p = ContextEntriesAddr(code);
- int n; // number of context slots;
- p = ReadInt(p, &n);
- return p + n*2 + 1; // *2 for pairs, +1 for sentinel
+Handle<SerializedScopeInfo> SerializedScopeInfo::Create(Scope* scope) {
+ ScopeInfo<ZoneListAllocationPolicy> sinfo(scope);
+ return sinfo.Serialize();
}
-static Object** StackSlotEntriesAddr(Code* code) {
- ASSERT(code->sinfo_size() > 0);
- Object** p = ParameterEntriesAddr(code);
- int n; // number of parameter slots;
- p = ReadInt(p, &n);
- return p + n + 1; // +1 for sentinel
+SerializedScopeInfo* SerializedScopeInfo::Empty() {
+ return reinterpret_cast<SerializedScopeInfo*>(Heap::empty_fixed_array());
}
-template<class Allocator>
-bool ScopeInfo<Allocator>::CallsEval(Code* code) {
- if (code->sinfo_size() > 0) {
- // +1 for function name:
- Object** p = &Memory::Object_at(code->sinfo_start()) + 1;
+Object** SerializedScopeInfo::ContextEntriesAddr() {
+ ASSERT(length() > 0);
+ return data_start() + 2; // +2 for function name and calls eval.
+}
+
+
+Object** SerializedScopeInfo::ParameterEntriesAddr() {
+ ASSERT(length() > 0);
+ Object** p = ContextEntriesAddr();
+ int number_of_context_slots;
+ p = ReadInt(p, &number_of_context_slots);
+  return p + number_of_context_slots * 2;  // *2 for pairs
+}
+
+
+Object** SerializedScopeInfo::StackSlotEntriesAddr() {
+ ASSERT(length() > 0);
+ Object** p = ParameterEntriesAddr();
+ int number_of_parameter_slots;
+ p = ReadInt(p, &number_of_parameter_slots);
+ return p + number_of_parameter_slots;
+}
+
+
+bool SerializedScopeInfo::CallsEval() {
+ if (length() > 0) {
+ Object** p = data_start() + 1; // +1 for function name.
bool calls_eval;
p = ReadBool(p, &calls_eval);
return calls_eval;
@@ -383,53 +397,49 @@ bool ScopeInfo<Allocator>::CallsEval(Code* code) {
}
-template<class Allocator>
-int ScopeInfo<Allocator>::NumberOfStackSlots(Code* code) {
- if (code->sinfo_size() > 0) {
- Object** p = StackSlotEntriesAddr(code);
- int n; // number of stack slots;
- ReadInt(p, &n);
- return n;
+int SerializedScopeInfo::NumberOfStackSlots() {
+ if (length() > 0) {
+ Object** p = StackSlotEntriesAddr();
+ int number_of_stack_slots;
+ ReadInt(p, &number_of_stack_slots);
+ return number_of_stack_slots;
}
return 0;
}
-template<class Allocator>
-int ScopeInfo<Allocator>::NumberOfContextSlots(Code* code) {
- if (code->sinfo_size() > 0) {
- Object** p = ContextEntriesAddr(code);
- int n; // number of context slots;
- ReadInt(p, &n);
- return n + Context::MIN_CONTEXT_SLOTS;
+int SerializedScopeInfo::NumberOfContextSlots() {
+ if (length() > 0) {
+ Object** p = ContextEntriesAddr();
+ int number_of_context_slots;
+ ReadInt(p, &number_of_context_slots);
+ return number_of_context_slots + Context::MIN_CONTEXT_SLOTS;
}
return 0;
}
-template<class Allocator>
-bool ScopeInfo<Allocator>::HasHeapAllocatedLocals(Code* code) {
- if (code->sinfo_size() > 0) {
- Object** p = ContextEntriesAddr(code);
- int n; // number of context slots;
- ReadInt(p, &n);
- return n > 0;
+bool SerializedScopeInfo::HasHeapAllocatedLocals() {
+ if (length() > 0) {
+ Object** p = ContextEntriesAddr();
+ int number_of_context_slots;
+ ReadInt(p, &number_of_context_slots);
+ return number_of_context_slots > 0;
}
return false;
}
-template<class Allocator>
-int ScopeInfo<Allocator>::StackSlotIndex(Code* code, String* name) {
+int SerializedScopeInfo::StackSlotIndex(String* name) {
ASSERT(name->IsSymbol());
- if (code->sinfo_size() > 0) {
- // Loop below depends on the NULL sentinel after the stack slot names.
- ASSERT(NumberOfStackSlots(code) > 0 ||
- *(StackSlotEntriesAddr(code) + 1) == NULL);
- // slots start after length entry
- Object** p0 = StackSlotEntriesAddr(code) + 1;
+ if (length() > 0) {
+ // Slots start after length entry.
+ Object** p0 = StackSlotEntriesAddr();
+ int number_of_stack_slots;
+ p0 = ReadInt(p0, &number_of_stack_slots);
Object** p = p0;
- while (*p != NULL) {
+ Object** end = p0 + number_of_stack_slots;
+ while (p != end) {
if (*p == name) return static_cast<int>(p - p0);
p++;
}
@@ -437,24 +447,18 @@ int ScopeInfo<Allocator>::StackSlotIndex(Code* code, String* name) {
return -1;
}
-
-template<class Allocator>
-int ScopeInfo<Allocator>::ContextSlotIndex(Code* code,
- String* name,
- Variable::Mode* mode) {
+int SerializedScopeInfo::ContextSlotIndex(String* name, Variable::Mode* mode) {
ASSERT(name->IsSymbol());
- int result = ContextSlotCache::Lookup(code, name, mode);
+ int result = ContextSlotCache::Lookup(this, name, mode);
if (result != ContextSlotCache::kNotFound) return result;
- if (code->sinfo_size() > 0) {
- // Loop below depends on the NULL sentinel after the context slot names.
- ASSERT(NumberOfContextSlots(code) >= Context::MIN_CONTEXT_SLOTS ||
- *(ContextEntriesAddr(code) + 1) == NULL);
-
- // slots start after length entry
- Object** p0 = ContextEntriesAddr(code) + 1;
+ if (length() > 0) {
+ // Slots start after length entry.
+ Object** p0 = ContextEntriesAddr();
+ int number_of_context_slots;
+ p0 = ReadInt(p0, &number_of_context_slots);
Object** p = p0;
- // contexts may have no variable slots (in the presence of eval()).
- while (*p != NULL) {
+ Object** end = p0 + number_of_context_slots * 2;
+ while (p != end) {
if (*p == name) {
ASSERT(((p - p0) & 1) == 0);
int v;
@@ -462,21 +466,20 @@ int ScopeInfo<Allocator>::ContextSlotIndex(Code* code,
Variable::Mode mode_value = static_cast<Variable::Mode>(v);
if (mode != NULL) *mode = mode_value;
result = static_cast<int>((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS;
- ContextSlotCache::Update(code, name, mode_value, result);
+ ContextSlotCache::Update(this, name, mode_value, result);
return result;
}
p += 2;
}
}
- ContextSlotCache::Update(code, name, Variable::INTERNAL, -1);
+ ContextSlotCache::Update(this, name, Variable::INTERNAL, -1);
return -1;
}
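Aside: the pair layout explains the arithmetic above: entries are (name, mode) pairs, so the loop steps by two, halves the offset to recover the slot number, and biases by Context::MIN_CONTEXT_SLOTS (the fixed slots at the front of every context). The same scan in isolation (illustrative names, not V8 API):

    // 'entries' holds n (name, mode) pairs flattened as:
    //   name0, mode0, name1, mode1, ...
    // Returns the context slot index for 'name', or -1 if absent.
    int ContextSlotIndexSketch(void* const* entries, int n, const void* name,
                               int min_context_slots) {
      for (int i = 0; i < 2 * n; i += 2) {
        if (entries[i] == name) return i / 2 + min_context_slots;
      }
      return -1;
    }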
-template<class Allocator>
-int ScopeInfo<Allocator>::ParameterIndex(Code* code, String* name) {
+int SerializedScopeInfo::ParameterIndex(String* name) {
ASSERT(name->IsSymbol());
- if (code->sinfo_size() > 0) {
+ if (length() > 0) {
// We must read parameters from the end since for
// multiply declared parameters the value of the
// last declaration of that parameter is used
@@ -487,10 +490,10 @@ int ScopeInfo<Allocator>::ParameterIndex(Code* code, String* name) {
// once, with corresponding index. This requires a new
// implementation of the ScopeInfo code. See also other
// comments in this file regarding this.
- Object** p = ParameterEntriesAddr(code);
- int n; // number of parameters
- Object** p0 = ReadInt(p, &n);
- p = p0 + n;
+ Object** p = ParameterEntriesAddr();
+ int number_of_parameter_slots;
+ Object** p0 = ReadInt(p, &number_of_parameter_slots);
+ p = p0 + number_of_parameter_slots;
while (p > p0) {
p--;
if (*p == name) return static_cast<int>(p - p0);
@@ -500,64 +503,37 @@ int ScopeInfo<Allocator>::ParameterIndex(Code* code, String* name) {
}
-template<class Allocator>
-int ScopeInfo<Allocator>::FunctionContextSlotIndex(Code* code, String* name) {
+int SerializedScopeInfo::FunctionContextSlotIndex(String* name) {
ASSERT(name->IsSymbol());
- if (code->sinfo_size() > 0) {
- Object** p = &Memory::Object_at(code->sinfo_start());
+ if (length() > 0) {
+ Object** p = data_start();
if (*p == name) {
- p = ContextEntriesAddr(code);
- int n; // number of context slots
- ReadInt(p, &n);
- ASSERT(n != 0);
+ p = ContextEntriesAddr();
+ int number_of_context_slots;
+ ReadInt(p, &number_of_context_slots);
+ ASSERT(number_of_context_slots != 0);
// The function context slot is the last entry.
- return n + Context::MIN_CONTEXT_SLOTS - 1;
+ return number_of_context_slots + Context::MIN_CONTEXT_SLOTS - 1;
}
}
return -1;
}
-template<class Allocator>
-Handle<String> ScopeInfo<Allocator>::LocalName(int i) const {
- // A local variable can be allocated either on the stack or in the context.
- // For variables allocated in the context they are always preceded by the
- // number Context::MIN_CONTEXT_SLOTS number of fixed allocated slots in the
- // context.
- if (i < number_of_stack_slots()) {
- return stack_slot_name(i);
- } else {
- return context_slot_name(i - number_of_stack_slots() +
- Context::MIN_CONTEXT_SLOTS);
- }
-}
-
-
-template<class Allocator>
-int ScopeInfo<Allocator>::NumberOfLocals() const {
- int number_of_locals = number_of_stack_slots();
- if (number_of_context_slots() > 0) {
- ASSERT(number_of_context_slots() >= Context::MIN_CONTEXT_SLOTS);
- number_of_locals += number_of_context_slots() - Context::MIN_CONTEXT_SLOTS;
- }
- return number_of_locals;
-}
-
-
-int ContextSlotCache::Hash(Code* code, String* name) {
+int ContextSlotCache::Hash(Object* data, String* name) {
// Uses only lower 32 bits if pointers are larger.
uintptr_t addr_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(code)) >> 2;
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(data)) >> 2;
return static_cast<int>((addr_hash ^ name->Hash()) % kLength);
}
-int ContextSlotCache::Lookup(Code* code,
+int ContextSlotCache::Lookup(Object* data,
String* name,
Variable::Mode* mode) {
- int index = Hash(code, name);
+ int index = Hash(data, name);
Key& key = keys_[index];
- if ((key.code == code) && key.name->Equals(name)) {
+ if ((key.data == data) && key.name->Equals(name)) {
Value result(values_[index]);
if (mode != NULL) *mode = result.mode();
return result.index() + kNotFound;
@@ -566,28 +542,28 @@ int ContextSlotCache::Lookup(Code* code,
}
-void ContextSlotCache::Update(Code* code,
+void ContextSlotCache::Update(Object* data,
String* name,
Variable::Mode mode,
int slot_index) {
String* symbol;
ASSERT(slot_index > kNotFound);
if (Heap::LookupSymbolIfExists(name, &symbol)) {
- int index = Hash(code, symbol);
+ int index = Hash(data, symbol);
Key& key = keys_[index];
- key.code = code;
+ key.data = data;
key.name = symbol;
// Please note value only takes a uint as index.
values_[index] = Value(mode, slot_index - kNotFound).raw();
#ifdef DEBUG
- ValidateEntry(code, name, mode, slot_index);
+ ValidateEntry(data, name, mode, slot_index);
#endif
}
}
void ContextSlotCache::Clear() {
- for (int index = 0; index < kLength; index++) keys_[index].code = NULL;
+ for (int index = 0; index < kLength; index++) keys_[index].data = NULL;
}
@@ -599,15 +575,15 @@ uint32_t ContextSlotCache::values_[ContextSlotCache::kLength];
#ifdef DEBUG
-void ContextSlotCache::ValidateEntry(Code* code,
+void ContextSlotCache::ValidateEntry(Object* data,
String* name,
Variable::Mode mode,
int slot_index) {
String* symbol;
if (Heap::LookupSymbolIfExists(name, &symbol)) {
- int index = Hash(code, name);
+ int index = Hash(data, name);
Key& key = keys_[index];
- ASSERT(key.code == code);
+ ASSERT(key.data == data);
ASSERT(key.name->Equals(name));
Value result(values_[index]);
ASSERT(result.mode() == mode);
diff --git a/src/scopeinfo.h b/src/scopeinfo.h
index 9fb26d03..0fdab56d 100644
--- a/src/scopeinfo.h
+++ b/src/scopeinfo.h
@@ -37,7 +37,7 @@ namespace internal {
// Scope information represents information about a function's
// scopes (currently only one, because we don't do any inlining)
// and the allocation of the scope's variables. Scope information
-// is stored in a compressed form with Code objects and is used
+// is stored in a compressed form in FixedArray objects and is used
// at runtime (stack dumps, deoptimization, etc.).
//
// Historical note: In other VMs built by this team, ScopeInfo was
@@ -54,23 +54,11 @@ class ScopeInfo BASE_EMBEDDED {
// Create a ScopeInfo instance from a scope.
explicit ScopeInfo(Scope* scope);
- // Create a ScopeInfo instance from a Code object.
- explicit ScopeInfo(Code* code);
-
- // Write the ScopeInfo data into a Code object, and returns the
- // amount of space that was needed. If no Code object is provided
- // (NULL handle), Serialize() only returns the amount of space needed.
- //
- // This operations requires that the Code object has the correct amount
- // of space for the ScopeInfo data; otherwise the operation fails (fatal
- // error). Any existing scope info in the Code object is simply overwritten.
- int Serialize(Code* code);
-
- // Garbage collection support for scope info embedded in Code objects.
- // This code is in ScopeInfo because only here we should have to know
- // about the encoding.
- static void IterateScopeInfo(Code* code, ObjectVisitor* v);
+ // Create a ScopeInfo instance from SerializedScopeInfo.
+ explicit ScopeInfo(SerializedScopeInfo* data);
+ // Creates a SerializedScopeInfo holding the serialized scope info.
+ Handle<SerializedScopeInfo> Serialize();
// --------------------------------------------------------------------------
// Lookup
@@ -95,92 +83,97 @@ class ScopeInfo BASE_EMBEDDED {
int NumberOfLocals() const;
// --------------------------------------------------------------------------
- // The following functions provide quick access to scope info details
- // for runtime routines w/o the need to explicitly create a ScopeInfo
- // object.
- //
- // ScopeInfo is the only class which should have to know about the
- // encoding of it's information in a Code object, which is why these
- // functions are in this class.
+ // Debugging support
+
+#ifdef DEBUG
+ void Print();
+#endif
+
+ private:
+ Handle<String> function_name_;
+ bool calls_eval_;
+ List<Handle<String>, Allocator > parameters_;
+ List<Handle<String>, Allocator > stack_slots_;
+ List<Handle<String>, Allocator > context_slots_;
+ List<Variable::Mode, Allocator > context_modes_;
+};
+
+
+// This object provides quick access to scope info details for runtime
+// routines w/o the need to explicitly create a ScopeInfo object.
+class SerializedScopeInfo : public FixedArray {
+ public:
+
+ static SerializedScopeInfo* cast(Object* object) {
+ ASSERT(object->IsFixedArray());
+ return reinterpret_cast<SerializedScopeInfo*>(object);
+ }
  // Does this scope call eval?
- static bool CallsEval(Code* code);
+ bool CallsEval();
// Return the number of stack slots for code.
- static int NumberOfStackSlots(Code* code);
+ int NumberOfStackSlots();
// Return the number of context slots for code.
- static int NumberOfContextSlots(Code* code);
+ int NumberOfContextSlots();
  // Returns true if this has context slots besides MIN_CONTEXT_SLOTS.
- static bool HasHeapAllocatedLocals(Code* code);
+ bool HasHeapAllocatedLocals();
- // Lookup support for scope info embedded in Code objects. Returns
+  // Lookup support for serialized scope info. Returns
// the stack slot index for a given slot name if the slot is
// present; otherwise returns a value < 0. The name must be a symbol
// (canonicalized).
- static int StackSlotIndex(Code* code, String* name);
+ int StackSlotIndex(String* name);
- // Lookup support for scope info embedded in Code objects. Returns the
+ // Lookup support for serialized scope info. Returns the
// context slot index for a given slot name if the slot is present; otherwise
// returns a value < 0. The name must be a symbol (canonicalized).
// If the slot is present and mode != NULL, sets *mode to the corresponding
// mode for that variable.
- static int ContextSlotIndex(Code* code, String* name, Variable::Mode* mode);
+ int ContextSlotIndex(String* name, Variable::Mode* mode);
- // Lookup support for scope info embedded in Code objects. Returns the
+ // Lookup support for serialized scope info. Returns the
// parameter index for a given parameter name if the parameter is present;
// otherwise returns a value < 0. The name must be a symbol (canonicalized).
- static int ParameterIndex(Code* code, String* name);
+ int ParameterIndex(String* name);
- // Lookup support for scope info embedded in Code objects. Returns the
+ // Lookup support for serialized scope info. Returns the
// function context slot index if the function name is present (named
// function expressions, only), otherwise returns a value < 0. The name
// must be a symbol (canonicalized).
- static int FunctionContextSlotIndex(Code* code, String* name);
+ int FunctionContextSlotIndex(String* name);
- // --------------------------------------------------------------------------
- // Debugging support
+ static Handle<SerializedScopeInfo> Create(Scope* scope);
-#ifdef DEBUG
- void Print();
-#endif
+  // Returns the serialized scope info for an empty scope.
+ static SerializedScopeInfo* Empty();
private:
- Handle<String> function_name_;
- bool calls_eval_;
- List<Handle<String>, Allocator > parameters_;
- List<Handle<String>, Allocator > stack_slots_;
- List<Handle<String>, Allocator > context_slots_;
- List<Variable::Mode, Allocator > context_modes_;
-};
-class ZoneScopeInfo: public ScopeInfo<ZoneListAllocationPolicy> {
- public:
- // Create a ZoneScopeInfo instance from a scope.
- explicit ZoneScopeInfo(Scope* scope)
- : ScopeInfo<ZoneListAllocationPolicy>(scope) {}
+ inline Object** ContextEntriesAddr();
+
+ inline Object** ParameterEntriesAddr();
- // Create a ZoneScopeInfo instance from a Code object.
- explicit ZoneScopeInfo(Code* code)
- : ScopeInfo<ZoneListAllocationPolicy>(code) {}
+ inline Object** StackSlotEntriesAddr();
};
-// Cache for mapping (code, property name) into context slot index.
+// Cache for mapping (data, property name) into context slot index.
// The cache contains both positive and negative results.
// A slot index of -1 means the property is absent.
// Cleared at startup and prior to mark sweep collection.
class ContextSlotCache {
public:
- // Lookup context slot index for (code, name).
+ // Lookup context slot index for (data, name).
// If absent, kNotFound is returned.
- static int Lookup(Code* code,
+ static int Lookup(Object* data,
String* name,
Variable::Mode* mode);
// Update an element in the cache.
- static void Update(Code* code,
+ static void Update(Object* data,
String* name,
Variable::Mode mode,
int slot_index);
@@ -190,10 +183,10 @@ class ContextSlotCache {
static const int kNotFound = -2;
private:
- inline static int Hash(Code* code, String* name);
+ inline static int Hash(Object* data, String* name);
#ifdef DEBUG
- static void ValidateEntry(Code* code,
+ static void ValidateEntry(Object* data,
String* name,
Variable::Mode mode,
int slot_index);
@@ -201,7 +194,7 @@ class ContextSlotCache {
static const int kLength = 256;
struct Key {
- Code* code;
+ Object* data;
String* name;
};
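
The header comment above notes that the cache stores both positive and negative results: a slot index of -1 means "definitely absent", while kNotFound (-2) means "no cache entry at all". A minimal sketch of that three-way contract (hypothetical names, not V8's):

#include <cstdio>

static const int kNotFound = -2;  // no cache entry; fall back to slow path

// A cached value of -1 is a confirmed miss ("property absent") and is
// just as much a hit as a real slot index: the slow lookup is skipped.
static int CachedLookup(int cached, int (*slow_lookup)()) {
  if (cached != kNotFound) return cached;  // hit (possibly negative result)
  return slow_lookup();  // miss; the caller would Update() the cache here
}

static int SlowLookup() { return -1; }  // pretend the slot is absent

int main() {
  std::printf("cold lookup:   %d\n", CachedLookup(kNotFound, SlowLookup));
  std::printf("cached absent: %d\n", CachedLookup(-1, SlowLookup));
  return 0;
}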
diff --git a/src/serialize.cc b/src/serialize.cc
index a6a516a7..0e283f46 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -360,6 +360,7 @@ void ExternalReferenceTable::PopulateTable() {
UNCLASSIFIED,
5,
"StackGuard::address_of_real_jslimit()");
+#ifndef V8_INTERPRETED_REGEXP
Add(ExternalReference::address_of_regexp_stack_limit().address(),
UNCLASSIFIED,
6,
@@ -376,6 +377,7 @@ void ExternalReferenceTable::PopulateTable() {
UNCLASSIFIED,
9,
"OffsetsVector::static_offsets_vector");
+#endif // V8_INTERPRETED_REGEXP
Add(ExternalReference::new_space_start().address(),
UNCLASSIFIED,
10,
@@ -673,6 +675,14 @@ void Deserializer::ReadObject(int space_number,
LOG(SnapshotPositionEvent(address, source_->position()));
}
ReadChunk(current, limit, space_number, address);
+
+ if (space == Heap::map_space()) {
+ ASSERT(size == Map::kSize);
+ HeapObject* obj = HeapObject::FromAddress(address);
+ Map* map = reinterpret_cast<Map*>(obj);
+ map->set_scavenger(Heap::GetScavenger(map->instance_type(),
+ map->instance_size()));
+ }
}
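
The new deserializer code re-derives each Map's scavenger from its instance type and size because a raw function pointer cannot survive snapshotting. A self-contained sketch of the underlying pattern, choosing the copy routine once per map rather than switching per object (illustrative types; Heap::GetScavenger's actual selection logic lives in heap.cc):

#include <cstdio>

struct HeapObject;
typedef void (*Scavenger)(HeapObject* obj);

static void ScavengeFixedSize(HeapObject*) { std::puts("fixed-size copy"); }
static void ScavengeVariableSize(HeapObject*) { std::puts("sized copy"); }

// Selected once when a map is created or deserialized, so the GC's hot
// loop performs a single indirect call instead of a per-object switch.
static Scavenger GetScavenger(bool has_fixed_instance_size) {
  return has_fixed_instance_size ? ScavengeFixedSize : ScavengeVariableSize;
}

struct Map {
  Scavenger scavenger;
};

int main() {
  Map map;
  map.scavenger = GetScavenger(true);  // the fixup ReadObject performs
  map.scavenger(nullptr);
  return 0;
}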
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index a654a086..bc29d06a 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -1186,7 +1186,7 @@ Object* StubCompiler::GetCodeWithFlags(Code::Flags flags, const char* name) {
// Create code object in the heap.
CodeDesc desc;
masm_.GetCode(&desc);
- Object* result = Heap::CreateCode(desc, NULL, flags, masm_.CodeObject());
+ Object* result = Heap::CreateCode(desc, flags, masm_.CodeObject());
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs && !result->IsFailure()) {
Code::cast(result)->Disassemble(name);
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 856904a4..8c00ee83 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -429,23 +429,23 @@ class StubCompiler BASE_EMBEDDED {
Register object_reg,
JSObject* holder,
Register holder_reg,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
String* name,
- Label* miss,
- Register extra = no_reg) {
- return CheckPrototypes(object, object_reg, holder, holder_reg, scratch,
- name, kInvalidProtoDepth, miss, extra);
+ Label* miss) {
+ return CheckPrototypes(object, object_reg, holder, holder_reg, scratch1,
+ scratch2, name, kInvalidProtoDepth, miss);
}
Register CheckPrototypes(JSObject* object,
Register object_reg,
JSObject* holder,
Register holder_reg,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
String* name,
int save_at_depth,
- Label* miss,
- Register extra = no_reg);
+ Label* miss);
protected:
Object* GetCodeWithFlags(Code::Flags flags, const char* name);
@@ -459,6 +459,7 @@ class StubCompiler BASE_EMBEDDED {
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
int index,
String* name,
Label* miss);
@@ -469,6 +470,7 @@ class StubCompiler BASE_EMBEDDED {
Register name_reg,
Register scratch1,
Register scratch2,
+ Register scratch3,
AccessorInfo* callback,
String* name,
Label* miss,
@@ -479,6 +481,7 @@ class StubCompiler BASE_EMBEDDED {
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
Object* value,
String* name,
Label* miss);
@@ -490,6 +493,7 @@ class StubCompiler BASE_EMBEDDED {
Register name_reg,
Register scratch1,
Register scratch2,
+ Register scratch3,
String* name,
Label* miss);
diff --git a/src/top.cc b/src/top.cc
index 516ec674..2887b766 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -44,6 +44,11 @@ Mutex* Top::break_access_ = OS::CreateMutex();
NoAllocationStringAllocator* preallocated_message_space = NULL;
+bool capture_stack_trace_for_uncaught_exceptions = false;
+int stack_trace_for_uncaught_exceptions_frame_limit = 0;
+StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options =
+ StackTrace::kOverview;
+
Address top_addresses[] = {
#define C(name) reinterpret_cast<Address>(Top::name()),
TOP_ADDRESS_LIST(C)
@@ -365,9 +370,8 @@ Handle<String> Top::StackTraceString() {
}
-Local<StackTrace> Top::CaptureCurrentStackTrace(
+Handle<JSArray> Top::CaptureCurrentStackTrace(
int frame_limit, StackTrace::StackTraceOptions options) {
- v8::HandleScope scope;
// Ensure no negative values.
int limit = Max(frame_limit, 0);
Handle<JSArray> stack_trace = Factory::NewJSArray(frame_limit);
@@ -443,7 +447,7 @@ Local<StackTrace> Top::CaptureCurrentStackTrace(
}
stack_trace->set_length(Smi::FromInt(frames_seen));
- return scope.Close(Utils::StackTraceToLocal(stack_trace));
+ return stack_trace;
}
@@ -681,10 +685,7 @@ Failure* Top::StackOverflow() {
// TODO(1240995): To avoid having to call JavaScript code to compute
// the message for stack overflow exceptions which is very likely to
// double fault with another stack overflow exception, we use a
- // precomputed message. This is somewhat problematic in that it
- // doesn't use ReportUncaughtException to determine the location
- // from where the exception occurred. It should probably be
- // reworked.
+ // precomputed message.
DoThrow(*exception, NULL, kStackOverflowMessage);
return Failure::Exception();
}
@@ -778,25 +779,6 @@ void Top::ComputeLocation(MessageLocation* target) {
}
-void Top::ReportUncaughtException(Handle<Object> exception,
- MessageLocation* location,
- Handle<String> stack_trace) {
- Handle<Object> message;
- if (!Bootstrapper::IsActive()) {
- // It's not safe to try to make message objects while the bootstrapper
- // is active since the infrastructure may not have been properly
- // initialized.
- message =
- MessageHandler::MakeMessageObject("uncaught_exception",
- location,
- HandleVector<Object>(&exception, 1),
- stack_trace);
- }
- // Report the uncaught exception.
- MessageHandler::ReportMessage(location, message);
-}
-
-
bool Top::ShouldReturnException(bool* is_caught_externally,
bool catchable_by_javascript) {
// Find the top-most try-catch handler.
@@ -869,8 +851,15 @@ void Top::DoThrow(Object* exception,
// may not have been properly initialized.
Handle<String> stack_trace;
if (FLAG_trace_exception) stack_trace = StackTraceString();
+ Handle<JSArray> stack_trace_object;
+ if (report_exception && capture_stack_trace_for_uncaught_exceptions) {
+ stack_trace_object = Top::CaptureCurrentStackTrace(
+ stack_trace_for_uncaught_exceptions_frame_limit,
+ stack_trace_for_uncaught_exceptions_options);
+ }
message_obj = MessageHandler::MakeMessageObject("uncaught_exception",
- location, HandleVector<Object>(&exception_handle, 1), stack_trace);
+ location, HandleVector<Object>(&exception_handle, 1), stack_trace,
+ stack_trace_object);
}
}
@@ -997,6 +986,16 @@ bool Top::OptionalRescheduleException(bool is_bottom_call) {
}
+void Top::SetCaptureStackTraceForUncaughtExceptions(
+ bool capture,
+ int frame_limit,
+ StackTrace::StackTraceOptions options) {
+ capture_stack_trace_for_uncaught_exceptions = capture;
+ stack_trace_for_uncaught_exceptions_frame_limit = frame_limit;
+ stack_trace_for_uncaught_exceptions_options = options;
+}
+
+
bool Top::is_out_of_memory() {
if (has_pending_exception()) {
Object* e = pending_exception();
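
The new Top::SetCaptureStackTraceForUncaughtExceptions state feeds DoThrow above, which attaches a captured JSArray of frames to the uncaught-exception message. On the public side this surfaces (per the api.cc entry in the diffstat) as a static setter on v8::V8; a hedged embedder sketch, assuming the 2.3-era signatures:

#include <v8.h>

int main() {
  // Attach an overview-level, ten-frame stack trace to every message
  // produced for an uncaught exception. Signature assumed from this
  // revision's api.cc; later V8 releases moved it onto v8::Isolate.
  v8::V8::SetCaptureStackTraceForUncaughtExceptions(
      true, 10, v8::StackTrace::kOverview);
  // A handler registered via v8::V8::AddMessageListener can then read
  // the frames back through v8::Message::GetStackTrace().
  return 0;
}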
diff --git a/src/top.h b/src/top.h
index 4a76a7f8..87333931 100644
--- a/src/top.h
+++ b/src/top.h
@@ -227,6 +227,11 @@ class Top {
(try_catch_handler() == thread_local_.catcher_);
}
+ static void SetCaptureStackTraceForUncaughtExceptions(
+ bool capture,
+ int frame_limit,
+ StackTrace::StackTraceOptions options);
+
// Tells whether the current context has experienced an out of memory
// exception.
static bool is_out_of_memory();
@@ -266,7 +271,7 @@ class Top {
static void PrintStack(StringStream* accumulator);
static void PrintStack();
static Handle<String> StackTraceString();
- static Local<StackTrace> CaptureCurrentStackTrace(
+ static Handle<JSArray> CaptureCurrentStackTrace(
int frame_limit,
StackTrace::StackTraceOptions options);
@@ -302,9 +307,6 @@ class Top {
const char* message);
static bool ShouldReturnException(bool* is_caught_externally,
bool catchable_by_javascript);
- static void ReportUncaughtException(Handle<Object> exception,
- MessageLocation* location,
- Handle<String> stack_trace);
// Attempts to compute the current source location, storing the
// result in the target out parameter.
diff --git a/src/v8natives.js b/src/v8natives.js
index 487faabc..198cecc3 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -225,16 +225,14 @@ function ObjectHasOwnProperty(V) {
// ECMA-262 - 15.2.4.6
function ObjectIsPrototypeOf(V) {
- if (!IS_SPEC_OBJECT_OR_NULL(V) && !IS_UNDETECTABLE(V)) return false;
+ if (!IS_SPEC_OBJECT(V)) return false;
return %IsInPrototypeChain(this, V);
}
// ECMA-262 - 15.2.4.6
function ObjectPropertyIsEnumerable(V) {
- if (this == null) return false;
- if (!IS_SPEC_OBJECT_OR_NULL(this)) return false;
- return %IsPropertyEnumerable(this, ToString(V));
+ return %IsPropertyEnumerable(ToObject(this), ToString(V));
}
@@ -279,8 +277,7 @@ function ObjectLookupSetter(name) {
function ObjectKeys(obj) {
- if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
- !IS_UNDETECTABLE(obj))
+ if (!IS_SPEC_OBJECT(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["keys"]);
return %LocalKeys(obj);
}
@@ -329,7 +326,7 @@ function FromPropertyDescriptor(desc) {
// ES5 8.10.5.
function ToPropertyDescriptor(obj) {
- if (!IS_SPEC_OBJECT_OR_NULL(obj)) {
+ if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("property_desc_object", [obj]);
}
var desc = new PropertyDescriptor();
@@ -626,8 +623,7 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
// ES5 section 15.2.3.2.
function ObjectGetPrototypeOf(obj) {
- if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
- !IS_UNDETECTABLE(obj))
+ if (!IS_SPEC_OBJECT(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["getPrototypeOf"]);
return obj.__proto__;
}
@@ -635,8 +631,7 @@ function ObjectGetPrototypeOf(obj) {
// ES5 section 15.2.3.3
function ObjectGetOwnPropertyDescriptor(obj, p) {
- if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
- !IS_UNDETECTABLE(obj))
+ if (!IS_SPEC_OBJECT(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyDescriptor"]);
var desc = GetOwnProperty(obj, p);
return FromPropertyDescriptor(desc);
@@ -645,8 +640,7 @@ function ObjectGetOwnPropertyDescriptor(obj, p) {
// ES5 section 15.2.3.4.
function ObjectGetOwnPropertyNames(obj) {
- if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
- !IS_UNDETECTABLE(obj))
+ if (!IS_SPEC_OBJECT(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyNames"]);
// Find all the indexed properties.
@@ -698,7 +692,7 @@ function ObjectGetOwnPropertyNames(obj) {
// ES5 section 15.2.3.5.
function ObjectCreate(proto, properties) {
- if (!IS_SPEC_OBJECT_OR_NULL(proto)) {
+ if (!IS_SPEC_OBJECT(proto) && proto !== null) {
throw MakeTypeError("proto_object_or_null", [proto]);
}
var obj = new $Object();
@@ -710,8 +704,7 @@ function ObjectCreate(proto, properties) {
// ES5 section 15.2.3.6.
function ObjectDefineProperty(obj, p, attributes) {
- if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
- !IS_UNDETECTABLE(obj)) {
+ if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("obj_ctor_property_non_object", ["defineProperty"]);
}
var name = ToString(p);
@@ -723,8 +716,7 @@ function ObjectDefineProperty(obj, p, attributes) {
// ES5 section 15.2.3.7.
function ObjectDefineProperties(obj, properties) {
- if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
- !IS_UNDETECTABLE(obj))
+ if (!IS_SPEC_OBJECT(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["defineProperties"]);
var props = ToObject(properties);
var key_values = [];
@@ -745,10 +737,42 @@ function ObjectDefineProperties(obj, properties) {
}
+// ES5 section 15.2.3.8.
+function ObjectSeal(obj) {
+ if (!IS_SPEC_OBJECT(obj)) {
+ throw MakeTypeError("obj_ctor_property_non_object", ["seal"]);
+ }
+ var names = ObjectGetOwnPropertyNames(obj);
+ for (var key in names) {
+ var name = names[key];
+ var desc = GetOwnProperty(obj, name);
+ if (desc.isConfigurable()) desc.setConfigurable(false);
+ DefineOwnProperty(obj, name, desc, true);
+ }
+ ObjectPreventExtension(obj);
+}
+
+
+// ES5 section 15.2.3.9.
+function ObjectFreeze(obj) {
+ if (!IS_SPEC_OBJECT(obj)) {
+ throw MakeTypeError("obj_ctor_property_non_object", ["freeze"]);
+ }
+ var names = ObjectGetOwnPropertyNames(obj);
+ for (var key in names) {
+ var name = names[key];
+ var desc = GetOwnProperty(obj, name);
+ if (IsDataDescriptor(desc)) desc.setWritable(false);
+ if (desc.isConfigurable()) desc.setConfigurable(false);
+ DefineOwnProperty(obj, name, desc, true);
+ }
+ ObjectPreventExtension(obj);
+}
+
+
// ES5 section 15.2.3.10
function ObjectPreventExtension(obj) {
- if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
- !IS_UNDETECTABLE(obj)) {
+ if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("obj_ctor_property_non_object", ["preventExtension"]);
}
%PreventExtensions(obj);
@@ -756,10 +780,46 @@ function ObjectPreventExtension(obj) {
}
+// ES5 section 15.2.3.11
+function ObjectIsSealed(obj) {
+ if (!IS_SPEC_OBJECT(obj)) {
+ throw MakeTypeError("obj_ctor_property_non_object", ["isSealed"]);
+ }
+ var names = ObjectGetOwnPropertyNames(obj);
+ for (var key in names) {
+ var name = names[key];
+ var desc = GetOwnProperty(obj, name);
+ if (desc.isConfigurable()) return false;
+ }
+ if (!ObjectIsExtensible(obj)) {
+ return true;
+ }
+ return false;
+}
+
+
+// ES5 section 15.2.3.12
+function ObjectIsFrozen(obj) {
+ if (!IS_SPEC_OBJECT(obj)) {
+ throw MakeTypeError("obj_ctor_property_non_object", ["isFrozen"]);
+ }
+ var names = ObjectGetOwnPropertyNames(obj);
+ for (var key in names) {
+ var name = names[key];
+ var desc = GetOwnProperty(obj, name);
+ if (IsDataDescriptor(desc) && desc.isWritable()) return false;
+ if (desc.isConfigurable()) return false;
+ }
+ if (!ObjectIsExtensible(obj)) {
+ return true;
+ }
+ return false;
+}
+
+
// ES5 section 15.2.3.13
function ObjectIsExtensible(obj) {
- if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
- !IS_UNDETECTABLE(obj)) {
+ if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("obj_ctor_property_non_object", ["preventExtension"]);
}
return %IsExtensible(obj);
@@ -799,11 +859,15 @@ function SetupObject() {
"create", ObjectCreate,
"defineProperty", ObjectDefineProperty,
"defineProperties", ObjectDefineProperties,
+ "freeze", ObjectFreeze,
"getPrototypeOf", ObjectGetPrototypeOf,
"getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor,
"getOwnPropertyNames", ObjectGetOwnPropertyNames,
"isExtensible", ObjectIsExtensible,
- "preventExtensions", ObjectPreventExtension
+ "isFrozen", ObjectIsFrozen,
+ "isSealed", ObjectIsSealed,
+ "preventExtensions", ObjectPreventExtension,
+ "seal", ObjectSeal
));
}
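
The JavaScript above implements the ES5 integrity operations in terms of property descriptors: seal clears [[Configurable]] on every own property, freeze additionally clears [[Writable]] on data properties, and both then revoke extensibility. A hedged embedder-side check of the resulting semantics, assuming the 2.3-era (pre-isolate) C++ API:

#include <v8.h>
#include <cstdio>

int main() {
  v8::HandleScope handle_scope;
  v8::Persistent<v8::Context> context = v8::Context::New();
  v8::Context::Scope context_scope(context);

  // After Object.freeze, both assignments are silently ignored in
  // non-strict code: o.x is non-writable and o is non-extensible.
  v8::Handle<v8::Value> result = v8::Script::Compile(v8::String::New(
      "var o = { x: 1 };"
      "Object.freeze(o);"
      "o.x = 2;"
      "o.y = 3;"
      "Object.isFrozen(o) && o.x === 1 && !('y' in o)"))->Run();
  std::printf("frozen as expected: %s\n",
              result->BooleanValue() ? "yes" : "no");

  context.Dispose();
  return 0;
}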
diff --git a/src/version.cc b/src/version.cc
index d930c8de..bf5feb14 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,8 +33,8 @@
// NOTE these macros are used by the SCons build script so their names
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
-#define MINOR_VERSION 2
-#define BUILD_NUMBER 23
+#define MINOR_VERSION 3
+#define BUILD_NUMBER 1
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
diff --git a/src/vm-state-inl.h b/src/vm-state-inl.h
index 4df2cfda..aa4cedbb 100644
--- a/src/vm-state-inl.h
+++ b/src/vm-state-inl.h
@@ -74,8 +74,10 @@ VMState::VMState(StateTag state)
if (state == EXTERNAL) state = OTHER;
#endif
state_ = state;
- previous_ = current_state_; // Save the previous state.
- current_state_ = this; // Install the new state.
+ // Save the previous state.
+ previous_ = reinterpret_cast<VMState*>(current_state_);
+ // Install the new state.
+ OS::ReleaseStore(&current_state_, reinterpret_cast<AtomicWord>(this));
#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log_state_changes) {
@@ -103,7 +105,8 @@ VMState::VMState(StateTag state)
VMState::~VMState() {
if (disabled_) return;
- current_state_ = previous_; // Return to the previous state.
+ // Return to the previous state.
+ OS::ReleaseStore(&current_state_, reinterpret_cast<AtomicWord>(previous_));
#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log_state_changes) {
diff --git a/src/vm-state.cc b/src/vm-state.cc
index 3859efb8..6bd737df 100644
--- a/src/vm-state.cc
+++ b/src/vm-state.cc
@@ -33,7 +33,7 @@ namespace v8 {
namespace internal {
#ifdef ENABLE_VMSTATE_TRACKING
-VMState* VMState::current_state_ = NULL;
+AtomicWord VMState::current_state_ = 0;
#endif
} } // namespace v8::internal
diff --git a/src/vm-state.h b/src/vm-state.h
index 241df4c9..080eb8de 100644
--- a/src/vm-state.h
+++ b/src/vm-state.h
@@ -44,15 +44,17 @@ class VMState BASE_EMBEDDED {
// Used for debug asserts.
static bool is_outermost_external() {
- return current_state_ == NULL;
+ return current_state_ == 0;
}
static StateTag current_state() {
- return current_state_ ? current_state_->state() : EXTERNAL;
+ VMState* state = reinterpret_cast<VMState*>(current_state_);
+ return state ? state->state() : EXTERNAL;
}
static Address external_callback() {
- return current_state_ ? current_state_->external_callback_ : NULL;
+ VMState* state = reinterpret_cast<VMState*>(current_state_);
+ return state ? state->external_callback_ : NULL;
}
private:
@@ -62,7 +64,7 @@ class VMState BASE_EMBEDDED {
Address external_callback_;
// A stack of VM states.
- static VMState* current_state_;
+ static AtomicWord current_state_;
#else
public:
explicit VMState(StateTag state) {}
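
The VMState change replaces a plain pointer with an AtomicWord updated through OS::ReleaseStore, so a sampling profiler thread that reads current_state_ never observes the new node before its fields are written. A self-contained sketch of the same publish-with-release pattern in modern C++ (std::atomic standing in for V8's OS::ReleaseStore):

#include <atomic>
#include <cstdio>

struct State {
  int tag;
  State* previous;
};

static std::atomic<State*> current_state{nullptr};

static void Push(State* s, int tag) {
  s->tag = tag;
  s->previous = current_state.load(std::memory_order_relaxed);
  // Release: the field writes above become visible before the pointer does.
  current_state.store(s, std::memory_order_release);
}

static void Pop(State* s) {
  current_state.store(s->previous, std::memory_order_release);
}

int main() {
  State a, b;
  Push(&a, 1);
  Push(&b, 2);
  std::printf("top tag = %d\n",
              current_state.load(std::memory_order_acquire)->tag);
  Pop(&b);
  Pop(&a);
  return 0;
}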
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index c19e2ba1..c66666a7 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -119,7 +119,6 @@ void CpuFeatures::Probe() {
CodeDesc desc;
assm.GetCode(&desc);
Object* code = Heap::CreateCode(desc,
- NULL,
Code::ComputeFlags(Code::STUB),
Handle<Object>());
if (!code->IsCode()) return;
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 7e04c20e..b41fb74c 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -140,149 +140,6 @@ CodeGenState::~CodeGenState() {
// -------------------------------------------------------------------------
-// Deferred code objects
-//
-// These subclasses of DeferredCode add pieces of code to the end of generated
-// code. They are branched to from the generated code, and
-// keep some slower code out of the main body of the generated code.
-// Many of them call a code stub or a runtime function.
-
-class DeferredInlineSmiAdd: public DeferredCode {
- public:
- DeferredInlineSmiAdd(Register dst,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiAdd");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-// The result of value + src is in dst. It either overflowed or was not
-// smi tagged. Undo the speculative addition and call the appropriate
-// specialized stub for add. The result is left in dst.
-class DeferredInlineSmiAddReversed: public DeferredCode {
- public:
- DeferredInlineSmiAddReversed(Register dst,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiAddReversed");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-class DeferredInlineSmiSub: public DeferredCode {
- public:
- DeferredInlineSmiSub(Register dst,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiSub");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-// Call the appropriate binary operation stub to compute src op value
-// and leave the result in dst.
-class DeferredInlineSmiOperation: public DeferredCode {
- public:
- DeferredInlineSmiOperation(Token::Value op,
- Register dst,
- Register src,
- Smi* value,
- OverwriteMode overwrite_mode)
- : op_(op),
- dst_(dst),
- src_(src),
- value_(value),
- overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiOperation");
- }
-
- virtual void Generate();
-
- private:
- Token::Value op_;
- Register dst_;
- Register src_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-// Call the appropriate binary operation stub to compute value op src
-// and leave the result in dst.
-class DeferredInlineSmiOperationReversed: public DeferredCode {
- public:
- DeferredInlineSmiOperationReversed(Token::Value op,
- Register dst,
- Smi* value,
- Register src,
- OverwriteMode overwrite_mode)
- : op_(op),
- dst_(dst),
- value_(value),
- src_(src),
- overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiOperationReversed");
- }
-
- virtual void Generate();
-
- private:
- Token::Value op_;
- Register dst_;
- Smi* value_;
- Register src_;
- OverwriteMode overwrite_mode_;
-};
-
-
-class FloatingPointHelper : public AllStatic {
- public:
- // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
- // If the operands are not both numbers, jump to not_numbers.
- // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
- // NumberOperands assumes both are smis or heap numbers.
- static void LoadSSE2SmiOperands(MacroAssembler* masm);
- static void LoadSSE2NumberOperands(MacroAssembler* masm);
- static void LoadSSE2UnknownOperands(MacroAssembler* masm,
- Label* not_numbers);
-
- // Takes the operands in rdx and rax and loads them as integers in rax
- // and rcx.
- static void LoadAsIntegers(MacroAssembler* masm,
- Label* operand_conversion_failure,
- Register heap_number_map);
- // As above, but we know the operands to be numbers. In that case,
- // conversion can't fail.
- static void LoadNumbersAsIntegers(MacroAssembler* masm);
-};
-
-
-// -----------------------------------------------------------------------------
// CodeGenerator implementation.
CodeGenerator::CodeGenerator(MacroAssembler* masm)
@@ -298,20 +155,11 @@ CodeGenerator::CodeGenerator(MacroAssembler* masm)
}
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals. The inevitable call
- // will sync frame elements to memory anyway, so we do it eagerly to
- // allow us to push the arguments directly into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
-
- __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
- frame_->EmitPush(rsi); // The context is the first argument.
- frame_->EmitPush(kScratchRegister);
- frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
- Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
- // Return value is ignored.
-}
-
+// Calling conventions:
+// rbp: caller's frame pointer
+// rsp: stack pointer
+// rdi: called JS function
+// rsi: callee's context
void CodeGenerator::Generate(CompilationInfo* info) {
// Record the position for debugging purposes.
@@ -329,7 +177,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// Adjust for function-level loop nesting.
ASSERT_EQ(0, loop_nesting_);
- loop_nesting_ += info->loop_nesting();
+ loop_nesting_ = info->loop_nesting();
JumpTarget::set_compiling_deferred_code(false);
@@ -543,209 +391,2105 @@ void CodeGenerator::Generate(CompilationInfo* info) {
allocator_ = NULL;
}
-void CodeGenerator::GenerateReturnSequence(Result* return_value) {
- // The return value is a live (but not currently reference counted)
- // reference to rax. This is safe because the current frame does not
- // contain a reference to rax (it is prepared for the return by spilling
- // all registers).
- if (FLAG_trace) {
- frame_->Push(return_value);
- *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
+
+Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
+ // Currently, this assertion will fail if we try to assign to
+ // a constant variable that is constant because it is read-only
+ // (such as the variable referring to a named function expression).
+ // We need to implement assignments to read-only variables.
+ // Ideally, we should do this during AST generation (by converting
+ // such assignments into expression statements); however, in general
+ // we may not be able to make the decision until past AST generation,
+ // that is when the entire program is known.
+ ASSERT(slot != NULL);
+ int index = slot->index();
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ return frame_->ParameterAt(index);
+
+ case Slot::LOCAL:
+ return frame_->LocalAt(index);
+
+ case Slot::CONTEXT: {
+ // Follow the context chain if necessary.
+ ASSERT(!tmp.is(rsi)); // do not overwrite context register
+ Register context = rsi;
+ int chain_length = scope()->ContextChainLength(slot->var()->scope());
+ for (int i = 0; i < chain_length; i++) {
+ // Load the closure.
+ // (All contexts, even 'with' contexts, have a closure,
+ // and it is the same for all contexts inside a function.
+ // There is no need to go to the function context first.)
+ __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
+ // Load the function context (which is the incoming, outer context).
+ __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
+ context = tmp;
+ }
+ // We may have a 'with' context now. Get the function context.
+    // (In fact this mov may never be needed, since the scope analysis
+ // may not permit a direct context access in this case and thus we are
+ // always at a function context. However it is safe to dereference be-
+ // cause the function context of a function context is itself. Before
+ // deleting this mov we should try to create a counter-example first,
+ // though...)
+ __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
+ return ContextOperand(tmp, index);
+ }
+
+ default:
+ UNREACHABLE();
+ return Operand(rsp, 0);
}
- return_value->ToRegister(rax);
+}
- // Add a label for checking the size of the code used for returning.
+
+Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
+ Result tmp,
+ JumpTarget* slow) {
+ ASSERT(slot->type() == Slot::CONTEXT);
+ ASSERT(tmp.is_register());
+ Register context = rsi;
+
+ for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_eval()) {
+ // Check that extension is NULL.
+ __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
+ Immediate(0));
+ slow->Branch(not_equal, not_taken);
+ }
+ __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
+ __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
+ context = tmp.reg();
+ }
+ }
+ // Check that last extension is NULL.
+ __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
+ slow->Branch(not_equal, not_taken);
+ __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
+ return ContextOperand(tmp.reg(), slot->index());
+}
+
+
+// Emit code to load the value of an expression to the top of the
+// frame. If the expression is boolean-valued it may be compiled (or
+// partially compiled) into control flow to the control destination.
+// If force_control is true, control flow is forced.
+void CodeGenerator::LoadCondition(Expression* expr,
+ ControlDestination* dest,
+ bool force_control) {
+ ASSERT(!in_spilled_code());
+ int original_height = frame_->height();
+
+ { CodeGenState new_state(this, dest);
+ Visit(expr);
+
+ // If we hit a stack overflow, we may not have actually visited
+ // the expression. In that case, we ensure that we have a
+ // valid-looking frame state because we will continue to generate
+ // code as we unwind the C++ stack.
+ //
+ // It's possible to have both a stack overflow and a valid frame
+    // state (e.g., a subexpression overflowed, visiting it returned
+ // with a dummied frame state, and visiting this expression
+ // returned with a normal-looking state).
+ if (HasStackOverflow() &&
+ !dest->is_used() &&
+ frame_->height() == original_height) {
+ dest->Goto(true);
+ }
+ }
+
+ if (force_control && !dest->is_used()) {
+ // Convert the TOS value into flow to the control destination.
+ ToBoolean(dest);
+ }
+
+ ASSERT(!(force_control && !dest->is_used()));
+ ASSERT(dest->is_used() || frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::LoadAndSpill(Expression* expression) {
+ ASSERT(in_spilled_code());
+ set_in_spilled_code(false);
+ Load(expression);
+ frame_->SpillAll();
+ set_in_spilled_code(true);
+}
+
+
+void CodeGenerator::Load(Expression* expr) {
#ifdef DEBUG
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
+ int original_height = frame_->height();
#endif
+ ASSERT(!in_spilled_code());
+ JumpTarget true_target;
+ JumpTarget false_target;
+ ControlDestination dest(&true_target, &false_target, true);
+ LoadCondition(expr, &dest, false);
- // Leave the frame and return popping the arguments and the
- // receiver.
- frame_->Exit();
- masm_->ret((scope()->num_parameters() + 1) * kPointerSize);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Add padding that will be overwritten by a debugger breakpoint.
- // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
- // with length 7 (3 + 1 + 3).
- const int kPadding = Assembler::kJSReturnSequenceLength - 7;
- for (int i = 0; i < kPadding; ++i) {
- masm_->int3();
+ if (dest.false_was_fall_through()) {
+ // The false target was just bound.
+ JumpTarget loaded;
+ frame_->Push(Factory::false_value());
+ // There may be dangling jumps to the true target.
+ if (true_target.is_linked()) {
+ loaded.Jump();
+ true_target.Bind();
+ frame_->Push(Factory::true_value());
+ loaded.Bind();
+ }
+
+ } else if (dest.is_used()) {
+ // There is true, and possibly false, control flow (with true as
+ // the fall through).
+ JumpTarget loaded;
+ frame_->Push(Factory::true_value());
+ if (false_target.is_linked()) {
+ loaded.Jump();
+ false_target.Bind();
+ frame_->Push(Factory::false_value());
+ loaded.Bind();
+ }
+
+ } else {
+ // We have a valid value on top of the frame, but we still may
+ // have dangling jumps to the true and false targets from nested
+    // subexpressions (e.g., the left subexpressions of the
+ // short-circuited boolean operators).
+ ASSERT(has_valid_frame());
+ if (true_target.is_linked() || false_target.is_linked()) {
+ JumpTarget loaded;
+ loaded.Jump(); // Don't lose the current TOS.
+ if (true_target.is_linked()) {
+ true_target.Bind();
+ frame_->Push(Factory::true_value());
+ if (false_target.is_linked()) {
+ loaded.Jump();
+ }
+ }
+ if (false_target.is_linked()) {
+ false_target.Bind();
+ frame_->Push(Factory::false_value());
+ }
+ loaded.Bind();
+ }
}
- // Check that the size of the code used for returning matches what is
- // expected by the debugger.
- ASSERT_EQ(Assembler::kJSReturnSequenceLength,
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
- DeleteFrame();
+
+ ASSERT(has_valid_frame());
+ ASSERT(frame_->height() == original_height + 1);
}
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() {
- return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
- && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0))
- && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0))
- && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0))
- && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0))
- && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0))
- && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
- && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
- && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
- && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
+void CodeGenerator::LoadGlobal() {
+ if (in_spilled_code()) {
+ frame_->EmitPush(GlobalObject());
+ } else {
+ Result temp = allocator_->Allocate();
+ __ movq(temp.reg(), GlobalObject());
+ frame_->Push(&temp);
+ }
}
-#endif
-class DeferredReferenceGetKeyedValue: public DeferredCode {
+void CodeGenerator::LoadGlobalReceiver() {
+ Result temp = allocator_->Allocate();
+ Register reg = temp.reg();
+ __ movq(reg, GlobalObject());
+ __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
+ frame_->Push(&temp);
+}
+
+
+void CodeGenerator::LoadTypeofExpression(Expression* expr) {
+ // Special handling of identifiers as subexpressions of typeof.
+ Variable* variable = expr->AsVariableProxy()->AsVariable();
+ if (variable != NULL && !variable->is_this() && variable->is_global()) {
+ // For a global variable we build the property reference
+ // <global>.<variable> and perform a (regular non-contextual) property
+ // load to make sure we do not get reference errors.
+ Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
+ Literal key(variable->name());
+ Property property(&global, &key, RelocInfo::kNoPosition);
+ Reference ref(this, &property);
+ ref.GetValue();
+ } else if (variable != NULL && variable->slot() != NULL) {
+ // For a variable that rewrites to a slot, we signal it is the immediate
+ // subexpression of a typeof.
+ LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
+ } else {
+ // Anything else can be handled normally.
+ Load(expr);
+ }
+}
+
+
+ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
+ if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
+ ASSERT(scope()->arguments_shadow() != NULL);
+ // We don't want to do lazy arguments allocation for functions that
+  // have heap-allocated contexts, because it interferes with the
+ // uninitialized const tracking in the context objects.
+ return (scope()->num_heap_slots() > 0)
+ ? EAGER_ARGUMENTS_ALLOCATION
+ : LAZY_ARGUMENTS_ALLOCATION;
+}
+
+
+Result CodeGenerator::StoreArgumentsObject(bool initial) {
+ ArgumentsAllocationMode mode = ArgumentsMode();
+ ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
+
+ Comment cmnt(masm_, "[ store arguments object");
+ if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
+ // When using lazy arguments allocation, we store the hole value
+ // as a sentinel indicating that the arguments object hasn't been
+ // allocated yet.
+ frame_->Push(Factory::the_hole_value());
+ } else {
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ frame_->PushFunction();
+ frame_->PushReceiverSlotAddress();
+ frame_->Push(Smi::FromInt(scope()->num_parameters()));
+ Result result = frame_->CallStub(&stub, 3);
+ frame_->Push(&result);
+ }
+
+ Variable* arguments = scope()->arguments()->var();
+ Variable* shadow = scope()->arguments_shadow()->var();
+ ASSERT(arguments != NULL && arguments->slot() != NULL);
+ ASSERT(shadow != NULL && shadow->slot() != NULL);
+ JumpTarget done;
+ bool skip_arguments = false;
+ if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
+ // We have to skip storing into the arguments slot if it has
+    // already been written to. This can happen if a function
+ // has a local variable named 'arguments'.
+ LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
+ Result probe = frame_->Pop();
+ if (probe.is_constant()) {
+ // We have to skip updating the arguments object if it has
+ // been assigned a proper value.
+ skip_arguments = !probe.handle()->IsTheHole();
+ } else {
+ __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
+ probe.Unuse();
+ done.Branch(not_equal);
+ }
+ }
+ if (!skip_arguments) {
+ StoreToSlot(arguments->slot(), NOT_CONST_INIT);
+ if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
+ }
+ StoreToSlot(shadow->slot(), NOT_CONST_INIT);
+ return frame_->Pop();
+}
+
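
StoreArgumentsObject above avoids allocating an arguments object until one is observed, by storing the hole value as a sentinel and checking for it on later stores. A self-contained sketch of the sentinel-guarded lazy initialization it relies on (illustrative types, no V8 heap involved):

#include <cstdint>
#include <cstdio>

struct ArgumentsObject { int length; };

// A distinguished address plays the role of the hole value: "this slot
// has not been given a real arguments object yet".
static ArgumentsObject* const kHole =
    reinterpret_cast<ArgumentsObject*>(static_cast<uintptr_t>(-1));

static ArgumentsObject* slot = kHole;  // the lazily filled variable slot

static ArgumentsObject* GetArguments() {
  if (slot == kHole) {
    static ArgumentsObject real = { 3 };  // allocate on first real use
    slot = &real;                         // overwrite the sentinel once
  }
  return slot;
}

int main() {
  std::printf("length = %d\n", GetArguments()->length);
  return 0;
}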
+//------------------------------------------------------------------------------
+// CodeGenerator implementation of variables, lookups, and stores.
+
+Reference::Reference(CodeGenerator* cgen,
+ Expression* expression,
+ bool persist_after_get)
+ : cgen_(cgen),
+ expression_(expression),
+ type_(ILLEGAL),
+ persist_after_get_(persist_after_get) {
+ cgen->LoadReference(this);
+}
+
+
+Reference::~Reference() {
+ ASSERT(is_unloaded() || is_illegal());
+}
+
+
+void CodeGenerator::LoadReference(Reference* ref) {
+ // References are loaded from both spilled and unspilled code. Set the
+ // state to unspilled to allow that (and explicitly spill after
+ // construction at the construction sites).
+ bool was_in_spilled_code = in_spilled_code_;
+ in_spilled_code_ = false;
+
+ Comment cmnt(masm_, "[ LoadReference");
+ Expression* e = ref->expression();
+ Property* property = e->AsProperty();
+ Variable* var = e->AsVariableProxy()->AsVariable();
+
+ if (property != NULL) {
+ // The expression is either a property or a variable proxy that rewrites
+ // to a property.
+ Load(property->obj());
+ if (property->key()->IsPropertyName()) {
+ ref->set_type(Reference::NAMED);
+ } else {
+ Load(property->key());
+ ref->set_type(Reference::KEYED);
+ }
+ } else if (var != NULL) {
+ // The expression is a variable proxy that does not rewrite to a
+ // property. Global variables are treated as named property references.
+ if (var->is_global()) {
+ // If rax is free, the register allocator prefers it. Thus the code
+ // generator will load the global object into rax, which is where
+ // LoadIC wants it. Most uses of Reference call LoadIC directly
+ // after the reference is created.
+ frame_->Spill(rax);
+ LoadGlobal();
+ ref->set_type(Reference::NAMED);
+ } else {
+ ASSERT(var->slot() != NULL);
+ ref->set_type(Reference::SLOT);
+ }
+ } else {
+ // Anything else is a runtime error.
+ Load(e);
+ frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
+ }
+
+ in_spilled_code_ = was_in_spilled_code;
+}
+
+
+void CodeGenerator::UnloadReference(Reference* ref) {
+ // Pop a reference from the stack while preserving TOS.
+ Comment cmnt(masm_, "[ UnloadReference");
+ frame_->Nip(ref->size());
+ ref->set_unloaded();
+}
+
+
+// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
+// convert it to a boolean in the condition code register or jump to
+// 'false_target'/'true_target' as appropriate.
+void CodeGenerator::ToBoolean(ControlDestination* dest) {
+ Comment cmnt(masm_, "[ ToBoolean");
+
+ // The value to convert should be popped from the frame.
+ Result value = frame_->Pop();
+ value.ToRegister();
+
+ if (value.is_number()) {
+ // Fast case if TypeInfo indicates only numbers.
+ if (FLAG_debug_code) {
+ __ AbortIfNotNumber(value.reg());
+ }
+ // Smi => false iff zero.
+ __ SmiCompare(value.reg(), Smi::FromInt(0));
+ if (value.is_smi()) {
+ value.Unuse();
+ dest->Split(not_zero);
+ } else {
+ dest->false_target()->Branch(equal);
+ Condition is_smi = masm_->CheckSmi(value.reg());
+ dest->true_target()->Branch(is_smi);
+ __ xorpd(xmm0, xmm0);
+ __ ucomisd(xmm0, FieldOperand(value.reg(), HeapNumber::kValueOffset));
+ value.Unuse();
+ dest->Split(not_zero);
+ }
+ } else {
+ // Fast case checks.
+ // 'false' => false.
+ __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
+ dest->false_target()->Branch(equal);
+
+ // 'true' => true.
+ __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
+ dest->true_target()->Branch(equal);
+
+ // 'undefined' => false.
+ __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
+ dest->false_target()->Branch(equal);
+
+ // Smi => false iff zero.
+ __ SmiCompare(value.reg(), Smi::FromInt(0));
+ dest->false_target()->Branch(equal);
+ Condition is_smi = masm_->CheckSmi(value.reg());
+ dest->true_target()->Branch(is_smi);
+
+ // Call the stub for all other cases.
+ frame_->Push(&value); // Undo the Pop() from above.
+ ToBooleanStub stub;
+ Result temp = frame_->CallStub(&stub, 1);
+ // Convert the result to a condition code.
+ __ testq(temp.reg(), temp.reg());
+ temp.Unuse();
+ dest->Split(not_equal);
+ }
+}
+
+
+class FloatingPointHelper : public AllStatic {
public:
- explicit DeferredReferenceGetKeyedValue(Register dst,
- Register receiver,
- Register key)
- : dst_(dst), receiver_(receiver), key_(key) {
- set_comment("[ DeferredReferenceGetKeyedValue");
+ // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
+ // If the operands are not both numbers, jump to not_numbers.
+ // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
+ // NumberOperands assumes both are smis or heap numbers.
+ static void LoadSSE2SmiOperands(MacroAssembler* masm);
+ static void LoadSSE2NumberOperands(MacroAssembler* masm);
+ static void LoadSSE2UnknownOperands(MacroAssembler* masm,
+ Label* not_numbers);
+
+ // Takes the operands in rdx and rax and loads them as integers in rax
+ // and rcx.
+ static void LoadAsIntegers(MacroAssembler* masm,
+ Label* operand_conversion_failure,
+ Register heap_number_map);
+ // As above, but we know the operands to be numbers. In that case,
+ // conversion can't fail.
+ static void LoadNumbersAsIntegers(MacroAssembler* masm);
+};
+
+
+const char* GenericBinaryOpStub::GetName() {
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
}
- virtual void Generate();
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
+ op_name,
+ overwrite_name,
+ (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
+ args_in_registers_ ? "RegArgs" : "StackArgs",
+ args_reversed_ ? "_R" : "",
+ static_operands_type_.ToString(),
+ BinaryOpIC::GetName(runtime_operands_type_));
+ return name_;
+}
- Label* patch_site() { return &patch_site_; }
+
+// Call the specialized stub for a binary operation.
+class DeferredInlineBinaryOperation: public DeferredCode {
+ public:
+ DeferredInlineBinaryOperation(Token::Value op,
+ Register dst,
+ Register left,
+ Register right,
+ OverwriteMode mode)
+ : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
+ set_comment("[ DeferredInlineBinaryOperation");
+ }
+
+ virtual void Generate();
private:
- Label patch_site_;
+ Token::Value op_;
Register dst_;
- Register receiver_;
- Register key_;
+ Register left_;
+ Register right_;
+ OverwriteMode mode_;
};
-void DeferredReferenceGetKeyedValue::Generate() {
- if (receiver_.is(rdx)) {
- if (!key_.is(rax)) {
- __ movq(rax, key_);
- } // else do nothing.
- } else if (receiver_.is(rax)) {
- if (key_.is(rdx)) {
- __ xchg(rax, rdx);
- } else if (key_.is(rax)) {
- __ movq(rdx, receiver_);
- } else {
- __ movq(rdx, receiver_);
- __ movq(rax, key_);
+void DeferredInlineBinaryOperation::Generate() {
+ Label done;
+ if ((op_ == Token::ADD)
+ || (op_ == Token::SUB)
+ || (op_ == Token::MUL)
+ || (op_ == Token::DIV)) {
+ Label call_runtime;
+ Label left_smi, right_smi, load_right, do_op;
+ __ JumpIfSmi(left_, &left_smi);
+ __ CompareRoot(FieldOperand(left_, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &call_runtime);
+ __ movsd(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
+ if (mode_ == OVERWRITE_LEFT) {
+ __ movq(dst_, left_);
}
- } else if (key_.is(rax)) {
- __ movq(rdx, receiver_);
+ __ jmp(&load_right);
+
+ __ bind(&left_smi);
+ __ SmiToInteger32(left_, left_);
+ __ cvtlsi2sd(xmm0, left_);
+ __ Integer32ToSmi(left_, left_);
+ if (mode_ == OVERWRITE_LEFT) {
+ Label alloc_failure;
+ __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
+ }
+
+ __ bind(&load_right);
+ __ JumpIfSmi(right_, &right_smi);
+ __ CompareRoot(FieldOperand(right_, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &call_runtime);
+ __ movsd(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
+ if (mode_ == OVERWRITE_RIGHT) {
+ __ movq(dst_, right_);
+ } else if (mode_ == NO_OVERWRITE) {
+ Label alloc_failure;
+ __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
+ }
+ __ jmp(&do_op);
+
+ __ bind(&right_smi);
+ __ SmiToInteger32(right_, right_);
+ __ cvtlsi2sd(xmm1, right_);
+ __ Integer32ToSmi(right_, right_);
+ if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
+ Label alloc_failure;
+ __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
+ }
+
+ __ bind(&do_op);
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ __ movsd(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
+ __ jmp(&done);
+
+ __ bind(&call_runtime);
+ }
+ GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
+ stub.GenerateCall(masm_, left_, right_);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+ __ bind(&done);
+}
+
+
+static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
+ Token::Value op,
+ const Result& right,
+ const Result& left) {
+ // Set TypeInfo of result according to the operation performed.
+ // We rely on the fact that smis have a 32 bit payload on x64.
+ STATIC_ASSERT(kSmiValueSize == 32);
+ switch (op) {
+ case Token::COMMA:
+ return right.type_info();
+ case Token::OR:
+ case Token::AND:
+ // Result type can be either of the two input types.
+ return operands_type;
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ // Result is always a smi.
+ return TypeInfo::Smi();
+ case Token::SAR:
+ case Token::SHL:
+ // Result is always a smi.
+ return TypeInfo::Smi();
+ case Token::SHR:
+ // Result of x >>> y is always a smi if masked y >= 1, otherwise a number.
+ return (right.is_constant() && right.handle()->IsSmi()
+ && (Smi::cast(*right.handle())->value() & 0x1F) >= 1)
+ ? TypeInfo::Smi()
+ : TypeInfo::Number();
+ case Token::ADD:
+ if (operands_type.IsNumber()) {
+ return TypeInfo::Number();
+ } else if (left.type_info().IsString() || right.type_info().IsString()) {
+ return TypeInfo::String();
+ } else {
+ return TypeInfo::Unknown();
+ }
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD:
+ // Result is always a number.
+ return TypeInfo::Number();
+ default:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return TypeInfo::Unknown();
+}
+
+
+void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
+ OverwriteMode overwrite_mode) {
+ Comment cmnt(masm_, "[ BinaryOperation");
+ Token::Value op = expr->op();
+ Comment cmnt_token(masm_, Token::String(op));
+
+ if (op == Token::COMMA) {
+ // Simply discard left value.
+ frame_->Nip(1);
+ return;
+ }
+
+ Result right = frame_->Pop();
+ Result left = frame_->Pop();
+
+ if (op == Token::ADD) {
+ const bool left_is_string = left.type_info().IsString();
+ const bool right_is_string = right.type_info().IsString();
+ // Make sure constant strings have string type info.
+ ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
+ left_is_string);
+ ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
+ right_is_string);
+ if (left_is_string || right_is_string) {
+ frame_->Push(&left);
+ frame_->Push(&right);
+ Result answer;
+ if (left_is_string) {
+ if (right_is_string) {
+ StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+ answer = frame_->CallStub(&stub, 2);
+ } else {
+ answer =
+ frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
+ }
+ } else if (right_is_string) {
+ answer =
+ frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
+ }
+ answer.set_type_info(TypeInfo::String());
+ frame_->Push(&answer);
+ return;
+ }
+ // Neither operand is known to be a string.
+ }
+
+ bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
+ bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
+ bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
+ bool right_is_non_smi_constant =
+ right.is_constant() && !right.handle()->IsSmi();
+
+ if (left_is_smi_constant && right_is_smi_constant) {
+ // Compute the constant result at compile time, and leave it on the frame.
+ int left_int = Smi::cast(*left.handle())->value();
+ int right_int = Smi::cast(*right.handle())->value();
+ if (FoldConstantSmis(op, left_int, right_int)) return;
+ }
+
+ // Get number type of left and right sub-expressions.
+ TypeInfo operands_type =
+ TypeInfo::Combine(left.type_info(), right.type_info());
+
+ TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left);
+
+ Result answer;
+ if (left_is_non_smi_constant || right_is_non_smi_constant) {
+ // Go straight to the slow case, with no smi code.
+ GenericBinaryOpStub stub(op,
+ overwrite_mode,
+ NO_SMI_CODE_IN_STUB,
+ operands_type);
+ answer = stub.GenerateCall(masm_, frame_, &left, &right);
+ } else if (right_is_smi_constant) {
+ answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
+ false, overwrite_mode);
+ } else if (left_is_smi_constant) {
+ answer = ConstantSmiBinaryOperation(expr, &right, left.handle(),
+ true, overwrite_mode);
} else {
- __ movq(rax, key_);
- __ movq(rdx, receiver_);
+ // Set the flags based on the operation, type and loop nesting level.
+ // Bit operations always assume they likely operate on Smis. Still only
+ // generate the inline Smi check code if this operation is part of a loop.
+ // For all other operations only inline the Smi check code for likely smis
+ // if the operation is part of a loop.
+ if (loop_nesting() > 0 &&
+ (Token::IsBitOp(op) ||
+ operands_type.IsInteger32() ||
+ expr->type()->IsLikelySmi())) {
+ answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode);
+ } else {
+ GenericBinaryOpStub stub(op,
+ overwrite_mode,
+ NO_GENERIC_BINARY_FLAGS,
+ operands_type);
+ answer = stub.GenerateCall(masm_, frame_, &left, &right);
+ }
}
- // Calculate the delta from the IC call instruction to the map check
- // movq instruction in the inlined version. This delta is stored in
- // a test(rax, delta) instruction after the call so that we can find
- // it in the IC initialization code and patch the movq instruction.
- // This means that we cannot allow test instructions after calls to
- // KeyedLoadIC stubs in other places.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // The delta from the start of the map-compare instruction to the
- // test instruction. We use masm_-> directly here instead of the __
- // macro because the macro sometimes uses macro expansion to turn
- // into something that can't return a value. This is encountered
- // when doing generated code coverage tests.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the __ macro because this is the
- // instruction that gets patched and coverage code gets in the way.
- // TODO(X64): Consider whether it's worth switching the test to a
- // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
- // be generated normally.
- masm_->testl(rax, Immediate(-delta_to_patch_site));
- __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
+ answer.set_type_info(result_type);
+ frame_->Push(&answer);
+}
+
+
+bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
+ Object* answer_object = Heap::undefined_value();
+ switch (op) {
+ case Token::ADD:
+ // Use intptr_t to detect overflow of 32-bit int.
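+      // e.g. left = right = 2000000000: each fits in a smi, but the 64-bit
+      // sum 4000000000 does not (assuming this port's 32-bit smi value
+      // range), so the fold is skipped and handled at run time instead.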
+ if (Smi::IsValid(static_cast<intptr_t>(left) + right)) {
+ answer_object = Smi::FromInt(left + right);
+ }
+ break;
+ case Token::SUB:
+ // Use intptr_t to detect overflow of 32-bit int.
+ if (Smi::IsValid(static_cast<intptr_t>(left) - right)) {
+ answer_object = Smi::FromInt(left - right);
+ }
+ break;
+ case Token::MUL: {
+ double answer = static_cast<double>(left) * right;
+ if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
+ // If the product is zero and the non-zero factor is negative,
+ // the spec requires us to return floating point negative zero.
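+        // e.g. -3 * 0 must produce -0.0, which a smi cannot represent, so
+        // the fold is skipped in that case.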
+ if (answer != 0 || (left >= 0 && right >= 0)) {
+ answer_object = Smi::FromInt(static_cast<int>(answer));
+ }
+ }
+ }
+ break;
+ case Token::DIV:
+ case Token::MOD:
+ break;
+ case Token::BIT_OR:
+ answer_object = Smi::FromInt(left | right);
+ break;
+ case Token::BIT_AND:
+ answer_object = Smi::FromInt(left & right);
+ break;
+ case Token::BIT_XOR:
+ answer_object = Smi::FromInt(left ^ right);
+ break;
+
+ case Token::SHL: {
+ int shift_amount = right & 0x1F;
+ if (Smi::IsValid(left << shift_amount)) {
+ answer_object = Smi::FromInt(left << shift_amount);
+ }
+ break;
+ }
+ case Token::SHR: {
+ int shift_amount = right & 0x1F;
+ unsigned int unsigned_left = left;
+ unsigned_left >>= shift_amount;
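+      // JS >>> is an unsigned shift, so e.g. -1 >>> 0 yields 0xFFFFFFFF;
+      // results above Smi::kMaxValue are left to the run-time code.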
+ if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
+ answer_object = Smi::FromInt(unsigned_left);
+ }
+ break;
+ }
+ case Token::SAR: {
+ int shift_amount = right & 0x1F;
+ unsigned int unsigned_left = left;
+ if (left < 0) {
+ // Perform arithmetic shift of a negative number by
+ // complementing number, logical shifting, complementing again.
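+        // e.g. -8 >> 1: ~(-8) == 7, 7 >> 1 == 3, ~3 == -4, which matches
+        // the arithmetic shift.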
+ unsigned_left = ~unsigned_left;
+ unsigned_left >>= shift_amount;
+ unsigned_left = ~unsigned_left;
+ } else {
+ unsigned_left >>= shift_amount;
+ }
+ ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
+ answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ if (answer_object == Heap::undefined_value()) {
+ return false;
+ }
+ frame_->Push(Handle<Object>(answer_object));
+ return true;
+}
+
+
+void CodeGenerator::JumpIfNotSmiUsingTypeInfo(Register reg,
+ TypeInfo type,
+ DeferredCode* deferred) {
+ if (!type.IsSmi()) {
+ __ JumpIfNotSmi(reg, deferred->entry_label());
+ }
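+  // At this point reg holds a smi, either by the static type info or by
+  // the dynamic check above; the debug-mode assert below verifies this.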
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(reg);
+ }
+}
+
+
+void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
+ Register right,
+ TypeInfo left_info,
+ TypeInfo right_info,
+ DeferredCode* deferred) {
+ if (!left_info.IsSmi() && !right_info.IsSmi()) {
+ __ JumpIfNotBothSmi(left, right, deferred->entry_label());
+ } else if (!left_info.IsSmi()) {
+ __ JumpIfNotSmi(left, deferred->entry_label());
+ } else if (!right_info.IsSmi()) {
+ __ JumpIfNotSmi(right, deferred->entry_label());
+ }
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(left);
+ __ AbortIfNotSmi(right);
+ }
+}
+
+
+// Implements a binary operation using a deferred code object and some
+// inline code to operate on smis quickly.
+Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
+ Result* left,
+ Result* right,
+ OverwriteMode overwrite_mode) {
+ // Copy the type info because left and right may be overwritten.
+ TypeInfo left_type_info = left->type_info();
+ TypeInfo right_type_info = right->type_info();
+ Token::Value op = expr->op();
+ Result answer;
+ // Special handling of div and mod because they use fixed registers.
+ if (op == Token::DIV || op == Token::MOD) {
+ // We need rax as the quotient register, rdx as the remainder
+ // register, neither left nor right in rax or rdx, and left copied
+ // to rax.
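+    // This mirrors the x64 idiv instruction, which takes its dividend in
+    // rdx:rax and leaves the quotient in rax and the remainder in rdx.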
+ Result quotient;
+ Result remainder;
+ bool left_is_in_rax = false;
+ // Step 1: get rax for quotient.
+ if ((left->is_register() && left->reg().is(rax)) ||
+ (right->is_register() && right->reg().is(rax))) {
+ // One or both is in rax. Use a fresh non-rdx register for
+ // them.
+ Result fresh = allocator_->Allocate();
+ ASSERT(fresh.is_valid());
+ if (fresh.reg().is(rdx)) {
+ remainder = fresh;
+ fresh = allocator_->Allocate();
+ ASSERT(fresh.is_valid());
+ }
+ if (left->is_register() && left->reg().is(rax)) {
+ quotient = *left;
+ *left = fresh;
+ left_is_in_rax = true;
+ }
+ if (right->is_register() && right->reg().is(rax)) {
+ quotient = *right;
+ *right = fresh;
+ }
+ __ movq(fresh.reg(), rax);
+ } else {
+ // Neither left nor right is in rax.
+ quotient = allocator_->Allocate(rax);
+ }
+ ASSERT(quotient.is_register() && quotient.reg().is(rax));
+ ASSERT(!(left->is_register() && left->reg().is(rax)));
+ ASSERT(!(right->is_register() && right->reg().is(rax)));
+
+ // Step 2: get rdx for remainder if necessary.
+ if (!remainder.is_valid()) {
+ if ((left->is_register() && left->reg().is(rdx)) ||
+ (right->is_register() && right->reg().is(rdx))) {
+ Result fresh = allocator_->Allocate();
+ ASSERT(fresh.is_valid());
+ if (left->is_register() && left->reg().is(rdx)) {
+ remainder = *left;
+ *left = fresh;
+ }
+ if (right->is_register() && right->reg().is(rdx)) {
+ remainder = *right;
+ *right = fresh;
+ }
+ __ movq(fresh.reg(), rdx);
+ } else {
+ // Neither left nor right is in rdx.
+ remainder = allocator_->Allocate(rdx);
+ }
+ }
+ ASSERT(remainder.is_register() && remainder.reg().is(rdx));
+ ASSERT(!(left->is_register() && left->reg().is(rdx)));
+ ASSERT(!(right->is_register() && right->reg().is(rdx)));
+
+ left->ToRegister();
+ right->ToRegister();
+ frame_->Spill(rax);
+ frame_->Spill(rdx);
+
+ // Check that left and right are smi tagged.
+ DeferredInlineBinaryOperation* deferred =
+ new DeferredInlineBinaryOperation(op,
+ (op == Token::DIV) ? rax : rdx,
+ left->reg(),
+ right->reg(),
+ overwrite_mode);
+ JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
+ left_type_info, right_type_info, deferred);
+
+ if (op == Token::DIV) {
+ __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
+ deferred->BindExit();
+ left->Unuse();
+ right->Unuse();
+ answer = quotient;
+ } else {
+ ASSERT(op == Token::MOD);
+ __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
+ deferred->BindExit();
+ left->Unuse();
+ right->Unuse();
+ answer = remainder;
+ }
+ ASSERT(answer.is_valid());
+ return answer;
+ }
+
+ // Special handling of shift operations because they use fixed
+ // registers.
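+  // Variable-count shifts on x64 take their count in cl, which is why the
+  // right operand is forced into rcx below.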
+ if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
+ // Move left out of rcx if necessary.
+ if (left->is_register() && left->reg().is(rcx)) {
+ *left = allocator_->Allocate();
+ ASSERT(left->is_valid());
+ __ movq(left->reg(), rcx);
+ }
+ right->ToRegister(rcx);
+ left->ToRegister();
+ ASSERT(left->is_register() && !left->reg().is(rcx));
+ ASSERT(right->is_register() && right->reg().is(rcx));
+
+ // We will modify right, it must be spilled.
+ frame_->Spill(rcx);
+
+ // Use a fresh answer register to avoid spilling the left operand.
+ answer = allocator_->Allocate();
+ ASSERT(answer.is_valid());
+ // Check that both operands are smis using the answer register as a
+ // temporary.
+ DeferredInlineBinaryOperation* deferred =
+ new DeferredInlineBinaryOperation(op,
+ answer.reg(),
+ left->reg(),
+ rcx,
+ overwrite_mode);
+
+ Label do_op;
+ if (right_type_info.IsSmi()) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(right->reg());
+ }
+ __ movq(answer.reg(), left->reg());
+ // If left is not known to be a smi, check if it is.
+ // If left is not known to be a number, and it isn't a smi, check if
+ // it is a HeapNumber.
+ if (!left_type_info.IsSmi()) {
+ __ JumpIfSmi(answer.reg(), &do_op);
+ if (!left_type_info.IsNumber()) {
+          // Branch if not a heap number.
+ __ Cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ deferred->Branch(not_equal);
+ }
+ // Load integer value into answer register using truncation.
+ __ cvttsd2si(answer.reg(),
+ FieldOperand(answer.reg(), HeapNumber::kValueOffset));
+ // Branch if we might have overflowed.
+ // (False negative for Smi::kMinValue)
+ __ cmpq(answer.reg(), Immediate(0x80000000));
+ deferred->Branch(equal);
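+        // cvttsd2si produces 0x80000000 (the x86 "integer indefinite"
+        // value) for NaN or out-of-range inputs, so that bit pattern
+        // indicates a failed truncation.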
+ // TODO(lrn): Inline shifts on int32 here instead of first smi-tagging.
+ __ Integer32ToSmi(answer.reg(), answer.reg());
+ } else {
+ // Fast case - both are actually smis.
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(left->reg());
+ }
+ }
+ } else {
+ JumpIfNotBothSmiUsingTypeInfo(left->reg(), rcx,
+ left_type_info, right_type_info, deferred);
+ }
+ __ bind(&do_op);
+
+ // Perform the operation.
+ switch (op) {
+ case Token::SAR:
+ __ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx);
+ break;
+ case Token::SHR: {
+ __ SmiShiftLogicalRight(answer.reg(),
+ left->reg(),
+ rcx,
+ deferred->entry_label());
+ break;
+ }
+ case Token::SHL: {
+ __ SmiShiftLeft(answer.reg(),
+ left->reg(),
+ rcx);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ deferred->BindExit();
+ left->Unuse();
+ right->Unuse();
+ ASSERT(answer.is_valid());
+ return answer;
+ }
+
+ // Handle the other binary operations.
+ left->ToRegister();
+ right->ToRegister();
+ // A newly allocated register answer is used to hold the answer. The
+ // registers containing left and right are not modified so they don't
+ // need to be spilled in the fast case.
+ answer = allocator_->Allocate();
+ ASSERT(answer.is_valid());
+
+ // Perform the smi tag check.
+ DeferredInlineBinaryOperation* deferred =
+ new DeferredInlineBinaryOperation(op,
+ answer.reg(),
+ left->reg(),
+ right->reg(),
+ overwrite_mode);
+ JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
+ left_type_info, right_type_info, deferred);
+
+ switch (op) {
+ case Token::ADD:
+ __ SmiAdd(answer.reg(),
+ left->reg(),
+ right->reg(),
+ deferred->entry_label());
+ break;
+
+ case Token::SUB:
+ __ SmiSub(answer.reg(),
+ left->reg(),
+ right->reg(),
+ deferred->entry_label());
+ break;
+
+ case Token::MUL: {
+ __ SmiMul(answer.reg(),
+ left->reg(),
+ right->reg(),
+ deferred->entry_label());
+ break;
+ }
+
+ case Token::BIT_OR:
+ __ SmiOr(answer.reg(), left->reg(), right->reg());
+ break;
+
+ case Token::BIT_AND:
+ __ SmiAnd(answer.reg(), left->reg(), right->reg());
+ break;
+
+ case Token::BIT_XOR:
+ __ SmiXor(answer.reg(), left->reg(), right->reg());
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+ deferred->BindExit();
+ left->Unuse();
+ right->Unuse();
+ ASSERT(answer.is_valid());
+ return answer;
+}
+
+
+// Call the appropriate binary operation stub to compute src op value
+// and leave the result in dst.
+class DeferredInlineSmiOperation: public DeferredCode {
+ public:
+ DeferredInlineSmiOperation(Token::Value op,
+ Register dst,
+ Register src,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : op_(op),
+ dst_(dst),
+ src_(src),
+ value_(value),
+ overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiOperation");
+ }
+
+ virtual void Generate();
+
+ private:
+ Token::Value op_;
+ Register dst_;
+ Register src_;
+ Smi* value_;
+ OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiOperation::Generate() {
+ // For mod we don't generate all the Smi code inline.
+ GenericBinaryOpStub stub(
+ op_,
+ overwrite_mode_,
+ (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
+ stub.GenerateCall(masm_, src_, value_);
if (!dst_.is(rax)) __ movq(dst_, rax);
}
-class DeferredReferenceSetKeyedValue: public DeferredCode {
+// Call the appropriate binary operation stub to compute value op src
+// and leave the result in dst.
+class DeferredInlineSmiOperationReversed: public DeferredCode {
public:
- DeferredReferenceSetKeyedValue(Register value,
- Register key,
- Register receiver)
- : value_(value), key_(key), receiver_(receiver) {
- set_comment("[ DeferredReferenceSetKeyedValue");
+ DeferredInlineSmiOperationReversed(Token::Value op,
+ Register dst,
+ Smi* value,
+ Register src,
+ OverwriteMode overwrite_mode)
+ : op_(op),
+ dst_(dst),
+ value_(value),
+ src_(src),
+ overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiOperationReversed");
}
virtual void Generate();
- Label* patch_site() { return &patch_site_; }
+ private:
+ Token::Value op_;
+ Register dst_;
+ Smi* value_;
+ Register src_;
+ OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiOperationReversed::Generate() {
+ GenericBinaryOpStub stub(
+ op_,
+ overwrite_mode_,
+ NO_SMI_CODE_IN_STUB);
+ stub.GenerateCall(masm_, value_, src_);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
+class DeferredInlineSmiAdd: public DeferredCode {
+ public:
+ DeferredInlineSmiAdd(Register dst,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiAdd");
+ }
+
+ virtual void Generate();
private:
- Register value_;
- Register key_;
- Register receiver_;
- Label patch_site_;
+ Register dst_;
+ Smi* value_;
+ OverwriteMode overwrite_mode_;
};
-void DeferredReferenceSetKeyedValue::Generate() {
- __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
- // Move value, receiver, and key to registers rax, rdx, and rcx, as
- // the IC stub expects.
- // Move value to rax, using xchg if the receiver or key is in rax.
- if (!value_.is(rax)) {
- if (!receiver_.is(rax) && !key_.is(rax)) {
- __ movq(rax, value_);
+void DeferredInlineSmiAdd::Generate() {
+ GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+ igostub.GenerateCall(masm_, dst_, value_);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
+// The result of value + src is in dst. It either overflowed or was not
+// smi tagged. Undo the speculative addition and call the appropriate
+// specialized stub for add. The result is left in dst.
+class DeferredInlineSmiAddReversed: public DeferredCode {
+ public:
+ DeferredInlineSmiAddReversed(Register dst,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiAddReversed");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register dst_;
+ Smi* value_;
+ OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiAddReversed::Generate() {
+ GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+ igostub.GenerateCall(masm_, value_, dst_);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
+class DeferredInlineSmiSub: public DeferredCode {
+ public:
+ DeferredInlineSmiSub(Register dst,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiSub");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register dst_;
+ Smi* value_;
+ OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiSub::Generate() {
+ GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+ igostub.GenerateCall(masm_, dst_, value_);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
+Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
+ Result* operand,
+ Handle<Object> value,
+ bool reversed,
+ OverwriteMode overwrite_mode) {
+ // Generate inline code for a binary operation when one of the
+ // operands is a constant smi. Consumes the argument "operand".
+ if (IsUnsafeSmi(value)) {
+ Result unsafe_operand(value);
+ if (reversed) {
+ return LikelySmiBinaryOperation(expr, &unsafe_operand, operand,
+ overwrite_mode);
} else {
- __ xchg(rax, value_);
- // Update receiver_ and key_ if they are affected by the swap.
- if (receiver_.is(rax)) {
- receiver_ = value_;
- } else if (receiver_.is(value_)) {
- receiver_ = rax;
+ return LikelySmiBinaryOperation(expr, operand, &unsafe_operand,
+ overwrite_mode);
+ }
+ }
+
+ // Get the literal value.
+ Smi* smi_value = Smi::cast(*value);
+ int int_value = smi_value->value();
+
+ Token::Value op = expr->op();
+ Result answer;
+ switch (op) {
+ case Token::ADD: {
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+ DeferredCode* deferred = NULL;
+ if (reversed) {
+ deferred = new DeferredInlineSmiAddReversed(operand->reg(),
+ smi_value,
+ overwrite_mode);
+ } else {
+ deferred = new DeferredInlineSmiAdd(operand->reg(),
+ smi_value,
+ overwrite_mode);
}
- if (key_.is(rax)) {
- key_ = value_;
- } else if (key_.is(value_)) {
- key_ = rax;
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
+ __ SmiAddConstant(operand->reg(),
+ operand->reg(),
+ smi_value,
+ deferred->entry_label());
+ deferred->BindExit();
+ answer = *operand;
+ break;
+ }
+
+ case Token::SUB: {
+ if (reversed) {
+ Result constant_operand(value);
+ answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
+ overwrite_mode);
+ } else {
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+ answer = *operand;
+ DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
+ smi_value,
+ overwrite_mode);
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
+ // A smi currently fits in a 32-bit Immediate.
+ __ SmiSubConstant(operand->reg(),
+ operand->reg(),
+ smi_value,
+ deferred->entry_label());
+ deferred->BindExit();
+ operand->Unuse();
}
+ break;
+ }
+
+ case Token::SAR:
+ if (reversed) {
+ Result constant_operand(value);
+ answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
+ overwrite_mode);
+ } else {
+ // Only the least significant 5 bits of the shift value are used.
+ // In the slow case, this masking is done inside the runtime call.
+ int shift_value = int_value & 0x1f;
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+ DeferredInlineSmiOperation* deferred =
+ new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
+ __ SmiShiftArithmeticRightConstant(operand->reg(),
+ operand->reg(),
+ shift_value);
+ deferred->BindExit();
+ answer = *operand;
+ }
+ break;
+
+ case Token::SHR:
+ if (reversed) {
+ Result constant_operand(value);
+ answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
+ overwrite_mode);
+ } else {
+ // Only the least significant 5 bits of the shift value are used.
+ // In the slow case, this masking is done inside the runtime call.
+ int shift_value = int_value & 0x1f;
+ operand->ToRegister();
+ answer = allocator()->Allocate();
+ ASSERT(answer.is_valid());
+ DeferredInlineSmiOperation* deferred =
+ new DeferredInlineSmiOperation(op,
+ answer.reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
+ __ SmiShiftLogicalRightConstant(answer.reg(),
+ operand->reg(),
+ shift_value,
+ deferred->entry_label());
+ deferred->BindExit();
+ operand->Unuse();
+ }
+ break;
+
+ case Token::SHL:
+ if (reversed) {
+ operand->ToRegister();
+
+ // We need rcx to be available to hold operand, and to be spilled.
+ // SmiShiftLeft implicitly modifies rcx.
+ if (operand->reg().is(rcx)) {
+ frame_->Spill(operand->reg());
+ answer = allocator()->Allocate();
+ } else {
+ Result rcx_reg = allocator()->Allocate(rcx);
+ // answer must not be rcx.
+ answer = allocator()->Allocate();
+ // rcx_reg goes out of scope.
+ }
+
+ DeferredInlineSmiOperationReversed* deferred =
+ new DeferredInlineSmiOperationReversed(op,
+ answer.reg(),
+ smi_value,
+ operand->reg(),
+ overwrite_mode);
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
+
+ __ Move(answer.reg(), smi_value);
+ __ SmiShiftLeft(answer.reg(), answer.reg(), operand->reg());
+ operand->Unuse();
+
+ deferred->BindExit();
+ } else {
+ // Only the least significant 5 bits of the shift value are used.
+ // In the slow case, this masking is done inside the runtime call.
+ int shift_value = int_value & 0x1f;
+ operand->ToRegister();
+ if (shift_value == 0) {
+ // Spill operand so it can be overwritten in the slow case.
+ frame_->Spill(operand->reg());
+ DeferredInlineSmiOperation* deferred =
+ new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
+ deferred->BindExit();
+ answer = *operand;
+ } else {
+ // Use a fresh temporary for nonzero shift values.
+ answer = allocator()->Allocate();
+ ASSERT(answer.is_valid());
+ DeferredInlineSmiOperation* deferred =
+ new DeferredInlineSmiOperation(op,
+ answer.reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
+ __ SmiShiftLeftConstant(answer.reg(),
+ operand->reg(),
+ shift_value);
+ deferred->BindExit();
+ operand->Unuse();
+ }
+ }
+ break;
+
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND: {
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+ if (reversed) {
+ // Bit operations with a constant smi are commutative.
+ // We can swap left and right operands with no problem.
+ // Swap left and right overwrite modes. 0->0, 1->2, 2->1.
+ overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3);
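+        // (2 * 0) % 3 == 0, (2 * 1) % 3 == 2, (2 * 2) % 3 == 1, exchanging
+        // OVERWRITE_LEFT and OVERWRITE_RIGHT (assuming the enum values
+        // 0, 1 and 2).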
+ }
+ DeferredCode* deferred = new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
+ if (op == Token::BIT_AND) {
+ __ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
+ } else if (op == Token::BIT_XOR) {
+ if (int_value != 0) {
+ __ SmiXorConstant(operand->reg(), operand->reg(), smi_value);
+ }
+ } else {
+ ASSERT(op == Token::BIT_OR);
+ if (int_value != 0) {
+ __ SmiOrConstant(operand->reg(), operand->reg(), smi_value);
+ }
+ }
+ deferred->BindExit();
+ answer = *operand;
+ break;
+ }
+
+ // Generate inline code for mod of powers of 2 and negative powers of 2.
+ case Token::MOD:
+ if (!reversed &&
+ int_value != 0 &&
+ (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+ DeferredCode* deferred =
+ new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ // Check for negative or non-Smi left hand side.
+ __ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label());
+ if (int_value < 0) int_value = -int_value;
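+        // For a non-negative x and a power of two m, x % m == x & (m - 1);
+        // e.g. 13 % 8 == 13 & 7 == 5. ECMA-262 gives the result the sign of
+        // the dividend, so a negative divisor can use the same mask.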
+ if (int_value == 1) {
+ __ Move(operand->reg(), Smi::FromInt(0));
+ } else {
+ __ SmiAndConstant(operand->reg(),
+ operand->reg(),
+ Smi::FromInt(int_value - 1));
+ }
+ deferred->BindExit();
+ answer = *operand;
+ break; // This break only applies if we generated code for MOD.
+ }
+ // Fall through if we did not find a power of 2 on the right hand side!
+ // The next case must be the default.
+
+ default: {
+ Result constant_operand(value);
+ if (reversed) {
+ answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
+ overwrite_mode);
+ } else {
+ answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
+ overwrite_mode);
+ }
+ break;
}
}
- // Value is now in rax. Its original location is remembered in value_,
- // and the value is restored to value_ before returning.
- // The variables receiver_ and key_ are not preserved.
- // Move receiver and key to rdx and rcx, swapping if necessary.
- if (receiver_.is(rdx)) {
- if (!key_.is(rcx)) {
- __ movq(rcx, key_);
- } // Else everything is already in the right place.
- } else if (receiver_.is(rcx)) {
- if (key_.is(rdx)) {
- __ xchg(rcx, rdx);
- } else if (key_.is(rcx)) {
- __ movq(rdx, receiver_);
+ ASSERT(answer.is_valid());
+ return answer;
+}
+
+
+static bool CouldBeNaN(const Result& result) {
+ if (result.type_info().IsSmi()) return false;
+ if (result.type_info().IsInteger32()) return false;
+ if (!result.is_constant()) return true;
+ if (!result.handle()->IsHeapNumber()) return false;
+ return isnan(HeapNumber::cast(*result.handle())->value());
+}
+
+
+// Convert from signed to unsigned comparison to match the way EFLAGS are set
+// by FPU and XMM compare instructions.
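+// Note that ucomisd reports unordered (NaN) operands through the parity
+// flag, which callers test with parity_even before using these conditions.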
+static Condition DoubleCondition(Condition cc) {
+ switch (cc) {
+ case less: return below;
+ case equal: return equal;
+ case less_equal: return below_equal;
+ case greater: return above;
+ case greater_equal: return above_equal;
+ default: UNREACHABLE();
+ }
+ UNREACHABLE();
+ return equal;
+}
+
+
+void CodeGenerator::Comparison(AstNode* node,
+ Condition cc,
+ bool strict,
+ ControlDestination* dest) {
+ // Strict only makes sense for equality comparisons.
+ ASSERT(!strict || cc == equal);
+
+ Result left_side;
+ Result right_side;
+ // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
+ if (cc == greater || cc == less_equal) {
+ cc = ReverseCondition(cc);
+ left_side = frame_->Pop();
+ right_side = frame_->Pop();
+ } else {
+ right_side = frame_->Pop();
+ left_side = frame_->Pop();
+ }
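+  // e.g. 'a > b' is compiled as 'b < a': the condition is reversed and the
+  // pops above swap the operands, preserving the left-to-right conversion
+  // order required by ECMA-262.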
+ ASSERT(cc == less || cc == equal || cc == greater_equal);
+
+ // If either side is a constant smi, optimize the comparison.
+ bool left_side_constant_smi = false;
+ bool left_side_constant_null = false;
+ bool left_side_constant_1_char_string = false;
+ if (left_side.is_constant()) {
+ left_side_constant_smi = left_side.handle()->IsSmi();
+ left_side_constant_null = left_side.handle()->IsNull();
+ left_side_constant_1_char_string =
+ (left_side.handle()->IsString() &&
+ String::cast(*left_side.handle())->length() == 1 &&
+ String::cast(*left_side.handle())->IsAsciiRepresentation());
+ }
+ bool right_side_constant_smi = false;
+ bool right_side_constant_null = false;
+ bool right_side_constant_1_char_string = false;
+ if (right_side.is_constant()) {
+ right_side_constant_smi = right_side.handle()->IsSmi();
+ right_side_constant_null = right_side.handle()->IsNull();
+ right_side_constant_1_char_string =
+ (right_side.handle()->IsString() &&
+ String::cast(*right_side.handle())->length() == 1 &&
+ String::cast(*right_side.handle())->IsAsciiRepresentation());
+ }
+
+ if (left_side_constant_smi || right_side_constant_smi) {
+ bool is_loop_condition = (node->AsExpression() != NULL) &&
+ node->AsExpression()->is_loop_condition();
+ ConstantSmiComparison(cc, strict, dest, &left_side, &right_side,
+ left_side_constant_smi, right_side_constant_smi,
+ is_loop_condition);
+ } else if (cc == equal &&
+ (left_side_constant_null || right_side_constant_null)) {
+ // To make null checks efficient, we check if either the left side or
+ // the right side is the constant 'null'.
+ // If so, we optimize the code by inlining a null check instead of
+ // calling the (very) general runtime routine for checking equality.
+ Result operand = left_side_constant_null ? right_side : left_side;
+ right_side.Unuse();
+ left_side.Unuse();
+ operand.ToRegister();
+ __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
+ if (strict) {
+ operand.Unuse();
+ dest->Split(equal);
} else {
- __ movq(rdx, receiver_);
- __ movq(rcx, key_);
+ // The 'null' value is only equal to 'undefined' if using non-strict
+ // comparisons.
+ dest->true_target()->Branch(equal);
+ __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
+ dest->true_target()->Branch(equal);
+ Condition is_smi = masm_->CheckSmi(operand.reg());
+ dest->false_target()->Branch(is_smi);
+
+ // It can be an undetectable object.
+ // Use a scratch register in preference to spilling operand.reg().
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ movq(temp.reg(),
+ FieldOperand(operand.reg(), HeapObject::kMapOffset));
+ __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ temp.Unuse();
+ operand.Unuse();
+ dest->Split(not_zero);
+ }
+ } else if (left_side_constant_1_char_string ||
+ right_side_constant_1_char_string) {
+ if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
+ // Trivial case, comparing two constants.
+ int left_value = String::cast(*left_side.handle())->Get(0);
+ int right_value = String::cast(*right_side.handle())->Get(0);
+ switch (cc) {
+ case less:
+ dest->Goto(left_value < right_value);
+ break;
+ case equal:
+ dest->Goto(left_value == right_value);
+ break;
+ case greater_equal:
+ dest->Goto(left_value >= right_value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+      // Only one side is a constant 1-character string.
+ // If left side is a constant 1-character string, reverse the operands.
+ // Since one side is a constant string, conversion order does not matter.
+ if (left_side_constant_1_char_string) {
+ Result temp = left_side;
+ left_side = right_side;
+ right_side = temp;
+ cc = ReverseCondition(cc);
+ // This may reintroduce greater or less_equal as the value of cc.
+ // CompareStub and the inline code both support all values of cc.
+ }
+ // Implement comparison against a constant string, inlining the case
+ // where both sides are strings.
+ left_side.ToRegister();
+
+ // Here we split control flow to the stub call and inlined cases
+ // before finally splitting it to the control destination. We use
+ // a jump target and branching to duplicate the virtual frame at
+ // the first split. We manually handle the off-frame references
+ // by reconstituting them on the non-fall-through path.
+ JumpTarget is_not_string, is_string;
+ Register left_reg = left_side.reg();
+ Handle<Object> right_val = right_side.handle();
+ ASSERT(StringShape(String::cast(*right_val)).IsSymbol());
+ Condition is_smi = masm()->CheckSmi(left_reg);
+ is_not_string.Branch(is_smi, &left_side);
+ Result temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ __ movq(temp.reg(),
+ FieldOperand(left_reg, HeapObject::kMapOffset));
+ __ movzxbl(temp.reg(),
+ FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
+ // If we are testing for equality then make use of the symbol shortcut.
+ // Check if the left hand side has the same type as the right hand
+ // side (which is always a symbol).
+ if (cc == equal) {
+ Label not_a_symbol;
+ ASSERT(kSymbolTag != 0);
+ // Ensure that no non-strings have the symbol bit set.
+ ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
+ __ testb(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit.
+ __ j(zero, &not_a_symbol);
+ // They are symbols, so do identity compare.
+ __ Cmp(left_reg, right_side.handle());
+ dest->true_target()->Branch(equal);
+ dest->false_target()->Branch(not_equal);
+ __ bind(&not_a_symbol);
+ }
+ // Call the compare stub if the left side is not a flat ascii string.
+ __ andb(temp.reg(),
+ Immediate(kIsNotStringMask |
+ kStringRepresentationMask |
+ kStringEncodingMask));
+ __ cmpb(temp.reg(),
+ Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
+ temp.Unuse();
+ is_string.Branch(equal, &left_side);
+
+      // Set up and call the compare stub.
+ is_not_string.Bind(&left_side);
+ CompareStub stub(cc, strict, kCantBothBeNaN);
+ Result result = frame_->CallStub(&stub, &left_side, &right_side);
+ result.ToRegister();
+ __ testq(result.reg(), result.reg());
+ result.Unuse();
+ dest->true_target()->Branch(cc);
+ dest->false_target()->Jump();
+
+ is_string.Bind(&left_side);
+ // left_side is a sequential ASCII string.
+ ASSERT(left_side.reg().is(left_reg));
+ right_side = Result(right_val);
+ Result temp2 = allocator_->Allocate();
+ ASSERT(temp2.is_valid());
+ // Test string equality and comparison.
+ if (cc == equal) {
+ Label comparison_done;
+ __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
+ Smi::FromInt(1));
+ __ j(not_equal, &comparison_done);
+ uint8_t char_value =
+ static_cast<uint8_t>(String::cast(*right_val)->Get(0));
+ __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
+ Immediate(char_value));
+ __ bind(&comparison_done);
+ } else {
+ __ movq(temp2.reg(),
+ FieldOperand(left_side.reg(), String::kLengthOffset));
+ __ SmiSubConstant(temp2.reg(), temp2.reg(), Smi::FromInt(1));
+ Label comparison;
+ // If the length is 0 then the subtraction gave -1 which compares less
+ // than any character.
+ __ j(negative, &comparison);
+ // Otherwise load the first character.
+ __ movzxbl(temp2.reg(),
+ FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize));
+ __ bind(&comparison);
+ // Compare the first character of the string with the
+ // constant 1-character string.
+ uint8_t char_value =
+ static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0));
+ __ cmpb(temp2.reg(), Immediate(char_value));
+ Label characters_were_different;
+ __ j(not_equal, &characters_were_different);
+ // If the first character is the same then the long string sorts after
+ // the short one.
+ __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
+ Smi::FromInt(1));
+ __ bind(&characters_were_different);
+ }
+ temp2.Unuse();
+ left_side.Unuse();
+ right_side.Unuse();
+ dest->Split(cc);
}
- } else if (key_.is(rcx)) {
- __ movq(rdx, receiver_);
} else {
- __ movq(rcx, key_);
- __ movq(rdx, receiver_);
+ // Neither side is a constant Smi, constant 1-char string, or constant null.
+ // If either side is a non-smi constant, or known to be a heap number,
+ // skip the smi check.
+ bool known_non_smi =
+ (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
+ (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
+ left_side.type_info().IsDouble() ||
+ right_side.type_info().IsDouble();
+
+ NaNInformation nan_info =
+ (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
+ kBothCouldBeNaN :
+ kCantBothBeNaN;
+
+    // Inline the number comparison, handling any combination of smis and
+    // heap numbers, if:
+    //   the code is in a loop,
+    //   the compare operation is different from equal, and
+    //   the compare is not a loop condition.
+    // Equality is excluded because it is most likely performed on smis
+    // (not heap numbers) and the code for comparing smis is inlined
+    // separately. The same reasoning applies to loop conditions, which
+    // are also most likely smi comparisons.
+ bool is_loop_condition = (node->AsExpression() != NULL)
+ && node->AsExpression()->is_loop_condition();
+ bool inline_number_compare =
+ loop_nesting() > 0 && cc != equal && !is_loop_condition;
+
+ // Left and right needed in registers for the following code.
+ left_side.ToRegister();
+ right_side.ToRegister();
+
+ if (known_non_smi) {
+ // Inlined equality check:
+ // If at least one of the objects is not NaN, then if the objects
+ // are identical, they are equal.
+ if (nan_info == kCantBothBeNaN && cc == equal) {
+ __ cmpq(left_side.reg(), right_side.reg());
+ dest->true_target()->Branch(equal);
+ }
+
+ // Inlined number comparison:
+ if (inline_number_compare) {
+ GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
+ }
+
+ // End of in-line compare, call out to the compare stub. Don't include
+ // number comparison in the stub if it was inlined.
+ CompareStub stub(cc, strict, nan_info, !inline_number_compare);
+ Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+ __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flag.
+ answer.Unuse();
+ dest->Split(cc);
+ } else {
+ // Here we split control flow to the stub call and inlined cases
+ // before finally splitting it to the control destination. We use
+ // a jump target and branching to duplicate the virtual frame at
+ // the first split. We manually handle the off-frame references
+ // by reconstituting them on the non-fall-through path.
+ JumpTarget is_smi;
+ Register left_reg = left_side.reg();
+ Register right_reg = right_side.reg();
+
+ Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
+ is_smi.Branch(both_smi);
+
+ // Inline the equality check if both operands can't be a NaN. If both
+ // objects are the same they are equal.
+ if (nan_info == kCantBothBeNaN && cc == equal) {
+ __ cmpq(left_side.reg(), right_side.reg());
+ dest->true_target()->Branch(equal);
+ }
+
+ // Inlined number comparison:
+ if (inline_number_compare) {
+ GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
+ }
+
+ // End of in-line compare, call out to the compare stub. Don't include
+ // number comparison in the stub if it was inlined.
+ CompareStub stub(cc, strict, nan_info, !inline_number_compare);
+ Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+ __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags.
+ answer.Unuse();
+ dest->true_target()->Branch(cc);
+ dest->false_target()->Jump();
+
+ is_smi.Bind();
+ left_side = Result(left_reg);
+ right_side = Result(right_reg);
+ __ SmiCompare(left_side.reg(), right_side.reg());
+ right_side.Unuse();
+ left_side.Unuse();
+ dest->Split(cc);
+ }
}
+}
- // Call the IC stub.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // The delta from the start of the map-compare instructions (initial movq)
- // to the test instruction. We use masm_-> directly here instead of the
- // __ macro because the macro sometimes uses macro expansion to turn
- // into something that can't return a value. This is encountered
- // when doing generated code coverage tests.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the __ macro because this is the
- // instruction that gets patched and coverage code gets in the way.
- masm_->testl(rax, Immediate(-delta_to_patch_site));
- // Restore value (returned from store IC).
- if (!value_.is(rax)) __ movq(value_, rax);
+
+
+void CodeGenerator::ConstantSmiComparison(Condition cc,
+ bool strict,
+ ControlDestination* dest,
+ Result* left_side,
+ Result* right_side,
+ bool left_side_constant_smi,
+ bool right_side_constant_smi,
+ bool is_loop_condition) {
+ if (left_side_constant_smi && right_side_constant_smi) {
+ // Trivial case, comparing two constants.
+ int left_value = Smi::cast(*left_side->handle())->value();
+ int right_value = Smi::cast(*right_side->handle())->value();
+ switch (cc) {
+ case less:
+ dest->Goto(left_value < right_value);
+ break;
+ case equal:
+ dest->Goto(left_value == right_value);
+ break;
+ case greater_equal:
+ dest->Goto(left_value >= right_value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ // Only one side is a constant Smi.
+ // If left side is a constant Smi, reverse the operands.
+ // Since one side is a constant Smi, conversion order does not matter.
+ if (left_side_constant_smi) {
+ Result* temp = left_side;
+ left_side = right_side;
+ right_side = temp;
+ cc = ReverseCondition(cc);
+ // This may re-introduce greater or less_equal as the value of cc.
+ // CompareStub and the inline code both support all values of cc.
+ }
+ // Implement comparison against a constant Smi, inlining the case
+ // where both sides are Smis.
+ left_side->ToRegister();
+ Register left_reg = left_side->reg();
+ Smi* constant_smi = Smi::cast(*right_side->handle());
+
+ if (left_side->is_smi()) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(left_reg);
+ }
+ // Test smi equality and comparison by signed int comparison.
+ // Both sides are smis, so we can use an Immediate.
+ __ SmiCompare(left_reg, constant_smi);
+ left_side->Unuse();
+ right_side->Unuse();
+ dest->Split(cc);
+ } else {
+      // Only the case remains where the left side might not be a smi.
+ JumpTarget is_smi;
+ if (cc == equal) {
+ // We can do the equality comparison before the smi check.
+ __ SmiCompare(left_reg, constant_smi);
+ dest->true_target()->Branch(equal);
+ Condition left_is_smi = masm_->CheckSmi(left_reg);
+ dest->false_target()->Branch(left_is_smi);
+ } else {
+ // Do the smi check, then the comparison.
+ Condition left_is_smi = masm_->CheckSmi(left_reg);
+ is_smi.Branch(left_is_smi, left_side, right_side);
+ }
+
+ // Jump or fall through to here if we are comparing a non-smi to a
+ // constant smi. If the non-smi is a heap number and this is not
+ // a loop condition, inline the floating point code.
+ if (!is_loop_condition) {
+ // Right side is a constant smi and left side has been checked
+ // not to be a smi.
+ JumpTarget not_number;
+ __ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ not_number.Branch(not_equal, left_side);
+ __ movsd(xmm1,
+ FieldOperand(left_reg, HeapNumber::kValueOffset));
+ int value = constant_smi->value();
+ if (value == 0) {
+ __ xorpd(xmm0, xmm0);
+ } else {
+ Result temp = allocator()->Allocate();
+ __ movl(temp.reg(), Immediate(value));
+ __ cvtlsi2sd(xmm0, temp.reg());
+ temp.Unuse();
+ }
+ __ ucomisd(xmm1, xmm0);
+ // Jump to builtin for NaN.
+ not_number.Branch(parity_even, left_side);
+ left_side->Unuse();
+ dest->true_target()->Branch(DoubleCondition(cc));
+ dest->false_target()->Jump();
+ not_number.Bind(left_side);
+ }
+
+      // Set up and call the compare stub.
+ CompareStub stub(cc, strict, kCantBothBeNaN);
+ Result result = frame_->CallStub(&stub, left_side, right_side);
+ result.ToRegister();
+ __ testq(result.reg(), result.reg());
+ result.Unuse();
+ if (cc == equal) {
+ dest->Split(cc);
+ } else {
+ dest->true_target()->Branch(cc);
+ dest->false_target()->Jump();
+
+ // It is important for performance for this case to be at the end.
+ is_smi.Bind(left_side, right_side);
+ __ SmiCompare(left_reg, constant_smi);
+ left_side->Unuse();
+ right_side->Unuse();
+ dest->Split(cc);
+ }
+ }
+ }
+}
+
+
+// Load a comparison operand into an XMM register. Jump to the not_numbers
+// jump target, passing the left and right results, if the operand is not a
+// number.
+static void LoadComparisonOperand(MacroAssembler* masm_,
+ Result* operand,
+ XMMRegister xmm_reg,
+ Result* left_side,
+ Result* right_side,
+ JumpTarget* not_numbers) {
+ Label done;
+ if (operand->type_info().IsDouble()) {
+ // Operand is known to be a heap number, just load it.
+ __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
+ } else if (operand->type_info().IsSmi()) {
+ // Operand is known to be a smi. Convert it to double and keep the original
+ // smi.
+ __ SmiToInteger32(kScratchRegister, operand->reg());
+ __ cvtlsi2sd(xmm_reg, kScratchRegister);
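+    // The untagged value lives in kScratchRegister; operand->reg() still
+    // holds the tagged smi.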
+ } else {
+ // Operand type not known, check for smi or heap number.
+ Label smi;
+ __ JumpIfSmi(operand->reg(), &smi);
+ if (!operand->type_info().IsNumber()) {
+ __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
+ __ cmpq(FieldOperand(operand->reg(), HeapObject::kMapOffset),
+ kScratchRegister);
+ not_numbers->Branch(not_equal, left_side, right_side, taken);
+ }
+ __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&smi);
+    // Convert the smi to a double and keep the original smi.
+ __ SmiToInteger32(kScratchRegister, operand->reg());
+ __ cvtlsi2sd(xmm_reg, kScratchRegister);
+ __ jmp(&done);
+ }
+ __ bind(&done);
+}
+
+
+void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
+ Result* right_side,
+ Condition cc,
+ ControlDestination* dest) {
+ ASSERT(left_side->is_register());
+ ASSERT(right_side->is_register());
+
+ JumpTarget not_numbers;
+ // Load left and right operand into registers xmm0 and xmm1 and compare.
+ LoadComparisonOperand(masm_, left_side, xmm0, left_side, right_side,
+ &not_numbers);
+ LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side,
+ &not_numbers);
+ __ ucomisd(xmm0, xmm1);
+ // Bail out if a NaN is involved.
+ not_numbers.Branch(parity_even, left_side, right_side);
+
+ // Split to destination targets based on comparison.
+ left_side->Unuse();
+ right_side->Unuse();
+ dest->true_target()->Branch(DoubleCondition(cc));
+ dest->false_target()->Jump();
+
+ not_numbers.Bind(left_side, right_side);
+}
+
+
+// Call the function just below TOS on the stack with the given
+// arguments. The receiver is the TOS.
+void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
+ CallFunctionFlags flags,
+ int position) {
+ // Push the arguments ("left-to-right") on the stack.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ frame_->SpillTop();
+ }
+
+ // Record the position for debugging purposes.
+ CodeForSourcePosition(position);
+
+ // Use the shared code stub to call the function.
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop, flags);
+ Result answer = frame_->CallStub(&call_function, arg_count + 1);
+ // Restore context and replace function on the stack with the
+ // result of the stub invocation.
+ frame_->RestoreContextRegister();
+ frame_->SetElementAt(0, &answer);
}
@@ -967,7 +2711,6 @@ void CodeGenerator::CheckStack() {
void CodeGenerator::VisitAndSpill(Statement* statement) {
- // TODO(X64): No architecture specific code. Move to shared location.
ASSERT(in_spilled_code());
set_in_spilled_code(false);
Visit(statement);
@@ -979,6 +2722,9 @@ void CodeGenerator::VisitAndSpill(Statement* statement) {
void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
ASSERT(in_spilled_code());
set_in_spilled_code(false);
VisitStatements(statements);
@@ -986,14 +2732,20 @@ void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
frame_->SpillAll();
}
set_in_spilled_code(true);
+
+ ASSERT(!has_valid_frame() || frame_->height() == original_height);
}
void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
ASSERT(!in_spilled_code());
for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
Visit(statements->at(i));
}
+ ASSERT(!has_valid_frame() || frame_->height() == original_height);
}
@@ -1010,6 +2762,21 @@ void CodeGenerator::VisitBlock(Block* node) {
}
+void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ // Call the runtime to declare the globals. The inevitable call
+ // will sync frame elements to memory anyway, so we do it eagerly to
+ // allow us to push the arguments directly into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+
+ __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
+ frame_->EmitPush(rsi); // The context is the first argument.
+ frame_->EmitPush(kScratchRegister);
+ frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
+ Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
+ // Return value is ignored.
+}
+
+
void CodeGenerator::VisitDeclaration(Declaration* node) {
Comment cmnt(masm_, "[ Declaration");
Variable* var = node->proxy()->var();
@@ -1214,6 +2981,7 @@ void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
CodeForStatementPosition(node);
Load(node->expression());
Result return_value = frame_->Pop();
+ masm()->WriteRecordedPositions();
if (function_return_is_shadowed_) {
function_return_.Jump(&return_value);
} else {
@@ -1230,6 +2998,45 @@ void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
}
+void CodeGenerator::GenerateReturnSequence(Result* return_value) {
+ // The return value is a live (but not currently reference counted)
+ // reference to rax. This is safe because the current frame does not
+ // contain a reference to rax (it is prepared for the return by spilling
+ // all registers).
+ if (FLAG_trace) {
+ frame_->Push(return_value);
+ *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
+ }
+ return_value->ToRegister(rax);
+
+ // Add a label for checking the size of the code used for returning.
+#ifdef DEBUG
+ Label check_exit_codesize;
+ masm_->bind(&check_exit_codesize);
+#endif
+
+ // Leave the frame and return popping the arguments and the
+ // receiver.
+ frame_->Exit();
+ masm_->ret((scope()->num_parameters() + 1) * kPointerSize);
+ DeleteFrame();
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Add padding that will be overwritten by a debugger breakpoint.
+ // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
+ // with length 7 (3 + 1 + 3).
+ const int kPadding = Assembler::kJSReturnSequenceLength - 7;
+ for (int i = 0; i < kPadding; ++i) {
+ masm_->int3();
+ }
+ // Check that the size of the code used for returning matches what is
+ // expected by the debugger.
+ ASSERT_EQ(Assembler::kJSReturnSequenceLength,
+ masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+#endif
+}
+
+
void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ WithEnterStatement");
@@ -1265,8 +3072,6 @@ void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
- // TODO(X64): This code is completely generic and should be moved somewhere
- // where it can be shared between architectures.
ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ SwitchStatement");
CodeForStatementPosition(node);
@@ -1558,8 +3363,8 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
LoadCondition(node->cond(), &dest, true);
}
} else {
- // If we have chosen not to recompile the test at the
- // bottom, jump back to the one at the top.
+ // If we have chosen not to recompile the test at the bottom,
+ // jump back to the one at the top.
if (has_valid_frame()) {
node->continue_target()->Jump();
}
@@ -1665,49 +3470,56 @@ void CodeGenerator::GenerateFastSmiLoop(ForStatement* node) {
CodeForStatementPosition(node);
Slot* loop_var_slot = loop_var->slot();
if (loop_var_slot->type() == Slot::LOCAL) {
- frame_->PushLocalAt(loop_var_slot->index());
+ frame_->TakeLocalAt(loop_var_slot->index());
} else {
ASSERT(loop_var_slot->type() == Slot::PARAMETER);
- frame_->PushParameterAt(loop_var_slot->index());
+ frame_->TakeParameterAt(loop_var_slot->index());
}
Result loop_var_result = frame_->Pop();
if (!loop_var_result.is_register()) {
loop_var_result.ToRegister();
}
-
+ Register loop_var_reg = loop_var_result.reg();
+ frame_->Spill(loop_var_reg);
if (increments) {
- __ SmiAddConstant(loop_var_result.reg(),
- loop_var_result.reg(),
+ __ SmiAddConstant(loop_var_reg,
+ loop_var_reg,
Smi::FromInt(1));
} else {
- __ SmiSubConstant(loop_var_result.reg(),
- loop_var_result.reg(),
+ __ SmiSubConstant(loop_var_reg,
+ loop_var_reg,
Smi::FromInt(1));
}
- {
- __ SmiCompare(loop_var_result.reg(), limit_value);
- Condition condition;
- switch (compare_op) {
- case Token::LT:
- condition = less;
- break;
- case Token::LTE:
- condition = less_equal;
- break;
- case Token::GT:
- condition = greater;
- break;
- case Token::GTE:
- condition = greater_equal;
- break;
- default:
- condition = never;
- UNREACHABLE();
- }
- loop.Branch(condition);
+ frame_->Push(&loop_var_result);
+ if (loop_var_slot->type() == Slot::LOCAL) {
+ frame_->StoreToLocalAt(loop_var_slot->index());
+ } else {
+ ASSERT(loop_var_slot->type() == Slot::PARAMETER);
+ frame_->StoreToParameterAt(loop_var_slot->index());
+ }
+ frame_->Drop();
+
+ __ SmiCompare(loop_var_reg, limit_value);
+ Condition condition;
+ switch (compare_op) {
+ case Token::LT:
+ condition = less;
+ break;
+ case Token::LTE:
+ condition = less_equal;
+ break;
+ case Token::GT:
+ condition = greater;
+ break;
+ case Token::GTE:
+ condition = greater_equal;
+ break;
+ default:
+ condition = never;
+ UNREACHABLE();
}
- loop_var_result.Unuse();
+ loop.Branch(condition);
}
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
@@ -2114,6 +3926,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
node->break_target()->Unuse();
}
+
void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
ASSERT(!in_spilled_code());
VirtualFrame::SpilledScope spilled_scope;
@@ -2531,6 +4344,349 @@ void CodeGenerator::VisitConditional(Conditional* node) {
}
+void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
+ if (slot->type() == Slot::LOOKUP) {
+ ASSERT(slot->var()->is_dynamic());
+
+ JumpTarget slow;
+ JumpTarget done;
+ Result value;
+
+ // Generate fast case for loading from slots that correspond to
+ // local/global variables or arguments unless they are shadowed by
+ // eval-introduced bindings.
+ EmitDynamicLoadFromSlotFastCase(slot,
+ typeof_state,
+ &value,
+ &slow,
+ &done);
+
+ slow.Bind();
+ // A runtime call is inevitable. We eagerly sync frame elements
+ // to memory so that we can push the arguments directly into place
+ // on top of the frame.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+ frame_->EmitPush(rsi);
+ __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT);
+ frame_->EmitPush(kScratchRegister);
+ if (typeof_state == INSIDE_TYPEOF) {
+ value =
+ frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ } else {
+ value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
+ }
+
+ done.Bind(&value);
+ frame_->Push(&value);
+
+ } else if (slot->var()->mode() == Variable::CONST) {
+ // Const slots may contain 'the hole' value (the constant hasn't been
+ // initialized yet) which needs to be converted into the 'undefined'
+ // value.
+ //
+ // We currently spill the virtual frame because constants use the
+ // potentially unsafe direct-frame access of SlotOperand.
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ Load const");
+ JumpTarget exit;
+ __ movq(rcx, SlotOperand(slot, rcx));
+ __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
+ exit.Branch(not_equal);
+ __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
+ exit.Bind();
+ frame_->EmitPush(rcx);
+
+ } else if (slot->type() == Slot::PARAMETER) {
+ frame_->PushParameterAt(slot->index());
+
+ } else if (slot->type() == Slot::LOCAL) {
+ frame_->PushLocalAt(slot->index());
+
+ } else {
+ // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
+ // here.
+ //
+ // The use of SlotOperand below is safe for an unspilled frame
+ // because it will always be a context slot.
+ ASSERT(slot->type() == Slot::CONTEXT);
+ Result temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ __ movq(temp.reg(), SlotOperand(slot, temp.reg()));
+ frame_->Push(&temp);
+ }
+}
+
+
+void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
+ TypeofState state) {
+ LoadFromSlot(slot, state);
+
+ // Bail out quickly if we're not using lazy arguments allocation.
+ if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
+
+ // ... or if the slot isn't a non-parameter arguments slot.
+ if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
+
+ // Pop the loaded value from the stack.
+ Result value = frame_->Pop();
+
+  // If the loaded value is a constant, we know whether the arguments
+  // object has been lazily loaded yet.
+ if (value.is_constant()) {
+ if (value.handle()->IsTheHole()) {
+ Result arguments = StoreArgumentsObject(false);
+ frame_->Push(&arguments);
+ } else {
+ frame_->Push(&value);
+ }
+ return;
+ }
+
+ // The loaded value is in a register. If it is the sentinel that
+ // indicates that we haven't loaded the arguments object yet, we
+ // need to do it now.
+ JumpTarget exit;
+ __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
+ frame_->Push(&value);
+ exit.Branch(not_equal);
+ Result arguments = StoreArgumentsObject(false);
+ frame_->SetElementAt(0, &arguments);
+ exit.Bind();
+}
+
+
+Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
+ Slot* slot,
+ TypeofState typeof_state,
+ JumpTarget* slow) {
+ // Check that no extension objects have been created by calls to
+ // eval from the current scope to the global scope.
+ Register context = rsi;
+ Result tmp = allocator_->Allocate();
+ ASSERT(tmp.is_valid()); // All non-reserved registers were available.
+
+ Scope* s = scope();
+ while (s != NULL) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_eval()) {
+ // Check that extension is NULL.
+ __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
+ Immediate(0));
+ slow->Branch(not_equal, not_taken);
+ }
+ // Load next context in chain.
+ __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
+ __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
+ context = tmp.reg();
+ }
+ // If no outer scope calls eval, we do not need to check more
+ // context extensions. If we have reached an eval scope, we check
+ // all extensions from this point.
+ if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+ s = s->outer_scope();
+ }
+
+ if (s->is_eval_scope()) {
+    // Walk up the context chain. There is no frame effect so it is
+ // safe to use raw labels here.
+ Label next, fast;
+ if (!context.is(tmp.reg())) {
+ __ movq(tmp.reg(), context);
+ }
+ // Load map for comparison into register, outside loop.
+ __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
+ __ bind(&next);
+ // Terminate at global context.
+ __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
+ __ j(equal, &fast);
+ // Check that extension is NULL.
+ __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
+ slow->Branch(not_equal);
+ // Load next context in chain.
+ __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
+ __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
+ __ jmp(&next);
+ __ bind(&fast);
+ }
+ tmp.Unuse();
+
+ // All extension objects were empty and it is safe to use a global
+ // load IC call.
+ LoadGlobal();
+ frame_->Push(slot->var()->name());
+ RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
+ ? RelocInfo::CODE_TARGET
+ : RelocInfo::CODE_TARGET_CONTEXT;
+ Result answer = frame_->CallLoadIC(mode);
+ // A test rax instruction following the call signals that the inobject
+ // property case was inlined. Ensure that there is not a test rax
+ // instruction here.
+ masm_->nop();
+ return answer;
+}
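
The two loops above check, for every context between the current scope and the global scope, that no eval call has attached an extension object; only then is a direct global load IC valid. The equivalent walk in C++ (hypothetical accessors):

    // Extension-object walk behind the fast global load (sketch).
    bool NoEvalIntroducedBindings(Context* ctx) {
      for (Context* c = ctx; !c->IsGlobalContext(); c = c->previous()) {
        if (c->extension() != NULL) return false;  // eval added a binding
      }
      return true;  // safe to load straight from the global object
    }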
+
+
+void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
+ TypeofState typeof_state,
+ Result* result,
+ JumpTarget* slow,
+ JumpTarget* done) {
+ // Generate fast-case code for variables that might be shadowed by
+ // eval-introduced variables. Eval is used a lot without
+ // introducing variables. In those cases, we do not want to
+ // perform a runtime call for all variables in the scope
+ // containing the eval.
+ if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
+ *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
+ done->Jump(result);
+
+ } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
+ Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+ Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
+ if (potential_slot != NULL) {
+ // Generate fast case for locals that rewrite to slots.
+ // Allocate a fresh register to use as a temp in
+ // ContextSlotOperandCheckExtensions and to hold the result
+ // value.
+ *result = allocator_->Allocate();
+ ASSERT(result->is_valid());
+ __ movq(result->reg(),
+ ContextSlotOperandCheckExtensions(potential_slot,
+ *result,
+ slow));
+ if (potential_slot->var()->mode() == Variable::CONST) {
+ __ CompareRoot(result->reg(), Heap::kTheHoleValueRootIndex);
+ done->Branch(not_equal, result);
+ __ LoadRoot(result->reg(), Heap::kUndefinedValueRootIndex);
+ }
+ done->Jump(result);
+ } else if (rewrite != NULL) {
+ // Generate fast case for argument loads.
+ Property* property = rewrite->AsProperty();
+ if (property != NULL) {
+ VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+ Literal* key_literal = property->key()->AsLiteral();
+ if (obj_proxy != NULL &&
+ key_literal != NULL &&
+ obj_proxy->IsArguments() &&
+ key_literal->handle()->IsSmi()) {
+ // Load arguments object if there are no eval-introduced
+ // variables. Then load the argument from the arguments
+ // object using keyed load.
+ Result arguments = allocator()->Allocate();
+ ASSERT(arguments.is_valid());
+ __ movq(arguments.reg(),
+ ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
+ arguments,
+ slow));
+ frame_->Push(&arguments);
+ frame_->Push(key_literal->handle());
+ *result = EmitKeyedLoad();
+ done->Jump(result);
+ }
+ }
+ }
+ }
+}
+
+
+void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
+ if (slot->type() == Slot::LOOKUP) {
+ ASSERT(slot->var()->is_dynamic());
+
+ // For now, just do a runtime call. Since the call is inevitable,
+ // we eagerly sync the virtual frame so we can directly push the
+ // arguments into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+
+ frame_->EmitPush(rsi);
+ frame_->EmitPush(slot->var()->name());
+
+ Result value;
+ if (init_state == CONST_INIT) {
+ // Same as the case for a normal store, but ignores attribute
+ // (e.g. READ_ONLY) of context slot so that we can initialize const
+ // properties (introduced via eval("const foo = (some expr);")). Also,
+ // uses the current function context instead of the top context.
+ //
+      // Note that we must declare foo upon entry of eval(), via a
+ // context slot declaration, but we cannot initialize it at the same
+ // time, because the const declaration may be at the end of the eval
+ // code (sigh...) and the const variable may have been used before
+ // (where its value is 'undefined'). Thus, we can only do the
+ // initialization when we actually encounter the expression and when
+ // the expression operands are defined and valid, and thus we need the
+ // split into 2 operations: declaration of the context slot followed
+ // by initialization.
+ value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ } else {
+ value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
+ }
+ // Storing a variable must keep the (new) value on the expression
+ // stack. This is necessary for compiling chained assignment
+ // expressions.
+ frame_->Push(&value);
+ } else {
+ ASSERT(!slot->var()->is_dynamic());
+
+ JumpTarget exit;
+ if (init_state == CONST_INIT) {
+ ASSERT(slot->var()->mode() == Variable::CONST);
+ // Only the first const initialization must be executed (the slot
+ // still contains 'the hole' value). When the assignment is executed,
+ // the code is identical to a normal store (see below).
+ //
+ // We spill the frame in the code below because the direct-frame
+ // access of SlotOperand is potentially unsafe with an unspilled
+ // frame.
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ Init const");
+ __ movq(rcx, SlotOperand(slot, rcx));
+ __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
+ exit.Branch(not_equal);
+ }
+
+ // We must execute the store. Storing a variable must keep the (new)
+ // value on the stack. This is necessary for compiling assignment
+ // expressions.
+ //
+ // Note: We will reach here even with slot->var()->mode() ==
+ // Variable::CONST because of const declarations which will initialize
+ // consts to 'the hole' value and by doing so, end up calling this code.
+ if (slot->type() == Slot::PARAMETER) {
+ frame_->StoreToParameterAt(slot->index());
+ } else if (slot->type() == Slot::LOCAL) {
+ frame_->StoreToLocalAt(slot->index());
+ } else {
+ // The other slot types (LOOKUP and GLOBAL) cannot reach here.
+ //
+ // The use of SlotOperand below is safe for an unspilled frame
+ // because the slot is a context slot.
+ ASSERT(slot->type() == Slot::CONTEXT);
+ frame_->Dup();
+ Result value = frame_->Pop();
+ value.ToRegister();
+ Result start = allocator_->Allocate();
+ ASSERT(start.is_valid());
+ __ movq(SlotOperand(slot, start.reg()), value.reg());
+ // RecordWrite may destroy the value registers.
+ //
+ // TODO(204): Avoid actually spilling when the value is not
+ // needed (probably the common case).
+ frame_->Spill(value.reg());
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ Result temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
+ // The results start, value, and temp are unused by going out of
+ // scope.
+ }
+
+ exit.Bind();
+ }
+}
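
The context-slot store above pairs the raw write with RecordWrite so the garbage collector learns about the new pointer; without the barrier, an old-space context pointing at a new-space value could be missed during a scavenge. The pairing, in outline (illustrative names, not the actual helper signatures):

    // Store plus write barrier, in outline.
    WriteField(context, offset, value);   // the raw pointer store
    heap->RecordWrite(context, offset);   // tell the GC about the write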
+
+
void CodeGenerator::VisitSlot(Slot* node) {
Comment cmnt(masm_, "[ Slot");
LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
@@ -2557,6 +4713,17 @@ void CodeGenerator::VisitLiteral(Literal* node) {
}
+void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
+ UNIMPLEMENTED();
+ // TODO(X64): Implement security policy for loads of smis.
+}
+
+
+bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
+ return false;
+}
+
+
// Materialize the regexp literal 'node' in the literals array
// 'literals' of the function. Leave the regexp boilerplate in
// 'boilerplate'.
@@ -3245,905 +5412,48 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
}
-void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
- if (CheckForInlineRuntimeCall(node)) {
- return;
- }
-
- ZoneList<Expression*>* args = node->arguments();
- Comment cmnt(masm_, "[ CallRuntime");
- Runtime::Function* function = node->function();
-
- if (function == NULL) {
- // Push the builtins object found in the current global object.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ movq(temp.reg(), GlobalObject());
- __ movq(temp.reg(),
- FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
- frame_->Push(&temp);
- }
-
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- if (function == NULL) {
- // Call the JS runtime function.
- frame_->Push(node->name());
- Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
- arg_count,
- loop_nesting_);
- frame_->RestoreContextRegister();
- frame_->Push(&answer);
- } else {
- // Call the C runtime function.
- Result answer = frame_->CallRuntime(function, arg_count);
- frame_->Push(&answer);
- }
-}
-
-
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
- Comment cmnt(masm_, "[ UnaryOperation");
-
- Token::Value op = node->op();
-
- if (op == Token::NOT) {
- // Swap the true and false targets but keep the same actual label
- // as the fall through.
- destination()->Invert();
- LoadCondition(node->expression(), destination(), true);
- // Swap the labels back.
- destination()->Invert();
-
- } else if (op == Token::DELETE) {
- Property* property = node->expression()->AsProperty();
- if (property != NULL) {
- Load(property->obj());
- Load(property->key());
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
- frame_->Push(&answer);
- return;
- }
-
- Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
- if (variable != NULL) {
- Slot* slot = variable->slot();
- if (variable->is_global()) {
- LoadGlobal();
- frame_->Push(variable->name());
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
- CALL_FUNCTION, 2);
- frame_->Push(&answer);
- return;
-
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Call the runtime to look up the context holding the named
- // variable. Sync the virtual frame eagerly so we can push the
- // arguments directly into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(rsi);
- frame_->EmitPush(variable->name());
- Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
- ASSERT(context.is_register());
- frame_->EmitPush(context.reg());
- context.Unuse();
- frame_->EmitPush(variable->name());
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
- CALL_FUNCTION, 2);
- frame_->Push(&answer);
- return;
- }
-
- // Default: Result of deleting non-global, not dynamically
- // introduced variables is false.
- frame_->Push(Factory::false_value());
-
- } else {
- // Default: Result of deleting expressions is true.
- Load(node->expression()); // may have side-effects
- frame_->SetElementAt(0, Factory::true_value());
- }
-
- } else if (op == Token::TYPEOF) {
- // Special case for loading the typeof expression; see comment on
- // LoadTypeofExpression().
- LoadTypeofExpression(node->expression());
- Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
- frame_->Push(&answer);
-
- } else if (op == Token::VOID) {
- Expression* expression = node->expression();
- if (expression && expression->AsLiteral() && (
- expression->AsLiteral()->IsTrue() ||
- expression->AsLiteral()->IsFalse() ||
- expression->AsLiteral()->handle()->IsNumber() ||
- expression->AsLiteral()->handle()->IsString() ||
- expression->AsLiteral()->handle()->IsJSRegExp() ||
- expression->AsLiteral()->IsNull())) {
- // Omit evaluating the value of the primitive literal.
- // It will be discarded anyway, and can have no side effect.
- frame_->Push(Factory::undefined_value());
- } else {
- Load(node->expression());
- frame_->SetElementAt(0, Factory::undefined_value());
- }
-
- } else {
- bool can_overwrite =
- (node->expression()->AsBinaryOperation() != NULL &&
- node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- bool no_negative_zero = node->expression()->no_negative_zero();
- Load(node->expression());
- switch (op) {
- case Token::NOT:
- case Token::DELETE:
- case Token::TYPEOF:
- UNREACHABLE(); // handled above
- break;
-
- case Token::SUB: {
- GenericUnaryOpStub stub(
- Token::SUB,
- overwrite,
- no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
- Result operand = frame_->Pop();
- Result answer = frame_->CallStub(&stub, &operand);
- answer.set_type_info(TypeInfo::Number());
- frame_->Push(&answer);
- break;
- }
-
- case Token::BIT_NOT: {
- // Smi check.
- JumpTarget smi_label;
- JumpTarget continue_label;
- Result operand = frame_->Pop();
- operand.ToRegister();
-
- Condition is_smi = masm_->CheckSmi(operand.reg());
- smi_label.Branch(is_smi, &operand);
-
- GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
- Result answer = frame_->CallStub(&stub, &operand);
- continue_label.Jump(&answer);
-
- smi_label.Bind(&answer);
- answer.ToRegister();
- frame_->Spill(answer.reg());
- __ SmiNot(answer.reg(), answer.reg());
- continue_label.Bind(&answer);
- answer.set_type_info(TypeInfo::Smi());
- frame_->Push(&answer);
- break;
- }
-
- case Token::ADD: {
- // Smi check.
- JumpTarget continue_label;
- Result operand = frame_->Pop();
- TypeInfo operand_info = operand.type_info();
- operand.ToRegister();
- Condition is_smi = masm_->CheckSmi(operand.reg());
- continue_label.Branch(is_smi, &operand);
- frame_->Push(&operand);
- Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
- CALL_FUNCTION, 1);
-
- continue_label.Bind(&answer);
- if (operand_info.IsSmi()) {
- answer.set_type_info(TypeInfo::Smi());
- } else if (operand_info.IsInteger32()) {
- answer.set_type_info(TypeInfo::Integer32());
- } else {
- answer.set_type_info(TypeInfo::Number());
- }
- frame_->Push(&answer);
- break;
- }
- default:
- UNREACHABLE();
- }
- }
-}
-
-
-// The value in dst was optimistically incremented or decremented.
-// The result overflowed or was not smi tagged. Call into the runtime
-// to convert the argument to a number, and call the specialized add
-// or subtract stub. The result is left in dst.
-class DeferredPrefixCountOperation: public DeferredCode {
- public:
- DeferredPrefixCountOperation(Register dst,
- bool is_increment,
- TypeInfo input_type)
- : dst_(dst), is_increment_(is_increment), input_type_(input_type) {
- set_comment("[ DeferredCountOperation");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- bool is_increment_;
- TypeInfo input_type_;
-};
-
-
-void DeferredPrefixCountOperation::Generate() {
- Register left;
- if (input_type_.IsNumber()) {
- left = dst_;
- } else {
- __ push(dst_);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
- left = rax;
- }
-
- GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS,
- TypeInfo::Number());
- stub.GenerateCall(masm_, left, Smi::FromInt(1));
-
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-// The value in dst was optimistically incremented or decremented.
-// The result overflowed or was not smi tagged. Call into the runtime
-// to convert the argument to a number. Update the original value in
-// old. Call the specialized add or subtract stub. The result is
-// left in dst.
-class DeferredPostfixCountOperation: public DeferredCode {
- public:
- DeferredPostfixCountOperation(Register dst,
- Register old,
- bool is_increment,
- TypeInfo input_type)
- : dst_(dst),
- old_(old),
- is_increment_(is_increment),
- input_type_(input_type) {
- set_comment("[ DeferredCountOperation");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Register old_;
- bool is_increment_;
- TypeInfo input_type_;
-};
-
-
-void DeferredPostfixCountOperation::Generate() {
- Register left;
- if (input_type_.IsNumber()) {
- __ push(dst_); // Save the input to use as the old value.
- left = dst_;
- } else {
- __ push(dst_);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
- __ push(rax); // Save the result of ToNumber to use as the old value.
- left = rax;
- }
-
- GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS,
- TypeInfo::Number());
- stub.GenerateCall(masm_, left, Smi::FromInt(1));
-
- if (!dst_.is(rax)) __ movq(dst_, rax);
- __ pop(old_);
-}
-
-
-void CodeGenerator::VisitCountOperation(CountOperation* node) {
- Comment cmnt(masm_, "[ CountOperation");
-
- bool is_postfix = node->is_postfix();
- bool is_increment = node->op() == Token::INC;
-
- Variable* var = node->expression()->AsVariableProxy()->AsVariable();
- bool is_const = (var != NULL && var->mode() == Variable::CONST);
-
- // Postfix operations need a stack slot under the reference to hold
- // the old value while the new value is being stored. This is so that
- // in the case that storing the new value requires a call, the old
- // value will be in the frame to be spilled.
- if (is_postfix) frame_->Push(Smi::FromInt(0));
-
- // A constant reference is not saved to, so the reference is not a
- // compound assignment reference.
- { Reference target(this, node->expression(), !is_const);
- if (target.is_illegal()) {
- // Spoof the virtual frame to have the expected height (one higher
- // than on entry).
- if (!is_postfix) frame_->Push(Smi::FromInt(0));
- return;
- }
- target.TakeValue();
-
- Result new_value = frame_->Pop();
- new_value.ToRegister();
-
- Result old_value; // Only allocated in the postfix case.
- if (is_postfix) {
- // Allocate a temporary to preserve the old value.
- old_value = allocator_->Allocate();
- ASSERT(old_value.is_valid());
- __ movq(old_value.reg(), new_value.reg());
-
- // The return value for postfix operations is ToNumber(input).
- // Keep more precise type info if the input is some kind of
- // number already. If the input is not a number we have to wait
- // for the deferred code to convert it.
- if (new_value.type_info().IsNumber()) {
- old_value.set_type_info(new_value.type_info());
- }
- }
- // Ensure the new value is writable.
- frame_->Spill(new_value.reg());
-
- DeferredCode* deferred = NULL;
- if (is_postfix) {
- deferred = new DeferredPostfixCountOperation(new_value.reg(),
- old_value.reg(),
- is_increment,
- new_value.type_info());
- } else {
- deferred = new DeferredPrefixCountOperation(new_value.reg(),
- is_increment,
- new_value.type_info());
- }
-
- if (new_value.is_smi()) {
- if (FLAG_debug_code) { __ AbortIfNotSmi(new_value.reg()); }
- } else {
- __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
- }
- if (is_increment) {
- __ SmiAddConstant(new_value.reg(),
- new_value.reg(),
- Smi::FromInt(1),
- deferred->entry_label());
- } else {
- __ SmiSubConstant(new_value.reg(),
- new_value.reg(),
- Smi::FromInt(1),
- deferred->entry_label());
- }
- deferred->BindExit();
-
- // Postfix count operations return their input converted to
- // number. The case when the input is already a number is covered
- // above in the allocation code for old_value.
- if (is_postfix && !new_value.type_info().IsNumber()) {
- old_value.set_type_info(TypeInfo::Number());
- }
-
- new_value.set_type_info(TypeInfo::Number());
-
- // Postfix: store the old value in the allocated slot under the
- // reference.
- if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
-
- frame_->Push(&new_value);
- // Non-constant: update the reference.
- if (!is_const) target.SetValue(NOT_CONST_INIT);
- }
-
- // Postfix: drop the new value and use the old.
- if (is_postfix) frame_->Drop();
-}
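
The count operation above is optimistic: it assumes a smi operand, adds or subtracts 1 inline, and only falls into the deferred code when the operand is not a smi or the smi arithmetic overflows. A scalar sketch of the same shape (illustrative helpers):

    // Optimistic increment with a slow-path bailout (sketch).
    Object* CountOp(Object* v, bool is_increment) {
      int delta = is_increment ? 1 : -1;
      if (IsSmi(v)) {
        int64_t r = SmiValue(v) + delta;
        if (FitsInSmi(r)) return MakeSmi(r);   // fast path, no call
      }
      double n = ToNumber(v);                  // the deferred slow path
      return MakeNumber(n + delta);
    }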
-
-
-void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
- // According to ECMA-262 section 11.11, page 58, the binary logical
- // operators must yield the result of one of the two expressions
- // before any ToBoolean() conversions. This means that the value
- // produced by a && or || operator is not necessarily a boolean.
-
- // NOTE: If the left hand side produces a materialized value (not
- // control flow), we force the right hand side to do the same. This
- // is necessary because we assume that if we get control flow on the
- // last path out of an expression we got it on all paths.
- if (node->op() == Token::AND) {
- JumpTarget is_true;
- ControlDestination dest(&is_true, destination()->false_target(), true);
- LoadCondition(node->left(), &dest, false);
-
- if (dest.false_was_fall_through()) {
- // The current false target was used as the fall-through. If
- // there are no dangling jumps to is_true then the left
- // subexpression was unconditionally false. Otherwise we have
- // paths where we do have to evaluate the right subexpression.
- if (is_true.is_linked()) {
- // We need to compile the right subexpression. If the jump to
- // the current false target was a forward jump then we have a
- // valid frame, we have just bound the false target, and we
- // have to jump around the code for the right subexpression.
- if (has_valid_frame()) {
- destination()->false_target()->Unuse();
- destination()->false_target()->Jump();
- }
- is_true.Bind();
- // The left subexpression compiled to control flow, so the
- // right one is free to do so as well.
- LoadCondition(node->right(), destination(), false);
- } else {
- // We have actually just jumped to or bound the current false
- // target but the current control destination is not marked as
- // used.
- destination()->Use(false);
- }
-
- } else if (dest.is_used()) {
- // The left subexpression compiled to control flow (and is_true
- // was just bound), so the right is free to do so as well.
- LoadCondition(node->right(), destination(), false);
-
- } else {
- // We have a materialized value on the frame, so we exit with
- // one on all paths. There are possibly also jumps to is_true
- // from nested subexpressions.
- JumpTarget pop_and_continue;
- JumpTarget exit;
-
- // Avoid popping the result if it converts to 'false' using the
- // standard ToBoolean() conversion as described in ECMA-262,
- // section 9.2, page 30.
- //
- // Duplicate the TOS value. The duplicate will be popped by
- // ToBoolean.
- frame_->Dup();
- ControlDestination dest(&pop_and_continue, &exit, true);
- ToBoolean(&dest);
-
- // Pop the result of evaluating the first part.
- frame_->Drop();
-
- // Compile right side expression.
- is_true.Bind();
- Load(node->right());
-
- // Exit (always with a materialized value).
- exit.Bind();
- }
-
- } else {
- ASSERT(node->op() == Token::OR);
- JumpTarget is_false;
- ControlDestination dest(destination()->true_target(), &is_false, false);
- LoadCondition(node->left(), &dest, false);
-
- if (dest.true_was_fall_through()) {
- // The current true target was used as the fall-through. If
- // there are no dangling jumps to is_false then the left
- // subexpression was unconditionally true. Otherwise we have
- // paths where we do have to evaluate the right subexpression.
- if (is_false.is_linked()) {
- // We need to compile the right subexpression. If the jump to
- // the current true target was a forward jump then we have a
- // valid frame, we have just bound the true target, and we
- // have to jump around the code for the right subexpression.
- if (has_valid_frame()) {
- destination()->true_target()->Unuse();
- destination()->true_target()->Jump();
- }
- is_false.Bind();
- // The left subexpression compiled to control flow, so the
- // right one is free to do so as well.
- LoadCondition(node->right(), destination(), false);
- } else {
- // We have just jumped to or bound the current true target but
- // the current control destination is not marked as used.
- destination()->Use(true);
- }
-
- } else if (dest.is_used()) {
- // The left subexpression compiled to control flow (and is_false
- // was just bound), so the right is free to do so as well.
- LoadCondition(node->right(), destination(), false);
-
- } else {
- // We have a materialized value on the frame, so we exit with
- // one on all paths. There are possibly also jumps to is_false
- // from nested subexpressions.
- JumpTarget pop_and_continue;
- JumpTarget exit;
-
- // Avoid popping the result if it converts to 'true' using the
- // standard ToBoolean() conversion as described in ECMA-262,
- // section 9.2, page 30.
- //
- // Duplicate the TOS value. The duplicate will be popped by
- // ToBoolean.
- frame_->Dup();
- ControlDestination dest(&exit, &pop_and_continue, false);
- ToBoolean(&dest);
-
- // Pop the result of evaluating the first part.
- frame_->Drop();
-
- // Compile right side expression.
- is_false.Bind();
- Load(node->right());
-
- // Exit (always with a materialized value).
- exit.Bind();
- }
- }
-}
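
Per the ECMA-262 rule cited above, '&&' and '||' yield one of the operand values rather than a boolean, and the right operand is evaluated only when the left operand's ToBoolean result requires it. The semantics the generated control flow implements, as a sketch:

    // Value semantics of the logical operators (illustrative).
    Object* And(Expression* left, Expression* right) {
      Object* l = Evaluate(left);
      return ToBoolean(l) ? Evaluate(right) : l;   // result may be non-boolean
    }
    Object* Or(Expression* left, Expression* right) {
      Object* l = Evaluate(left);
      return ToBoolean(l) ? l : Evaluate(right);
    }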
-
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
- Comment cmnt(masm_, "[ BinaryOperation");
-
- if (node->op() == Token::AND || node->op() == Token::OR) {
- GenerateLogicalBooleanOperation(node);
- } else {
- // NOTE: The code below assumes that the slow cases (calls to runtime)
- // never return a constant/immutable object.
- OverwriteMode overwrite_mode = NO_OVERWRITE;
- if (node->left()->AsBinaryOperation() != NULL &&
- node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
- overwrite_mode = OVERWRITE_LEFT;
- } else if (node->right()->AsBinaryOperation() != NULL &&
- node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
- overwrite_mode = OVERWRITE_RIGHT;
- }
-
- if (node->left()->IsTrivial()) {
- Load(node->right());
- Result right = frame_->Pop();
- frame_->Push(node->left());
- frame_->Push(&right);
- } else {
- Load(node->left());
- Load(node->right());
- }
- GenericBinaryOperation(node, overwrite_mode);
- }
-}
-
-
-
-void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
- Comment cmnt(masm_, "[ CompareOperation");
-
- // Get the expressions from the node.
- Expression* left = node->left();
- Expression* right = node->right();
- Token::Value op = node->op();
- // To make typeof testing for natives implemented in JavaScript really
- // efficient, we generate special code for expressions of the form:
- // 'typeof <expression> == <string>'.
- UnaryOperation* operation = left->AsUnaryOperation();
- if ((op == Token::EQ || op == Token::EQ_STRICT) &&
- (operation != NULL && operation->op() == Token::TYPEOF) &&
- (right->AsLiteral() != NULL &&
- right->AsLiteral()->handle()->IsString())) {
- Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle()));
-
- // Load the operand and move it to a register.
- LoadTypeofExpression(operation->expression());
- Result answer = frame_->Pop();
- answer.ToRegister();
-
- if (check->Equals(Heap::number_symbol())) {
- Condition is_smi = masm_->CheckSmi(answer.reg());
- destination()->true_target()->Branch(is_smi);
- frame_->Spill(answer.reg());
- __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
- answer.Unuse();
- destination()->Split(equal);
-
- } else if (check->Equals(Heap::string_symbol())) {
- Condition is_smi = masm_->CheckSmi(answer.reg());
- destination()->false_target()->Branch(is_smi);
-
- // It can be an undetectable string object.
- __ movq(kScratchRegister,
- FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- destination()->false_target()->Branch(not_zero);
- __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE);
- answer.Unuse();
- destination()->Split(below); // Unsigned byte comparison needed.
-
- } else if (check->Equals(Heap::boolean_symbol())) {
- __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex);
- destination()->true_target()->Branch(equal);
- __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex);
- answer.Unuse();
- destination()->Split(equal);
-
- } else if (check->Equals(Heap::undefined_symbol())) {
- __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
- destination()->true_target()->Branch(equal);
-
- Condition is_smi = masm_->CheckSmi(answer.reg());
- destination()->false_target()->Branch(is_smi);
-
- // It can be an undetectable object.
- __ movq(kScratchRegister,
- FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- answer.Unuse();
- destination()->Split(not_zero);
-
- } else if (check->Equals(Heap::function_symbol())) {
- Condition is_smi = masm_->CheckSmi(answer.reg());
- destination()->false_target()->Branch(is_smi);
- frame_->Spill(answer.reg());
- __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
- destination()->true_target()->Branch(equal);
- // Regular expressions are callable so typeof == 'function'.
- __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
- answer.Unuse();
- destination()->Split(equal);
-
- } else if (check->Equals(Heap::object_symbol())) {
- Condition is_smi = masm_->CheckSmi(answer.reg());
- destination()->false_target()->Branch(is_smi);
- __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
- destination()->true_target()->Branch(equal);
-
- // Regular expressions are typeof == 'function', not 'object'.
- __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister);
- destination()->false_target()->Branch(equal);
-
- // It can be an undetectable object.
- __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- destination()->false_target()->Branch(not_zero);
- __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
- destination()->false_target()->Branch(below);
- __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
- answer.Unuse();
- destination()->Split(below_equal);
- } else {
- // Uncommon case: typeof testing against a string literal that is
- // never returned from the typeof operator.
- answer.Unuse();
- destination()->Goto(false);
- }
- return;
- }
-
- Condition cc = no_condition;
- bool strict = false;
- switch (op) {
- case Token::EQ_STRICT:
- strict = true;
- // Fall through
- case Token::EQ:
- cc = equal;
- break;
- case Token::LT:
- cc = less;
- break;
- case Token::GT:
- cc = greater;
- break;
- case Token::LTE:
- cc = less_equal;
- break;
- case Token::GTE:
- cc = greater_equal;
- break;
- case Token::IN: {
- Load(left);
- Load(right);
- Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
- frame_->Push(&answer); // push the result
- return;
- }
- case Token::INSTANCEOF: {
- Load(left);
- Load(right);
- InstanceofStub stub;
- Result answer = frame_->CallStub(&stub, 2);
- answer.ToRegister();
- __ testq(answer.reg(), answer.reg());
- answer.Unuse();
- destination()->Split(zero);
- return;
- }
- default:
- UNREACHABLE();
- }
-
- if (left->IsTrivial()) {
- Load(right);
- Result right_result = frame_->Pop();
- frame_->Push(left);
- frame_->Push(&right_result);
- } else {
- Load(left);
- Load(right);
- }
-
- Comparison(node, cc, strict, destination());
-}
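
The special-cased comparison above turns 'typeof x == <string>' into direct tag and map checks instead of materializing the typeof result string. The 'number' case, for example, reduces to (sketch, illustrative helpers):

    // typeof x == 'number', as the inlined checks compute it.
    bool TypeofIsNumber(Object* x) {
      return IsSmi(x) || HasMap(x, HeapNumberMap());  // smi or heap number
    }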
-
-
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
- frame_->PushFunction();
-}
-
-
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in rdx and the formal
- // parameter count in rax.
- Load(args->at(0));
- Result key = frame_->Pop();
- // Explicitly create a constant result.
- Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
- // Call the shared stub to get to arguments[key].
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
- Result result = frame_->CallStub(&stub, &key, &count);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
Result value = frame_->Pop();
value.ToRegister();
ASSERT(value.is_valid());
Condition is_smi = masm_->CheckSmi(value.reg());
- destination()->false_target()->Branch(is_smi);
- // It is a heap object - get map.
- // Check if the object is a JS array or not.
- __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
value.Unuse();
- destination()->Split(equal);
+ destination()->Split(is_smi);
}
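
CheckSmi itself is just a tag test: under V8's pointer tagging a smi has its low tag bit clear, so the check compiles down to a single test instruction. As a sketch:

    // Smi tag test (kSmiTag is 0, so this is one 'test' instruction).
    bool IsSmi(intptr_t word) {
      return (word & kSmiTagMask) == kSmiTag;
    }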
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
+ // Conditionally generate a log call.
+ // Args:
+ // 0 (literal string): The type of logging (corresponds to the flags).
+ // This is used to determine whether or not to generate the log call.
+ // 1 (string): Format string. Access the string at argument index 2
+ // with '%2s' (see Logger::LogRuntime for all the formats).
+ // 2 (array): Arguments to the format string.
+ ASSERT_EQ(args->length(), 3);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (ShouldGenerateLog(args->at(0))) {
+ Load(args->at(1));
+ Load(args->at(2));
+ frame_->CallRuntime(Runtime::kLog, 2);
+ }
+#endif
+ // Finally, we're expected to leave a value on the top of the stack.
+ frame_->Push(Factory::undefined_value());
+}
+
+
+void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
Result value = frame_->Pop();
value.ToRegister();
ASSERT(value.is_valid());
- Condition is_smi = masm_->CheckSmi(value.reg());
- destination()->false_target()->Branch(is_smi);
- // It is a heap object - get map.
- // Check if the object is a regexp.
- __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, kScratchRegister);
+ Condition positive_smi = masm_->CheckPositiveSmi(value.reg());
value.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- Condition is_smi = masm_->CheckSmi(obj.reg());
- destination()->false_target()->Branch(is_smi);
-
- __ Move(kScratchRegister, Factory::null_value());
- __ cmpq(obj.reg(), kScratchRegister);
- destination()->true_target()->Branch(equal);
-
- __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- destination()->false_target()->Branch(not_zero);
- __ movzxbq(kScratchRegister,
- FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
- __ cmpq(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
- destination()->false_target()->Branch(below);
- __ cmpq(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
- obj.Unuse();
- destination()->Split(below_equal);
-}
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (%_ClassOf(arg) === 'Function')
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- Condition is_smi = masm_->CheckSmi(obj.reg());
- destination()->false_target()->Branch(is_smi);
- __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
- obj.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- Condition is_smi = masm_->CheckSmi(obj.reg());
- destination()->false_target()->Branch(is_smi);
- __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
- __ movzxbl(kScratchRegister,
- FieldOperand(kScratchRegister, Map::kBitFieldOffset));
- __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable));
- obj.Unuse();
- destination()->Split(not_zero);
-}
-
-
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- // Get the frame pointer for the calling frame.
- Result fp = allocator()->Allocate();
- __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &check_frame_marker);
- __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
- Smi::FromInt(StackFrame::CONSTRUCT));
- fp.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- Result fp = allocator_->Allocate();
- Result result = allocator_->Allocate();
- ASSERT(fp.is_valid() && result.is_valid());
-
- Label exit;
-
- // Get the number of formal parameters.
- __ Move(result.reg(), Smi::FromInt(scope()->num_parameters()));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &exit);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ movq(result.reg(),
- Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ bind(&exit);
- result.set_type_info(TypeInfo::Smi());
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(result.reg());
- }
- frame_->Push(&result);
+ destination()->Split(positive_smi);
}
@@ -4352,275 +5662,312 @@ void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
}
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
Result value = frame_->Pop();
value.ToRegister();
ASSERT(value.is_valid());
- Condition positive_smi = masm_->CheckPositiveSmi(value.reg());
+ Condition is_smi = masm_->CheckSmi(value.reg());
+ destination()->false_target()->Branch(is_smi);
+ // It is a heap object - get map.
+ // Check if the object is a JS array or not.
+ __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
value.Unuse();
- destination()->Split(positive_smi);
+ destination()->Split(equal);
}
-// Generates the Math.pow method. Only handles special cases and
-// branches to the runtime system for everything else. Please note
-// that this function assumes that the callsite has executed ToNumber
-// on both arguments.
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
+void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
Load(args->at(0));
- Load(args->at(1));
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ Condition is_smi = masm_->CheckSmi(value.reg());
+ destination()->false_target()->Branch(is_smi);
+ // It is a heap object - get map.
+ // Check if the object is a regexp.
+ __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, kScratchRegister);
+ value.Unuse();
+ destination()->Split(equal);
+}
- Label allocate_return;
- // Load the two operands while leaving the values on the frame.
- frame()->Dup();
- Result exponent = frame()->Pop();
- exponent.ToRegister();
- frame()->Spill(exponent.reg());
- frame()->PushElementAt(1);
- Result base = frame()->Pop();
- base.ToRegister();
- frame()->Spill(base.reg());
- Result answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- ASSERT(!exponent.reg().is(base.reg()));
- JumpTarget call_runtime;
+void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ Condition is_smi = masm_->CheckSmi(obj.reg());
+ destination()->false_target()->Branch(is_smi);
- // Save 1 in xmm3 - we need this several times later on.
- __ movl(answer.reg(), Immediate(1));
- __ cvtlsi2sd(xmm3, answer.reg());
+ __ Move(kScratchRegister, Factory::null_value());
+ __ cmpq(obj.reg(), kScratchRegister);
+ destination()->true_target()->Branch(equal);
- Label exponent_nonsmi;
- Label base_nonsmi;
- // If the exponent is a heap number go to that specific case.
- __ JumpIfNotSmi(exponent.reg(), &exponent_nonsmi);
- __ JumpIfNotSmi(base.reg(), &base_nonsmi);
+ __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined when tested with typeof.
+ __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ destination()->false_target()->Branch(not_zero);
+ __ movzxbq(kScratchRegister,
+ FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
+ __ cmpq(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
+ destination()->false_target()->Branch(below);
+ __ cmpq(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
+ obj.Unuse();
+ destination()->Split(below_equal);
+}
- // Optimized version when y is an integer.
- Label powi;
- __ SmiToInteger32(base.reg(), base.reg());
- __ cvtlsi2sd(xmm0, base.reg());
- __ jmp(&powi);
- // exponent is smi and base is a heapnumber.
- __ bind(&base_nonsmi);
- __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- call_runtime.Branch(not_equal);
- __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
+void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
+  //     typeof(arg) === 'function').
+ // It includes undetectable objects (as opposed to IsObject).
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ Condition is_smi = masm_->CheckSmi(value.reg());
+ destination()->false_target()->Branch(is_smi);
+ // Check that this is an object.
+ __ CmpObjectType(value.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister);
+ value.Unuse();
+ destination()->Split(above_equal);
+}
- // Optimized version of pow if y is an integer.
- __ bind(&powi);
- __ SmiToInteger32(exponent.reg(), exponent.reg());
- // Save exponent in base as we need to check if exponent is negative later.
- // We know that base and exponent are in different registers.
- __ movl(base.reg(), exponent.reg());
+void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (%_ClassOf(arg) === 'Function')
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ Condition is_smi = masm_->CheckSmi(obj.reg());
+ destination()->false_target()->Branch(is_smi);
+ __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
+ obj.Unuse();
+ destination()->Split(equal);
+}
- // Get absolute value of exponent.
- Label no_neg;
- __ cmpl(exponent.reg(), Immediate(0));
- __ j(greater_equal, &no_neg);
- __ negl(exponent.reg());
- __ bind(&no_neg);
- // Load xmm1 with 1.
- __ movsd(xmm1, xmm3);
- Label while_true;
- Label no_multiply;
+void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ Condition is_smi = masm_->CheckSmi(obj.reg());
+ destination()->false_target()->Branch(is_smi);
+ __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ __ movzxbl(kScratchRegister,
+ FieldOperand(kScratchRegister, Map::kBitFieldOffset));
+ __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable));
+ obj.Unuse();
+ destination()->Split(not_zero);
+}
- __ bind(&while_true);
- __ shrl(exponent.reg(), Immediate(1));
- __ j(not_carry, &no_multiply);
- __ mulsd(xmm1, xmm0);
- __ bind(&no_multiply);
- __ testl(exponent.reg(), exponent.reg());
- __ mulsd(xmm0, xmm0);
- __ j(not_zero, &while_true);
- // x has the original value of y - if y is negative return 1/result.
- __ testl(base.reg(), base.reg());
- __ j(positive, &allocate_return);
- // Special case if xmm1 has reached infinity.
- __ movl(answer.reg(), Immediate(0x7FB00000));
- __ movd(xmm0, answer.reg());
- __ cvtss2sd(xmm0, xmm0);
- __ ucomisd(xmm0, xmm1);
- call_runtime.Branch(equal);
- __ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ jmp(&allocate_return);
+void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
-  // exponent (or both) is a heapnumber - either way we now work
-  // on doubles.
- __ bind(&exponent_nonsmi);
- __ CompareRoot(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- call_runtime.Branch(not_equal);
- __ movsd(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
- // Test if exponent is nan.
- __ ucomisd(xmm1, xmm1);
- call_runtime.Branch(parity_even);
+ // Get the frame pointer for the calling frame.
+ Result fp = allocator()->Allocate();
+ __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- Label base_not_smi;
- Label handle_special_cases;
- __ JumpIfNotSmi(base.reg(), &base_not_smi);
- __ SmiToInteger32(base.reg(), base.reg());
- __ cvtlsi2sd(xmm0, base.reg());
- __ jmp(&handle_special_cases);
- __ bind(&base_not_smi);
- __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- call_runtime.Branch(not_equal);
- __ movl(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
- __ andl(answer.reg(), Immediate(HeapNumber::kExponentMask));
- __ cmpl(answer.reg(), Immediate(HeapNumber::kExponentMask));
- // base is NaN or +/-Infinity
- call_runtime.Branch(greater_equal);
- __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &check_frame_marker);
+ __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
- // base is in xmm0 and exponent is in xmm1.
- __ bind(&handle_special_cases);
- Label not_minus_half;
- // Test for -0.5.
- // Load xmm2 with -0.5.
- __ movl(answer.reg(), Immediate(0xBF000000));
- __ movd(xmm2, answer.reg());
- __ cvtss2sd(xmm2, xmm2);
- // xmm2 now has -0.5.
- __ ucomisd(xmm2, xmm1);
- __ j(not_equal, &not_minus_half);
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
+ Smi::FromInt(StackFrame::CONSTRUCT));
+ fp.Unuse();
+ destination()->Split(equal);
+}
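
The construct-call test reads the caller's frame: an arguments adaptor frame (inserted when actual and formal argument counts differ) is skipped first, then the frame marker is compared against the CONSTRUCT sentinel. In outline, with illustrative accessors:

    // Frame walk behind %_IsConstructCall (sketch, not V8 API).
    Frame* f = current_frame->caller();
    if (f->context_slot() == ARGUMENTS_ADAPTOR) f = f->caller();  // skip adaptor
    bool is_construct_call = (f->marker_slot() == CONSTRUCT);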
- // Calculates reciprocal of square root.
-  // Note that 1/sqrt(x) = sqrt(1/x).
- __ divsd(xmm3, xmm0);
- __ movsd(xmm1, xmm3);
- __ sqrtsd(xmm1, xmm1);
- __ jmp(&allocate_return);
- // Test for 0.5.
- __ bind(&not_minus_half);
- // Load xmm2 with 0.5.
- // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
- __ addsd(xmm2, xmm3);
- // xmm2 now has 0.5.
- __ ucomisd(xmm2, xmm1);
- call_runtime.Branch(not_equal);
+void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
- // Calculates square root.
- __ movsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
+ Result fp = allocator_->Allocate();
+ Result result = allocator_->Allocate();
+ ASSERT(fp.is_valid() && result.is_valid());
- JumpTarget done;
- Label failure, success;
- __ bind(&allocate_return);
- // Make a copy of the frame to enable us to handle allocation
- // failure after the JumpTarget jump.
- VirtualFrame* clone = new VirtualFrame(frame());
- __ AllocateHeapNumber(answer.reg(), exponent.reg(), &failure);
- __ movsd(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
- // Remove the two original values from the frame - we only need those
- // in the case where we branch to runtime.
- frame()->Drop(2);
- exponent.Unuse();
- base.Unuse();
- done.Jump(&answer);
- // Use the copy of the original frame as our current frame.
- RegisterFile empty_regs;
- SetFrame(clone, &empty_regs);
- // If we experience an allocation failure we branch to runtime.
- __ bind(&failure);
- call_runtime.Bind();
- answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
+ Label exit;
- done.Bind(&answer);
- frame()->Push(&answer);
+ // Get the number of formal parameters.
+ __ Move(result.reg(), Smi::FromInt(scope()->num_parameters()));
+
+ // Check if the calling frame is an arguments adaptor frame.
+ __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &exit);
+
+ // Arguments adaptor case: Read the arguments length from the
+ // adaptor frame.
+ __ movq(result.reg(),
+ Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ __ bind(&exit);
+ result.set_type_info(TypeInfo::Smi());
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(result.reg());
+ }
+ frame_->Push(&result);
}
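
The integer-exponent path of GenerateMathPow (removed above and reinstated later in this diff) is binary exponentiation: shift the exponent right one bit at a time, squaring the base each step and folding it into the accumulator whenever the shifted-out bit was set, then take the reciprocal for a negative exponent. A scalar C++ rendering of that loop:

    // Square-and-multiply, mirroring the xmm0/xmm1 loop in GenerateMathPow.
    double powi(double x, int n) {
      unsigned m = static_cast<unsigned>(n < 0 ? -static_cast<int64_t>(n) : n);
      double result = 1.0;
      for (double b = x; m != 0; m >>= 1) {
        if (m & 1) result *= b;   // bit set: fold the current power in
        b *= b;                   // square for the next bit
      }
      return n < 0 ? 1.0 / result : result;
    }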
-// Generates the Math.sqrt method. Please note - this function assumes that
-// the callsite has executed ToNumber on the argument.
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- Load(args->at(0));
+ JumpTarget leave, null, function, non_function_constructor;
+ Load(args->at(0)); // Load the object.
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ frame_->Spill(obj.reg());
- // Leave original value on the frame if we need to call runtime.
- frame()->Dup();
- Result result = frame()->Pop();
- result.ToRegister();
- frame()->Spill(result.reg());
- Label runtime;
- Label non_smi;
- Label load_done;
- JumpTarget end;
+ // If the object is a smi, we return null.
+ Condition is_smi = masm_->CheckSmi(obj.reg());
+ null.Branch(is_smi);
- __ JumpIfNotSmi(result.reg(), &non_smi);
- __ SmiToInteger32(result.reg(), result.reg());
- __ cvtlsi2sd(xmm0, result.reg());
- __ jmp(&load_done);
- __ bind(&non_smi);
- __ CompareRoot(FieldOperand(result.reg(), HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &runtime);
- __ movsd(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
+ // Check that the object is a JS object but take special care of JS
+ // functions to make sure they have 'Function' as their class.
- __ bind(&load_done);
- __ sqrtsd(xmm0, xmm0);
- // A copy of the virtual frame to allow us to go to runtime after the
- // JumpTarget jump.
- Result scratch = allocator()->Allocate();
- VirtualFrame* clone = new VirtualFrame(frame());
- __ AllocateHeapNumber(result.reg(), scratch.reg(), &runtime);
+ __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
+ null.Branch(below);
- __ movsd(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
- frame()->Drop(1);
- scratch.Unuse();
- end.Jump(&result);
- // We only branch to runtime if we have an allocation error.
- // Use the copy of the original frame as our current frame.
- RegisterFile empty_regs;
- SetFrame(clone, &empty_regs);
- __ bind(&runtime);
- result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
+ // As long as JS_FUNCTION_TYPE is the last instance type and it is
+ // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+ // LAST_JS_OBJECT_TYPE.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
+ function.Branch(equal);
- end.Bind(&result);
- frame()->Push(&result);
+ // Check if the constructor in the map is a function.
+ __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
+ __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
+ non_function_constructor.Branch(not_equal);
+
+ // The obj register now contains the constructor function. Grab the
+ // instance class name from there.
+ __ movq(obj.reg(),
+ FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
+ __ movq(obj.reg(),
+ FieldOperand(obj.reg(),
+ SharedFunctionInfo::kInstanceClassNameOffset));
+ frame_->Push(&obj);
+ leave.Jump();
+
+ // Functions have class 'Function'.
+ function.Bind();
+ frame_->Push(Factory::function_class_symbol());
+ leave.Jump();
+
+ // Objects with a non-function constructor have class 'Object'.
+ non_function_constructor.Bind();
+ frame_->Push(Factory::Object_symbol());
+ leave.Jump();
+
+ // Non-JS objects have class null.
+ null.Bind();
+ frame_->Push(Factory::null_value());
+
+ // All done.
+ leave.Bind();
}
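
GenerateClassOf above encodes the %_ClassOf lookup: smis and non-JS-objects map to null, functions answer 'Function', and other objects take their class name from the constructor stored in their map, falling back to 'Object' when that constructor is not a function. The same decision tree in C++ (accessor names are approximate):

    // %_ClassOf decision tree (sketch).
    Object* ClassOf(Object* obj) {
      if (obj->IsSmi() || !obj->IsJSObject()) return Null();
      if (obj->IsJSFunction()) return FunctionClassSymbol();
      Object* ctor = JSObject::cast(obj)->map()->constructor();
      if (!ctor->IsJSFunction()) return ObjectSymbol();
      return JSFunction::cast(ctor)->shared()->instance_class_name();
    }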
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- Load(args->at(0));
+ JumpTarget leave;
+ Load(args->at(0)); // Load the object.
+ frame_->Dup();
+ Result object = frame_->Pop();
+ object.ToRegister();
+ ASSERT(object.is_valid());
+ // if (object->IsSmi()) return object.
+ Condition is_smi = masm_->CheckSmi(object.reg());
+ leave.Branch(is_smi);
+ // It is a heap object - get map.
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ // if (!object->IsJSValue()) return object.
+ __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
+ leave.Branch(not_equal);
+ __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
+ object.Unuse();
+ frame_->SetElementAt(0, &temp);
+ leave.Bind();
+}
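
%_ValueOf, generated above, unwraps JSValue wrappers (the heap objects behind new Number(..), new String(..), new Boolean(..)) and passes every other value through unchanged. As a sketch:

    // %_ValueOf: unwrap JSValue wrappers, pass other values through.
    Object* ValueOf(Object* obj) {
      if (obj->IsSmi() || !obj->IsJSValue()) return obj;
      return JSValue::cast(obj)->value();
    }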
+
+
+void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+ JumpTarget leave;
+ Load(args->at(0)); // Load the object.
+ Load(args->at(1)); // Load the value.
Result value = frame_->Pop();
+ Result object = frame_->Pop();
value.ToRegister();
- ASSERT(value.is_valid());
- Condition is_smi = masm_->CheckSmi(value.reg());
- value.Unuse();
- destination()->Split(is_smi);
+ object.ToRegister();
+
+ // if (object->IsSmi()) return value.
+ Condition is_smi = masm_->CheckSmi(object.reg());
+ leave.Branch(is_smi, &value);
+
+ // It is a heap object - get its map.
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ // if (!object->IsJSValue()) return value.
+ __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
+ leave.Branch(not_equal, &value);
+
+ // Store the value.
+ __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
+ // Update the write barrier. Save the value as it will be
+ // overwritten by the write barrier code and is needed afterward.
+ Result duplicate_value = allocator_->Allocate();
+ ASSERT(duplicate_value.is_valid());
+ __ movq(duplicate_value.reg(), value.reg());
+ // The object register is also overwritten by the write barrier and
+ // possibly aliased in the frame.
+ frame_->Spill(object.reg());
+ __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
+ scratch.reg());
+ object.Unuse();
+ scratch.Unuse();
+ duplicate_value.Unuse();
+
+ // Leave.
+ leave.Bind(&value);
+ frame_->Push(&value);
}
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (ShouldGenerateLog(args->at(0))) {
- Load(args->at(1));
- Load(args->at(2));
- frame_->CallRuntime(Runtime::kLog, 2);
- }
-#endif
- // Finally, we're expected to leave a value on the top of the stack.
- frame_->Push(Factory::undefined_value());
+void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ // ArgumentsAccessStub expects the key in rdx and the formal
+ // parameter count in rax.
+ Load(args->at(0));
+ Result key = frame_->Pop();
+ // Explicitly create a constant result.
+ Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
+ // Call the shared stub to get to arguments[key].
+ ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+ Result result = frame_->CallStub(&stub, &key, &count);
+ frame_->Push(&result);
}
@@ -4664,11 +6011,8 @@ void CodeGenerator::GenerateRandomHeapNumber(
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- // To allocate a heap number, and ensure that it is not a smi, we
- // call the runtime function FUnaryMinus on 0, returning the double
- // -0.0. A new, distinct heap number is returned each time.
- __ Push(Smi::FromInt(0));
- __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
+ // Allocate a heap number.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
__ movq(rbx, rax);
__ bind(&heapnumber_allocated);
@@ -4695,6 +6039,43 @@ void CodeGenerator::GenerateRandomHeapNumber(
}
+void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+
+ StringAddStub stub(NO_STRING_ADD_FLAGS);
+ Result answer = frame_->CallStub(&stub, 2);
+ frame_->Push(&answer);
+}
+
+
+void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
+ ASSERT_EQ(3, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Load(args->at(2));
+
+ SubStringStub stub;
+ Result answer = frame_->CallStub(&stub, 3);
+ frame_->Push(&answer);
+}
+
+
+void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+
+ StringCompareStub stub;
+ Result answer = frame_->CallStub(&stub, 2);
+ frame_->Push(&answer);
+}
+
+
void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 4);
@@ -5136,1723 +6517,1004 @@ void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
}
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::SIN);
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::COS);
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
+// Generates the Math.pow method. Only handles special cases and
+// branches to the runtime system for everything else. Please note
+// that this function assumes that the callsite has executed ToNumber
+// on both arguments.
+void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
Load(args->at(0));
Load(args->at(1));
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- Result answer = frame_->CallStub(&stub, 2);
- frame_->Push(&answer);
-}
-
+ Label allocate_return;
+ // Load the two operands while leaving the values on the frame.
+ frame()->Dup();
+ Result exponent = frame()->Pop();
+ exponent.ToRegister();
+ frame()->Spill(exponent.reg());
+ frame()->PushElementAt(1);
+ Result base = frame()->Pop();
+ base.ToRegister();
+ frame()->Spill(base.reg());
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
- ASSERT_EQ(3, args->length());
+ Result answer = allocator()->Allocate();
+ ASSERT(answer.is_valid());
+ ASSERT(!exponent.reg().is(base.reg()));
+ JumpTarget call_runtime;
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
+ // Save 1 in xmm3 - we need this several times later on.
+ __ movl(answer.reg(), Immediate(1));
+ __ cvtlsi2sd(xmm3, answer.reg());
- SubStringStub stub;
- Result answer = frame_->CallStub(&stub, 3);
- frame_->Push(&answer);
-}
+ Label exponent_nonsmi;
+ Label base_nonsmi;
+  // If the exponent is a heap number, go to that specific case.
+ __ JumpIfNotSmi(exponent.reg(), &exponent_nonsmi);
+ __ JumpIfNotSmi(base.reg(), &base_nonsmi);
+  // Optimized path when the exponent is an integer.
+ Label powi;
+ __ SmiToInteger32(base.reg(), base.reg());
+ __ cvtlsi2sd(xmm0, base.reg());
+ __ jmp(&powi);
+  // The exponent is a smi and the base is a heap number.
+ __ bind(&base_nonsmi);
+ __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ call_runtime.Branch(not_equal);
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
+ __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
- Load(args->at(0));
- Load(args->at(1));
+  // Optimized version of pow when the exponent is an integer.
+ __ bind(&powi);
+ __ SmiToInteger32(exponent.reg(), exponent.reg());
- StringCompareStub stub;
- Result answer = frame_->CallStub(&stub, 2);
- frame_->Push(&answer);
-}
+ // Save exponent in base as we need to check if exponent is negative later.
+ // We know that base and exponent are in different registers.
+ __ movl(base.reg(), exponent.reg());
+ // Get absolute value of exponent.
+ Label no_neg;
+ __ cmpl(exponent.reg(), Immediate(0));
+ __ j(greater_equal, &no_neg);
+ __ negl(exponent.reg());
+ __ bind(&no_neg);
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- JumpTarget leave, null, function, non_function_constructor;
- Load(args->at(0)); // Load the object.
- Result obj = frame_->Pop();
- obj.ToRegister();
- frame_->Spill(obj.reg());
+ // Load xmm1 with 1.
+ __ movsd(xmm1, xmm3);
+ Label while_true;
+ Label no_multiply;
- // If the object is a smi, we return null.
- Condition is_smi = masm_->CheckSmi(obj.reg());
- null.Branch(is_smi);
+ __ bind(&while_true);
+ __ shrl(exponent.reg(), Immediate(1));
+ __ j(not_carry, &no_multiply);
+ __ mulsd(xmm1, xmm0);
+ __ bind(&no_multiply);
+ __ testl(exponent.reg(), exponent.reg());
+ __ mulsd(xmm0, xmm0);
+ __ j(not_zero, &while_true);
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
+  // The base register holds the original exponent - if negative, return 1/result.
+ __ testl(base.reg(), base.reg());
+ __ j(positive, &allocate_return);
+ // Special case if xmm1 has reached infinity.
+ __ movl(answer.reg(), Immediate(0x7FB00000));
+ __ movd(xmm0, answer.reg());
+ __ cvtss2sd(xmm0, xmm0);
+ __ ucomisd(xmm0, xmm1);
+ call_runtime.Branch(equal);
+ __ divsd(xmm3, xmm1);
+ __ movsd(xmm1, xmm3);
+ __ jmp(&allocate_return);
- __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
- null.Branch(below);
+  // The exponent (or both operands) is a heap number - either way we now
+  // work on doubles.
+ __ bind(&exponent_nonsmi);
+ __ CompareRoot(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ call_runtime.Branch(not_equal);
+ __ movsd(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
+  // Test if the exponent is NaN.
+ __ ucomisd(xmm1, xmm1);
+ call_runtime.Branch(parity_even);
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
- function.Branch(equal);
+ Label base_not_smi;
+ Label handle_special_cases;
+ __ JumpIfNotSmi(base.reg(), &base_not_smi);
+ __ SmiToInteger32(base.reg(), base.reg());
+ __ cvtlsi2sd(xmm0, base.reg());
+ __ jmp(&handle_special_cases);
+ __ bind(&base_not_smi);
+ __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ call_runtime.Branch(not_equal);
+ __ movl(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
+ __ andl(answer.reg(), Immediate(HeapNumber::kExponentMask));
+ __ cmpl(answer.reg(), Immediate(HeapNumber::kExponentMask));
+ // base is NaN or +/-Infinity
+ call_runtime.Branch(greater_equal);
+ __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
- // Check if the constructor in the map is a function.
- __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
- __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
- non_function_constructor.Branch(not_equal);
+ // base is in xmm0 and exponent is in xmm1.
+ __ bind(&handle_special_cases);
+ Label not_minus_half;
+ // Test for -0.5.
+ // Load xmm2 with -0.5.
+ __ movl(answer.reg(), Immediate(0xBF000000));
+ __ movd(xmm2, answer.reg());
+ __ cvtss2sd(xmm2, xmm2);
+ // xmm2 now has -0.5.
+ __ ucomisd(xmm2, xmm1);
+ __ j(not_equal, &not_minus_half);
- // The obj register now contains the constructor function. Grab the
- // instance class name from there.
- __ movq(obj.reg(),
- FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
- __ movq(obj.reg(),
- FieldOperand(obj.reg(),
- SharedFunctionInfo::kInstanceClassNameOffset));
- frame_->Push(&obj);
- leave.Jump();
+  // Calculate the reciprocal of the square root.
+  // Note that 1/sqrt(x) = sqrt(1/x).
+ __ divsd(xmm3, xmm0);
+ __ movsd(xmm1, xmm3);
+ __ sqrtsd(xmm1, xmm1);
+ __ jmp(&allocate_return);
- // Functions have class 'Function'.
- function.Bind();
- frame_->Push(Factory::function_class_symbol());
- leave.Jump();
+ // Test for 0.5.
+ __ bind(&not_minus_half);
+ // Load xmm2 with 0.5.
+ // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
+ __ addsd(xmm2, xmm3);
+ // xmm2 now has 0.5.
+ __ ucomisd(xmm2, xmm1);
+ call_runtime.Branch(not_equal);
- // Objects with a non-function constructor have class 'Object'.
- non_function_constructor.Bind();
- frame_->Push(Factory::Object_symbol());
- leave.Jump();
+ // Calculates square root.
+ __ movsd(xmm1, xmm0);
+ __ sqrtsd(xmm1, xmm1);
- // Non-JS objects have class null.
- null.Bind();
- frame_->Push(Factory::null_value());
+ JumpTarget done;
+ Label failure, success;
+ __ bind(&allocate_return);
+ // Make a copy of the frame to enable us to handle allocation
+ // failure after the JumpTarget jump.
+ VirtualFrame* clone = new VirtualFrame(frame());
+ __ AllocateHeapNumber(answer.reg(), exponent.reg(), &failure);
+ __ movsd(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
+ // Remove the two original values from the frame - we only need those
+ // in the case where we branch to runtime.
+ frame()->Drop(2);
+ exponent.Unuse();
+ base.Unuse();
+ done.Jump(&answer);
+ // Use the copy of the original frame as our current frame.
+ RegisterFile empty_regs;
+ SetFrame(clone, &empty_regs);
+ // If we experience an allocation failure we branch to runtime.
+ __ bind(&failure);
+ call_runtime.Bind();
+ answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
- // All done.
- leave.Bind();
+ done.Bind(&answer);
+ frame()->Push(&answer);
}
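
The integer-exponent loop above is square-and-multiply (binary exponentiation), and the -0.5/0.5 cases reduce to square roots via the identity 1/sqrt(x) = sqrt(1/x). A minimal sketch of the same case analysis in plain C++, assuming IEEE doubles and ignoring the NaN/infinity bailouts that the generated code sends to the runtime:

    #include <cmath>

    double PowSpecialCases(double base, double exponent) {
      // Integer exponent: square-and-multiply, mirroring the while_true loop.
      if (exponent == std::floor(exponent) && std::fabs(exponent) < 1e9) {
        long long y = static_cast<long long>(exponent);
        bool negative = y < 0;
        unsigned long long n = negative ? -y : y;
        double result = 1.0;
        double x = base;
        while (n != 0) {
          if (n & 1) result *= x;  // low bit set: multiply the result in
          x *= x;                  // square the base every round
          n >>= 1;
        }
        return negative ? 1.0 / result : result;  // 1/result for negative y
      }
      if (exponent == -0.5) return std::sqrt(1.0 / base);  // 1/sqrt(x) == sqrt(1/x)
      if (exponent == 0.5) return std::sqrt(base);
      return std::pow(base, exponent);  // everything else: the runtime call
    }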
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
- JumpTarget leave;
- Load(args->at(0)); // Load the object.
- Load(args->at(1)); // Load the value.
- Result value = frame_->Pop();
- Result object = frame_->Pop();
- value.ToRegister();
- object.ToRegister();
-
- // if (object->IsSmi()) return value.
- Condition is_smi = masm_->CheckSmi(object.reg());
- leave.Branch(is_smi, &value);
-
- // It is a heap object - get its map.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- // if (!object->IsJSValue()) return value.
- __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
- leave.Branch(not_equal, &value);
-
- // Store the value.
- __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- Result duplicate_value = allocator_->Allocate();
- ASSERT(duplicate_value.is_valid());
- __ movq(duplicate_value.reg(), value.reg());
- // The object register is also overwritten by the write barrier and
- // possibly aliased in the frame.
- frame_->Spill(object.reg());
- __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
- scratch.reg());
- object.Unuse();
- scratch.Unuse();
- duplicate_value.Unuse();
-
- // Leave.
- leave.Bind(&value);
- frame_->Push(&value);
+void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 1);
+ Load(args->at(0));
+ TranscendentalCacheStub stub(TranscendentalCache::SIN);
+ Result result = frame_->CallStub(&stub, 1);
+ frame_->Push(&result);
}
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- JumpTarget leave;
- Load(args->at(0)); // Load the object.
- frame_->Dup();
- Result object = frame_->Pop();
- object.ToRegister();
- ASSERT(object.is_valid());
- // if (object->IsSmi()) return object.
- Condition is_smi = masm_->CheckSmi(object.reg());
- leave.Branch(is_smi);
- // It is a heap object - get map.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- // if (!object->IsJSValue()) return object.
- __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
- leave.Branch(not_equal);
- __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
- object.Unuse();
- frame_->SetElementAt(0, &temp);
- leave.Bind();
+void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 1);
+ Load(args->at(0));
+ TranscendentalCacheStub stub(TranscendentalCache::COS);
+ Result result = frame_->CallStub(&stub, 1);
+ frame_->Push(&result);
}
-// -----------------------------------------------------------------------------
-// CodeGenerator implementation of Expressions
-
-void CodeGenerator::LoadAndSpill(Expression* expression) {
- // TODO(x64): No architecture specific code. Move to shared location.
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- Load(expression);
- frame_->SpillAll();
- set_in_spilled_code(true);
-}
-
+// Generates the Math.sqrt method. Please note - this function assumes that
+// the callsite has executed ToNumber on the argument.
+void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
-void CodeGenerator::Load(Expression* expr) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- ASSERT(!in_spilled_code());
- JumpTarget true_target;
- JumpTarget false_target;
- ControlDestination dest(&true_target, &false_target, true);
- LoadCondition(expr, &dest, false);
+ // Leave original value on the frame if we need to call runtime.
+ frame()->Dup();
+ Result result = frame()->Pop();
+ result.ToRegister();
+ frame()->Spill(result.reg());
+ Label runtime;
+ Label non_smi;
+ Label load_done;
+ JumpTarget end;
- if (dest.false_was_fall_through()) {
- // The false target was just bound.
- JumpTarget loaded;
- frame_->Push(Factory::false_value());
- // There may be dangling jumps to the true target.
- if (true_target.is_linked()) {
- loaded.Jump();
- true_target.Bind();
- frame_->Push(Factory::true_value());
- loaded.Bind();
- }
+ __ JumpIfNotSmi(result.reg(), &non_smi);
+ __ SmiToInteger32(result.reg(), result.reg());
+ __ cvtlsi2sd(xmm0, result.reg());
+ __ jmp(&load_done);
+ __ bind(&non_smi);
+ __ CompareRoot(FieldOperand(result.reg(), HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &runtime);
+ __ movsd(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
- } else if (dest.is_used()) {
- // There is true, and possibly false, control flow (with true as
- // the fall through).
- JumpTarget loaded;
- frame_->Push(Factory::true_value());
- if (false_target.is_linked()) {
- loaded.Jump();
- false_target.Bind();
- frame_->Push(Factory::false_value());
- loaded.Bind();
- }
+ __ bind(&load_done);
+ __ sqrtsd(xmm0, xmm0);
+ // A copy of the virtual frame to allow us to go to runtime after the
+ // JumpTarget jump.
+ Result scratch = allocator()->Allocate();
+ VirtualFrame* clone = new VirtualFrame(frame());
+ __ AllocateHeapNumber(result.reg(), scratch.reg(), &runtime);
- } else {
- // We have a valid value on top of the frame, but we still may
- // have dangling jumps to the true and false targets from nested
- // subexpressions (eg, the left subexpressions of the
- // short-circuited boolean operators).
- ASSERT(has_valid_frame());
- if (true_target.is_linked() || false_target.is_linked()) {
- JumpTarget loaded;
- loaded.Jump(); // Don't lose the current TOS.
- if (true_target.is_linked()) {
- true_target.Bind();
- frame_->Push(Factory::true_value());
- if (false_target.is_linked()) {
- loaded.Jump();
- }
- }
- if (false_target.is_linked()) {
- false_target.Bind();
- frame_->Push(Factory::false_value());
- }
- loaded.Bind();
- }
- }
+ __ movsd(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
+ frame()->Drop(1);
+ scratch.Unuse();
+ end.Jump(&result);
+ // We only branch to runtime if we have an allocation error.
+ // Use the copy of the original frame as our current frame.
+ RegisterFile empty_regs;
+ SetFrame(clone, &empty_regs);
+ __ bind(&runtime);
+ result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
- ASSERT(has_valid_frame());
- ASSERT(frame_->height() == original_height + 1);
+ end.Bind(&result);
+ frame()->Push(&result);
}
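
A short sketch, with a toy tagged value standing in for V8's smi/heap-number representation, of the unboxing the code above performs before sqrtsd:

    #include <cmath>
    #include <cstdint>

    // Toy stand-in for a tagged value: either a smi payload or a boxed double.
    struct Tagged { bool is_smi; int32_t smi; double heap_number; };

    double SqrtOfTagged(const Tagged& v) {
      double x = v.is_smi
          ? static_cast<double>(v.smi)  // SmiToInteger32 + cvtlsi2sd above
          : v.heap_number;              // movsd from HeapNumber::kValueOffset
      return std::sqrt(x);              // the sqrtsd instruction
    }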
-// Emit code to load the value of an expression to the top of the
-// frame. If the expression is boolean-valued it may be compiled (or
-// partially compiled) into control flow to the control destination.
-// If force_control is true, control flow is forced.
-void CodeGenerator::LoadCondition(Expression* x,
- ControlDestination* dest,
- bool force_control) {
- ASSERT(!in_spilled_code());
- int original_height = frame_->height();
+void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
+ if (CheckForInlineRuntimeCall(node)) {
+ return;
+ }
- { CodeGenState new_state(this, dest);
- Visit(x);
+ ZoneList<Expression*>* args = node->arguments();
+ Comment cmnt(masm_, "[ CallRuntime");
+ Runtime::Function* function = node->function();
- // If we hit a stack overflow, we may not have actually visited
- // the expression. In that case, we ensure that we have a
- // valid-looking frame state because we will continue to generate
- // code as we unwind the C++ stack.
- //
- // It's possible to have both a stack overflow and a valid frame
- // state (eg, a subexpression overflowed, visiting it returned
- // with a dummied frame state, and visiting this expression
- // returned with a normal-looking state).
- if (HasStackOverflow() &&
- !dest->is_used() &&
- frame_->height() == original_height) {
- dest->Goto(true);
- }
+ if (function == NULL) {
+ // Push the builtins object found in the current global object.
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ movq(temp.reg(), GlobalObject());
+ __ movq(temp.reg(),
+ FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
+ frame_->Push(&temp);
}
- if (force_control && !dest->is_used()) {
- // Convert the TOS value into flow to the control destination.
- // TODO(X64): Make control flow to control destinations work.
- ToBoolean(dest);
+ // Push the arguments ("left-to-right").
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
}
- ASSERT(!(force_control && !dest->is_used()));
- ASSERT(dest->is_used() || frame_->height() == original_height + 1);
-}
-
-
-// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
-// convert it to a boolean in the condition code register or jump to
-// 'false_target'/'true_target' as appropriate.
-void CodeGenerator::ToBoolean(ControlDestination* dest) {
- Comment cmnt(masm_, "[ ToBoolean");
-
- // The value to convert should be popped from the frame.
- Result value = frame_->Pop();
- value.ToRegister();
-
- if (value.is_number()) {
- // Fast case if TypeInfo indicates only numbers.
- if (FLAG_debug_code) {
- __ AbortIfNotNumber(value.reg());
- }
- // Smi => false iff zero.
- __ SmiCompare(value.reg(), Smi::FromInt(0));
- if (value.is_smi()) {
- value.Unuse();
- dest->Split(not_zero);
- } else {
- dest->false_target()->Branch(equal);
- Condition is_smi = masm_->CheckSmi(value.reg());
- dest->true_target()->Branch(is_smi);
- __ xorpd(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(value.reg(), HeapNumber::kValueOffset));
- value.Unuse();
- dest->Split(not_zero);
- }
+ if (function == NULL) {
+ // Call the JS runtime function.
+ frame_->Push(node->name());
+ Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
+ arg_count,
+ loop_nesting_);
+ frame_->RestoreContextRegister();
+ frame_->Push(&answer);
} else {
- // Fast case checks.
- // 'false' => false.
- __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
- dest->false_target()->Branch(equal);
-
- // 'true' => true.
- __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
- dest->true_target()->Branch(equal);
-
- // 'undefined' => false.
- __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
- dest->false_target()->Branch(equal);
-
- // Smi => false iff zero.
- __ SmiCompare(value.reg(), Smi::FromInt(0));
- dest->false_target()->Branch(equal);
- Condition is_smi = masm_->CheckSmi(value.reg());
- dest->true_target()->Branch(is_smi);
-
- // Call the stub for all other cases.
- frame_->Push(&value); // Undo the Pop() from above.
- ToBooleanStub stub;
- Result temp = frame_->CallStub(&stub, 1);
- // Convert the result to a condition code.
- __ testq(temp.reg(), temp.reg());
- temp.Unuse();
- dest->Split(not_equal);
+ // Call the C runtime function.
+ Result answer = frame_->CallRuntime(function, arg_count);
+ frame_->Push(&answer);
}
}
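
The removed ToBoolean above inlines the fast cases of ECMA-262 section 9.2. A sketch of that truth table over a toy tagged value (illustrative only; strings and other heap objects go to the ToBooleanStub):

    #include <cmath>

    enum class Kind { False, True, Undefined, Smi, HeapNumber, Other };
    struct Value { Kind kind; int smi; double number; };

    // Returns the boolean result for the fast cases; sets *handled to false
    // when the stub slow path would be needed.
    bool ToBooleanFastCase(const Value& v, bool* handled) {
      *handled = true;
      switch (v.kind) {
        case Kind::False:     return false;          // 'false' => false
        case Kind::True:      return true;           // 'true' => true
        case Kind::Undefined: return false;          // 'undefined' => false
        case Kind::Smi:       return v.smi != 0;     // smi => false iff zero
        case Kind::HeapNumber:                       // false for 0.0 and NaN
          return v.number != 0.0 && !std::isnan(v.number);
        default:
          *handled = false;                          // ToBooleanStub slow path
          return false;
      }
    }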
-void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
- UNIMPLEMENTED();
- // TODO(X64): Implement security policy for loads of smis.
-}
-
-
-bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
- return false;
-}
-
-//------------------------------------------------------------------------------
-// CodeGenerator implementation of variables, lookups, and stores.
+void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
+ Comment cmnt(masm_, "[ UnaryOperation");
-Reference::Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get)
- : cgen_(cgen),
- expression_(expression),
- type_(ILLEGAL),
- persist_after_get_(persist_after_get) {
- cgen->LoadReference(this);
-}
+ Token::Value op = node->op();
+ if (op == Token::NOT) {
+ // Swap the true and false targets but keep the same actual label
+ // as the fall through.
+ destination()->Invert();
+ LoadCondition(node->expression(), destination(), true);
+ // Swap the labels back.
+ destination()->Invert();
-Reference::~Reference() {
- ASSERT(is_unloaded() || is_illegal());
-}
+ } else if (op == Token::DELETE) {
+ Property* property = node->expression()->AsProperty();
+ if (property != NULL) {
+ Load(property->obj());
+ Load(property->key());
+ Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
+ frame_->Push(&answer);
+ return;
+ }
+ Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
+ if (variable != NULL) {
+ Slot* slot = variable->slot();
+ if (variable->is_global()) {
+ LoadGlobal();
+ frame_->Push(variable->name());
+ Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
+ CALL_FUNCTION, 2);
+ frame_->Push(&answer);
+ return;
-void CodeGenerator::LoadReference(Reference* ref) {
- // References are loaded from both spilled and unspilled code. Set the
- // state to unspilled to allow that (and explicitly spill after
- // construction at the construction sites).
- bool was_in_spilled_code = in_spilled_code_;
- in_spilled_code_ = false;
+ } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ // Call the runtime to look up the context holding the named
+ // variable. Sync the virtual frame eagerly so we can push the
+ // arguments directly into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+ frame_->EmitPush(rsi);
+ frame_->EmitPush(variable->name());
+ Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
+ ASSERT(context.is_register());
+ frame_->EmitPush(context.reg());
+ context.Unuse();
+ frame_->EmitPush(variable->name());
+ Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
+ CALL_FUNCTION, 2);
+ frame_->Push(&answer);
+ return;
+ }
- Comment cmnt(masm_, "[ LoadReference");
- Expression* e = ref->expression();
- Property* property = e->AsProperty();
- Variable* var = e->AsVariableProxy()->AsVariable();
+ // Default: Result of deleting non-global, not dynamically
+ // introduced variables is false.
+ frame_->Push(Factory::false_value());
- if (property != NULL) {
- // The expression is either a property or a variable proxy that rewrites
- // to a property.
- Load(property->obj());
- if (property->key()->IsPropertyName()) {
- ref->set_type(Reference::NAMED);
} else {
- Load(property->key());
- ref->set_type(Reference::KEYED);
+ // Default: Result of deleting expressions is true.
+ Load(node->expression()); // may have side-effects
+ frame_->SetElementAt(0, Factory::true_value());
}
- } else if (var != NULL) {
- // The expression is a variable proxy that does not rewrite to a
- // property. Global variables are treated as named property references.
- if (var->is_global()) {
- // If rax is free, the register allocator prefers it. Thus the code
- // generator will load the global object into rax, which is where
- // LoadIC wants it. Most uses of Reference call LoadIC directly
- // after the reference is created.
- frame_->Spill(rax);
- LoadGlobal();
- ref->set_type(Reference::NAMED);
+
+ } else if (op == Token::TYPEOF) {
+ // Special case for loading the typeof expression; see comment on
+ // LoadTypeofExpression().
+ LoadTypeofExpression(node->expression());
+ Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
+ frame_->Push(&answer);
+
+ } else if (op == Token::VOID) {
+ Expression* expression = node->expression();
+ if (expression && expression->AsLiteral() && (
+ expression->AsLiteral()->IsTrue() ||
+ expression->AsLiteral()->IsFalse() ||
+ expression->AsLiteral()->handle()->IsNumber() ||
+ expression->AsLiteral()->handle()->IsString() ||
+ expression->AsLiteral()->handle()->IsJSRegExp() ||
+ expression->AsLiteral()->IsNull())) {
+ // Omit evaluating the value of the primitive literal.
+ // It will be discarded anyway, and can have no side effect.
+ frame_->Push(Factory::undefined_value());
} else {
- ASSERT(var->slot() != NULL);
- ref->set_type(Reference::SLOT);
+ Load(node->expression());
+ frame_->SetElementAt(0, Factory::undefined_value());
}
- } else {
- // Anything else is a runtime error.
- Load(e);
- frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
- }
-
- in_spilled_code_ = was_in_spilled_code;
-}
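
A compact sketch of the classification that the removed LoadReference performs, with toy booleans standing in for the AST queries:

    enum class RefType { Illegal, Slot, Named, Keyed };

    // Property accesses with a name key give NAMED, other keys KEYED; global
    // variables are NAMED (a property of the global object), other variables
    // are SLOT; anything else is a runtime reference error.
    RefType Classify(bool is_property, bool key_is_name,
                     bool is_variable, bool is_global) {
      if (is_property) return key_is_name ? RefType::Named : RefType::Keyed;
      if (is_variable) return is_global ? RefType::Named : RefType::Slot;
      return RefType::Illegal;
    }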
+ } else {
+ bool can_overwrite =
+ (node->expression()->AsBinaryOperation() != NULL &&
+ node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ UnaryOverwriteMode overwrite =
+ can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+ bool no_negative_zero = node->expression()->no_negative_zero();
+ Load(node->expression());
+ switch (op) {
+ case Token::NOT:
+ case Token::DELETE:
+ case Token::TYPEOF:
+ UNREACHABLE(); // handled above
+ break;
-void CodeGenerator::UnloadReference(Reference* ref) {
- // Pop a reference from the stack while preserving TOS.
- Comment cmnt(masm_, "[ UnloadReference");
- frame_->Nip(ref->size());
- ref->set_unloaded();
-}
+ case Token::SUB: {
+ GenericUnaryOpStub stub(
+ Token::SUB,
+ overwrite,
+ no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
+ Result operand = frame_->Pop();
+ Result answer = frame_->CallStub(&stub, &operand);
+ answer.set_type_info(TypeInfo::Number());
+ frame_->Push(&answer);
+ break;
+ }
+ case Token::BIT_NOT: {
+ // Smi check.
+ JumpTarget smi_label;
+ JumpTarget continue_label;
+ Result operand = frame_->Pop();
+ operand.ToRegister();
-Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
- // Currently, this assertion will fail if we try to assign to
- // a constant variable that is constant because it is read-only
- // (such as the variable referring to a named function expression).
- // We need to implement assignments to read-only variables.
- // Ideally, we should do this during AST generation (by converting
- // such assignments into expression statements); however, in general
- // we may not be able to make the decision until past AST generation,
- // that is when the entire program is known.
- ASSERT(slot != NULL);
- int index = slot->index();
- switch (slot->type()) {
- case Slot::PARAMETER:
- return frame_->ParameterAt(index);
+ Condition is_smi = masm_->CheckSmi(operand.reg());
+ smi_label.Branch(is_smi, &operand);
- case Slot::LOCAL:
- return frame_->LocalAt(index);
+ GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+ Result answer = frame_->CallStub(&stub, &operand);
+ continue_label.Jump(&answer);
- case Slot::CONTEXT: {
- // Follow the context chain if necessary.
- ASSERT(!tmp.is(rsi)); // do not overwrite context register
- Register context = rsi;
- int chain_length = scope()->ContextChainLength(slot->var()->scope());
- for (int i = 0; i < chain_length; i++) {
- // Load the closure.
- // (All contexts, even 'with' contexts, have a closure,
- // and it is the same for all contexts inside a function.
- // There is no need to go to the function context first.)
- __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
- // Load the function context (which is the incoming, outer context).
- __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
- context = tmp;
+ smi_label.Bind(&answer);
+ answer.ToRegister();
+ frame_->Spill(answer.reg());
+ __ SmiNot(answer.reg(), answer.reg());
+ continue_label.Bind(&answer);
+ answer.set_type_info(TypeInfo::Smi());
+ frame_->Push(&answer);
+ break;
}
- // We may have a 'with' context now. Get the function context.
-    // (In fact this mov may never be needed, since the scope analysis
-    // may not permit a direct context access in this case and thus we are
-    // always at a function context. However, it is safe to dereference it
-    // because the function context of a function context is itself. Before
- // deleting this mov we should try to create a counter-example first,
- // though...)
- __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp, index);
- }
- default:
- UNREACHABLE();
- return Operand(rsp, 0);
- }
-}
-
-
-Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
- Result tmp,
- JumpTarget* slow) {
- ASSERT(slot->type() == Slot::CONTEXT);
- ASSERT(tmp.is_register());
- Register context = rsi;
+ case Token::ADD: {
+ // Smi check.
+ JumpTarget continue_label;
+ Result operand = frame_->Pop();
+ TypeInfo operand_info = operand.type_info();
+ operand.ToRegister();
+ Condition is_smi = masm_->CheckSmi(operand.reg());
+ continue_label.Branch(is_smi, &operand);
+ frame_->Push(&operand);
+ Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
+ CALL_FUNCTION, 1);
- for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- slow->Branch(not_equal, not_taken);
+ continue_label.Bind(&answer);
+ if (operand_info.IsSmi()) {
+ answer.set_type_info(TypeInfo::Smi());
+ } else if (operand_info.IsInteger32()) {
+ answer.set_type_info(TypeInfo::Integer32());
+ } else {
+ answer.set_type_info(TypeInfo::Number());
+ }
+ frame_->Push(&answer);
+ break;
}
- __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
- __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- context = tmp.reg();
+ default:
+ UNREACHABLE();
}
}
- // Check that last extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
- slow->Branch(not_equal, not_taken);
- __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp.reg(), slot->index());
}
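
The BIT_NOT fast path above relies on smi tagging: when the operand is a smi, the bitwise-not is done inline and the result is again a smi. A sketch assuming the classic 1-bit tag (the 32-bit layout; this x64 port keeps the payload in the upper word, but the idea is identical):

    #include <cstdint>

    constexpr intptr_t kSmiTagMask = 1;  // tag bit 0 == 0 means smi (assumed scheme)

    bool IsSmi(intptr_t tagged)        { return (tagged & kSmiTagMask) == 0; }
    intptr_t SmiToInt(intptr_t tagged) { return tagged >> 1; }  // arithmetic shift assumed
    intptr_t IntToSmi(intptr_t value)  { return value << 1; }

    // Inline fast path for Token::BIT_NOT; *needs_stub signals the generic stub.
    intptr_t BitNotFast(intptr_t tagged, bool* needs_stub) {
      if (!IsSmi(tagged)) {
        *needs_stub = true;   // not a smi: GenericUnaryOpStub handles it
        return tagged;
      }
      *needs_stub = false;
      return IntToSmi(~SmiToInt(tagged));  // what SmiNot computes; still a smi
    }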
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
-
- JumpTarget slow;
- JumpTarget done;
- Result value;
-
- // Generate fast case for loading from slots that correspond to
- // local/global variables or arguments unless they are shadowed by
- // eval-introduced bindings.
- EmitDynamicLoadFromSlotFastCase(slot,
- typeof_state,
- &value,
- &slow,
- &done);
-
- slow.Bind();
- // A runtime call is inevitable. We eagerly sync frame elements
- // to memory so that we can push the arguments directly into place
- // on top of the frame.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(rsi);
- __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT);
- frame_->EmitPush(kScratchRegister);
- if (typeof_state == INSIDE_TYPEOF) {
- value =
- frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- } else {
- value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
- }
-
- done.Bind(&value);
- frame_->Push(&value);
+// The value in dst was optimistically incremented or decremented.
+// The result overflowed or was not smi tagged. Call into the runtime
+// to convert the argument to a number, and call the specialized add
+// or subtract stub. The result is left in dst.
+class DeferredPrefixCountOperation: public DeferredCode {
+ public:
+ DeferredPrefixCountOperation(Register dst,
+ bool is_increment,
+ TypeInfo input_type)
+ : dst_(dst), is_increment_(is_increment), input_type_(input_type) {
+ set_comment("[ DeferredCountOperation");
+ }
- } else if (slot->var()->mode() == Variable::CONST) {
- // Const slots may contain 'the hole' value (the constant hasn't been
- // initialized yet) which needs to be converted into the 'undefined'
- // value.
- //
- // We currently spill the virtual frame because constants use the
- // potentially unsafe direct-frame access of SlotOperand.
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Load const");
- JumpTarget exit;
- __ movq(rcx, SlotOperand(slot, rcx));
- __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
- exit.Branch(not_equal);
- __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
- exit.Bind();
- frame_->EmitPush(rcx);
+ virtual void Generate();
- } else if (slot->type() == Slot::PARAMETER) {
- frame_->PushParameterAt(slot->index());
+ private:
+ Register dst_;
+ bool is_increment_;
+ TypeInfo input_type_;
+};
- } else if (slot->type() == Slot::LOCAL) {
- frame_->PushLocalAt(slot->index());
+void DeferredPrefixCountOperation::Generate() {
+ Register left;
+ if (input_type_.IsNumber()) {
+ left = dst_;
} else {
- // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
- // here.
- //
- // The use of SlotOperand below is safe for an unspilled frame
- // because it will always be a context slot.
- ASSERT(slot->type() == Slot::CONTEXT);
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ movq(temp.reg(), SlotOperand(slot, temp.reg()));
- frame_->Push(&temp);
+ __ push(dst_);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ left = rax;
}
+
+ GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
+ NO_OVERWRITE,
+ NO_GENERIC_BINARY_FLAGS,
+ TypeInfo::Number());
+ stub.GenerateCall(masm_, left, Smi::FromInt(1));
+
+ if (!dst_.is(rax)) __ movq(dst_, rax);
}
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
- TypeofState state) {
- LoadFromSlot(slot, state);
+// The value in dst was optimistically incremented or decremented.
+// The result overflowed or was not smi tagged. Call into the runtime
+// to convert the argument to a number. Update the original value in
+// old. Call the specialized add or subtract stub. The result is
+// left in dst.
+class DeferredPostfixCountOperation: public DeferredCode {
+ public:
+ DeferredPostfixCountOperation(Register dst,
+ Register old,
+ bool is_increment,
+ TypeInfo input_type)
+ : dst_(dst),
+ old_(old),
+ is_increment_(is_increment),
+ input_type_(input_type) {
+ set_comment("[ DeferredCountOperation");
+ }
- // Bail out quickly if we're not using lazy arguments allocation.
- if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
+ virtual void Generate();
- // ... or if the slot isn't a non-parameter arguments slot.
- if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
+ private:
+ Register dst_;
+ Register old_;
+ bool is_increment_;
+ TypeInfo input_type_;
+};
- // Pop the loaded value from the stack.
- Result value = frame_->Pop();
- // If the loaded value is a constant, we know if the arguments
- // object has been lazily loaded yet.
- if (value.is_constant()) {
- if (value.handle()->IsTheHole()) {
- Result arguments = StoreArgumentsObject(false);
- frame_->Push(&arguments);
- } else {
- frame_->Push(&value);
- }
- return;
+void DeferredPostfixCountOperation::Generate() {
+ Register left;
+ if (input_type_.IsNumber()) {
+ __ push(dst_); // Save the input to use as the old value.
+ left = dst_;
+ } else {
+ __ push(dst_);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ __ push(rax); // Save the result of ToNumber to use as the old value.
+ left = rax;
}
- // The loaded value is in a register. If it is the sentinel that
- // indicates that we haven't loaded the arguments object yet, we
- // need to do it now.
- JumpTarget exit;
- __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
- frame_->Push(&value);
- exit.Branch(not_equal);
- Result arguments = StoreArgumentsObject(false);
- frame_->SetElementAt(0, &arguments);
- exit.Bind();
-}
+ GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
+ NO_OVERWRITE,
+ NO_GENERIC_BINARY_FLAGS,
+ TypeInfo::Number());
+ stub.GenerateCall(masm_, left, Smi::FromInt(1));
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+ __ pop(old_);
+}
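
Both deferred classes implement the optimistic pattern: add or subtract 1 on the smi fast path, and only fall back (convert to number, then the generic stub) when the operand was not a smi or the operation overflowed. A sketch of the two paths:

    #include <cstdint>
    #include <limits>

    // Fast path: optimistic smi increment/decrement; returns false on
    // overflow, which corresponds to jumping to the deferred entry label.
    bool TryFastCount(int32_t smi, bool is_increment, int32_t* out) {
      int64_t r = static_cast<int64_t>(smi) + (is_increment ? 1 : -1);
      if (r < std::numeric_limits<int32_t>::min() ||
          r > std::numeric_limits<int32_t>::max()) {
        return false;  // bail out to the deferred (slow) code
      }
      *out = static_cast<int32_t>(r);
      return true;
    }

    // Slow path: after ToNumber, the generic stub just adds or subtracts 1.0.
    double SlowCount(double number_value, bool is_increment) {
      return is_increment ? number_value + 1.0 : number_value - 1.0;
    }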
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
- // For now, just do a runtime call. Since the call is inevitable,
- // we eagerly sync the virtual frame so we can directly push the
- // arguments into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
+void CodeGenerator::VisitCountOperation(CountOperation* node) {
+ Comment cmnt(masm_, "[ CountOperation");
- frame_->EmitPush(rsi);
- frame_->EmitPush(slot->var()->name());
+ bool is_postfix = node->is_postfix();
+ bool is_increment = node->op() == Token::INC;
- Result value;
- if (init_state == CONST_INIT) {
- // Same as the case for a normal store, but ignores attribute
- // (e.g. READ_ONLY) of context slot so that we can initialize const
- // properties (introduced via eval("const foo = (some expr);")). Also,
- // uses the current function context instead of the top context.
- //
- // Note that we must declare the foo upon entry of eval(), via a
- // context slot declaration, but we cannot initialize it at the same
- // time, because the const declaration may be at the end of the eval
- // code (sigh...) and the const variable may have been used before
- // (where its value is 'undefined'). Thus, we can only do the
- // initialization when we actually encounter the expression and when
- // the expression operands are defined and valid, and thus we need the
- // split into 2 operations: declaration of the context slot followed
- // by initialization.
- value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
- }
- // Storing a variable must keep the (new) value on the expression
- // stack. This is necessary for compiling chained assignment
- // expressions.
- frame_->Push(&value);
- } else {
- ASSERT(!slot->var()->is_dynamic());
+ Variable* var = node->expression()->AsVariableProxy()->AsVariable();
+ bool is_const = (var != NULL && var->mode() == Variable::CONST);
- JumpTarget exit;
- if (init_state == CONST_INIT) {
- ASSERT(slot->var()->mode() == Variable::CONST);
- // Only the first const initialization must be executed (the slot
- // still contains 'the hole' value). When the assignment is executed,
- // the code is identical to a normal store (see below).
- //
- // We spill the frame in the code below because the direct-frame
- // access of SlotOperand is potentially unsafe with an unspilled
- // frame.
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Init const");
- __ movq(rcx, SlotOperand(slot, rcx));
- __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
- exit.Branch(not_equal);
- }
+ // Postfix operations need a stack slot under the reference to hold
+  // the old value while the new value is being stored. This ensures that,
+  // if storing the new value requires a call, the old value is on the
+  // frame where it can be spilled.
+ if (is_postfix) frame_->Push(Smi::FromInt(0));
- // We must execute the store. Storing a variable must keep the (new)
- // value on the stack. This is necessary for compiling assignment
- // expressions.
- //
- // Note: We will reach here even with slot->var()->mode() ==
- // Variable::CONST because of const declarations which will initialize
- // consts to 'the hole' value and by doing so, end up calling this code.
- if (slot->type() == Slot::PARAMETER) {
- frame_->StoreToParameterAt(slot->index());
- } else if (slot->type() == Slot::LOCAL) {
- frame_->StoreToLocalAt(slot->index());
- } else {
- // The other slot types (LOOKUP and GLOBAL) cannot reach here.
- //
- // The use of SlotOperand below is safe for an unspilled frame
- // because the slot is a context slot.
- ASSERT(slot->type() == Slot::CONTEXT);
- frame_->Dup();
- Result value = frame_->Pop();
- value.ToRegister();
- Result start = allocator_->Allocate();
- ASSERT(start.is_valid());
- __ movq(SlotOperand(slot, start.reg()), value.reg());
- // RecordWrite may destroy the value registers.
- //
- // TODO(204): Avoid actually spilling when the value is not
- // needed (probably the common case).
- frame_->Spill(value.reg());
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
- // The results start, value, and temp are unused by going out of
- // scope.
+ // A constant reference is not saved to, so the reference is not a
+ // compound assignment reference.
+ { Reference target(this, node->expression(), !is_const);
+ if (target.is_illegal()) {
+ // Spoof the virtual frame to have the expected height (one higher
+ // than on entry).
+ if (!is_postfix) frame_->Push(Smi::FromInt(0));
+ return;
}
+ target.TakeValue();
- exit.Bind();
- }
-}
-
+ Result new_value = frame_->Pop();
+ new_value.ToRegister();
-Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
- Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow) {
- // Check that no extension objects have been created by calls to
- // eval from the current scope to the global scope.
- Register context = rsi;
- Result tmp = allocator_->Allocate();
- ASSERT(tmp.is_valid()); // All non-reserved registers were available.
+ Result old_value; // Only allocated in the postfix case.
+ if (is_postfix) {
+ // Allocate a temporary to preserve the old value.
+ old_value = allocator_->Allocate();
+ ASSERT(old_value.is_valid());
+ __ movq(old_value.reg(), new_value.reg());
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- slow->Branch(not_equal, not_taken);
+ // The return value for postfix operations is ToNumber(input).
+ // Keep more precise type info if the input is some kind of
+ // number already. If the input is not a number we have to wait
+ // for the deferred code to convert it.
+ if (new_value.type_info().IsNumber()) {
+ old_value.set_type_info(new_value.type_info());
}
- // Load next context in chain.
- __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
- __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- context = tmp.reg();
}
- // If no outer scope calls eval, we do not need to check more
- // context extensions. If we have reached an eval scope, we check
- // all extensions from this point.
- if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
+ // Ensure the new value is writable.
+ frame_->Spill(new_value.reg());
- if (s->is_eval_scope()) {
- // Loop up the context chain. There is no frame effect so it is
- // safe to use raw labels here.
- Label next, fast;
- if (!context.is(tmp.reg())) {
- __ movq(tmp.reg(), context);
+ DeferredCode* deferred = NULL;
+ if (is_postfix) {
+ deferred = new DeferredPostfixCountOperation(new_value.reg(),
+ old_value.reg(),
+ is_increment,
+ new_value.type_info());
+ } else {
+ deferred = new DeferredPrefixCountOperation(new_value.reg(),
+ is_increment,
+ new_value.type_info());
}
- // Load map for comparison into register, outside loop.
- __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
- __ bind(&next);
- // Terminate at global context.
- __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
- __ j(equal, &fast);
- // Check that extension is NULL.
- __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
- slow->Branch(not_equal);
- // Load next context in chain.
- __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
- __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- __ jmp(&next);
- __ bind(&fast);
- }
- tmp.Unuse();
-
- // All extension objects were empty and it is safe to use a global
- // load IC call.
- LoadGlobal();
- frame_->Push(slot->var()->name());
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Result answer = frame_->CallLoadIC(mode);
- // A test rax instruction following the call signals that the inobject
- // property case was inlined. Ensure that there is not a test rax
- // instruction here.
- masm_->nop();
- return answer;
-}
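
The removed LoadFromGlobalSlotCheckExtensions walks the context chain and bails out to the slow path as soon as any context carries an eval-introduced extension object. A sketch with toy context structs:

    // Toy contexts, not V8's: 'previous' follows the closure's context chain,
    // 'extension' is non-null when eval introduced bindings into the context.
    struct Context {
      Context* previous;
      void* extension;
      bool is_global;
    };

    bool AllExtensionsAbsent(Context* ctx) {
      for (; ctx != nullptr && !ctx->is_global; ctx = ctx->previous) {
        if (ctx->extension != nullptr) return false;  // slow->Branch(not_equal)
      }
      return true;  // safe to use the fast global load IC
    }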
-
-
-void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- Result* result,
- JumpTarget* slow,
- JumpTarget* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
- *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
- done->Jump(result);
- } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
- Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
- Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
- if (potential_slot != NULL) {
- // Generate fast case for locals that rewrite to slots.
- // Allocate a fresh register to use as a temp in
- // ContextSlotOperandCheckExtensions and to hold the result
- // value.
- *result = allocator_->Allocate();
- ASSERT(result->is_valid());
- __ movq(result->reg(),
- ContextSlotOperandCheckExtensions(potential_slot,
- *result,
- slow));
- if (potential_slot->var()->mode() == Variable::CONST) {
- __ CompareRoot(result->reg(), Heap::kTheHoleValueRootIndex);
- done->Branch(not_equal, result);
- __ LoadRoot(result->reg(), Heap::kUndefinedValueRootIndex);
- }
- done->Jump(result);
- } else if (rewrite != NULL) {
- // Generate fast case for argument loads.
- Property* property = rewrite->AsProperty();
- if (property != NULL) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- Literal* key_literal = property->key()->AsLiteral();
- if (obj_proxy != NULL &&
- key_literal != NULL &&
- obj_proxy->IsArguments() &&
- key_literal->handle()->IsSmi()) {
- // Load arguments object if there are no eval-introduced
- // variables. Then load the argument from the arguments
- // object using keyed load.
- Result arguments = allocator()->Allocate();
- ASSERT(arguments.is_valid());
- __ movq(arguments.reg(),
- ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
- arguments,
- slow));
- frame_->Push(&arguments);
- frame_->Push(key_literal->handle());
- *result = EmitKeyedLoad();
- done->Jump(result);
- }
- }
+ if (new_value.is_smi()) {
+ if (FLAG_debug_code) { __ AbortIfNotSmi(new_value.reg()); }
+ } else {
+ __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
}
- }
-}
-
-
-void CodeGenerator::LoadGlobal() {
- if (in_spilled_code()) {
- frame_->EmitPush(GlobalObject());
- } else {
- Result temp = allocator_->Allocate();
- __ movq(temp.reg(), GlobalObject());
- frame_->Push(&temp);
- }
-}
-
-
-void CodeGenerator::LoadGlobalReceiver() {
- Result temp = allocator_->Allocate();
- Register reg = temp.reg();
- __ movq(reg, GlobalObject());
- __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
- frame_->Push(&temp);
-}
-
-
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
- if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
- ASSERT(scope()->arguments_shadow() != NULL);
- // We don't want to do lazy arguments allocation for functions that
-  // have heap-allocated contexts, because it interferes with the
- // uninitialized const tracking in the context objects.
- return (scope()->num_heap_slots() > 0)
- ? EAGER_ARGUMENTS_ALLOCATION
- : LAZY_ARGUMENTS_ALLOCATION;
-}
-
-
-Result CodeGenerator::StoreArgumentsObject(bool initial) {
- ArgumentsAllocationMode mode = ArgumentsMode();
- ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
-
- Comment cmnt(masm_, "[ store arguments object");
- if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
- // When using lazy arguments allocation, we store the hole value
- // as a sentinel indicating that the arguments object hasn't been
- // allocated yet.
- frame_->Push(Factory::the_hole_value());
- } else {
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- frame_->PushFunction();
- frame_->PushReceiverSlotAddress();
- frame_->Push(Smi::FromInt(scope()->num_parameters()));
- Result result = frame_->CallStub(&stub, 3);
- frame_->Push(&result);
- }
-
-
- Variable* arguments = scope()->arguments()->var();
- Variable* shadow = scope()->arguments_shadow()->var();
- ASSERT(arguments != NULL && arguments->slot() != NULL);
- ASSERT(shadow != NULL && shadow->slot() != NULL);
- JumpTarget done;
- bool skip_arguments = false;
- if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
- // We have to skip storing into the arguments slot if it has
-    // already been written to. This can happen if a function
- // has a local variable named 'arguments'.
- LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
- Result probe = frame_->Pop();
- if (probe.is_constant()) {
- // We have to skip updating the arguments object if it has been
- // assigned a proper value.
- skip_arguments = !probe.handle()->IsTheHole();
+ if (is_increment) {
+ __ SmiAddConstant(new_value.reg(),
+ new_value.reg(),
+ Smi::FromInt(1),
+ deferred->entry_label());
} else {
- __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
- probe.Unuse();
- done.Branch(not_equal);
+ __ SmiSubConstant(new_value.reg(),
+ new_value.reg(),
+ Smi::FromInt(1),
+ deferred->entry_label());
}
- }
- if (!skip_arguments) {
- StoreToSlot(arguments->slot(), NOT_CONST_INIT);
- if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
- }
- StoreToSlot(shadow->slot(), NOT_CONST_INIT);
- return frame_->Pop();
-}
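
Lazy arguments allocation stores a sentinel ("the hole") and only materializes the arguments object the first time the slot is read while still holding the sentinel. A sketch of the idea (toy types, not V8's heap):

    struct ArgumentsObject { int num_parameters; };

    struct ArgumentsSlot {
      bool is_hole = true;   // the sentinel: object not materialized yet
      ArgumentsObject object{0};

      ArgumentsObject* Get(int num_parameters) {
        if (is_hole) {       // first read while the hole is still there
          object = ArgumentsObject{num_parameters};  // materialize lazily
          is_hole = false;
        }
        return &object;
      }
    };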
-
-
-void CodeGenerator::LoadTypeofExpression(Expression* expr) {
- // Special handling of identifiers as subexpressions of typeof.
- Variable* variable = expr->AsVariableProxy()->AsVariable();
- if (variable != NULL && !variable->is_this() && variable->is_global()) {
- // For a global variable we build the property reference
- // <global>.<variable> and perform a (regular non-contextual) property
- // load to make sure we do not get reference errors.
- Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
- Literal key(variable->name());
- Property property(&global, &key, RelocInfo::kNoPosition);
- Reference ref(this, &property);
- ref.GetValue();
- } else if (variable != NULL && variable->slot() != NULL) {
- // For a variable that rewrites to a slot, we signal it is the immediate
- // subexpression of a typeof.
- LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
- } else {
- // Anything else can be handled normally.
- Load(expr);
- }
-}
+ deferred->BindExit();
+ // Postfix count operations return their input converted to
+ // number. The case when the input is already a number is covered
+ // above in the allocation code for old_value.
+ if (is_postfix && !new_value.type_info().IsNumber()) {
+ old_value.set_type_info(TypeInfo::Number());
+ }
-static bool CouldBeNaN(const Result& result) {
- if (result.type_info().IsSmi()) return false;
- if (result.type_info().IsInteger32()) return false;
- if (!result.is_constant()) return true;
- if (!result.handle()->IsHeapNumber()) return false;
- return isnan(HeapNumber::cast(*result.handle())->value());
-}
+ new_value.set_type_info(TypeInfo::Number());
+ // Postfix: store the old value in the allocated slot under the
+ // reference.
+ if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
-// Convert from signed to unsigned comparison to match the way EFLAGS are set
-// by FPU and XMM compare instructions.
-static Condition DoubleCondition(Condition cc) {
- switch (cc) {
- case less: return below;
- case equal: return equal;
- case less_equal: return below_equal;
- case greater: return above;
- case greater_equal: return above_equal;
- default: UNREACHABLE();
+ frame_->Push(&new_value);
+ // Non-constant: update the reference.
+ if (!is_const) target.SetValue(NOT_CONST_INIT);
}
- UNREACHABLE();
- return equal;
-}
-
-void CodeGenerator::Comparison(AstNode* node,
- Condition cc,
- bool strict,
- ControlDestination* dest) {
- // Strict only makes sense for equality comparisons.
- ASSERT(!strict || cc == equal);
-
- Result left_side;
- Result right_side;
- // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
- if (cc == greater || cc == less_equal) {
- cc = ReverseCondition(cc);
- left_side = frame_->Pop();
- right_side = frame_->Pop();
- } else {
- right_side = frame_->Pop();
- left_side = frame_->Pop();
- }
- ASSERT(cc == less || cc == equal || cc == greater_equal);
+ // Postfix: drop the new value and use the old.
+ if (is_postfix) frame_->Drop();
+}
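
The prefix/postfix bookkeeping above boils down to: postfix yields ToNumber of the old value while the reference is updated to the new value; prefix yields the new value directly. A sketch over plain doubles:

    double PostfixIncrement(double* slot) {
      double old_value = *slot;  // saved in old_value.reg() above
      *slot = old_value + 1;     // reference updated via target.SetValue(...)
      return old_value;          // expression value is the old (numeric) value
    }

    double PrefixIncrement(double* slot) {
      *slot = *slot + 1;
      return *slot;              // expression value is the new value
    }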
- // If either side is a constant smi, optimize the comparison.
- bool left_side_constant_smi = false;
- bool left_side_constant_null = false;
- bool left_side_constant_1_char_string = false;
- if (left_side.is_constant()) {
- left_side_constant_smi = left_side.handle()->IsSmi();
- left_side_constant_null = left_side.handle()->IsNull();
- left_side_constant_1_char_string =
- (left_side.handle()->IsString() &&
- String::cast(*left_side.handle())->length() == 1 &&
- String::cast(*left_side.handle())->IsAsciiRepresentation());
- }
- bool right_side_constant_smi = false;
- bool right_side_constant_null = false;
- bool right_side_constant_1_char_string = false;
- if (right_side.is_constant()) {
- right_side_constant_smi = right_side.handle()->IsSmi();
- right_side_constant_null = right_side.handle()->IsNull();
- right_side_constant_1_char_string =
- (right_side.handle()->IsString() &&
- String::cast(*right_side.handle())->length() == 1 &&
- String::cast(*right_side.handle())->IsAsciiRepresentation());
- }
- if (left_side_constant_smi || right_side_constant_smi) {
- if (left_side_constant_smi && right_side_constant_smi) {
- // Trivial case, comparing two constants.
- int left_value = Smi::cast(*left_side.handle())->value();
- int right_value = Smi::cast(*right_side.handle())->value();
- switch (cc) {
- case less:
- dest->Goto(left_value < right_value);
- break;
- case equal:
- dest->Goto(left_value == right_value);
- break;
- case greater_equal:
- dest->Goto(left_value >= right_value);
- break;
- default:
- UNREACHABLE();
- }
- } else {
- // Only one side is a constant Smi.
- // If left side is a constant Smi, reverse the operands.
- // Since one side is a constant Smi, conversion order does not matter.
- if (left_side_constant_smi) {
- Result temp = left_side;
- left_side = right_side;
- right_side = temp;
- cc = ReverseCondition(cc);
- // This may re-introduce greater or less_equal as the value of cc.
- // CompareStub and the inline code both support all values of cc.
- }
- // Implement comparison against a constant Smi, inlining the case
- // where both sides are Smis.
- left_side.ToRegister();
- Register left_reg = left_side.reg();
- Handle<Object> right_val = right_side.handle();
+void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
+ // According to ECMA-262 section 11.11, page 58, the binary logical
+ // operators must yield the result of one of the two expressions
+ // before any ToBoolean() conversions. This means that the value
+ // produced by a && or || operator is not necessarily a boolean.
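
A sketch of the ECMA-262 11.11 point made above: && and || yield one of their operand values, not a coerced boolean. (The sketch evaluates both operands eagerly; the generated code of course short-circuits via control-flow targets.)

    #include <string>

    // Toy dynamically-typed value for illustration.
    struct Val { bool truthy; std::string payload; };

    Val LogicalAnd(const Val& left, const Val& right) {
      return left.truthy ? right : left;   // "a" && "b" yields "b"; 0 && x yields 0
    }

    Val LogicalOr(const Val& left, const Val& right) {
      return left.truthy ? left : right;   // "a" || "b" yields "a"
    }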
- // Here we split control flow to the stub call and inlined cases
- // before finally splitting it to the control destination. We use
- // a jump target and branching to duplicate the virtual frame at
- // the first split. We manually handle the off-frame references
- // by reconstituting them on the non-fall-through path.
- JumpTarget is_smi;
+ // NOTE: If the left hand side produces a materialized value (not
+ // control flow), we force the right hand side to do the same. This
+ // is necessary because we assume that if we get control flow on the
+ // last path out of an expression we got it on all paths.
+ if (node->op() == Token::AND) {
+ JumpTarget is_true;
+ ControlDestination dest(&is_true, destination()->false_target(), true);
+ LoadCondition(node->left(), &dest, false);
- if (left_side.is_smi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left_side.reg());
+ if (dest.false_was_fall_through()) {
+ // The current false target was used as the fall-through. If
+ // there are no dangling jumps to is_true then the left
+ // subexpression was unconditionally false. Otherwise we have
+ // paths where we do have to evaluate the right subexpression.
+ if (is_true.is_linked()) {
+ // We need to compile the right subexpression. If the jump to
+ // the current false target was a forward jump then we have a
+ // valid frame, we have just bound the false target, and we
+ // have to jump around the code for the right subexpression.
+ if (has_valid_frame()) {
+ destination()->false_target()->Unuse();
+ destination()->false_target()->Jump();
}
+ is_true.Bind();
+ // The left subexpression compiled to control flow, so the
+ // right one is free to do so as well.
+ LoadCondition(node->right(), destination(), false);
} else {
- Condition left_is_smi = masm_->CheckSmi(left_side.reg());
- is_smi.Branch(left_is_smi);
-
- bool is_loop_condition = (node->AsExpression() != NULL) &&
- node->AsExpression()->is_loop_condition();
- if (!is_loop_condition && right_val->IsSmi()) {
- // Right side is a constant smi and left side has been checked
- // not to be a smi.
- JumpTarget not_number;
- __ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
- Factory::heap_number_map());
- not_number.Branch(not_equal, &left_side);
- __ movsd(xmm1,
- FieldOperand(left_reg, HeapNumber::kValueOffset));
- int value = Smi::cast(*right_val)->value();
- if (value == 0) {
- __ xorpd(xmm0, xmm0);
- } else {
- Result temp = allocator()->Allocate();
- __ movl(temp.reg(), Immediate(value));
- __ cvtlsi2sd(xmm0, temp.reg());
- temp.Unuse();
- }
- __ ucomisd(xmm1, xmm0);
- // Jump to builtin for NaN.
- not_number.Branch(parity_even, &left_side);
- left_side.Unuse();
- dest->true_target()->Branch(DoubleCondition(cc));
- dest->false_target()->Jump();
- not_number.Bind(&left_side);
- }
-
-      // Set up and call the compare stub.
- CompareStub stub(cc, strict, kCantBothBeNaN);
- Result result = frame_->CallStub(&stub, &left_side, &right_side);
- result.ToRegister();
- __ testq(result.reg(), result.reg());
- result.Unuse();
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
-
- is_smi.Bind();
+ // We have actually just jumped to or bound the current false
+ // target but the current control destination is not marked as
+ // used.
+ destination()->Use(false);
}
- left_side = Result(left_reg);
- right_side = Result(right_val);
- // Test smi equality and comparison by signed int comparison.
- // Both sides are smis, so we can use an Immediate.
- __ SmiCompare(left_side.reg(), Smi::cast(*right_side.handle()));
- left_side.Unuse();
- right_side.Unuse();
- dest->Split(cc);
- }
- } else if (cc == equal &&
- (left_side_constant_null || right_side_constant_null)) {
- // To make null checks efficient, we check if either the left side or
- // the right side is the constant 'null'.
- // If so, we optimize the code by inlining a null check instead of
- // calling the (very) general runtime routine for checking equality.
- Result operand = left_side_constant_null ? right_side : left_side;
- right_side.Unuse();
- left_side.Unuse();
- operand.ToRegister();
- __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
- if (strict) {
- operand.Unuse();
- dest->Split(equal);
- } else {
- // The 'null' value is only equal to 'undefined' if using non-strict
- // comparisons.
- dest->true_target()->Branch(equal);
- __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
- dest->true_target()->Branch(equal);
- Condition is_smi = masm_->CheckSmi(operand.reg());
- dest->false_target()->Branch(is_smi);
+ } else if (dest.is_used()) {
+ // The left subexpression compiled to control flow (and is_true
+ // was just bound), so the right is free to do so as well.
+ LoadCondition(node->right(), destination(), false);
- // It can be an undetectable object.
- // Use a scratch register in preference to spilling operand.reg().
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ movq(temp.reg(),
- FieldOperand(operand.reg(), HeapObject::kMapOffset));
- __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- temp.Unuse();
- operand.Unuse();
- dest->Split(not_zero);
- }
- } else if (left_side_constant_1_char_string ||
- right_side_constant_1_char_string) {
- if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
- // Trivial case, comparing two constants.
- int left_value = String::cast(*left_side.handle())->Get(0);
- int right_value = String::cast(*right_side.handle())->Get(0);
- switch (cc) {
- case less:
- dest->Goto(left_value < right_value);
- break;
- case equal:
- dest->Goto(left_value == right_value);
- break;
- case greater_equal:
- dest->Goto(left_value >= right_value);
- break;
- default:
- UNREACHABLE();
- }
} else {
-      // Only one side is a constant 1-character string.
- // If left side is a constant 1-character string, reverse the operands.
- // Since one side is a constant string, conversion order does not matter.
- if (left_side_constant_1_char_string) {
- Result temp = left_side;
- left_side = right_side;
- right_side = temp;
- cc = ReverseCondition(cc);
- // This may reintroduce greater or less_equal as the value of cc.
- // CompareStub and the inline code both support all values of cc.
- }
- // Implement comparison against a constant string, inlining the case
- // where both sides are strings.
- left_side.ToRegister();
-
- // Here we split control flow to the stub call and inlined cases
- // before finally splitting it to the control destination. We use
- // a jump target and branching to duplicate the virtual frame at
- // the first split. We manually handle the off-frame references
- // by reconstituting them on the non-fall-through path.
- JumpTarget is_not_string, is_string;
- Register left_reg = left_side.reg();
- Handle<Object> right_val = right_side.handle();
- ASSERT(StringShape(String::cast(*right_val)).IsSymbol());
- Condition is_smi = masm()->CheckSmi(left_reg);
- is_not_string.Branch(is_smi, &left_side);
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ movq(temp.reg(),
- FieldOperand(left_reg, HeapObject::kMapOffset));
- __ movzxbl(temp.reg(),
- FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
- // If we are testing for equality then make use of the symbol shortcut.
- // Check if the left hand side has the same type as the right hand
- // side (which is always a symbol).
- if (cc == equal) {
- Label not_a_symbol;
- ASSERT(kSymbolTag != 0);
- // Ensure that no non-strings have the symbol bit set.
- ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
- __ testb(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit.
- __ j(zero, &not_a_symbol);
- // They are symbols, so do identity compare.
- __ Cmp(left_reg, right_side.handle());
- dest->true_target()->Branch(equal);
- dest->false_target()->Branch(not_equal);
- __ bind(&not_a_symbol);
- }
- // Call the compare stub if the left side is not a flat ascii string.
- __ andb(temp.reg(),
- Immediate(kIsNotStringMask |
- kStringRepresentationMask |
- kStringEncodingMask));
- __ cmpb(temp.reg(),
- Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
- temp.Unuse();
- is_string.Branch(equal, &left_side);
+ // We have a materialized value on the frame, so we exit with
+ // one on all paths. There are possibly also jumps to is_true
+ // from nested subexpressions.
+ JumpTarget pop_and_continue;
+ JumpTarget exit;
-    // Set up and call the compare stub.
- is_not_string.Bind(&left_side);
- CompareStub stub(cc, strict, kCantBothBeNaN);
- Result result = frame_->CallStub(&stub, &left_side, &right_side);
- result.ToRegister();
- __ testq(result.reg(), result.reg());
- result.Unuse();
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
+ // Avoid popping the result if it converts to 'false' using the
+ // standard ToBoolean() conversion as described in ECMA-262,
+ // section 9.2, page 30.
+ //
+ // Duplicate the TOS value. The duplicate will be popped by
+ // ToBoolean.
+ frame_->Dup();
+ ControlDestination dest(&pop_and_continue, &exit, true);
+ ToBoolean(&dest);
- is_string.Bind(&left_side);
- // left_side is a sequential ASCII string.
- ASSERT(left_side.reg().is(left_reg));
- right_side = Result(right_val);
- Result temp2 = allocator_->Allocate();
- ASSERT(temp2.is_valid());
- // Test string equality and comparison.
- if (cc == equal) {
- Label comparison_done;
- __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
- Smi::FromInt(1));
- __ j(not_equal, &comparison_done);
- uint8_t char_value =
- static_cast<uint8_t>(String::cast(*right_val)->Get(0));
- __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
- Immediate(char_value));
- __ bind(&comparison_done);
- } else {
- __ movq(temp2.reg(),
- FieldOperand(left_side.reg(), String::kLengthOffset));
- __ SmiSubConstant(temp2.reg(), temp2.reg(), Smi::FromInt(1));
- Label comparison;
- // If the length is 0 then the subtraction gave -1 which compares less
- // than any character.
- __ j(negative, &comparison);
- // Otherwise load the first character.
- __ movzxbl(temp2.reg(),
- FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize));
- __ bind(&comparison);
- // Compare the first character of the string with the
- // constant 1-character string.
- uint8_t char_value =
- static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0));
- __ cmpb(temp2.reg(), Immediate(char_value));
- Label characters_were_different;
- __ j(not_equal, &characters_were_different);
- // If the first character is the same then the long string sorts after
- // the short one.
- __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
- Smi::FromInt(1));
- __ bind(&characters_were_different);
- }
- temp2.Unuse();
- left_side.Unuse();
- right_side.Unuse();
- dest->Split(cc);
- }
- } else {
- // Neither side is a constant Smi, constant 1-char string, or constant null.
- // If either side is a non-smi constant, skip the smi check.
- bool known_non_smi =
- (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
- (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
- left_side.type_info().IsDouble() ||
- right_side.type_info().IsDouble();
+ // Pop the result of evaluating the first part.
+ frame_->Drop();
- NaNInformation nan_info =
- (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
- kBothCouldBeNaN :
- kCantBothBeNaN;
+ // Compile right side expression.
+ is_true.Bind();
+ Load(node->right());
-    // Inline number comparison handling any combination of smis and heap
-    // numbers if:
-    //   the code is in a loop,
-    //   the compare operation is different from equal, and
-    //   the compare is not a for-loop condition.
-    // The reason for excluding equal is that it will most likely be done
-    // with smis (not heap numbers), and the code for comparing smis is
-    // inlined separately. The same reasoning applies to for-loop conditions,
-    // which will also most likely be smi comparisons.
- bool is_loop_condition = (node->AsExpression() != NULL)
- && node->AsExpression()->is_loop_condition();
- bool inline_number_compare =
- loop_nesting() > 0 && cc != equal && !is_loop_condition;
+ // Exit (always with a materialized value).
+ exit.Bind();
+ }
- left_side.ToRegister();
- right_side.ToRegister();
+ } else {
+ ASSERT(node->op() == Token::OR);
+ JumpTarget is_false;
+ ControlDestination dest(destination()->true_target(), &is_false, false);
+ LoadCondition(node->left(), &dest, false);
- if (known_non_smi) {
- // Inlined equality check:
- // If at least one of the objects is not NaN, then if the objects
- // are identical, they are equal.
- if (nan_info == kCantBothBeNaN && cc == equal) {
- __ cmpq(left_side.reg(), right_side.reg());
- dest->true_target()->Branch(equal);
+ if (dest.true_was_fall_through()) {
+ // The current true target was used as the fall-through. If
+ // there are no dangling jumps to is_false then the left
+ // subexpression was unconditionally true. Otherwise we have
+ // paths where we do have to evaluate the right subexpression.
+ if (is_false.is_linked()) {
+ // We need to compile the right subexpression. If the jump to
+ // the current true target was a forward jump then we have a
+ // valid frame, we have just bound the true target, and we
+ // have to jump around the code for the right subexpression.
+ if (has_valid_frame()) {
+ destination()->true_target()->Unuse();
+ destination()->true_target()->Jump();
+ }
+ is_false.Bind();
+ // The left subexpression compiled to control flow, so the
+ // right one is free to do so as well.
+ LoadCondition(node->right(), destination(), false);
+ } else {
+ // We have just jumped to or bound the current true target but
+ // the current control destination is not marked as used.
+ destination()->Use(true);
}
- // Inlined number comparison:
- if (inline_number_compare) {
- GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
- }
+ } else if (dest.is_used()) {
+ // The left subexpression compiled to control flow (and is_false
+ // was just bound), so the right is free to do so as well.
+ LoadCondition(node->right(), destination(), false);
- CompareStub stub(cc, strict, nan_info, !inline_number_compare);
- Result answer = frame_->CallStub(&stub, &left_side, &right_side);
- __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flag.
- answer.Unuse();
- dest->Split(cc);
} else {
- // Here we split control flow to the stub call and inlined cases
- // before finally splitting it to the control destination. We use
- // a jump target and branching to duplicate the virtual frame at
- // the first split. We manually handle the off-frame references
- // by reconstituting them on the non-fall-through path.
- JumpTarget is_smi;
- Register left_reg = left_side.reg();
- Register right_reg = right_side.reg();
-
- Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
- is_smi.Branch(both_smi);
+ // We have a materialized value on the frame, so we exit with
+ // one on all paths. There are possibly also jumps to is_false
+ // from nested subexpressions.
+ JumpTarget pop_and_continue;
+ JumpTarget exit;
- // Inline the equality check if both operands can't be a NaN. If both
- // objects are the same they are equal.
- if (nan_info == kCantBothBeNaN && cc == equal) {
- __ cmpq(left_side.reg(), right_side.reg());
- dest->true_target()->Branch(equal);
- }
+ // Avoid popping the result if it converts to 'true' using the
+ // standard ToBoolean() conversion as described in ECMA-262,
+ // section 9.2, page 30.
+ //
+ // Duplicate the TOS value. The duplicate will be popped by
+ // ToBoolean.
+ frame_->Dup();
+ ControlDestination dest(&exit, &pop_and_continue, false);
+ ToBoolean(&dest);
- // Inlined number comparison:
- if (inline_number_compare) {
- GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
- }
+ // Pop the result of evaluating the first part.
+ frame_->Drop();
- CompareStub stub(cc, strict, nan_info, !inline_number_compare);
- Result answer = frame_->CallStub(&stub, &left_side, &right_side);
- __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags.
- answer.Unuse();
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
+ // Compile right side expression.
+ is_false.Bind();
+ Load(node->right());
- is_smi.Bind();
- left_side = Result(left_reg);
- right_side = Result(right_reg);
- __ SmiCompare(left_side.reg(), right_side.reg());
- right_side.Unuse();
- left_side.Unuse();
- dest->Split(cc);
+ // Exit (always with a materialized value).
+ exit.Bind();
}
}
}
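Editor's note: the ECMA-262 rule cited at the top of GenerateLogicalBooleanOperation is easy to model. A minimal sketch with illustrative stand-ins (JsValue and ToBoolean here are not V8's types):

    #include <cassert>
    #include <string>

    // Illustrative stand-ins only: JsValue/ToBoolean are not V8 API.
    struct JsValue { std::string s; };
    bool ToBoolean(const JsValue& v) { return !v.s.empty(); }

    // 'x && y' yields y when x is truthy, else x; 'x || y' the reverse.
    JsValue And(const JsValue& a, const JsValue& b) { return ToBoolean(a) ? b : a; }
    JsValue Or(const JsValue& a, const JsValue& b) { return ToBoolean(a) ? a : b; }

    int main() {
      assert(And(JsValue{"lhs"}, JsValue{"rhs"}).s == "rhs");  // an operand, not a boolean
      assert(And(JsValue{""}, JsValue{"rhs"}).s == "");
      assert(Or(JsValue{""}, JsValue{"rhs"}).s == "rhs");
      return 0;
    }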
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+ Comment cmnt(masm_, "[ BinaryOperation");
-// Load a comparison operand into an XMM register. Jump to the not_numbers
-// jump target, passing the left and right result, if the operand is not a number.
-static void LoadComparisonOperand(MacroAssembler* masm_,
- Result* operand,
- XMMRegister xmm_reg,
- Result* left_side,
- Result* right_side,
- JumpTarget* not_numbers) {
- Label done;
- if (operand->type_info().IsDouble()) {
- // Operand is known to be a heap number, just load it.
- __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
- } else if (operand->type_info().IsSmi()) {
- // Operand is known to be a smi. Convert it to double and keep the original
- // smi.
- __ SmiToInteger32(kScratchRegister, operand->reg());
- __ cvtlsi2sd(xmm_reg, kScratchRegister);
+ if (node->op() == Token::AND || node->op() == Token::OR) {
+ GenerateLogicalBooleanOperation(node);
} else {
- // Operand type not known, check for smi or heap number.
- Label smi;
- __ JumpIfSmi(operand->reg(), &smi);
- if (!operand->type_info().IsNumber()) {
- __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
- __ cmpq(FieldOperand(operand->reg(), HeapObject::kMapOffset),
- kScratchRegister);
- not_numbers->Branch(not_equal, left_side, right_side, taken);
+ // NOTE: The code below assumes that the slow cases (calls to runtime)
+ // never return a constant/immutable object.
+ OverwriteMode overwrite_mode = NO_OVERWRITE;
+ if (node->left()->AsBinaryOperation() != NULL &&
+ node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+ overwrite_mode = OVERWRITE_LEFT;
+ } else if (node->right()->AsBinaryOperation() != NULL &&
+ node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+ overwrite_mode = OVERWRITE_RIGHT;
}
- __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
- __ jmp(&done);
- __ bind(&smi);
-  // Convert smi to double and keep the original smi.
- __ SmiToInteger32(kScratchRegister, operand->reg());
- __ cvtlsi2sd(xmm_reg, kScratchRegister);
- __ jmp(&done);
+ if (node->left()->IsTrivial()) {
+ Load(node->right());
+ Result right = frame_->Pop();
+ frame_->Push(node->left());
+ frame_->Push(&right);
+ } else {
+ Load(node->left());
+ Load(node->right());
+ }
+ GenericBinaryOperation(node, overwrite_mode);
}
- __ bind(&done);
}
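Editor's note: VisitBinaryOperation picks an OverwriteMode so the stub may reuse a temporary operand's storage; the removed constant-smi code further below swaps modes for commuted operands via (2 * mode) % 3. A check of that mapping, assuming the enum order implied by the "0->0, 1->2, 2->1" comment there:

    #include <cassert>

    // Assumed values, matching the "0->0, 1->2, 2->1" comment below.
    enum OverwriteMode { NO_OVERWRITE = 0, OVERWRITE_LEFT = 1, OVERWRITE_RIGHT = 2 };

    int main() {
      // Swapping commuted operands swaps which side may be overwritten.
      assert((2 * NO_OVERWRITE) % 3 == NO_OVERWRITE);
      assert((2 * OVERWRITE_LEFT) % 3 == OVERWRITE_RIGHT);
      assert((2 * OVERWRITE_RIGHT) % 3 == OVERWRITE_LEFT);
      return 0;
    }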
-void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
- Result* right_side,
- Condition cc,
- ControlDestination* dest) {
- ASSERT(left_side->is_register());
- ASSERT(right_side->is_register());
-
- JumpTarget not_numbers;
- // Load left and right operand into registers xmm0 and xmm1 and compare.
- LoadComparisonOperand(masm_, left_side, xmm0, left_side, right_side,
- &not_numbers);
- LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side,
- &not_numbers);
- __ ucomisd(xmm0, xmm1);
- // Bail out if a NaN is involved.
- not_numbers.Branch(parity_even, left_side, right_side);
-
- // Split to destination targets based on comparison.
- left_side->Unuse();
- right_side->Unuse();
- dest->true_target()->Branch(DoubleCondition(cc));
- dest->false_target()->Jump();
-
- not_numbers.Bind(left_side, right_side);
+void CodeGenerator::VisitThisFunction(ThisFunction* node) {
+ frame_->PushFunction();
}
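Editor's note: the parity_even bail-out in the removed GenerateInlineNumberComparison exists because ucomisd flags NaN operands as unordered, so no ordered branch condition can be trusted. In C++ terms:

    #include <cassert>
    #include <cmath>

    int main() {
      double nan = std::nan("");
      // NaN is "unordered": every ordered comparison with it is false,
      // which is what the parity flag reports after ucomisd.
      assert(!(nan < 1.0) && !(nan >= 1.0) && !(nan == nan));
      return 0;
    }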
-class DeferredInlineBinaryOperation: public DeferredCode {
- public:
- DeferredInlineBinaryOperation(Token::Value op,
- Register dst,
- Register left,
- Register right,
- OverwriteMode mode)
- : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
- set_comment("[ DeferredInlineBinaryOperation");
- }
-
- virtual void Generate();
+void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
+ Comment cmnt(masm_, "[ CompareOperation");
- private:
- Token::Value op_;
- Register dst_;
- Register left_;
- Register right_;
- OverwriteMode mode_;
-};
+ // Get the expressions from the node.
+ Expression* left = node->left();
+ Expression* right = node->right();
+ Token::Value op = node->op();
+ // To make typeof testing for natives implemented in JavaScript really
+ // efficient, we generate special code for expressions of the form:
+ // 'typeof <expression> == <string>'.
+ UnaryOperation* operation = left->AsUnaryOperation();
+ if ((op == Token::EQ || op == Token::EQ_STRICT) &&
+ (operation != NULL && operation->op() == Token::TYPEOF) &&
+ (right->AsLiteral() != NULL &&
+ right->AsLiteral()->handle()->IsString())) {
+ Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle()));
+ // Load the operand and move it to a register.
+ LoadTypeofExpression(operation->expression());
+ Result answer = frame_->Pop();
+ answer.ToRegister();
-void DeferredInlineBinaryOperation::Generate() {
- Label done;
- if ((op_ == Token::ADD)
- || (op_ == Token::SUB)
- || (op_ == Token::MUL)
- || (op_ == Token::DIV)) {
- Label call_runtime;
- Label left_smi, right_smi, load_right, do_op;
- __ JumpIfSmi(left_, &left_smi);
- __ CompareRoot(FieldOperand(left_, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
- __ movsd(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
- if (mode_ == OVERWRITE_LEFT) {
- __ movq(dst_, left_);
- }
- __ jmp(&load_right);
+ if (check->Equals(Heap::number_symbol())) {
+ Condition is_smi = masm_->CheckSmi(answer.reg());
+ destination()->true_target()->Branch(is_smi);
+ frame_->Spill(answer.reg());
+ __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
+ answer.Unuse();
+ destination()->Split(equal);
- __ bind(&left_smi);
- __ SmiToInteger32(left_, left_);
- __ cvtlsi2sd(xmm0, left_);
- __ Integer32ToSmi(left_, left_);
- if (mode_ == OVERWRITE_LEFT) {
- Label alloc_failure;
- __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
- }
+ } else if (check->Equals(Heap::string_symbol())) {
+ Condition is_smi = masm_->CheckSmi(answer.reg());
+ destination()->false_target()->Branch(is_smi);
- __ bind(&load_right);
- __ JumpIfSmi(right_, &right_smi);
- __ CompareRoot(FieldOperand(right_, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
- __ movsd(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
- if (mode_ == OVERWRITE_RIGHT) {
- __ movq(dst_, right_);
- } else if (mode_ == NO_OVERWRITE) {
- Label alloc_failure;
- __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
- }
- __ jmp(&do_op);
+ // It can be an undetectable string object.
+ __ movq(kScratchRegister,
+ FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ destination()->false_target()->Branch(not_zero);
+ __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE);
+ answer.Unuse();
+ destination()->Split(below); // Unsigned byte comparison needed.
- __ bind(&right_smi);
- __ SmiToInteger32(right_, right_);
- __ cvtlsi2sd(xmm1, right_);
- __ Integer32ToSmi(right_, right_);
- if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
- Label alloc_failure;
- __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
- }
+ } else if (check->Equals(Heap::boolean_symbol())) {
+ __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex);
+ destination()->true_target()->Branch(equal);
+ __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex);
+ answer.Unuse();
+ destination()->Split(equal);
- __ bind(&do_op);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movsd(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
- __ jmp(&done);
+ } else if (check->Equals(Heap::undefined_symbol())) {
+ __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
+ destination()->true_target()->Branch(equal);
- __ bind(&call_runtime);
- }
- GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
- stub.GenerateCall(masm_, left_, right_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
- __ bind(&done);
-}
+ Condition is_smi = masm_->CheckSmi(answer.reg());
+ destination()->false_target()->Branch(is_smi);
+ // It can be an undetectable object.
+ __ movq(kScratchRegister,
+ FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ answer.Unuse();
+ destination()->Split(not_zero);
-static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
- Token::Value op,
- const Result& right,
- const Result& left) {
- // Set TypeInfo of result according to the operation performed.
- // We rely on the fact that smis have a 32 bit payload on x64.
- STATIC_ASSERT(kSmiValueSize == 32);
- switch (op) {
- case Token::COMMA:
- return right.type_info();
- case Token::OR:
- case Token::AND:
- // Result type can be either of the two input types.
- return operands_type;
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- // Result is always a smi.
- return TypeInfo::Smi();
- case Token::SAR:
- case Token::SHL:
- // Result is always a smi.
- return TypeInfo::Smi();
- case Token::SHR:
- // Result of x >>> y is always a smi if masked y >= 1, otherwise a number.
- return (right.is_constant() && right.handle()->IsSmi()
- && (Smi::cast(*right.handle())->value() & 0x1F) >= 1)
- ? TypeInfo::Smi()
- : TypeInfo::Number();
- case Token::ADD:
- if (operands_type.IsNumber()) {
- return TypeInfo::Number();
- } else if (left.type_info().IsString() || right.type_info().IsString()) {
- return TypeInfo::String();
- } else {
- return TypeInfo::Unknown();
- }
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- // Result is always a number.
- return TypeInfo::Number();
- default:
- UNREACHABLE();
- }
- UNREACHABLE();
- return TypeInfo::Unknown();
-}
+ } else if (check->Equals(Heap::function_symbol())) {
+ Condition is_smi = masm_->CheckSmi(answer.reg());
+ destination()->false_target()->Branch(is_smi);
+ frame_->Spill(answer.reg());
+ __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
+ destination()->true_target()->Branch(equal);
+ // Regular expressions are callable so typeof == 'function'.
+ __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
+ answer.Unuse();
+ destination()->Split(equal);
+ } else if (check->Equals(Heap::object_symbol())) {
+ Condition is_smi = masm_->CheckSmi(answer.reg());
+ destination()->false_target()->Branch(is_smi);
+ __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
+ destination()->true_target()->Branch(equal);
-void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
- OverwriteMode overwrite_mode) {
- Comment cmnt(masm_, "[ BinaryOperation");
- Token::Value op = expr->op();
- Comment cmnt_token(masm_, Token::String(op));
+ // Regular expressions are typeof == 'function', not 'object'.
+ __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister);
+ destination()->false_target()->Branch(equal);
- if (op == Token::COMMA) {
- // Simply discard left value.
- frame_->Nip(1);
+ // It can be an undetectable object.
+ __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ destination()->false_target()->Branch(not_zero);
+ __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
+ destination()->false_target()->Branch(below);
+ __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
+ answer.Unuse();
+ destination()->Split(below_equal);
+ } else {
+ // Uncommon case: typeof testing against a string literal that is
+ // never returned from the typeof operator.
+ answer.Unuse();
+ destination()->Goto(false);
+ }
return;
}
- Result right = frame_->Pop();
- Result left = frame_->Pop();
-
- if (op == Token::ADD) {
- const bool left_is_string = left.type_info().IsString();
- const bool right_is_string = right.type_info().IsString();
- // Make sure constant strings have string type info.
- ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
- left_is_string);
- ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
- right_is_string);
- if (left_is_string || right_is_string) {
- frame_->Push(&left);
- frame_->Push(&right);
- Result answer;
- if (left_is_string) {
- if (right_is_string) {
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- answer = frame_->CallStub(&stub, 2);
- } else {
- answer =
- frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
- }
- } else if (right_is_string) {
- answer =
- frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
- }
- answer.set_type_info(TypeInfo::String());
- frame_->Push(&answer);
+ Condition cc = no_condition;
+ bool strict = false;
+ switch (op) {
+ case Token::EQ_STRICT:
+ strict = true;
+ // Fall through
+ case Token::EQ:
+ cc = equal;
+ break;
+ case Token::LT:
+ cc = less;
+ break;
+ case Token::GT:
+ cc = greater;
+ break;
+ case Token::LTE:
+ cc = less_equal;
+ break;
+ case Token::GTE:
+ cc = greater_equal;
+ break;
+ case Token::IN: {
+ Load(left);
+ Load(right);
+ Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
+ frame_->Push(&answer); // push the result
return;
}
- // Neither operand is known to be a string.
+ case Token::INSTANCEOF: {
+ Load(left);
+ Load(right);
+ InstanceofStub stub;
+ Result answer = frame_->CallStub(&stub, 2);
+ answer.ToRegister();
+ __ testq(answer.reg(), answer.reg());
+ answer.Unuse();
+ destination()->Split(zero);
+ return;
+ }
+ default:
+ UNREACHABLE();
}
- bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
- bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
- bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
- bool right_is_non_smi_constant =
- right.is_constant() && !right.handle()->IsSmi();
-
- if (left_is_smi_constant && right_is_smi_constant) {
- // Compute the constant result at compile time, and leave it on the frame.
- int left_int = Smi::cast(*left.handle())->value();
- int right_int = Smi::cast(*right.handle())->value();
- if (FoldConstantSmis(op, left_int, right_int)) return;
+ if (left->IsTrivial()) {
+ Load(right);
+ Result right_result = frame_->Pop();
+ frame_->Push(left);
+ frame_->Push(&right_result);
+ } else {
+ Load(left);
+ Load(right);
}
- // Get number type of left and right sub-expressions.
- TypeInfo operands_type =
- TypeInfo::Combine(left.type_info(), right.type_info());
+ Comparison(node, cc, strict, destination());
+}
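Editor's note: the typeof fast paths above repeatedly test the map's kIsUndetectable flag so that undetectable objects report 'undefined'. A sketch of that bit test (the bit position here is made up for illustration; the real constant lives in objects.h):

    #include <cassert>
    #include <cstdint>

    const int kIsUndetectable = 4;  // bit position assumed for illustration

    bool IsUndetectable(uint8_t bit_field) {
      return (bit_field & (1 << kIsUndetectable)) != 0;
    }

    int main() {
      assert(IsUndetectable(1 << kIsUndetectable));
      assert(!IsUndetectable(0));
      return 0;
    }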
- TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left);
- Result answer;
- if (left_is_non_smi_constant || right_is_non_smi_constant) {
- // Go straight to the slow case, with no smi code.
- GenericBinaryOpStub stub(op,
- overwrite_mode,
- NO_SMI_CODE_IN_STUB,
- operands_type);
- answer = stub.GenerateCall(masm_, frame_, &left, &right);
- } else if (right_is_smi_constant) {
- answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
- false, overwrite_mode);
- } else if (left_is_smi_constant) {
- answer = ConstantSmiBinaryOperation(expr, &right, left.handle(),
- true, overwrite_mode);
- } else {
-    // Set the flags based on the operation, type and loop nesting level.
-    // Bit operations always assume they likely operate on smis, but still
-    // only generate the inline smi check code if this operation is part of
-    // a loop. For all other operations, only inline the smi check code for
-    // likely smis if the operation is part of a loop.
- if (loop_nesting() > 0 &&
- (Token::IsBitOp(op) ||
- operands_type.IsInteger32() ||
- expr->type()->IsLikelySmi())) {
- answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode);
- } else {
- GenericBinaryOpStub stub(op,
- overwrite_mode,
- NO_GENERIC_BINARY_FLAGS,
- operands_type);
- answer = stub.GenerateCall(masm_, frame_, &left, &right);
- }
- }
-
- answer.set_type_info(result_type);
- frame_->Push(&answer);
+#ifdef DEBUG
+bool CodeGenerator::HasValidEntryRegisters() {
+ return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
+ && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0))
+ && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0))
+ && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0))
+ && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0))
+ && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0))
+ && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
+ && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
+ && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
+ && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
}
+#endif
+
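Editor's note: HasValidEntryRegisters checks that, for each allocatable register, the allocator's reference count agrees with whether the virtual frame holds that register. A toy model of the invariant (plain arrays standing in for the allocator and frame):

    #include <cassert>

    int main() {
      // One slot per checked register: allocator count vs. frame usage.
      int count[2] = {1, 0};         // e.g. rax referenced once, rbx free
      bool used[2] = {true, false};  // the frame holds rax but not rbx
      for (int i = 0; i < 2; i++) {
        assert(count[i] == (used[i] ? 1 : 0));
      }
      return 0;
    }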
// Emit a LoadIC call to get the value from receiver and leave it in
@@ -6901,623 +7563,155 @@ void DeferredReferenceGetNamedValue::Generate() {
}
-void DeferredInlineSmiAdd::Generate() {
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
- igostub.GenerateCall(masm_, dst_, value_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-void DeferredInlineSmiAddReversed::Generate() {
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
- igostub.GenerateCall(masm_, value_, dst_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-void DeferredInlineSmiSub::Generate() {
- GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
- igostub.GenerateCall(masm_, dst_, value_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-void DeferredInlineSmiOperation::Generate() {
- // For mod we don't generate all the Smi code inline.
- GenericBinaryOpStub stub(
- op_,
- overwrite_mode_,
- (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
- stub.GenerateCall(masm_, src_, value_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-void DeferredInlineSmiOperationReversed::Generate() {
- GenericBinaryOpStub stub(
- op_,
- overwrite_mode_,
- NO_SMI_CODE_IN_STUB);
- stub.GenerateCall(masm_, value_, src_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
- Result* operand,
- Handle<Object> value,
- bool reversed,
- OverwriteMode overwrite_mode) {
- // Generate inline code for a binary operation when one of the
- // operands is a constant smi. Consumes the argument "operand".
- if (IsUnsafeSmi(value)) {
- Result unsafe_operand(value);
- if (reversed) {
- return LikelySmiBinaryOperation(expr, &unsafe_operand, operand,
- overwrite_mode);
- } else {
- return LikelySmiBinaryOperation(expr, operand, &unsafe_operand,
- overwrite_mode);
- }
+class DeferredReferenceGetKeyedValue: public DeferredCode {
+ public:
+ explicit DeferredReferenceGetKeyedValue(Register dst,
+ Register receiver,
+ Register key)
+ : dst_(dst), receiver_(receiver), key_(key) {
+ set_comment("[ DeferredReferenceGetKeyedValue");
}
- // Get the literal value.
- Smi* smi_value = Smi::cast(*value);
- int int_value = smi_value->value();
-
- Token::Value op = expr->op();
- Result answer;
- switch (op) {
- case Token::ADD: {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- DeferredCode* deferred = NULL;
- if (reversed) {
- deferred = new DeferredInlineSmiAddReversed(operand->reg(),
- smi_value,
- overwrite_mode);
- } else {
- deferred = new DeferredInlineSmiAdd(operand->reg(),
- smi_value,
- overwrite_mode);
- }
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- __ SmiAddConstant(operand->reg(),
- operand->reg(),
- smi_value,
- deferred->entry_label());
- deferred->BindExit();
- answer = *operand;
- break;
- }
-
- case Token::SUB: {
- if (reversed) {
- Result constant_operand(value);
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- // A smi currently fits in a 32-bit Immediate.
- __ SmiSubConstant(operand->reg(),
- operand->reg(),
- smi_value,
- deferred->entry_label());
- deferred->BindExit();
- answer = *operand;
- }
- break;
- }
-
- case Token::SAR:
- if (reversed) {
- Result constant_operand(value);
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- frame_->Spill(operand->reg());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- __ SmiShiftArithmeticRightConstant(operand->reg(),
- operand->reg(),
- shift_value);
- deferred->BindExit();
- answer = *operand;
- }
- break;
-
- case Token::SHR:
- if (reversed) {
- Result constant_operand(value);
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- answer.reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- __ SmiShiftLogicalRightConstant(answer.reg(),
- operand->reg(),
- shift_value,
- deferred->entry_label());
- deferred->BindExit();
- operand->Unuse();
- }
- break;
-
- case Token::SHL:
- if (reversed) {
- operand->ToRegister();
-
- // We need rcx to be available to hold operand, and to be spilled.
- // SmiShiftLeft implicitly modifies rcx.
- if (operand->reg().is(rcx)) {
- frame_->Spill(operand->reg());
- answer = allocator()->Allocate();
- } else {
- Result rcx_reg = allocator()->Allocate(rcx);
- // answer must not be rcx.
- answer = allocator()->Allocate();
- // rcx_reg goes out of scope.
- }
-
- DeferredInlineSmiOperationReversed* deferred =
- new DeferredInlineSmiOperationReversed(op,
- answer.reg(),
- smi_value,
- operand->reg(),
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
-
- __ Move(answer.reg(), smi_value);
- __ SmiShiftLeft(answer.reg(), answer.reg(), operand->reg());
- operand->Unuse();
+ virtual void Generate();
- deferred->BindExit();
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- if (shift_value == 0) {
- // Spill operand so it can be overwritten in the slow case.
- frame_->Spill(operand->reg());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- deferred->BindExit();
- answer = *operand;
- } else {
- // Use a fresh temporary for nonzero shift values.
- answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- answer.reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- __ SmiShiftLeftConstant(answer.reg(),
- operand->reg(),
- shift_value);
- deferred->BindExit();
- operand->Unuse();
- }
- }
- break;
+ Label* patch_site() { return &patch_site_; }
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND: {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- if (reversed) {
- // Bit operations with a constant smi are commutative.
- // We can swap left and right operands with no problem.
- // Swap left and right overwrite modes. 0->0, 1->2, 2->1.
- overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3);
- }
- DeferredCode* deferred = new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- if (op == Token::BIT_AND) {
- __ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
- } else if (op == Token::BIT_XOR) {
- if (int_value != 0) {
- __ SmiXorConstant(operand->reg(), operand->reg(), smi_value);
- }
- } else {
- ASSERT(op == Token::BIT_OR);
- if (int_value != 0) {
- __ SmiOrConstant(operand->reg(), operand->reg(), smi_value);
- }
- }
- deferred->BindExit();
- answer = *operand;
- break;
- }
+ private:
+ Label patch_site_;
+ Register dst_;
+ Register receiver_;
+ Register key_;
+};
- // Generate inline code for mod of powers of 2 and negative powers of 2.
- case Token::MOD:
- if (!reversed &&
- int_value != 0 &&
- (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- // Check for negative or non-Smi left hand side.
- __ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label());
- if (int_value < 0) int_value = -int_value;
- if (int_value == 1) {
- __ Move(operand->reg(), Smi::FromInt(0));
- } else {
- __ SmiAndConstant(operand->reg(),
- operand->reg(),
- Smi::FromInt(int_value - 1));
- }
- deferred->BindExit();
- answer = *operand;
- break; // This break only applies if we generated code for MOD.
- }
- // Fall through if we did not find a power of 2 on the right hand side!
- // The next case must be the default.
- default: {
- Result constant_operand(value);
- if (reversed) {
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
- overwrite_mode);
- }
- break;
+void DeferredReferenceGetKeyedValue::Generate() {
+ if (receiver_.is(rdx)) {
+ if (!key_.is(rax)) {
+ __ movq(rax, key_);
+ } // else do nothing.
+ } else if (receiver_.is(rax)) {
+ if (key_.is(rdx)) {
+ __ xchg(rax, rdx);
+ } else if (key_.is(rax)) {
+ __ movq(rdx, receiver_);
+ } else {
+ __ movq(rdx, receiver_);
+ __ movq(rax, key_);
}
+ } else if (key_.is(rax)) {
+ __ movq(rdx, receiver_);
+ } else {
+ __ movq(rax, key_);
+ __ movq(rdx, receiver_);
}
- ASSERT(answer.is_valid());
- return answer;
-}
-
+ // Calculate the delta from the IC call instruction to the map check
+  // movq instruction in the inlined version. The delta is stored negated
+  // in a test(rax, -delta) instruction after the call so that we can find
+  // it in the IC initialization code and patch the movq instruction.
+ // This means that we cannot allow test instructions after calls to
+ // KeyedLoadIC stubs in other places.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // The delta from the start of the map-compare instruction to the
+ // test instruction. We use masm_-> directly here instead of the __
+ // macro because the macro sometimes uses macro expansion to turn
+ // into something that can't return a value. This is encountered
+ // when doing generated code coverage tests.
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+ // Here we use masm_-> instead of the __ macro because this is the
+ // instruction that gets patched and coverage code gets in the way.
+ // TODO(X64): Consider whether it's worth switching the test to a
+ // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
+ // be generated normally.
+ masm_->testl(rax, Immediate(-delta_to_patch_site));
+ __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
-void CodeGenerator::JumpIfNotSmiUsingTypeInfo(Register reg,
- TypeInfo type,
- DeferredCode* deferred) {
- if (!type.IsSmi()) {
- __ JumpIfNotSmi(reg, deferred->entry_label());
- }
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(reg);
- }
+ if (!dst_.is(rax)) __ movq(dst_, rax);
}
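Editor's note: the negated-delta encoding above can be sanity-checked with plain address arithmetic. A sketch with made-up addresses (this is not V8's actual patching code):

    #include <cassert>
    #include <cstdint>

    int main() {
      uintptr_t map_check_movq = 0x1000;  // start of the inlined map compare
      uintptr_t test_insn = 0x1040;       // the testl emitted after the call
      // The immediate stores the negated distance back to the movq ...
      int32_t imm = -static_cast<int32_t>(test_insn - map_check_movq);
      // ... so the patcher can recover the movq's address from the test.
      assert(test_insn + imm == map_check_movq);
      return 0;
    }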
-void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
- Register right,
- TypeInfo left_info,
- TypeInfo right_info,
- DeferredCode* deferred) {
- if (!left_info.IsSmi() && !right_info.IsSmi()) {
- __ JumpIfNotBothSmi(left, right, deferred->entry_label());
- } else if (!left_info.IsSmi()) {
- __ JumpIfNotSmi(left, deferred->entry_label());
- } else if (!right_info.IsSmi()) {
- __ JumpIfNotSmi(right, deferred->entry_label());
- }
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
+class DeferredReferenceSetKeyedValue: public DeferredCode {
+ public:
+ DeferredReferenceSetKeyedValue(Register value,
+ Register key,
+ Register receiver)
+ : value_(value), key_(key), receiver_(receiver) {
+ set_comment("[ DeferredReferenceSetKeyedValue");
}
-}
+ virtual void Generate();
-// Implements a binary operation using a deferred code object and some
-// inline code to operate on smis quickly.
-Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode) {
- // Copy the type info because left and right may be overwritten.
- TypeInfo left_type_info = left->type_info();
- TypeInfo right_type_info = right->type_info();
- Token::Value op = expr->op();
- Result answer;
- // Special handling of div and mod because they use fixed registers.
- if (op == Token::DIV || op == Token::MOD) {
- // We need rax as the quotient register, rdx as the remainder
- // register, neither left nor right in rax or rdx, and left copied
- // to rax.
- Result quotient;
- Result remainder;
- bool left_is_in_rax = false;
- // Step 1: get rax for quotient.
- if ((left->is_register() && left->reg().is(rax)) ||
- (right->is_register() && right->reg().is(rax))) {
- // One or both is in rax. Use a fresh non-rdx register for
- // them.
- Result fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- if (fresh.reg().is(rdx)) {
- remainder = fresh;
- fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- }
- if (left->is_register() && left->reg().is(rax)) {
- quotient = *left;
- *left = fresh;
- left_is_in_rax = true;
- }
- if (right->is_register() && right->reg().is(rax)) {
- quotient = *right;
- *right = fresh;
- }
- __ movq(fresh.reg(), rax);
- } else {
- // Neither left nor right is in rax.
- quotient = allocator_->Allocate(rax);
- }
- ASSERT(quotient.is_register() && quotient.reg().is(rax));
- ASSERT(!(left->is_register() && left->reg().is(rax)));
- ASSERT(!(right->is_register() && right->reg().is(rax)));
-
- // Step 2: get rdx for remainder if necessary.
- if (!remainder.is_valid()) {
- if ((left->is_register() && left->reg().is(rdx)) ||
- (right->is_register() && right->reg().is(rdx))) {
- Result fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- if (left->is_register() && left->reg().is(rdx)) {
- remainder = *left;
- *left = fresh;
- }
- if (right->is_register() && right->reg().is(rdx)) {
- remainder = *right;
- *right = fresh;
- }
- __ movq(fresh.reg(), rdx);
- } else {
- // Neither left nor right is in rdx.
- remainder = allocator_->Allocate(rdx);
- }
- }
- ASSERT(remainder.is_register() && remainder.reg().is(rdx));
- ASSERT(!(left->is_register() && left->reg().is(rdx)));
- ASSERT(!(right->is_register() && right->reg().is(rdx)));
-
- left->ToRegister();
- right->ToRegister();
- frame_->Spill(rax);
- frame_->Spill(rdx);
-
- // Check that left and right are smi tagged.
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- (op == Token::DIV) ? rax : rdx,
- left->reg(),
- right->reg(),
- overwrite_mode);
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
- left_type_info, right_type_info, deferred);
-
- if (op == Token::DIV) {
- __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- answer = quotient;
- } else {
- ASSERT(op == Token::MOD);
- __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- answer = remainder;
- }
- ASSERT(answer.is_valid());
- return answer;
- }
-
- // Special handling of shift operations because they use fixed
- // registers.
- if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
- // Move left out of rcx if necessary.
- if (left->is_register() && left->reg().is(rcx)) {
- *left = allocator_->Allocate();
- ASSERT(left->is_valid());
- __ movq(left->reg(), rcx);
- }
- right->ToRegister(rcx);
- left->ToRegister();
- ASSERT(left->is_register() && !left->reg().is(rcx));
- ASSERT(right->is_register() && right->reg().is(rcx));
+ Label* patch_site() { return &patch_site_; }
-    // We will modify right; it must be spilled.
- frame_->Spill(rcx);
+ private:
+ Register value_;
+ Register key_;
+ Register receiver_;
+ Label patch_site_;
+};
- // Use a fresh answer register to avoid spilling the left operand.
- answer = allocator_->Allocate();
- ASSERT(answer.is_valid());
- // Check that both operands are smis using the answer register as a
- // temporary.
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- answer.reg(),
- left->reg(),
- rcx,
- overwrite_mode);
- Label do_op;
- if (right_type_info.IsSmi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(right->reg());
- }
- __ movq(answer.reg(), left->reg());
- // If left is not known to be a smi, check if it is.
- // If left is not known to be a number, and it isn't a smi, check if
- // it is a HeapNumber.
- if (!left_type_info.IsSmi()) {
- __ JumpIfSmi(answer.reg(), &do_op);
- if (!left_type_info.IsNumber()) {
-          // Branch if not a heap number.
- __ Cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset),
- Factory::heap_number_map());
- deferred->Branch(not_equal);
- }
- // Load integer value into answer register using truncation.
- __ cvttsd2si(answer.reg(),
- FieldOperand(answer.reg(), HeapNumber::kValueOffset));
- // Branch if we might have overflowed.
- // (False negative for Smi::kMinValue)
- __ cmpq(answer.reg(), Immediate(0x80000000));
- deferred->Branch(equal);
- // TODO(lrn): Inline shifts on int32 here instead of first smi-tagging.
- __ Integer32ToSmi(answer.reg(), answer.reg());
- } else {
- // Fast case - both are actually smis.
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left->reg());
- }
- }
+void DeferredReferenceSetKeyedValue::Generate() {
+ __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
+ // Move value, receiver, and key to registers rax, rdx, and rcx, as
+ // the IC stub expects.
+ // Move value to rax, using xchg if the receiver or key is in rax.
+ if (!value_.is(rax)) {
+ if (!receiver_.is(rax) && !key_.is(rax)) {
+ __ movq(rax, value_);
} else {
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), rcx,
- left_type_info, right_type_info, deferred);
- }
- __ bind(&do_op);
-
- // Perform the operation.
- switch (op) {
- case Token::SAR:
- __ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx);
- break;
- case Token::SHR: {
- __ SmiShiftLogicalRight(answer.reg(),
- left->reg(),
- rcx,
- deferred->entry_label());
- break;
+ __ xchg(rax, value_);
+ // Update receiver_ and key_ if they are affected by the swap.
+ if (receiver_.is(rax)) {
+ receiver_ = value_;
+ } else if (receiver_.is(value_)) {
+ receiver_ = rax;
}
- case Token::SHL: {
- __ SmiShiftLeft(answer.reg(),
- left->reg(),
- rcx);
- break;
+ if (key_.is(rax)) {
+ key_ = value_;
+ } else if (key_.is(value_)) {
+ key_ = rax;
}
- default:
- UNREACHABLE();
}
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- ASSERT(answer.is_valid());
- return answer;
}
-
- // Handle the other binary operations.
- left->ToRegister();
- right->ToRegister();
- // A newly allocated register answer is used to hold the answer. The
- // registers containing left and right are not modified so they don't
- // need to be spilled in the fast case.
- answer = allocator_->Allocate();
- ASSERT(answer.is_valid());
-
- // Perform the smi tag check.
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- answer.reg(),
- left->reg(),
- right->reg(),
- overwrite_mode);
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
- left_type_info, right_type_info, deferred);
-
- switch (op) {
- case Token::ADD:
- __ SmiAdd(answer.reg(),
- left->reg(),
- right->reg(),
- deferred->entry_label());
- break;
-
- case Token::SUB:
- __ SmiSub(answer.reg(),
- left->reg(),
- right->reg(),
- deferred->entry_label());
- break;
-
- case Token::MUL: {
- __ SmiMul(answer.reg(),
- left->reg(),
- right->reg(),
- deferred->entry_label());
- break;
+ // Value is now in rax. Its original location is remembered in value_,
+ // and the value is restored to value_ before returning.
+ // The variables receiver_ and key_ are not preserved.
+ // Move receiver and key to rdx and rcx, swapping if necessary.
+ if (receiver_.is(rdx)) {
+ if (!key_.is(rcx)) {
+ __ movq(rcx, key_);
+ } // Else everything is already in the right place.
+ } else if (receiver_.is(rcx)) {
+ if (key_.is(rdx)) {
+ __ xchg(rcx, rdx);
+ } else if (key_.is(rcx)) {
+ __ movq(rdx, receiver_);
+ } else {
+ __ movq(rdx, receiver_);
+ __ movq(rcx, key_);
}
-
- case Token::BIT_OR:
- __ SmiOr(answer.reg(), left->reg(), right->reg());
- break;
-
- case Token::BIT_AND:
- __ SmiAnd(answer.reg(), left->reg(), right->reg());
- break;
-
- case Token::BIT_XOR:
- __ SmiXor(answer.reg(), left->reg(), right->reg());
- break;
-
- default:
- UNREACHABLE();
- break;
+ } else if (key_.is(rcx)) {
+ __ movq(rdx, receiver_);
+ } else {
+ __ movq(rcx, key_);
+ __ movq(rdx, receiver_);
}
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- ASSERT(answer.is_valid());
- return answer;
+
+ // Call the IC stub.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // The delta from the start of the map-compare instructions (initial movq)
+ // to the test instruction. We use masm_-> directly here instead of the
+ // __ macro because the macro sometimes uses macro expansion to turn
+ // into something that can't return a value. This is encountered
+ // when doing generated code coverage tests.
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+ // Here we use masm_-> instead of the __ macro because this is the
+ // instruction that gets patched and coverage code gets in the way.
+ masm_->testl(rax, Immediate(-delta_to_patch_site));
+ // Restore value (returned from store IC).
+ if (!value_.is(rax)) __ movq(value_, rax);
}
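Editor's note: the smi checks that the removed LikelySmiBinaryOperation above and GenerateSmiCode below rely on can test two operands at once: OR-ing the words and testing the tag bit covers both with a single branch. A sketch assuming V8's x64 tagging (smi tag 0, heap-object tag 1):

    #include <cassert>
    #include <cstdint>

    const intptr_t kSmiTagMask = 1;  // assumes smi tag 0, heap-object tag 1

    bool BothSmi(intptr_t a, intptr_t b) {
      // OR-ing the words tests both tag bits at once.
      return ((a | b) & kSmiTagMask) == 0;
    }

    int main() {
      intptr_t smi_a = 4, smi_b = 8;  // tagged smis: low bit clear
      intptr_t heap_ptr = 0x1001;     // heap pointers carry tag bit 1
      assert(BothSmi(smi_a, smi_b));
      assert(!BothSmi(smi_a, heap_ptr));
      return 0;
    }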
@@ -8143,90 +8337,701 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
}
-bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
- Object* answer_object = Heap::undefined_value();
- switch (op) {
- case Token::ADD:
- // Use intptr_t to detect overflow of 32-bit int.
- if (Smi::IsValid(static_cast<intptr_t>(left) + right)) {
- answer_object = Smi::FromInt(left + right);
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Register left,
+ Register right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ push(left);
+ __ push(right);
+ } else {
+ // The calling convention with registers is left in rdx and right in rax.
+ Register left_arg = rdx;
+ Register right_arg = rax;
+ if (!(left.is(left_arg) && right.is(right_arg))) {
+ if (left.is(right_arg) && right.is(left_arg)) {
+ if (IsOperationCommutative()) {
+ SetArgsReversed();
+ } else {
+ __ xchg(left, right);
+ }
+ } else if (left.is(left_arg)) {
+ __ movq(right_arg, right);
+ } else if (right.is(right_arg)) {
+ __ movq(left_arg, left);
+ } else if (left.is(right_arg)) {
+ if (IsOperationCommutative()) {
+ __ movq(left_arg, right);
+ SetArgsReversed();
+ } else {
+          // Order of moves is important to avoid destroying left argument.
+ __ movq(left_arg, left);
+ __ movq(right_arg, right);
+ }
+ } else if (right.is(left_arg)) {
+ if (IsOperationCommutative()) {
+ __ movq(right_arg, left);
+ SetArgsReversed();
+ } else {
+          // Order of moves is important to avoid destroying right argument.
+ __ movq(right_arg, right);
+ __ movq(left_arg, left);
+ }
+ } else {
+ // Order of moves is not important.
+ __ movq(left_arg, left);
+ __ movq(right_arg, right);
}
+ }
+
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
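Editor's note: SetArgsReversed() above avoids an xchg when the operation is commutative, since swapped inputs can simply be recorded as reversed. A standalone illustration of why this only works for commutative operators:

    #include <cassert>

    int apply(char op, int l, int r) { return op == '+' ? l + r : l - r; }

    int main() {
      int left = 7, right = 2;
      // '+' tolerates reversed arguments; '-' does not, so the stub
      // must either xchg or compensate for the reversal.
      assert(apply('+', right, left) == apply('+', left, right));
      assert(apply('-', right, left) != apply('-', left, right));
      return 0;
    }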
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Register left,
+ Smi* right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ push(left);
+ __ Push(right);
+ } else {
+ // The calling convention with registers is left in rdx and right in rax.
+ Register left_arg = rdx;
+ Register right_arg = rax;
+ if (left.is(left_arg)) {
+ __ Move(right_arg, right);
+ } else if (left.is(right_arg) && IsOperationCommutative()) {
+ __ Move(left_arg, right);
+ SetArgsReversed();
+ } else {
+      // For non-commutative operations, left and right_arg might be
+      // the same register, so the order of the moves matters: left
+      // must not be overwritten before it is moved to left_arg.
+ __ movq(left_arg, left);
+ __ Move(right_arg, right);
+ }
+
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Smi* left,
+ Register right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ Push(left);
+ __ push(right);
+ } else {
+ // The calling convention with registers is left in rdx and right in rax.
+ Register left_arg = rdx;
+ Register right_arg = rax;
+ if (right.is(right_arg)) {
+ __ Move(left_arg, left);
+ } else if (right.is(left_arg) && IsOperationCommutative()) {
+ __ Move(right_arg, left);
+ SetArgsReversed();
+ } else {
+ // For non-commutative operations, right and left_arg might be
+ // the same register. Therefore, the order of the moves is
+ // important here in order to not overwrite right before moving
+ // it to right_arg.
+ __ movq(right_arg, right);
+ __ Move(left_arg, left);
+ }
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
+Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm,
+ VirtualFrame* frame,
+ Result* left,
+ Result* right) {
+ if (ArgsInRegistersSupported()) {
+ SetArgsInRegisters();
+ return frame->CallStub(this, left, right);
+ } else {
+ frame->Push(left);
+ frame->Push(right);
+ return frame->CallStub(this, 2);
+ }
+}
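All four GenerateCall overloads above solve the same shuffle: get left into rdx and right into rax without clobbering either source. A sketch of that decision tree in plain C++, modeling registers as an array indexed by register number (the commutative SetArgsReversed() shortcut is deliberately left out for clarity):

#include <algorithm>

void PlaceArgsInRegisters(long regs[4], int left, int right) {
  const int kLeftArg = 3;   // rdx holds the left operand.
  const int kRightArg = 0;  // rax holds the right operand.
  if (left == kLeftArg && right == kRightArg) return;  // Already in place.
  if (left == kRightArg && right == kLeftArg) {
    std::swap(regs[kLeftArg], regs[kRightArg]);        // xchg left, right
  } else if (left == kLeftArg) {
    regs[kRightArg] = regs[right];
  } else if (right == kRightArg) {
    regs[kLeftArg] = regs[left];
  } else if (left == kRightArg) {
    regs[kLeftArg] = regs[left];    // Save left before right lands in rax.
    regs[kRightArg] = regs[right];
  } else if (right == kLeftArg) {
    regs[kRightArg] = regs[right];  // Save right before left lands in rdx.
    regs[kLeftArg] = regs[left];
  } else {
    regs[kLeftArg] = regs[left];    // Disjoint registers: any order works.
    regs[kRightArg] = regs[right];
  }
}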
+
+
+void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
+ // 1. Move arguments into rdx, rax except for DIV and MOD, which need the
+ // dividend in rax and rdx free for the division. Use rax, rbx for those.
+ Comment load_comment(masm, "-- Load arguments");
+ Register left = rdx;
+ Register right = rax;
+ if (op_ == Token::DIV || op_ == Token::MOD) {
+ left = rax;
+ right = rbx;
+ if (HasArgsInRegisters()) {
+ __ movq(rbx, rax);
+ __ movq(rax, rdx);
+ }
+ }
+ if (!HasArgsInRegisters()) {
+ __ movq(right, Operand(rsp, 1 * kPointerSize));
+ __ movq(left, Operand(rsp, 2 * kPointerSize));
+ }
+
+ Label not_smis;
+ // 2. Smi check both operands.
+ if (static_operands_type_.IsSmi()) {
+ // Skip smi check if we know that both arguments are smis.
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(left);
+ __ AbortIfNotSmi(right);
+ }
+ if (op_ == Token::BIT_OR) {
+ // Handle OR here, since we do extra smi-checking in the or code below.
+ __ SmiOr(right, right, left);
+ GenerateReturn(masm);
+ return;
+ }
+ } else {
+ if (op_ != Token::BIT_OR) {
+ // Skip the check for OR as it is better combined with the
+ // actual operation.
+ Comment smi_check_comment(masm, "-- Smi check arguments");
+ __ JumpIfNotBothSmi(left, right, &not_smis);
+ }
+ }
+
+ // 3. Operands are both smis (except for OR), perform the operation leaving
+ // the result in rax and check the result if necessary.
+ Comment perform_smi(masm, "-- Perform smi operation");
+ Label use_fp_on_smis;
+ switch (op_) {
+ case Token::ADD: {
+ ASSERT(right.is(rax));
+ __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
break;
- case Token::SUB:
- // Use intptr_t to detect overflow of 32-bit int.
- if (Smi::IsValid(static_cast<intptr_t>(left) - right)) {
- answer_object = Smi::FromInt(left - right);
- }
+ }
+
+ case Token::SUB: {
+ __ SmiSub(left, left, right, &use_fp_on_smis);
+ __ movq(rax, left);
break;
- case Token::MUL: {
- double answer = static_cast<double>(left) * right;
- if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
- // If the product is zero and the non-zero factor is negative,
- // the spec requires us to return floating point negative zero.
- if (answer != 0 || (left + right) >= 0) {
- answer_object = Smi::FromInt(static_cast<int>(answer));
- }
- }
- }
+ }
+
+ case Token::MUL:
+ ASSERT(right.is(rax));
+ __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
break;
+
case Token::DIV:
+ ASSERT(left.is(rax));
+ __ SmiDiv(left, left, right, &use_fp_on_smis);
+ break;
+
case Token::MOD:
+ ASSERT(left.is(rax));
+ __ SmiMod(left, left, right, slow);
break;
+
case Token::BIT_OR:
- answer_object = Smi::FromInt(left | right);
+ ASSERT(right.is(rax));
+ __ movq(rcx, right); // Save the right operand.
+ __ SmiOr(right, right, left); // BIT_OR is commutative.
+ __ testb(right, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_smis);
break;
+
case Token::BIT_AND:
- answer_object = Smi::FromInt(left & right);
+ ASSERT(right.is(rax));
+ __ SmiAnd(right, right, left); // BIT_AND is commutative.
break;
+
case Token::BIT_XOR:
- answer_object = Smi::FromInt(left ^ right);
+ ASSERT(right.is(rax));
+ __ SmiXor(right, right, left); // BIT_XOR is commutative.
break;
- case Token::SHL: {
- int shift_amount = right & 0x1F;
- if (Smi::IsValid(left << shift_amount)) {
- answer_object = Smi::FromInt(left << shift_amount);
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR:
+ switch (op_) {
+ case Token::SAR:
+ __ SmiShiftArithmeticRight(left, left, right);
+ break;
+ case Token::SHR:
+ __ SmiShiftLogicalRight(left, left, right, slow);
+ break;
+ case Token::SHL:
+ __ SmiShiftLeft(left, left, right);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ __ movq(rax, left);
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ // 4. Emit return of result in rax.
+ GenerateReturn(masm);
+
+ // 5. For some operations emit inline code to perform floating point
+ // operations on known smis (e.g., if the result of the operation
+ // overflowed the smi range).
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ ASSERT(use_fp_on_smis.is_linked());
+ __ bind(&use_fp_on_smis);
+ if (op_ == Token::DIV) {
+ __ movq(rdx, rax);
+ __ movq(rax, rbx);
+ }
+ // left is rdx, right is rax.
+ __ AllocateHeapNumber(rbx, rcx, slow);
+ FloatingPointHelper::LoadSSE2SmiOperands(masm);
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
+ __ movq(rax, rbx);
+ GenerateReturn(masm);
+ }
+ default:
+ break;
+ }
+
+ // 6. Non-smi operands, fall out to the non-smi code with the operands in
+ // rdx and rax.
+ Comment done_comment(masm, "-- Enter non-smi code");
+ __ bind(&not_smis);
+
+ switch (op_) {
+ case Token::DIV:
+ case Token::MOD:
+ // Operands are in rax, rbx at this point.
+ __ movq(rdx, rax);
+ __ movq(rax, rbx);
+ break;
+
+ case Token::BIT_OR:
+ // Right operand is saved in rcx and rax was destroyed by the smi
+ // operation.
+ __ movq(rax, rcx);
+ break;
+
+ default:
+ break;
+ }
+}
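GenerateSmiCode leans on the x64 smi representation asserted elsewhere in this file (STATIC_ASSERT(kSmiValueSize == 32), kSmiTag == 0): the 32-bit payload sits in the upper half of the 64-bit word and the low bits are zero. A rough C++ model of the tagging operations used above, under that assumption:

#include <cstdint>

inline int64_t Integer32ToSmi(int32_t value) {
  return static_cast<int64_t>(value) << 32;  // Low bits, incl. the tag, are 0.
}

inline int32_t SmiToInteger32(int64_t smi) {
  return static_cast<int32_t>(smi >> 32);    // Arithmetic shift keeps the sign.
}

inline bool IsSmi(int64_t word) {
  return (word & 1) == 0;                    // kSmiTagMask == 1, kSmiTag == 0.
}

This is also why the BIT_OR path can fold the smi check into the operation: or-ing preserves a set tag bit, so testing the tag of the result catches a non-smi operand.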
+
+
+void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
+ Label call_runtime;
+
+ if (ShouldGenerateSmiCode()) {
+ GenerateSmiCode(masm, &call_runtime);
+ } else if (op_ != Token::MOD) {
+ if (!HasArgsInRegisters()) {
+ GenerateLoadArguments(masm);
+ }
+ }
+ // Floating point case.
+ if (ShouldGenerateFPCode()) {
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+ HasSmiCodeInStub()) {
+ // Execution reaches this point when the first non-smi argument occurs
+ // (and only if smi code is generated). This is the right moment to
+ // patch to HEAP_NUMBERS state. The transition is attempted only for
+ // the four basic operations. The stub stays in the DEFAULT state
+ // forever for all other operations (also if smi code is skipped).
+ GenerateTypeTransition(masm);
+ break;
+ }
+
+ Label not_floats;
+ // rax: y
+ // rdx: x
+ if (static_operands_type_.IsNumber()) {
+ if (FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(rdx);
+ __ AbortIfNotNumber(rax);
+ }
+ FloatingPointHelper::LoadSSE2NumberOperands(masm);
+ } else {
+ FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime);
+ }
+
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ // Allocate a heap number, if needed.
+ Label skip_allocation;
+ OverwriteMode mode = mode_;
+ if (HasArgsReversed()) {
+ if (mode == OVERWRITE_RIGHT) {
+ mode = OVERWRITE_LEFT;
+ } else if (mode == OVERWRITE_LEFT) {
+ mode = OVERWRITE_RIGHT;
+ }
+ }
+ switch (mode) {
+ case OVERWRITE_LEFT:
+ __ JumpIfNotSmi(rdx, &skip_allocation);
+ __ AllocateHeapNumber(rbx, rcx, &call_runtime);
+ __ movq(rdx, rbx);
+ __ bind(&skip_allocation);
+ __ movq(rax, rdx);
+ break;
+ case OVERWRITE_RIGHT:
+ // If the argument in rax is already an object, we skip the
+ // allocation of a heap number.
+ __ JumpIfNotSmi(rax, &skip_allocation);
+ // Fall through!
+ case NO_OVERWRITE:
+ // Allocate a heap number for the result. Keep rax and rdx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(rbx, rcx, &call_runtime);
+ __ movq(rax, rbx);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+ GenerateReturn(masm);
+ __ bind(&not_floats);
+ if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+ !HasSmiCodeInStub()) {
+ // Execution reaches this point when the first non-number argument
+ // occurs (and only if smi code is skipped from the stub, otherwise
+ // the patching has already been done earlier in this case branch).
+ // A perfect moment to try patching to STRINGS for ADD operation.
+ if (op_ == Token::ADD) {
+ GenerateTypeTransition(masm);
+ }
}
break;
}
- case Token::SHR: {
- int shift_amount = right & 0x1F;
- unsigned int unsigned_left = left;
- unsigned_left >>= shift_amount;
- if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
- answer_object = Smi::FromInt(unsigned_left);
- }
+ case Token::MOD: {
+ // For MOD we go directly to runtime in the non-smi case.
break;
}
- case Token::SAR: {
- int shift_amount = right & 0x1F;
- unsigned int unsigned_left = left;
- if (left < 0) {
- // Perform arithmetic shift of a negative number by
- // complementing number, logical shifting, complementing again.
- unsigned_left = ~unsigned_left;
- unsigned_left >>= shift_amount;
- unsigned_left = ~unsigned_left;
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR: {
+ Label skip_allocation, non_smi_shr_result;
+ Register heap_number_map = r9;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ if (static_operands_type_.IsNumber()) {
+ if (FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(rdx);
+ __ AbortIfNotNumber(rax);
+ }
+ FloatingPointHelper::LoadNumbersAsIntegers(masm);
} else {
- unsigned_left >>= shift_amount;
+ FloatingPointHelper::LoadAsIntegers(masm,
+ &call_runtime,
+ heap_number_map);
}
- ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
- answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
+ switch (op_) {
+ case Token::BIT_OR: __ orl(rax, rcx); break;
+ case Token::BIT_AND: __ andl(rax, rcx); break;
+ case Token::BIT_XOR: __ xorl(rax, rcx); break;
+ case Token::SAR: __ sarl_cl(rax); break;
+ case Token::SHL: __ shll_cl(rax); break;
+ case Token::SHR: {
+ __ shrl_cl(rax);
+ // Check if result is negative. This can only happen for a shift
+ // by zero.
+ __ testl(rax, rax);
+ __ j(negative, &non_smi_shr_result);
+ break;
+ }
+ default: UNREACHABLE();
+ }
+
+ STATIC_ASSERT(kSmiValueSize == 32);
+ // Tag smi result and return.
+ __ Integer32ToSmi(rax, rax);
+ GenerateReturn(masm);
+
+ // All bit-ops except SHR return a signed int32 that can be
+ // returned immediately as a smi.
+ // We might need to allocate a HeapNumber if we shift a negative
+ // number right by zero (i.e., convert to UInt32).
+ if (op_ == Token::SHR) {
+ ASSERT(non_smi_shr_result.is_linked());
+ __ bind(&non_smi_shr_result);
+ // Allocate a heap number if needed.
+ __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
+ switch (mode_) {
+ case OVERWRITE_LEFT:
+ case OVERWRITE_RIGHT:
+ // If the operand was an object, we skip the
+ // allocation of a heap number.
+ __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
+ 1 * kPointerSize : 2 * kPointerSize));
+ __ JumpIfNotSmi(rax, &skip_allocation);
+ // Fall through!
+ case NO_OVERWRITE:
+ // Allocate heap number in new space.
+ // Not using AllocateHeapNumber macro in order to reuse
+ // already loaded heap_number_map.
+ __ AllocateInNewSpace(HeapNumber::kSize,
+ rax,
+ rcx,
+ no_reg,
+ &call_runtime,
+ TAG_OBJECT);
+ // Set the map.
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset),
+ heap_number_map);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ // Store the result in the HeapNumber and return.
+ __ cvtqsi2sd(xmm0, rbx);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+ GenerateReturn(masm);
+ }
+
break;
}
+ default: UNREACHABLE(); break;
+ }
+ }
+
+  // If all else fails, use the runtime system to get the correct
+  // result. If the arguments were passed in registers, place them on the
+  // stack in the correct order below the return address.
+ __ bind(&call_runtime);
+
+ if (HasArgsInRegisters()) {
+ GenerateRegisterArgsPush(masm);
+ }
+
+ switch (op_) {
+ case Token::ADD: {
+ // Registers containing left and right operands respectively.
+ Register lhs, rhs;
+
+ if (HasArgsReversed()) {
+ lhs = rax;
+ rhs = rdx;
+ } else {
+ lhs = rdx;
+ rhs = rax;
+ }
+
+ // Test for string arguments before calling runtime.
+ Label not_strings, both_strings, not_string1, string1, string1_smi2;
+
+ // If this stub has already generated FP-specific code then the arguments
+ // are already in rdx and rax.
+ if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
+ GenerateLoadArguments(masm);
+ }
+
+ Condition is_smi;
+ is_smi = masm->CheckSmi(lhs);
+ __ j(is_smi, &not_string1);
+ __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8);
+ __ j(above_equal, &not_string1);
+
+      // First argument is a string, test the second.
+ is_smi = masm->CheckSmi(rhs);
+ __ j(is_smi, &string1_smi2);
+ __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
+ __ j(above_equal, &string1);
+
+ // First and second argument are strings.
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&string1_smi2);
+ // First argument is a string, second is a smi. Try to lookup the number
+ // string for the smi in the number string cache.
+ NumberToStringStub::GenerateLookupNumberStringCache(
+ masm, rhs, rbx, rcx, r8, true, &string1);
+
+ // Replace second argument on stack and tailcall string add stub to make
+ // the result.
+ __ movq(Operand(rsp, 1 * kPointerSize), rbx);
+ __ TailCallStub(&string_add_stub);
+
+ // Only first argument is a string.
+ __ bind(&string1);
+ __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+
+ // First argument was not a string, test second.
+ __ bind(&not_string1);
+ is_smi = masm->CheckSmi(rhs);
+ __ j(is_smi, &not_strings);
+ __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs);
+ __ j(above_equal, &not_strings);
+
+ // Only second argument is a string.
+ __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+
+ __ bind(&not_strings);
+ // Neither argument is a string.
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ }
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
+ case Token::DIV:
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+ break;
+ case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ break;
default:
UNREACHABLE();
- break;
}
- if (answer_object == Heap::undefined_value()) {
- return false;
+}
+
+
+void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
+ ASSERT(!HasArgsInRegisters());
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+}
+
+
+void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
+  // If the arguments are not passed in registers, remove them from the
+  // stack before returning.
+ if (!HasArgsInRegisters()) {
+ __ ret(2 * kPointerSize); // Remove both operands
+ } else {
+ __ ret(0);
}
- frame_->Push(Handle<Object>(answer_object));
- return true;
}
-// End of CodeGenerator implementation.
+void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ ASSERT(HasArgsInRegisters());
+ __ pop(rcx);
+ if (HasArgsReversed()) {
+ __ push(rax);
+ __ push(rdx);
+ } else {
+ __ push(rdx);
+ __ push(rax);
+ }
+ __ push(rcx);
+}
+
+
+void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ Label get_result;
+
+ // Ensure the operands are on the stack.
+ if (HasArgsInRegisters()) {
+ GenerateRegisterArgsPush(masm);
+ }
+
+ // Left and right arguments are already on stack.
+ __ pop(rcx); // Save the return address.
+
+ // Push this stub's key.
+ __ Push(Smi::FromInt(MinorKey()));
+
+ // Although the operation and the type info are encoded into the key,
+ // the encoding is opaque, so push them too.
+ __ Push(Smi::FromInt(op_));
+
+ __ Push(Smi::FromInt(runtime_operands_type_));
+
+ __ push(rcx); // The return address.
+
+ // Perform patching to an appropriate fast case and return the result.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
+ 5,
+ 1);
+}
+
+
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+ GenericBinaryOpStub stub(key, type_info);
+ return stub.GetCode();
+}
+
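GenerateTypeTransition and the comments in Generate describe a one-shot specialization protocol driven from the runtime side by IC::kBinaryOp_Patch. A rough sketch of the transition rule; the GENERIC fallback state is an assumption here, since only DEFAULT, HEAP_NUMBERS and the ADD-to-STRINGS move are spelled out in the comments above:

enum TypeInfo { DEFAULT, HEAP_NUMBERS, STRINGS, GENERIC };

// Hypothetical model of the patching decision, not the actual ic.cc logic.
TypeInfo NextTypeInfo(TypeInfo current, bool is_add, bool saw_string) {
  if (current != DEFAULT) return GENERIC;    // Re-specialize at most once.
  if (is_add && saw_string) return STRINGS;  // ADD with a string operand.
  return HEAP_NUMBERS;                       // First non-smi number seen.
}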
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Input on stack:
@@ -8504,6 +9309,148 @@ void IntegerConvert(MacroAssembler* masm,
}
+// Input: rdx, rax are the left and right objects of a bit op.
+// Output: rax, rcx are left and right integers for a bit op.
+void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
+ // Check float operands.
+ Label done;
+ Label rax_is_smi;
+ Label rax_is_object;
+ Label rdx_is_object;
+
+ __ JumpIfNotSmi(rdx, &rdx_is_object);
+ __ SmiToInteger32(rdx, rdx);
+ __ JumpIfSmi(rax, &rax_is_smi);
+
+ __ bind(&rax_is_object);
+ IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
+ __ jmp(&done);
+
+ __ bind(&rdx_is_object);
+ IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
+ __ JumpIfNotSmi(rax, &rax_is_object);
+ __ bind(&rax_is_smi);
+ __ SmiToInteger32(rcx, rax);
+
+ __ bind(&done);
+ __ movl(rax, rdx);
+}
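IntegerConvert itself is outside this hunk; what it implements for heap numbers is the ECMA-262 section 9.5 ToInt32 truncation. A self-contained C++ model of that semantics:

#include <cmath>
#include <cstdint>

int32_t ToInt32(double value) {
  if (!std::isfinite(value) || value == 0.0) return 0;  // NaN, +/-Inf, +/-0.
  double truncated = std::trunc(value);                 // Round toward zero.
  double modulo = std::fmod(truncated, 4294967296.0);   // Wrap modulo 2^32.
  if (modulo < 0) modulo += 4294967296.0;
  // Reinterpret the low 32 bits as a signed integer.
  return static_cast<int32_t>(static_cast<uint32_t>(modulo));
}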
+
+
+// Input: rdx, rax are the left and right objects of a bit op.
+// Output: rax, rcx are left and right integers for a bit op.
+void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
+ Label* conversion_failure,
+ Register heap_number_map) {
+ // Check float operands.
+ Label arg1_is_object, check_undefined_arg1;
+ Label arg2_is_object, check_undefined_arg2;
+ Label load_arg2, done;
+
+ __ JumpIfNotSmi(rdx, &arg1_is_object);
+ __ SmiToInteger32(rdx, rdx);
+ __ jmp(&load_arg2);
+
+  // If the argument is undefined, it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg1);
+ __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, conversion_failure);
+ __ movl(rdx, Immediate(0));
+ __ jmp(&load_arg2);
+
+ __ bind(&arg1_is_object);
+ __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
+ __ j(not_equal, &check_undefined_arg1);
+  // Get the untagged integer version of the rdx heap number in rdx.
+ IntegerConvert(masm, rdx, rdx);
+
+ // Here rdx has the untagged integer, rax has a Smi or a heap number.
+ __ bind(&load_arg2);
+ // Test if arg2 is a Smi.
+ __ JumpIfNotSmi(rax, &arg2_is_object);
+ __ SmiToInteger32(rax, rax);
+ __ movl(rcx, rax);
+ __ jmp(&done);
+
+  // If the argument is undefined, it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg2);
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, conversion_failure);
+ __ movl(rcx, Immediate(0));
+ __ jmp(&done);
+
+ __ bind(&arg2_is_object);
+ __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
+ __ j(not_equal, &check_undefined_arg2);
+  // Get the untagged integer version of the rax heap number in rcx.
+ IntegerConvert(masm, rcx, rax);
+ __ bind(&done);
+ __ movl(rax, rdx);
+}
+
+
+void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
+ __ SmiToInteger32(kScratchRegister, rdx);
+ __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ SmiToInteger32(kScratchRegister, rax);
+ __ cvtlsi2sd(xmm1, kScratchRegister);
+}
+
+
+void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
+ Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
+ // Load operand in rdx into xmm0.
+ __ JumpIfSmi(rdx, &load_smi_rdx);
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ // Load operand in rax into xmm1.
+ __ JumpIfSmi(rax, &load_smi_rax);
+ __ bind(&load_nonsmi_rax);
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi_rdx);
+ __ SmiToInteger32(kScratchRegister, rdx);
+ __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ JumpIfNotSmi(rax, &load_nonsmi_rax);
+
+ __ bind(&load_smi_rax);
+ __ SmiToInteger32(kScratchRegister, rax);
+ __ cvtlsi2sd(xmm1, kScratchRegister);
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
+ Label* not_numbers) {
+ Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
+ // Load operand in rdx into xmm0, or branch to not_numbers.
+ __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
+ __ JumpIfSmi(rdx, &load_smi_rdx);
+ __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
+ __ j(not_equal, not_numbers); // Argument in rdx is not a number.
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ // Load operand in rax into xmm1, or branch to not_numbers.
+ __ JumpIfSmi(rax, &load_smi_rax);
+
+ __ bind(&load_nonsmi_rax);
+ __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
+ __ j(not_equal, not_numbers);
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi_rdx);
+ __ SmiToInteger32(kScratchRegister, rdx);
+ __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ JumpIfNotSmi(rax, &load_nonsmi_rax);
+
+ __ bind(&load_smi_rax);
+ __ SmiToInteger32(kScratchRegister, rax);
+ __ cvtlsi2sd(xmm1, kScratchRegister);
+ __ bind(&done);
+}
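The three LoadSSE2* helpers differ only in how much they already know about the operands. Their common per-operand logic, modeled in C++ with a hypothetical tagged-value struct (these types are illustrative, not V8's):

#include <cstdint>

struct TaggedValue {
  bool is_smi;
  int32_t smi_value;    // Valid when is_smi.
  bool is_heap_number;  // Checked via the map when !is_smi.
  double heap_number;   // Valid when is_heap_number.
};

// Returns false to mimic the branch to the not_numbers label.
bool LoadOperandAsDouble(const TaggedValue& v, double* out) {
  if (v.is_smi) {
    *out = static_cast<double>(v.smi_value);  // SmiToInteger32 + cvtlsi2sd
    return true;
  }
  if (v.is_heap_number) {
    *out = v.heap_number;  // movsd from HeapNumber::kValueOffset
    return true;
  }
  return false;  // Map check failed: not a number.
}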
+
+
void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
Label slow, done;
@@ -8588,6 +9535,172 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
}
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ // The key is in rdx and the parameter count is in rax.
+
+ // The displacement is used for skipping the frame pointer on the
+ // stack. It is the offset of the last parameter (if any) relative
+ // to the frame pointer.
+ static const int kDisplacement = 1 * kPointerSize;
+
+ // Check that the key is a smi.
+ Label slow;
+ __ JumpIfNotSmi(rdx, &slow);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor;
+ __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &adaptor);
+
+ // Check index against formal parameters count limit passed in
+ // through register rax. Use unsigned comparison to get negative
+ // check for free.
+ __ cmpq(rdx, rax);
+ __ j(above_equal, &slow);
+
+ // Read the argument from the stack and return it.
+ SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
+ __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
+ index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
+ __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
+ __ Ret();
+
+ // Arguments adaptor case: Check index against actual arguments
+ // limit found in the arguments adaptor frame. Use unsigned
+ // comparison to get negative check for free.
+ __ bind(&adaptor);
+ __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ cmpq(rdx, rcx);
+ __ j(above_equal, &slow);
+
+ // Read the argument from the stack and return it.
+ index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
+ __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
+ index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
+ __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
+ __ Ret();
+
+ // Slow-case: Handle non-smi or out-of-bounds access to arguments
+ // by calling the runtime system.
+ __ bind(&slow);
+ __ pop(rbx); // Return address.
+ __ push(rdx);
+ __ push(rbx);
+ __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+}
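The fast path above is pure address arithmetic against the caller's frame. Written out in C++, assuming the standard frame layout (fp[0] = saved frame pointer, fp[1] = return address, parameters above that with parameter 0 pushed first, i.e. at the highest address):

#include <cstdint>

intptr_t* ParameterAddress(intptr_t* fp, int param_count, int key) {
  const int kDisplacementSlots = 1;  // Mirrors kDisplacement above, in slots.
  // lea rbx, [rbp + param_count * 8]; mov rax, [rbx - key * 8 + 8]
  return fp + (param_count - key) + kDisplacementSlots;
}

For key == param_count - 1 this yields fp + 2, the slot just above the return address; for key == 0 it yields the deepest parameter slot.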
+
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+ // rsp[0] : return address
+ // rsp[8] : number of parameters
+ // rsp[16] : receiver displacement
+ // rsp[24] : function
+
+ // The displacement is used for skipping the return address and the
+ // frame pointer on the stack. It is the offset of the last
+ // parameter (if any) relative to the frame pointer.
+ static const int kDisplacement = 2 * kPointerSize;
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &adaptor_frame);
+
+ // Get the length from the frame.
+ __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
+ __ jmp(&try_allocate);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ SmiToInteger32(rcx,
+ Operand(rdx,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ // Space on stack must already hold a smi.
+ __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
+  // Do not clobber the length index for the indexing operation since
+  // it is used to compute the size for allocation later.
+ __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
+ __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+
+  // Try the new space allocation. Start by computing the size of the
+  // arguments object and the elements array.
+ Label add_arguments_object;
+ __ bind(&try_allocate);
+ __ testl(rcx, rcx);
+ __ j(zero, &add_arguments_object);
+ __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ bind(&add_arguments_object);
+ __ addl(rcx, Immediate(Heap::kArgumentsObjectSize));
+
+ // Do the allocation of both objects in one go.
+ __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
+
+ // Get the arguments boilerplate from the current (global) context.
+ int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
+ __ movq(rdi, Operand(rdi, offset));
+
+ // Copy the JS object part.
+ STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+ __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
+ __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
+ __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
+ __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
+ __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
+ __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
+
+  // Set up the callee in-object property.
+ ASSERT(Heap::arguments_callee_index == 0);
+ __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize));
+ __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister);
+
+ // Get the length (smi tagged) and set that as an in-object property too.
+ ASSERT(Heap::arguments_length_index == 1);
+ __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx);
+
+ // If there are no actual arguments, we're done.
+ Label done;
+ __ SmiTest(rcx);
+ __ j(zero, &done);
+
+ // Get the parameters pointer from the stack and untag the length.
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+
+  // Set up the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
+ __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
+ __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
+ __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
+ __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+ __ SmiToInteger32(rcx, rcx); // Untag length for the loop below.
+
+ // Copy the fixed array slots.
+ Label loop;
+ __ bind(&loop);
+ __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
+ __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
+ __ addq(rdi, Immediate(kPointerSize));
+ __ subq(rdx, Immediate(kPointerSize));
+ __ decl(rcx);
+ __ j(not_zero, &loop);
+
+ // Return and remove the on-stack parameters.
+ __ bind(&done);
+ __ ret(3 * kPointerSize);
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
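The size computed at try_allocate folds the object and its elements array into one new-space allocation. In C++, with the constant values written out as assumptions (kArgumentsObjectSize is taken to be five pointers: the three-pointer JSObject header plus the callee and length in-object properties; FixedArray's header is a map and a length):

#include <cstddef>

size_t ArgumentsObjectAllocationSize(int argc) {
  const size_t kPointerSize = 8;                          // x64
  const size_t kArgumentsObjectSize = 5 * kPointerSize;   // Assumed value.
  const size_t kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length
  size_t size = kArgumentsObjectSize;
  if (argc > 0) {
    size += kFixedArrayHeaderSize + argc * kPointerSize;  // Elements array.
  }
  return size;
}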
+
+
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -8935,18 +10048,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
-void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
- Register hash,
- Register mask) {
- __ and_(hash, mask);
- // Each entry in string cache consists of two pointer sized fields,
- // but times_twice_pointer_size (multiplication by 16) scale factor
- // is not supported by addrmode on x64 platform.
- // So we have to premultiply entry index before lookup.
- __ shl(hash, Immediate(kPointerSizeLog2 + 1));
-}
-
-
void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Register object,
Register result,
@@ -9026,6 +10127,18 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
}
+void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
+ Register hash,
+ Register mask) {
+ __ and_(hash, mask);
+  // Each entry in the string cache consists of two pointer-sized fields,
+  // but the times_twice_pointer_size (multiply by 16) scale factor is
+  // not supported by the addressing modes on x64.
+  // So we have to premultiply the entry index before the lookup.
+ __ shl(hash, Immediate(kPointerSizeLog2 + 1));
+}
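The shift amount is the whole trick: an entry is a (number, string) pair of pointers, 16 bytes on x64, and x64 addressing only scales by 1, 2, 4 or 8, so the masked hash is turned into a byte offset up front. As a one-liner in C++:

#include <cstdint>

uint32_t CacheEntryByteOffset(uint32_t hash, uint32_t mask) {
  const int kPointerSizeLog2 = 3;                  // 8-byte pointers on x64.
  return (hash & mask) << (kPointerSizeLog2 + 1);  // index * 16
}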
+
+
void NumberToStringStub::Generate(MacroAssembler* masm) {
Label runtime;
@@ -9041,12 +10154,6 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
}
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- masm->RecordWriteHelper(object_, addr_, scratch_);
- masm->ret(0);
-}
-
-
static int NegativeComparisonResult(Condition cc) {
ASSERT(cc != equal);
ASSERT((cc == less) || (cc == less_equal)
@@ -9056,6 +10163,8 @@ static int NegativeComparisonResult(Condition cc) {
void CompareStub::Generate(MacroAssembler* masm) {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+
Label check_unequal_objects, done;
// The compare stub returns a positive, negative, or zero 64-bit integer
// value in rax, corresponding to result of comparing the two inputs.
@@ -9257,7 +10366,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// A smi plus a heap object has the low bit set, a heap object plus
// a heap object has the low bit clear.
ASSERT_EQ(0, kSmiTag);
- ASSERT_EQ(V8_UINT64_C(1), kSmiTagMask);
+ ASSERT_EQ(static_cast<int64_t>(1), kSmiTagMask);
__ lea(rcx, Operand(rax, rdx, times_1, 0));
__ testb(rcx, Immediate(kSmiTagMask));
__ j(not_zero, &not_both_objects);
@@ -9322,280 +10431,73 @@ void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
}
-// Call the function just below TOS on the stack with the given
-// arguments. The receiver is the TOS.
-void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
- CallFunctionFlags flags,
- int position) {
- // Push the arguments ("left-to-right") on the stack.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Record the position for debugging purposes.
- CodeForSourcePosition(position);
+void StackCheckStub::Generate(MacroAssembler* masm) {
+ // Because builtins always remove the receiver from the stack, we
+ // have to fake one to avoid underflowing the stack. The receiver
+ // must be inserted below the return address on the stack so we
+ // temporarily store that in a register.
+ __ pop(rax);
+ __ Push(Smi::FromInt(0));
+ __ push(rax);
- // Use the shared code stub to call the function.
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop, flags);
- Result answer = frame_->CallStub(&call_function, arg_count + 1);
- // Restore context and replace function on the stack with the
- // result of the stub invocation.
- frame_->RestoreContextRegister();
- frame_->SetElementAt(0, &answer);
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
}
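The pop/push/push dance above just wedges a dummy receiver under the return address. On a simple stack model, where back() is the top of the stack:

#include <cstdint>
#include <vector>

void InsertFakeReceiver(std::vector<intptr_t>* stack) {
  intptr_t return_address = stack->back();  // pop rax
  stack->pop_back();
  stack->push_back(0);                      // Push(Smi::FromInt(0))
  stack->push_back(return_address);         // push rax
}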
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Implements "value instanceof function" operator.
- // Expected input state:
- // rsp[0] : return address
- // rsp[1] : function pointer
- // rsp[2] : value
- // Returns a bitwise zero to indicate that the value
- // is and instance of the function and anything else to
- // indicate that the value is not an instance.
-
- // Get the object - go slow case if it's a smi.
+void CallFunctionStub::Generate(MacroAssembler* masm) {
Label slow;
- __ movq(rax, Operand(rsp, 2 * kPointerSize));
- __ JumpIfSmi(rax, &slow);
-
- // Check that the left hand is a JS object. Leave its map in rax.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
- __ j(below, &slow);
- __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
- __ j(above, &slow);
-
- // Get the prototype of the function.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
- // rdx is function, rax is map.
-
- // Look up the function and the map in the instanceof cache.
- Label miss;
- __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
- __ j(not_equal, &miss);
- __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
- __ j(not_equal, &miss);
- __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(2 * kPointerSize);
-
- __ bind(&miss);
- __ TryGetFunctionPrototype(rdx, rbx, &slow);
-
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(rbx, &slow);
- __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
- __ j(below, &slow);
- __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
- __ j(above, &slow);
-
- // Register mapping:
- // rax is object map.
- // rdx is function.
- // rbx is function prototype.
- __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
-
- __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
-
- // Loop through the prototype chain looking for the function prototype.
- Label loop, is_instance, is_not_instance;
- __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
- __ bind(&loop);
- __ cmpq(rcx, rbx);
- __ j(equal, &is_instance);
- __ cmpq(rcx, kScratchRegister);
- // The code at is_not_instance assumes that kScratchRegister contains a
- // non-zero GCable value (the null object in this case).
- __ j(equal, &is_not_instance);
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
- __ jmp(&loop);
-
- __ bind(&is_instance);
- __ xorl(rax, rax);
- // Store bitwise zero in the cache. This is a Smi in GC terms.
- ASSERT_EQ(0, kSmiTag);
- __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(2 * kPointerSize);
-
- __ bind(&is_not_instance);
- // We have to store a non-zero value in the cache.
- __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(2 * kPointerSize);
-
- // Slow-case: Go through the JavaScript implementation.
- __ bind(&slow);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
-}
-
-
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
- // rsp[0] : return address
- // rsp[8] : number of parameters
- // rsp[16] : receiver displacement
- // rsp[24] : function
-
- // The displacement is used for skipping the return address and the
- // frame pointer on the stack. It is the offset of the last
- // parameter (if any) relative to the frame pointer.
- static const int kDisplacement = 2 * kPointerSize;
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adaptor_frame);
-
- // Get the length from the frame.
- __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
- __ jmp(&try_allocate);
-
- // Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ SmiToInteger32(rcx,
- Operand(rdx,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- // Space on stack must already hold a smi.
- __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
- // Do not clobber the length index for the indexing operation since
- // it is used compute the size for allocation later.
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
- // Try the new space allocation. Start out with computing the size of
- // the arguments object and the elements array.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ testl(rcx, rcx);
- __ j(zero, &add_arguments_object);
- __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
- __ bind(&add_arguments_object);
- __ addl(rcx, Immediate(Heap::kArgumentsObjectSize));
-
- // Do the allocation of both objects in one go.
- __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
-
- // Get the arguments boilerplate from the current (global) context.
- int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
- __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
- __ movq(rdi, Operand(rdi, offset));
-
- // Copy the JS object part.
- STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
- __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
- __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
- __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
- __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
- __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
-
- // Setup the callee in-object property.
- ASSERT(Heap::arguments_callee_index == 0);
- __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister);
-
- // Get the length (smi tagged) and set that as an in-object property too.
- ASSERT(Heap::arguments_length_index == 1);
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx);
-
- // If there are no actual arguments, we're done.
- Label done;
- __ SmiTest(rcx);
- __ j(zero, &done);
-
- // Get the parameters pointer from the stack and untag the length.
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-
- // Setup the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
- __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
- __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
- __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
- __ SmiToInteger32(rcx, rcx); // Untag length for the loop below.
-
- // Copy the fixed array slots.
- Label loop;
- __ bind(&loop);
- __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
- __ addq(rdi, Immediate(kPointerSize));
- __ subq(rdx, Immediate(kPointerSize));
- __ decl(rcx);
- __ j(not_zero, &loop);
-
- // Return and remove the on-stack parameters.
- __ bind(&done);
- __ ret(3 * kPointerSize);
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The key is in rdx and the parameter count is in rax.
+  // If the receiver might be a value (string, number or boolean), check for
+ // and box it if it is.
+ if (ReceiverMightBeValue()) {
+ // Get the receiver from the stack.
+ // +1 ~ return address
+ Label receiver_is_value, receiver_is_js_object;
+ __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
- // The displacement is used for skipping the frame pointer on the
- // stack. It is the offset of the last parameter (if any) relative
- // to the frame pointer.
- static const int kDisplacement = 1 * kPointerSize;
+ // Check if receiver is a smi (which is a number value).
+ __ JumpIfSmi(rax, &receiver_is_value);
- // Check that the key is a smi.
- Label slow;
- __ JumpIfNotSmi(rdx, &slow);
+ // Check if the receiver is a valid JS object.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
+ __ j(above_equal, &receiver_is_js_object);
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adaptor);
+ // Call the runtime to box the value.
+ __ bind(&receiver_is_value);
+ __ EnterInternalFrame();
+ __ push(rax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ LeaveInternalFrame();
+ __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
- // Check index against formal parameters count limit passed in
- // through register rax. Use unsigned comparison to get negative
- // check for free.
- __ cmpq(rdx, rax);
- __ j(above_equal, &slow);
+ __ bind(&receiver_is_js_object);
+ }
- // Read the argument from the stack and return it.
- SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
- index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
- __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
- __ Ret();
+ // Get the function to call from the stack.
+ // +2 ~ receiver, return address
+ __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
- // Arguments adaptor case: Check index against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmpq(rdx, rcx);
- __ j(above_equal, &slow);
+ // Check that the function really is a JavaScript function.
+ __ JumpIfSmi(rdi, &slow);
+  // Go to the slow case if we do not have a function.
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &slow);
- // Read the argument from the stack and return it.
- index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
- __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
- index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
- __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
- __ Ret();
+ // Fast-case: Just invoke the function.
+ ParameterCount actual(argc_);
+ __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
+ // Slow-case: Non-function called.
__ bind(&slow);
- __ pop(rbx); // Return address.
- __ push(rdx);
- __ push(rbx);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
+ __ Set(rax, argc_);
+ __ Set(rbx, 0);
+ __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
+ Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ __ Jump(adaptor, RelocInfo::CODE_TARGET);
}
@@ -9628,6 +10530,11 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
}
+void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
+ UNREACHABLE();
+}
+
+
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
@@ -9818,62 +10725,6 @@ void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
}
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- Label slow;
-
- // If the receiver might be a value (string, number or boolean) check for this
- // and box it if it is.
- if (ReceiverMightBeValue()) {
- // Get the receiver from the stack.
- // +1 ~ return address
- Label receiver_is_value, receiver_is_js_object;
- __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
-
- // Check if receiver is a smi (which is a number value).
- __ JumpIfSmi(rax, &receiver_is_value);
-
- // Check if the receiver is a valid JS object.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
- __ j(above_equal, &receiver_is_js_object);
-
- // Call the runtime to box the value.
- __ bind(&receiver_is_value);
- __ EnterInternalFrame();
- __ push(rax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ LeaveInternalFrame();
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
-
- __ bind(&receiver_is_js_object);
- }
-
- // Get the function to call from the stack.
- // +2 ~ receiver, return address
- __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
-
- // Check that the function really is a JavaScript function.
- __ JumpIfSmi(rdi, &slow);
- // Goto slow case if we do not have a function.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &slow);
-
- // Fast-case: Just invoke the function.
- ParameterCount actual(argc_);
- __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
-
- // Slow-case: Non-function called.
- __ bind(&slow);
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
- __ Set(rax, argc_);
- __ Set(rbx, 0);
- __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
-}
-
-
void CEntryStub::Generate(MacroAssembler* masm) {
// rax: number of arguments including receiver
// rbx: pointer to C function (C callee-saved)
@@ -9942,11 +10793,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
}
-void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
- UNREACHABLE();
-}
-
-
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Label invoke, exit;
#ifdef ENABLE_LOGGING_AND_PROFILING
@@ -10078,887 +10924,88 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
}
-// -----------------------------------------------------------------------------
-// Implementation of stubs.
-
-// Stub classes have public member named masm, not masm_.
-
-void StackCheckStub::Generate(MacroAssembler* masm) {
- // Because builtins always remove the receiver from the stack, we
- // have to fake one to avoid underflowing the stack. The receiver
- // must be inserted below the return address on the stack so we
- // temporarily store that in a register.
- __ pop(rax);
- __ Push(Smi::FromInt(0));
- __ push(rax);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
-}
-
-
-void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
-}
-
-
-void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
- Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
- // Load operand in rdx into xmm0.
- __ JumpIfSmi(rdx, &load_smi_rdx);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- // Load operand in rax into xmm1.
- __ JumpIfSmi(rax, &load_smi_rax);
- __ bind(&load_nonsmi_rax);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_rdx);
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ JumpIfNotSmi(rax, &load_nonsmi_rax);
-
- __ bind(&load_smi_rax);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
- Label* not_numbers) {
- Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
- // Load operand in rdx into xmm0, or branch to not_numbers.
- __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
- __ JumpIfSmi(rdx, &load_smi_rdx);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
- __ j(not_equal, not_numbers); // Argument in rdx is not a number.
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- // Load operand in rax into xmm1, or branch to not_numbers.
- __ JumpIfSmi(rax, &load_smi_rax);
-
- __ bind(&load_nonsmi_rax);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
- __ j(not_equal, not_numbers);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_rdx);
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ JumpIfNotSmi(rax, &load_nonsmi_rax);
-
- __ bind(&load_smi_rax);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
- __ bind(&done);
-}
-
-
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
- Label* conversion_failure,
- Register heap_number_map) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- __ JumpIfNotSmi(rdx, &arg1_is_object);
- __ SmiToInteger32(rdx, rdx);
- __ jmp(&load_arg2);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg1);
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ movl(rdx, Immediate(0));
- __ jmp(&load_arg2);
-
- __ bind(&arg1_is_object);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, &check_undefined_arg1);
- // Get the untagged integer version of the edx heap number in rcx.
- IntegerConvert(masm, rdx, rdx);
-
- // Here rdx has the untagged integer, rax has a Smi or a heap number.
- __ bind(&load_arg2);
- // Test if arg2 is a Smi.
- __ JumpIfNotSmi(rax, &arg2_is_object);
- __ SmiToInteger32(rax, rax);
- __ movl(rcx, rax);
- __ jmp(&done);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg2);
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ movl(rcx, Immediate(0));
- __ jmp(&done);
-
- __ bind(&arg2_is_object);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, &check_undefined_arg2);
- // Get the untagged integer version of the eax heap number in ecx.
- IntegerConvert(masm, rcx, rax);
- __ bind(&done);
- __ movl(rax, rdx);
-}
-
-
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
-void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
- // Check float operands.
- Label done;
- Label rax_is_smi;
- Label rax_is_object;
- Label rdx_is_object;
-
- __ JumpIfNotSmi(rdx, &rdx_is_object);
- __ SmiToInteger32(rdx, rdx);
- __ JumpIfSmi(rax, &rax_is_smi);
-
- __ bind(&rax_is_object);
- IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
- __ jmp(&done);
-
- __ bind(&rdx_is_object);
- IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
- __ JumpIfNotSmi(rax, &rax_is_object);
- __ bind(&rax_is_smi);
- __ SmiToInteger32(rcx, rax);
-
- __ bind(&done);
- __ movl(rax, rdx);
-}
-
-
-const char* GenericBinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int len = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(len);
- if (name_ == NULL) return "OOM";
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
-
- OS::SNPrintF(Vector<char>(name_, len),
- "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
- op_name,
- overwrite_name,
- (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
- args_in_registers_ ? "RegArgs" : "StackArgs",
- args_reversed_ ? "_R" : "",
- static_operands_type_.ToString(),
- BinaryOpIC::GetName(runtime_operands_type_));
- return name_;
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ push(right);
- } else {
- // The calling convention with registers is left in rdx and right in rax.
- Register left_arg = rdx;
- Register right_arg = rax;
- if (!(left.is(left_arg) && right.is(right_arg))) {
- if (left.is(right_arg) && right.is(left_arg)) {
- if (IsOperationCommutative()) {
- SetArgsReversed();
- } else {
- __ xchg(left, right);
- }
- } else if (left.is(left_arg)) {
- __ movq(right_arg, right);
- } else if (right.is(right_arg)) {
- __ movq(left_arg, left);
- } else if (left.is(right_arg)) {
- if (IsOperationCommutative()) {
- __ movq(left_arg, right);
- SetArgsReversed();
- } else {
- // Order of moves important to avoid destroying left argument.
- __ movq(left_arg, left);
- __ movq(right_arg, right);
- }
- } else if (right.is(left_arg)) {
- if (IsOperationCommutative()) {
- __ movq(right_arg, left);
- SetArgsReversed();
- } else {
- // Order of moves important to avoid destroying right argument.
- __ movq(right_arg, right);
- __ movq(left_arg, left);
- }
- } else {
- // Order of moves is not important.
- __ movq(left_arg, left);
- __ movq(right_arg, right);
- }
- }
-
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Smi* right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ Push(right);
- } else {
- // The calling convention with registers is left in rdx and right in rax.
- Register left_arg = rdx;
- Register right_arg = rax;
- if (left.is(left_arg)) {
- __ Move(right_arg, right);
- } else if (left.is(right_arg) && IsOperationCommutative()) {
- __ Move(left_arg, right);
- SetArgsReversed();
- } else {
- // For non-commutative operations, left and right_arg might be
- // the same register. Therefore, the order of the moves is
- // important here in order to not overwrite left before moving
- // it to left_arg.
- __ movq(left_arg, left);
- __ Move(right_arg, right);
- }
-
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Smi* left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ Push(left);
- __ push(right);
- } else {
- // The calling convention with registers is left in rdx and right in rax.
- Register left_arg = rdx;
- Register right_arg = rax;
- if (right.is(right_arg)) {
- __ Move(left_arg, left);
- } else if (right.is(left_arg) && IsOperationCommutative()) {
- __ Move(right_arg, left);
- SetArgsReversed();
- } else {
- // For non-commutative operations, right and left_arg might be
- // the same register. Therefore, the order of the moves is
- // important here so that right is not overwritten before it has
- // been moved to right_arg.
- __ movq(right_arg, right);
- __ Move(left_arg, left);
- }
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm,
- VirtualFrame* frame,
- Result* left,
- Result* right) {
- if (ArgsInRegistersSupported()) {
- SetArgsInRegisters();
- return frame->CallStub(this, left, right);
- } else {
- frame->Push(left);
- frame->Push(right);
- return frame->CallStub(this, 2);
- }
-}
-
-
-void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
- // 1. Move arguments into rdx, rax except for DIV and MOD, which need the
- // dividend in rax and rdx free for the division. Use rax, rbx for those.
- Comment load_comment(masm, "-- Load arguments");
- Register left = rdx;
- Register right = rax;
- if (op_ == Token::DIV || op_ == Token::MOD) {
- left = rax;
- right = rbx;
- if (HasArgsInRegisters()) {
- __ movq(rbx, rax);
- __ movq(rax, rdx);
- }
- }
- if (!HasArgsInRegisters()) {
- __ movq(right, Operand(rsp, 1 * kPointerSize));
- __ movq(left, Operand(rsp, 2 * kPointerSize));
- }
-
- Label not_smis;
- // 2. Smi check both operands.
- if (static_operands_type_.IsSmi()) {
- // Skip smi check if we know that both arguments are smis.
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
- }
- if (op_ == Token::BIT_OR) {
- // Handle OR here, since we do extra smi-checking in the or code below.
- __ SmiOr(right, right, left);
- GenerateReturn(masm);
- return;
- }
- } else {
- if (op_ != Token::BIT_OR) {
- // Skip the check for OR as it is better combined with the
- // actual operation.
- Comment smi_check_comment(masm, "-- Smi check arguments");
- __ JumpIfNotBothSmi(left, right, &not_smis);
- }
- }
-
- // 3. Operands are both smis (except for OR), perform the operation leaving
- // the result in rax and check the result if necessary.
- Comment perform_smi(masm, "-- Perform smi operation");
- Label use_fp_on_smis;
- switch (op_) {
- case Token::ADD: {
- ASSERT(right.is(rax));
- __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
- break;
- }
-
- case Token::SUB: {
- __ SmiSub(left, left, right, &use_fp_on_smis);
- __ movq(rax, left);
- break;
- }
-
- case Token::MUL:
- ASSERT(right.is(rax));
- __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
- break;
-
- case Token::DIV:
- ASSERT(left.is(rax));
- __ SmiDiv(left, left, right, &use_fp_on_smis);
- break;
-
- case Token::MOD:
- ASSERT(left.is(rax));
- __ SmiMod(left, left, right, slow);
- break;
-
- case Token::BIT_OR:
- ASSERT(right.is(rax));
- __ movq(rcx, right); // Save the right operand.
- __ SmiOr(right, right, left); // BIT_OR is commutative.
- __ testb(right, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smis);
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(rax));
- __ SmiAnd(right, right, left); // BIT_AND is commutative.
- break;
-
- case Token::BIT_XOR:
- ASSERT(right.is(rax));
- __ SmiXor(right, right, left); // BIT_XOR is commutative.
- break;
-
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- switch (op_) {
- case Token::SAR:
- __ SmiShiftArithmeticRight(left, left, right);
- break;
- case Token::SHR:
- __ SmiShiftLogicalRight(left, left, right, slow);
- break;
- case Token::SHL:
- __ SmiShiftLeft(left, left, right);
- break;
- default:
- UNREACHABLE();
- }
- __ movq(rax, left);
- break;
-
- default:
- UNREACHABLE();
- break;
- }
-
- // 4. Emit return of result in rax.
- GenerateReturn(masm);
-
- // 5. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- ASSERT(use_fp_on_smis.is_linked());
- __ bind(&use_fp_on_smis);
- if (op_ == Token::DIV) {
- __ movq(rdx, rax);
- __ movq(rax, rbx);
- }
- // left is rdx, right is rax.
- __ AllocateHeapNumber(rbx, rcx, slow);
- FloatingPointHelper::LoadSSE2SmiOperands(masm);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rbx);
- GenerateReturn(masm);
- }
- default:
- break;
- }
-
- // 6. Non-smi operands, fall out to the non-smi code with the operands in
- // rdx and rax.
- Comment done_comment(masm, "-- Enter non-smi code");
- __ bind(&not_smis);
-
- switch (op_) {
- case Token::DIV:
- case Token::MOD:
- // Operands are in rax, rbx at this point.
- __ movq(rdx, rax);
- __ movq(rax, rbx);
- break;
-
- case Token::BIT_OR:
- // Right operand is saved in rcx and rax was destroyed by the smi
- // operation.
- __ movq(rax, rcx);
- break;
-
- default:
- break;
- }
-}
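The smi fast path above relies on the x64 smi encoding implied by the STATIC_ASSERT(kSmiValueSize == 32) later in this file: the 32-bit payload sits in the upper half of the 64-bit word. A sketch of why tagged addition then needs only the hardware overflow flag (the helpers below are illustrative, not the V8 macros; __builtin_add_overflow is a GCC/Clang builtin):

    #include <cstdint>
    // Tagging: payload in the upper 32 bits, low bits zero.
    int64_t SmiTagSketch(int32_t v) { return static_cast<int64_t>(v) << 32; }
    int32_t SmiUntagSketch(int64_t t) { return static_cast<int32_t>(t >> 32); }
    // Adding the tagged words adds the payloads; a signed 64-bit overflow
    // means the payload sum left the int32 range, which is the case the
    // use_fp_on_smis label above handles with doubles.
    bool SmiAddSketch(int64_t a, int64_t b, int64_t* out) {
      return !__builtin_add_overflow(a, b, out);  // false => go to doubles
    }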
-
-
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
- Label call_runtime;
-
- if (ShouldGenerateSmiCode()) {
- GenerateSmiCode(masm, &call_runtime);
- } else if (op_ != Token::MOD) {
- if (!HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
- }
- // Floating point case.
- if (ShouldGenerateFPCode()) {
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-smi argument occurs
- // (and only if smi code is generated). This is the right moment to
- // patch to HEAP_NUMBERS state. The transition is attempted only for
- // the four basic operations. The stub stays in the DEFAULT state
- // forever for all other operations (also if smi code is skipped).
- GenerateTypeTransition(masm);
- break;
- }
-
- Label not_floats;
- // rax: y
- // rdx: x
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(rdx);
- __ AbortIfNotNumber(rax);
- }
- FloatingPointHelper::LoadSSE2NumberOperands(masm);
- } else {
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime);
- }
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- // Allocate a heap number, if needed.
- Label skip_allocation;
- OverwriteMode mode = mode_;
- if (HasArgsReversed()) {
- if (mode == OVERWRITE_RIGHT) {
- mode = OVERWRITE_LEFT;
- } else if (mode == OVERWRITE_LEFT) {
- mode = OVERWRITE_RIGHT;
- }
- }
- switch (mode) {
- case OVERWRITE_LEFT:
- __ JumpIfNotSmi(rdx, &skip_allocation);
- __ AllocateHeapNumber(rbx, rcx, &call_runtime);
- __ movq(rdx, rbx);
- __ bind(&skip_allocation);
- __ movq(rax, rdx);
- break;
- case OVERWRITE_RIGHT:
- // If the argument in rax is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(rax, &skip_allocation);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep rax and rdx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(rbx, rcx, &call_runtime);
- __ movq(rax, rbx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- GenerateReturn(masm);
- __ bind(&not_floats);
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- !HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-number argument
- // occurs (and only if smi code is skipped from the stub, otherwise
- // the patching has already been done earlier in this case branch).
- // A perfect moment to try patching to STRINGS for ADD operation.
- if (op_ == Token::ADD) {
- GenerateTypeTransition(masm);
- }
- }
- break;
- }
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label skip_allocation, non_smi_shr_result;
- Register heap_number_map = r9;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(rdx);
- __ AbortIfNotNumber(rax);
- }
- FloatingPointHelper::LoadNumbersAsIntegers(masm);
- } else {
- FloatingPointHelper::LoadAsIntegers(masm,
- &call_runtime,
- heap_number_map);
- }
- switch (op_) {
- case Token::BIT_OR: __ orl(rax, rcx); break;
- case Token::BIT_AND: __ andl(rax, rcx); break;
- case Token::BIT_XOR: __ xorl(rax, rcx); break;
- case Token::SAR: __ sarl_cl(rax); break;
- case Token::SHL: __ shll_cl(rax); break;
- case Token::SHR: {
- __ shrl_cl(rax);
- // Check if result is negative. This can only happen for a shift
- // by zero.
- __ testl(rax, rax);
- __ j(negative, &non_smi_shr_result);
- break;
- }
- default: UNREACHABLE();
- }
-
- STATIC_ASSERT(kSmiValueSize == 32);
- // Tag smi result and return.
- __ Integer32ToSmi(rax, rax);
- GenerateReturn(masm);
-
- // All bit-ops except SHR return a signed int32 that can be
- // returned immediately as a smi.
- // We might need to allocate a HeapNumber if we shift a negative
- // number right by zero (i.e., convert to UInt32).
- if (op_ == Token::SHR) {
- ASSERT(non_smi_shr_result.is_linked());
- __ bind(&non_smi_shr_result);
- // Allocate a heap number if needed.
- __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(rax, &skip_allocation);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate heap number in new space.
- // Not using AllocateHeapNumber macro in order to reuse
- // already loaded heap_number_map.
- __ AllocateInNewSpace(HeapNumber::kSize,
- rax,
- rcx,
- no_reg,
- &call_runtime,
- TAG_OBJECT);
- // Set the map.
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
- __ movq(FieldOperand(rax, HeapObject::kMapOffset),
- heap_number_map);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- __ cvtqsi2sd(xmm0, rbx);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- GenerateReturn(masm);
- }
-
- break;
- }
- default: UNREACHABLE(); break;
- }
- }
-
- // If all else fails, use the runtime system to get the correct
- // result. If the arguments were passed in registers, place them on the
- // stack in the correct order below the return address.
- __ bind(&call_runtime);
-
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
-
- switch (op_) {
- case Token::ADD: {
- // Registers containing left and right operands respectively.
- Register lhs, rhs;
-
- if (HasArgsReversed()) {
- lhs = rax;
- rhs = rdx;
- } else {
- lhs = rdx;
- rhs = rax;
- }
-
- // Test for string arguments before calling runtime.
- Label not_strings, both_strings, not_string1, string1, string1_smi2;
-
- // If this stub has already generated FP-specific code then the arguments
- // are already in rdx and rax.
- if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
-
- Condition is_smi;
- is_smi = masm->CheckSmi(lhs);
- __ j(is_smi, &not_string1);
- __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8);
- __ j(above_equal, &not_string1);
-
- // First argument is a string, test the second.
- is_smi = masm->CheckSmi(rhs);
- __ j(is_smi, &string1_smi2);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
- __ j(above_equal, &string1);
-
- // First and second argument are strings.
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&string1_smi2);
- // First argument is a string, second is a smi. Try to lookup the number
- // string for the smi in the number string cache.
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm, rhs, rbx, rcx, r8, true, &string1);
-
- // Replace the second argument on the stack and tail-call the string add
- // stub to produce the result.
- __ movq(Operand(rsp, 1 * kPointerSize), rbx);
- __ TailCallStub(&string_add_stub);
-
- // Only first argument is a string.
- __ bind(&string1);
- __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
-
- // First argument was not a string, test second.
- __ bind(&not_string1);
- is_smi = masm->CheckSmi(rhs);
- __ j(is_smi, &not_strings);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs);
- __ j(above_equal, &not_strings);
-
- // Only second argument is a string.
- __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
-
- __ bind(&not_strings);
- // Neither argument is a string.
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- }
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
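Before reaching the generic ADD builtin, the slow path above tries the string cases in a fixed order. A compile-only sketch of that dispatch (every type and function below is a placeholder declaration standing in for V8 internals, not real API):

    struct Value {};                                   // opaque stand-in
    bool IsSmi(const Value&);                          // placeholders,
    bool IsString(const Value&);                       // declarations only
    bool NumberStringCacheLookup(const Value& smi, Value* out);
    Value StringAdd(const Value&, const Value&);       // StringAddStub
    Value StringAddLeft(const Value&, const Value&);   // STRING_ADD_LEFT
    Value StringAddRight(const Value&, const Value&);  // STRING_ADD_RIGHT
    Value GenericAdd(const Value&, const Value&);      // Builtins::ADD

    Value AddSlowPathSketch(const Value& lhs, const Value& rhs) {
      if (!IsSmi(lhs) && IsString(lhs)) {
        if (IsSmi(rhs)) {
          Value as_string;
          if (NumberStringCacheLookup(rhs, &as_string))
            return StringAdd(lhs, as_string);          // cache hit
          return StringAddLeft(lhs, rhs);              // cache miss
        }
        if (IsString(rhs)) return StringAdd(lhs, rhs); // both are strings
        return StringAddLeft(lhs, rhs);                // only lhs is a string
      }
      if (!IsSmi(rhs) && IsString(rhs))
        return StringAddRight(lhs, rhs);               // only rhs is a string
      return GenericAdd(lhs, rhs);                     // neither is a string
    }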
-
-
-void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
- ASSERT(!HasArgsInRegisters());
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-}
-
-
-void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
- // If arguments are not passed in registers remove them from the stack before
- // returning.
- if (!HasArgsInRegisters()) {
- __ ret(2 * kPointerSize); // Remove both operands
- } else {
- __ ret(0);
- }
-}
-
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Implements "value instanceof function" operator.
+ // Expected input state:
+ // rsp[0] : return address
+ // rsp[1] : function pointer
+ // rsp[2] : value
+ // Returns a bitwise zero to indicate that the value is an instance
+ // of the function, and anything else to indicate that it is not.
-void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- ASSERT(HasArgsInRegisters());
- __ pop(rcx);
- if (HasArgsReversed()) {
- __ push(rax);
- __ push(rdx);
- } else {
- __ push(rdx);
- __ push(rax);
- }
- __ push(rcx);
-}
+ // Get the object; go to the slow case if it's a smi.
+ Label slow;
+ __ movq(rax, Operand(rsp, 2 * kPointerSize));
+ __ JumpIfSmi(rax, &slow);
+ // Check that the left-hand side is a JS object. Leave its map in rax.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
+ __ j(below, &slow);
+ __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
+ __ j(above, &slow);
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
+ // Get the prototype of the function.
+ __ movq(rdx, Operand(rsp, 1 * kPointerSize));
+ // rdx is function, rax is map.
- // Ensure the operands are on the stack.
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
+ // Look up the function and the map in the instanceof cache.
+ Label miss;
+ __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
+ __ j(not_equal, &miss);
+ __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
+ __ j(not_equal, &miss);
+ __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret(2 * kPointerSize);
- // Left and right arguments are already on stack.
- __ pop(rcx); // Save the return address.
+ __ bind(&miss);
+ __ TryGetFunctionPrototype(rdx, rbx, &slow);
- // Push this stub's key.
- __ Push(Smi::FromInt(MinorKey()));
+ // Check that the function prototype is a JS object.
+ __ JumpIfSmi(rbx, &slow);
+ __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
+ __ j(below, &slow);
+ __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
+ __ j(above, &slow);
- // Although the operation and the type info are encoded into the key,
- // the encoding is opaque, so push them too.
- __ Push(Smi::FromInt(op_));
+ // Register mapping:
+ // rax is object map.
+ // rdx is function.
+ // rbx is function prototype.
+ __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
- __ Push(Smi::FromInt(runtime_operands_type_));
+ __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
- __ push(rcx); // The return address.
+ // Loop through the prototype chain looking for the function prototype.
+ Label loop, is_instance, is_not_instance;
+ __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
+ __ bind(&loop);
+ __ cmpq(rcx, rbx);
+ __ j(equal, &is_instance);
+ __ cmpq(rcx, kScratchRegister);
+ // The code at is_not_instance assumes that kScratchRegister contains a
+ // non-zero GCable value (the null object in this case).
+ __ j(equal, &is_not_instance);
+ __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
+ __ jmp(&loop);
- // Perform patching to an appropriate fast case and return the result.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
- 5,
- 1);
-}
+ __ bind(&is_instance);
+ __ xorl(rax, rax);
+ // Store bitwise zero in the cache. This is a Smi in GC terms.
+ ASSERT_EQ(0, kSmiTag);
+ __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret(2 * kPointerSize);
+ __ bind(&is_not_instance);
+ // We have to store a non-zero value in the cache.
+ __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret(2 * kPointerSize);
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
- GenericBinaryOpStub stub(key, type_info);
- return stub.GetCode();
+ // Slow-case: Go through the JavaScript implementation.
+ __ bind(&slow);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
}
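Stripped of the root-backed cache and the slow path, the loop above is a plain prototype-chain walk. A compile-only sketch of what it computes (placeholder declarations, not V8's object model):

    struct Object;                   // opaque stand-in
    Object* PrototypeOf(Object* o);  // placeholder declarations only
    bool IsNull(Object* o);

    // Walk the chain starting at the receiver map's prototype, looking
    // for the function's prototype object.
    bool IsInstanceSketch(Object* start, Object* fn_prototype) {
      for (Object* p = start; !IsNull(p); p = PrototypeOf(p)) {
        if (p == fn_prototype) return true;  // stub caches and returns smi 0
      }
      return false;                          // stub caches a non-zero value
    }

The three instanceof cache roots (function, map, answer) turn a repeated `x instanceof F` on the same map into one load and two compares.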
@@ -10966,8 +11013,10 @@ int CompareStub::MinorKey() {
// Encode the three parameters in a unique 16 bit value. To avoid duplicate
// stubs the never NaN NaN condition is only taken into account if the
// condition is equals.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 13));
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
return ConditionField::encode(static_cast<unsigned>(cc_))
+ | RegisterField::encode(false) // lhs_ and rhs_ are not used
| StrictField::encode(strict_)
| NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
| IncludeNumberCompareField::encode(include_number_compare_);
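The MinorKey encoding is a straight bit-field pack into 16 bits. Schematically (the exact bit positions come from the BitField templates and are an assumption here):

    #include <cstdint>
    // Schematic packing of CompareStub::MinorKey; positions assumed.
    uint16_t EncodeMinorKeySketch(unsigned cc, bool regs, bool strict,
                                  bool never_nan_nan, bool number_compare) {
      return static_cast<uint16_t>(cc & 0xFFF)          // 12 bits: condition
           | (regs           ? 1u << 12 : 0u)           // lhs_/rhs_ used?
           | (strict         ? 1u << 13 : 0u)
           | (never_nan_nan  ? 1u << 14 : 0u)
           | (number_compare ? 1u << 15 : 0u);
    }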
@@ -10977,6 +11026,8 @@ int CompareStub::MinorKey() {
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
const char* CompareStub::GetName() {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
@@ -11494,7 +11545,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
// Make count the number of bytes to copy.
if (!ascii) {
- ASSERT_EQ(2, sizeof(uc16)); // NOLINT
+ ASSERT_EQ(2, static_cast<int>(sizeof(uc16))); // NOLINT
__ addl(count, count);
}
@@ -12067,6 +12118,11 @@ ModuloFunction CreateModuloFunction() {
#undef __
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ masm->RecordWriteHelper(object_, addr_, scratch_);
+ masm->ret(0);
+}
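RecordWriteStub is now a thin wrapper: the work happens in the shared RecordWriteHelper macro, which records that the slot written inside object may now hold a new-space pointer. A very loose sketch of the idea, assuming a card-marking style layout purely for illustration (V8's actual remembered-set representation differs):

    #include <cstddef>
    #include <cstdint>
    // Mark the region containing the stored-into slot so the GC rescans
    // it when collecting the young generation.
    void RecordWriteSketch(uintptr_t slot_addr, uint8_t* marks,
                           uintptr_t heap_base, unsigned region_shift) {
      size_t region = static_cast<size_t>(slot_addr - heap_base) >> region_shift;
      marks[region] = 1;
    }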
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index b9a3b706..dc6f583d 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -521,6 +521,17 @@ class CodeGenerator: public AstVisitor {
Condition cc,
bool strict,
ControlDestination* destination);
+
+ // If at least one of the sides is a constant smi, generate optimized code.
+ void ConstantSmiComparison(Condition cc,
+ bool strict,
+ ControlDestination* destination,
+ Result* left_side,
+ Result* right_side,
+ bool left_side_constant_smi,
+ bool right_side_constant_smi,
+ bool is_loop_condition);
+
void GenerateInlineNumberComparison(Result* left_side,
Result* right_side,
Condition cc,
@@ -578,6 +589,7 @@ class CodeGenerator: public AstVisitor {
void GenerateIsArray(ZoneList<Expression*>* args);
void GenerateIsRegExp(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
+ void GenerateIsSpecObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index c6be5033..0b3b7c4d 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1991,6 +1991,25 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
}
+void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ __ JumpIfSmi(rax, if_false);
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
+ __ j(above_equal, if_true);
+ __ jmp(if_false);
+
+ Apply(context_, if_true, if_false);
+}
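EmitIsSpecObject compiles the intrinsic down to a smi check plus one instance-type range compare. As a compile-only sketch (placeholder declarations, not V8 API):

    struct Object;                        // opaque stand-in
    bool IsSmi(Object* x);                // placeholder declarations only
    int InstanceTypeOf(Object* x);
    extern const int kFirstJSObjectType;  // FIRST_JS_OBJECT_TYPE stand-in

    bool IsSpecObjectSketch(Object* x) {
      return !IsSmi(x) && InstanceTypeOf(x) >= kFirstJSObjectType;
    }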
+
+
void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
@@ -2243,11 +2262,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- // To allocate a heap number, and ensure that it is not a smi, we
- // call the runtime function FUnaryMinus on 0, returning the double
- // -0.0. A new, distinct heap number is returned each time.
- __ Push(Smi::FromInt(0));
- __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
+ // Allocate a heap number.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
__ movq(rbx, rax);
__ bind(&heapnumber_allocated);
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 76200d7e..a5634a79 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -2322,101 +2322,6 @@ void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode, int result_size) {
}
-Register MacroAssembler::CheckMaps(JSObject* object,
- Register object_reg,
- JSObject* holder,
- Register holder_reg,
- Register scratch,
- int save_at_depth,
- Label* miss) {
- // Make sure there's no overlap between scratch and the other
- // registers.
- ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));
-
- // Keep track of the current object in register reg. On the first
- // iteration, reg is an alias for object_reg, on later iterations,
- // it is an alias for holder_reg.
- Register reg = object_reg;
- int depth = 0;
-
- if (save_at_depth == depth) {
- movq(Operand(rsp, kPointerSize), object_reg);
- }
-
- // Check the maps in the prototype chain.
- // Traverse the prototype chain from the object and do map checks.
- while (object != holder) {
- depth++;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- JSObject* prototype = JSObject::cast(object->GetPrototype());
- if (Heap::InNewSpace(prototype)) {
- // Get the map of the current object.
- movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- Cmp(scratch, Handle<Map>(object->map()));
- // Branch on the result of the map check.
- j(not_equal, miss);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (object->IsJSGlobalProxy()) {
- CheckAccessGlobalProxy(reg, scratch, miss);
-
- // Restore scratch register to be the map of the object.
- // We load the prototype from the map in the scratch register.
- movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- }
- // The prototype is in new space; we cannot store a reference
- // to it in the code. Load it from the map.
- reg = holder_reg; // from now the object is in holder_reg
- movq(reg, FieldOperand(scratch, Map::kPrototypeOffset));
-
- } else {
- // Check the map of the current object.
- Cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Handle<Map>(object->map()));
- // Branch on the result of the map check.
- j(not_equal, miss);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (object->IsJSGlobalProxy()) {
- CheckAccessGlobalProxy(reg, scratch, miss);
- }
- // The prototype is in old space; load it directly.
- reg = holder_reg; // from now the object is in holder_reg
- Move(reg, Handle<JSObject>(prototype));
- }
-
- if (save_at_depth == depth) {
- movq(Operand(rsp, kPointerSize), reg);
- }
-
- // Go to the next object in the prototype chain.
- object = prototype;
- }
-
- // Check the holder map.
- Cmp(FieldOperand(reg, HeapObject::kMapOffset), Handle<Map>(holder->map()));
- j(not_equal, miss);
-
- // Log the check depth.
- LOG(IntEvent("check-maps-depth", depth + 1));
-
- // Perform security check for access to the global object and return
- // the holder register.
- ASSERT(object == holder);
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
- if (object->IsJSGlobalProxy()) {
- CheckAccessGlobalProxy(reg, scratch, miss);
- }
- return reg;
-}
-
-
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Register scratch,
Label* miss) {
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index a256ab82..64f35e10 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -596,24 +596,6 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Inline caching support
- // Generates code that verifies that the maps of objects in the
- // prototype chain of object hasn't changed since the code was
- // generated and branches to the miss label if any map has. If
- // necessary the function also generates code for security check
- // in case of global object holders. The scratch and holder
- // registers are always clobbered, but the object register is only
- // clobbered if it is the same as the holder register. The function
- // returns a register containing the holder - either object_reg or
- // holder_reg.
- // The function can optionally (when save_at_depth !=
- // kInvalidProtoDepth) save the object at the given depth by moving
- // it to [rsp + kPointerSize].
- Register CheckMaps(JSObject* object, Register object_reg,
- JSObject* holder, Register holder_reg,
- Register scratch,
- int save_at_depth,
- Label* miss);
-
// Generate code for checking access rights - used for security checks
// on access to global objects across environments. The holder register
// is left untouched, but the scratch register and kScratchRegister,
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 383399ea..80318648 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -960,7 +960,6 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(&code_desc);
Handle<Code> code = Factory::NewCode(code_desc,
- NULL,
Code::ComputeFlags(Code::REGEXP),
masm_->CodeObject());
PROFILE(RegExpCodeCreateEvent(*code, *source));
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index ab75b968..2a918f16 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -81,6 +81,106 @@ static void ProbeTable(MacroAssembler* masm,
}
+// Helper function used to check that the dictionary doesn't contain
+// the property. This function may return false negatives, so the code
+// at miss_label must always fall back on a complete property check.
+// This function is safe to call if the receiver has fast properties.
+// Name must be a symbol and receiver must be a heap object.
+static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ String* name,
+ Register r0,
+ Register r1) {
+ ASSERT(name->IsSymbol());
+ __ IncrementCounter(&Counters::negative_lookups, 1);
+ __ IncrementCounter(&Counters::negative_lookups_miss, 1);
+
+ Label done;
+ __ movq(r0, FieldOperand(receiver, HeapObject::kMapOffset));
+
+ const int kInterceptorOrAccessCheckNeededMask =
+ (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+ // Bail out if the receiver has a named interceptor or requires access checks.
+ __ testb(FieldOperand(r0, Map::kBitFieldOffset),
+ Immediate(kInterceptorOrAccessCheckNeededMask));
+ __ j(not_zero, miss_label);
+
+ // Check that receiver is a JSObject.
+ __ CmpInstanceType(r0, FIRST_JS_OBJECT_TYPE);
+ __ j(below, miss_label);
+
+ // Load properties array.
+ Register properties = r0;
+ __ movq(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
+
+ // Check that the properties array is a dictionary.
+ __ CompareRoot(FieldOperand(properties, HeapObject::kMapOffset),
+ Heap::kHashTableMapRootIndex);
+ __ j(not_equal, miss_label);
+
+ // Compute the capacity mask.
+ const int kCapacityOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kCapacityIndex * kPointerSize;
+
+ // Generate an unrolled loop that performs a few probes before
+ // giving up.
+ static const int kProbes = 4;
+ const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+
+ // If the names in slots 1 to kProbes - 1 for the hash value are not equal
+ // to the name, and the kProbes-th slot is unused (its name is the undefined
+ // value), the hash table is guaranteed not to contain the property. This
+ // holds even if some slots hold deleted properties (their names are the
+ // null value).
+ for (int i = 0; i < kProbes; i++) {
+ // r0 points to properties hash.
+ // Compute the masked index: (hash + i + i * i) & mask.
+ Register index = r1;
+ // The capacity is a smi and is always a power of two.
+ __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
+ __ decl(index);
+ __ and_(index,
+ Immediate(name->Hash() + StringDictionary::GetProbeOffset(i)));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
+
+ Register entity_name = r1;
+ // Finding undefined at this slot means the name is not in the table.
+ ASSERT_EQ(kSmiTagSize, 1);
+ __ movq(entity_name, Operand(properties, index, times_pointer_size,
+ kElementsStartOffset - kHeapObjectTag));
+ __ Cmp(entity_name, Factory::undefined_value());
+ if (i != kProbes - 1) {
+ __ j(equal, &done);
+
+ // Stop if we have found the property.
+ __ Cmp(entity_name, Handle<String>(name));
+ __ j(equal, miss_label);
+
+ // Check if the entry name is not a symbol.
+ __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
+ __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
+ Immediate(kIsSymbolMask));
+ __ j(zero, miss_label);
+ } else {
+ // Give up probing if we still have not found the undefined value.
+ __ j(not_equal, miss_label);
+ }
+ }
+
+ __ bind(&done);
+ __ DecrementCounter(&Counters::negative_lookups_miss, 1);
+}
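The unrolled loop probes the dictionary exactly as its comments describe: masked quadratic probing over a power-of-two capacity, three words per entry. As a standalone sketch (the probe offset i + i*i is taken from the in-code comment above):

    #include <cstdint>
    // Word index of the i-th probe into the dictionary's elements array.
    uint32_t ProbeSlotSketch(uint32_t hash, uint32_t i, uint32_t capacity) {
      uint32_t index = (hash + i + i * i) & (capacity - 1);  // capacity = 2^n
      return index * 3;  // kEntrySize == 3: key, value, property details
    }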
+
+
void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
Code* code = NULL;
@@ -497,6 +597,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -512,6 +613,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
receiver,
scratch1,
scratch2,
+ scratch3,
holder,
lookup,
name,
@@ -523,6 +625,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
receiver,
scratch1,
scratch2,
+ scratch3,
name,
holder,
miss);
@@ -535,6 +638,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
JSObject* interceptor_holder,
LookupResult* lookup,
String* name,
@@ -574,7 +678,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register holder =
stub_compiler_->CheckPrototypes(object, receiver,
interceptor_holder, scratch1,
- scratch2, name, depth1, miss);
+ scratch2, scratch3, name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -590,7 +694,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
if (interceptor_holder != lookup->holder()) {
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
lookup->holder(), scratch1,
- scratch2, name, depth2, miss);
+ scratch2, scratch3, name, depth2, miss);
} else {
// CheckPrototypes has a side effect of fetching a 'holder'
// for API (object which is instanceof for the signature). It's
@@ -626,12 +730,13 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
String* name,
JSObject* interceptor_holder,
Label* miss_label) {
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, name,
+ scratch1, scratch2, scratch3, name,
miss_label);
__ EnterInternalFrame();
@@ -784,7 +889,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), rdx, holder,
- rbx, rax, name, depth, &miss);
+ rbx, rax, rdi, name, depth, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -807,7 +912,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, rax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, name, &miss);
+ rbx, rdx, rdi, name, &miss);
}
break;
@@ -826,7 +931,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::NUMBER_FUNCTION_INDEX, rax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, name, &miss);
+ rbx, rdx, rdi, name, &miss);
}
break;
}
@@ -847,7 +952,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::BOOLEAN_FUNCTION_INDEX, rax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, name, &miss);
+ rbx, rdx, rdi, name, &miss);
}
break;
}
@@ -902,7 +1007,8 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,
__ JumpIfSmi(rdx, &miss);
// Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, rdx, holder, rbx, rax, name, &miss);
+ Register reg = CheckPrototypes(object, rdx, holder, rbx, rax, rdi,
+ name, &miss);
GenerateFastPropertyLoad(masm(), rdi, reg, holder, index);
@@ -965,6 +1071,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
holder,
rbx,
rax,
+ rdi,
name,
&miss);
@@ -1119,7 +1226,7 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
CheckPrototypes(JSObject::cast(object), rdx,
holder, rbx,
- rax, name, &miss);
+ rax, rdi, name, &miss);
// Get the elements array of the object.
__ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
@@ -1226,6 +1333,7 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
rdx,
rbx,
rdi,
+ rax,
&miss);
// Restore receiver.
@@ -1288,7 +1396,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
}
// Check that the maps haven't changed.
- CheckPrototypes(object, rdx, holder, rbx, rax, name, &miss);
+ CheckPrototypes(object, rdx, holder, rbx, rax, rdi, name, &miss);
// Get the value from the cell.
__ Move(rdi, Handle<JSGlobalPropertyCell>(cell));
@@ -1353,7 +1461,7 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name,
Label miss;
Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx,
+ bool success = GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx, rdi,
callback, name, &miss, &failure);
if (!success) return failure;
@@ -1376,7 +1484,7 @@ Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
// -----------------------------------
Label miss;
- GenerateLoadConstant(object, holder, rax, rbx, rdx, value, name, &miss);
+ GenerateLoadConstant(object, holder, rax, rbx, rdx, rdi, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1401,7 +1509,7 @@ Object* LoadStubCompiler::CompileLoadNonexistent(String* name,
// Check the maps of the full prototype chain. Also check that
// global property cells up to (but not including) the last object
// in the prototype chain are empty.
- CheckPrototypes(object, rax, last, rbx, rdx, name, &miss);
+ CheckPrototypes(object, rax, last, rbx, rdx, rdi, name, &miss);
// If the last object in the prototype chain is a global object,
// check that the global property cell is empty.
@@ -1438,7 +1546,7 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
// -----------------------------------
Label miss;
- GenerateLoadField(object, holder, rax, rbx, rdx, index, name, &miss);
+ GenerateLoadField(object, holder, rax, rbx, rdx, rdi, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1469,6 +1577,7 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
rcx,
rdx,
rbx,
+ rdi,
name,
&miss);
@@ -1500,7 +1609,7 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
}
// Check that the maps haven't changed.
- CheckPrototypes(object, rax, holder, rbx, rdx, name, &miss);
+ CheckPrototypes(object, rax, holder, rbx, rdx, rdi, name, &miss);
// Get the value from the cell.
__ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
@@ -1546,7 +1655,7 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
__ j(not_equal, &miss);
Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx,
+ bool success = GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx, rdi,
callback, name, &miss, &failure);
if (!success) return failure;
@@ -1600,7 +1709,7 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
- GenerateLoadConstant(receiver, holder, rdx, rbx, rcx,
+ GenerateLoadConstant(receiver, holder, rdx, rbx, rcx, rdi,
value, name, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_constant_function, 1);
@@ -1660,6 +1769,7 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
rax,
rcx,
rbx,
+ rdi,
name,
&miss);
__ bind(&miss);
@@ -1875,7 +1985,7 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
- GenerateLoadField(receiver, holder, rdx, rbx, rcx, index, name, &miss);
+ GenerateLoadField(receiver, holder, rdx, rbx, rcx, rdi, index, name, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_field, 1);
@@ -1954,6 +2064,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
Register name_reg,
Register scratch1,
Register scratch2,
+ Register scratch3,
String* name,
Label* miss) {
ASSERT(interceptor_holder->HasNamedInterceptor());
@@ -1981,7 +2092,8 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// property from further up the prototype chain if the call fails.
// Check that the maps haven't changed.
Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, name, miss);
+ scratch1, scratch2, scratch3,
+ name, miss);
ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
// Save necessary data before invoking an interceptor.
@@ -2029,6 +2141,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
lookup->holder(),
scratch1,
scratch2,
+ scratch3,
name,
miss);
}
@@ -2068,7 +2181,8 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// Call the runtime system to load the interceptor.
// Check that the maps haven't changed.
Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, name, miss);
+ scratch1, scratch2, scratch3,
+ name, miss);
__ pop(scratch2); // save old return address
PushInterceptorArguments(masm(), receiver, holder_reg,
name_reg, interceptor_holder);
@@ -2087,6 +2201,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
Register name_reg,
Register scratch1,
Register scratch2,
+ Register scratch3,
AccessorInfo* callback,
String* name,
Label* miss,
@@ -2097,7 +2212,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
// Check that the maps haven't changed.
Register reg =
CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, name, miss);
+ scratch1, scratch2, scratch3, name, miss);
// Push the arguments on the JS stack of the caller.
__ pop(scratch2); // remove return address
@@ -2122,41 +2237,143 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
Register object_reg,
JSObject* holder,
Register holder_reg,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
String* name,
int save_at_depth,
- Label* miss,
- Register extra) {
- // Check that the maps haven't changed.
- Register result =
- masm()->CheckMaps(object,
- object_reg,
- holder,
- holder_reg,
- scratch,
- save_at_depth,
- miss);
+ Label* miss) {
+ // Make sure the scratch registers don't overlap the object and holder
+ // registers, or each other.
+ ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+ ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
+ && !scratch2.is(scratch1));
+
+ // Keep track of the current object in register reg. On the first
+ // iteration, reg is an alias for object_reg, on later iterations,
+ // it is an alias for holder_reg.
+ Register reg = object_reg;
+ int depth = 0;
+
+ if (save_at_depth == depth) {
+ __ movq(Operand(rsp, kPointerSize), object_reg);
+ }
+
+ // Check the maps in the prototype chain.
+ // Traverse the prototype chain from the object and do map checks.
+ JSObject* current = object;
+ while (current != holder) {
+ depth++;
+
+ // Only global objects and objects that do not require access
+ // checks are allowed in stubs.
+ ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+
+ JSObject* prototype = JSObject::cast(current->GetPrototype());
+ if (!current->HasFastProperties() &&
+ !current->IsJSGlobalObject() &&
+ !current->IsJSGlobalProxy()) {
+ if (!name->IsSymbol()) {
+ Object* lookup_result = Heap::LookupSymbol(name);
+ if (lookup_result->IsFailure()) {
+ set_failure(Failure::cast(lookup_result));
+ return reg;
+ } else {
+ name = String::cast(lookup_result);
+ }
+ }
+ ASSERT(current->property_dictionary()->FindEntry(name) ==
+ StringDictionary::kNotFound);
+
+ GenerateDictionaryNegativeLookup(masm(),
+ miss,
+ reg,
+ name,
+ scratch1,
+ scratch2);
+ __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ reg = holder_reg; // from now the object is in holder_reg
+ __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+ } else if (Heap::InNewSpace(prototype)) {
+ // Get the map of the current object.
+ __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ __ Cmp(scratch1, Handle<Map>(current->map()));
+ // Branch on the result of the map check.
+ __ j(not_equal, miss);
+ // Check access rights to the global object. This has to happen
+ // after the map check so that we know that the object is
+ // actually a global object.
+ if (current->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
+
+ // Restore scratch register to be the map of the object.
+ // We load the prototype from the map in the scratch register.
+ __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ }
+ // The prototype is in new space; we cannot store a reference
+ // to it in the code. Load it from the map.
+ reg = holder_reg; // from now the object is in holder_reg
+ __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+
+ } else {
+ // Check the map of the current object.
+ __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ Handle<Map>(current->map()));
+ // Branch on the result of the map check.
+ __ j(not_equal, miss);
+ // Check access rights to the global object. This has to happen
+ // after the map check so that we know that the object is
+ // actually a global object.
+ if (current->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ }
+ // The prototype is in old space; load it directly.
+ reg = holder_reg; // from now the object is in holder_reg
+ __ Move(reg, Handle<JSObject>(prototype));
+ }
+
+ if (save_at_depth == depth) {
+ __ movq(Operand(rsp, kPointerSize), reg);
+ }
+
+ // Go to the next object in the prototype chain.
+ current = prototype;
+ }
+
+ // Check the holder map.
+ __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), Handle<Map>(holder->map()));
+ __ j(not_equal, miss);
+
+ // Log the check depth.
+ LOG(IntEvent("check-maps-depth", depth + 1));
+
+ // Perform security check for access to the global object and return
+ // the holder register.
+ ASSERT(current == holder);
+ ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+ if (current->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ }
// If we've skipped any global objects, it's not enough to verify
// that their maps haven't changed. We also need to check that the
// property cell for the property is still empty.
- while (object != holder) {
- if (object->IsGlobalObject()) {
+ current = object;
+ while (current != holder) {
+ if (current->IsGlobalObject()) {
Object* cell = GenerateCheckPropertyCell(masm(),
- GlobalObject::cast(object),
+ GlobalObject::cast(current),
name,
- scratch,
+ scratch1,
miss);
if (cell->IsFailure()) {
set_failure(Failure::cast(cell));
- return result;
+ return reg;
}
}
- object = JSObject::cast(object->GetPrototype());
+ current = JSObject::cast(current->GetPrototype());
}
// Return the register containing the holder.
- return result;
+ return reg;
}
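At run time the code emitted by the rewritten CheckPrototypes reduces to a map-identity walk from the receiver to the holder, with dictionary-mode and new-space prototypes handled by the special cases above. A compile-only sketch of the property it verifies (placeholder declarations, not V8 API):

    struct JSObject;                     // opaque stand-in
    JSObject* PrototypeOf(JSObject* o);  // placeholder declarations only
    const void* MapOf(JSObject* o);
    const void* CompileTimeMapOf(JSObject* o);

    // Any mismatch makes the emitted code branch to the miss label; this
    // sketch returns false instead.
    bool PrototypeChainUnchangedSketch(JSObject* object, JSObject* holder) {
      for (JSObject* cur = object; cur != holder; cur = PrototypeOf(cur)) {
        if (MapOf(cur) != CompileTimeMapOf(cur)) return false;
      }
      return MapOf(holder) == CompileTimeMapOf(holder);
    }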
@@ -2165,6 +2382,7 @@ void StubCompiler::GenerateLoadField(JSObject* object,
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
int index,
String* name,
Label* miss) {
@@ -2174,7 +2392,7 @@ void StubCompiler::GenerateLoadField(JSObject* object,
// Check the prototype chain.
Register reg =
CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, name, miss);
+ scratch1, scratch2, scratch3, name, miss);
// Get the value from the properties.
GenerateFastPropertyLoad(masm(), rax, reg, holder, index);
@@ -2187,6 +2405,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
Object* value,
String* name,
Label* miss) {
@@ -2196,7 +2415,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
// Check that the maps haven't changed.
Register reg =
CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, name, miss);
+ scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
__ Move(rax, Handle<Object>(value));