Diffstat (limited to 'src/x64')
-rw-r--r--  src/x64/code-stubs-x64.cc      | 189
-rw-r--r--  src/x64/code-stubs-x64.h       | 100
-rw-r--r--  src/x64/codegen-x64.cc         |  16
-rw-r--r--  src/x64/full-codegen-x64.cc    | 192
-rw-r--r--  src/x64/lithium-x64.cc         |  71
-rw-r--r--  src/x64/lithium-x64.h          | 207
-rw-r--r--  src/x64/macro-assembler-x64.cc |   2
7 files changed, 696 insertions(+), 81 deletions(-)
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 60ec35d0..59522d22 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -988,8 +988,195 @@ Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
Handle<Code> GetTypeRecordingBinaryOpStub(int key,
TRBinaryOpIC::TypeInfo type_info,
TRBinaryOpIC::TypeInfo result_type_info) {
+ TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
+ return stub.GetCode();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ __ pop(rcx); // Save return address.
+ __ push(rdx);
+ __ push(rax);
+ // Left and right arguments are now on top.
+ // Push this stub's key. Although the operation and the type info are
+ // encoded into the key, the encoding is opaque, so push them too.
+ __ Push(Smi::FromInt(MinorKey()));
+ __ Push(Smi::FromInt(op_));
+ __ Push(Smi::FromInt(operands_type_));
+
+ __ push(rcx); // Push return address.
+
+ // Patch the caller to an appropriate specialized stub and return the
+ // operation result to the caller of the stub.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
+ 5,
+ 1);
+}
+
+
+// Prepare for a type transition runtime call when the args are already on
+// the stack, under the return address.
+void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
+ MacroAssembler* masm) {
+ __ pop(rcx); // Save return address.
+ // Left and right arguments are already on top of the stack.
+ // Push this stub's key. Although the operation and the type info are
+ // encoded into the key, the encoding is opaque, so push them too.
+ __ Push(Smi::FromInt(MinorKey()));
+ __ Push(Smi::FromInt(op_));
+ __ Push(Smi::FromInt(operands_type_));
+
+ __ push(rcx); // Push return address.
+
+ // Patch the caller to an appropriate specialized stub and return the
+ // operation result to the caller of the stub.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
+ 5,
+ 1);
+}
+
+
+void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
+ switch (operands_type_) {
+ case TRBinaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
+ break;
+ case TRBinaryOpIC::SMI:
+ GenerateSmiStub(masm);
+ break;
+ case TRBinaryOpIC::INT32:
+ GenerateInt32Stub(masm);
+ break;
+ case TRBinaryOpIC::HEAP_NUMBER:
+ GenerateHeapNumberStub(masm);
+ break;
+ case TRBinaryOpIC::STRING:
+ GenerateStringStub(masm);
+ break;
+ case TRBinaryOpIC::GENERIC:
+ GenerateGeneric(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+const char* TypeRecordingBinaryOpStub::GetName() {
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "TypeRecordingBinaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ TRBinaryOpIC::GetName(operands_type_));
+ return name_;
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+ Label* slow,
+ SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
UNIMPLEMENTED();
- return Handle<Code>::null();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+ Label call_runtime;
+
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ break;
+ case Token::MOD:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR:
+ GenerateRegisterArgsPush(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
+ result_type_ == TRBinaryOpIC::SMI) {
+ GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
+ } else {
+ GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ }
+ __ bind(&call_runtime);
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ GenerateTypeTransition(masm);
+ break;
+ case Token::MOD:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR:
+ GenerateTypeTransitionWithSavedArgs(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+ UNIMPLEMENTED();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+ UNIMPLEMENTED();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+ UNIMPLEMENTED();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+ UNIMPLEMENTED();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
+ MacroAssembler* masm,
+ Label* alloc_failure) {
+ UNIMPLEMENTED();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ __ pop(rcx);
+ __ push(rdx);
+ __ push(rax);
+ __ push(rcx);
}
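
The transition stubs above share one calling convention: pop the return address, push the two operands and three Smi-tagged descriptors (minor key, operation, operand type info), push the return address back, then tail-call the five-argument patch utility, which installs a more specialized stub at the call site. Generate() dispatches on the recorded operand type. A minimal standalone sketch of that dispatch (illustrative names and semantics, not V8 code):

#include <cstdio>

// Mirrors TRBinaryOpIC::TypeInfo as used in Generate() above.
enum TypeInfo { UNINITIALIZED, SMI, INT32, HEAP_NUMBER, STRING, GENERIC };

// Each recorded operand type selects a progressively more general path.
const char* PathFor(TypeInfo t) {
  switch (t) {
    case UNINITIALIZED: return "type transition (patch the caller)";
    case SMI:           return "smi fast path, transition on failure";
    case INT32:         return "int32 path";
    case HEAP_NUMBER:   return "heap number path";
    case STRING:        return "string path";
    case GENERIC:       return "generic path (no further transitions)";
  }
  return "unreachable";
}

int main() {
  for (TypeInfo t : {UNINITIALIZED, SMI, GENERIC}) {
    std::printf("state %d -> %s\n", static_cast<int>(t), PathFor(t));
  }
  return 0;
}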
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index 0fe4f8ad..5056f348 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -131,7 +131,7 @@ class GenericBinaryOpStub: public CodeStub {
#ifdef DEBUG
void Print() {
PrintF("GenericBinaryOpStub %d (op %s), "
- "(mode %d, flags %d, registers %d, reversed %d, only_numbers %s)\n",
+ "(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
@@ -200,6 +200,104 @@ class GenericBinaryOpStub: public CodeStub {
friend class CodeGenerator;
};
+
+class TypeRecordingBinaryOpStub: public CodeStub {
+ public:
+ TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
+ : op_(op),
+ mode_(mode),
+ operands_type_(TRBinaryOpIC::UNINITIALIZED),
+ result_type_(TRBinaryOpIC::UNINITIALIZED),
+ name_(NULL) {
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+ }
+
+ TypeRecordingBinaryOpStub(
+ int key,
+ TRBinaryOpIC::TypeInfo operands_type,
+ TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ operands_type_(operands_type),
+ result_type_(result_type),
+ name_(NULL) { }
+
+ private:
+ enum SmiCodeGenerateHeapNumberResults {
+ ALLOW_HEAPNUMBER_RESULTS,
+ NO_HEAPNUMBER_RESULTS
+ };
+
+ Token::Value op_;
+ OverwriteMode mode_;
+
+ // Operand type information determined at runtime.
+ TRBinaryOpIC::TypeInfo operands_type_;
+ TRBinaryOpIC::TypeInfo result_type_;
+
+ char* name_;
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("TypeRecordingBinaryOpStub %d (op %s), "
+ "(mode %d, runtime_type_info %s)\n",
+ MinorKey(),
+ Token::String(op_),
+ static_cast<int>(mode_),
+ TRBinaryOpIC::GetName(operands_type_));
+ }
+#endif
+
+ // Minor key encoding in 15 bits RRRTTTOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 7> {};
+ class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 9, 3> {};
+ class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 12, 3> {};
+
+ Major MajorKey() { return TypeRecordingBinaryOp; }
+ int MinorKey() {
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | OperandTypeInfoBits::encode(operands_type_)
+ | ResultTypeInfoBits::encode(result_type_);
+ }
+
+ void Generate(MacroAssembler* masm);
+ void GenerateGeneric(MacroAssembler* masm);
+ void GenerateSmiCode(MacroAssembler* masm,
+ Label* slow,
+ SmiCodeGenerateHeapNumberResults heapnumber_results);
+ void GenerateLoadArguments(MacroAssembler* masm);
+ void GenerateReturn(MacroAssembler* masm);
+ void GenerateUninitializedStub(MacroAssembler* masm);
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateInt32Stub(MacroAssembler* masm);
+ void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateStringStub(MacroAssembler* masm);
+ void GenerateGenericStub(MacroAssembler* masm);
+
+ void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateTypeTransition(MacroAssembler* masm);
+ void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
+
+ virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return TRBinaryOpIC::ToState(operands_type_);
+ }
+
+ virtual void FinishCode(Code* code) {
+ code->set_type_recording_binary_op_type(operands_type_);
+ code->set_type_recording_binary_op_result_type(result_type_);
+ }
+
+ friend class CodeGenerator;
+};
+
+
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
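
The MinorKey() above packs four fields into 15 bits (RRRTTTOOOOOOOMM). A minimal sketch of the same layout with plain shifts and masks in place of V8's BitField template; the field positions come from the header, everything else is illustrative:

#include <cassert>
#include <cstdint>

// mode: bits 0..1, op: bits 2..8, operand type: bits 9..11,
// result type: bits 12..14.
uint32_t EncodeKey(uint32_t mode, uint32_t op,
                   uint32_t operands_type, uint32_t result_type) {
  assert(mode < (1u << 2) && op < (1u << 7));
  assert(operands_type < (1u << 3) && result_type < (1u << 3));
  return mode | (op << 2) | (operands_type << 9) | (result_type << 12);
}

int main() {
  uint32_t key = EncodeKey(1, 42, 3, 2);
  assert((key & 0x3u) == 1);           // ModeBits
  assert(((key >> 2) & 0x7Fu) == 42);  // OpBits
  assert(((key >> 9) & 0x7u) == 3);    // OperandTypeInfoBits
  assert(((key >> 12) & 0x7u) == 2);   // ResultTypeInfoBits
  return 0;
}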
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index aa5d3357..a543a504 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -627,10 +627,10 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
Comment cmnt(masm_, "[ store arguments object");
if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
- // When using lazy arguments allocation, we store the hole value
+ // When using lazy arguments allocation, we store the arguments marker value
// as a sentinel indicating that the arguments object hasn't been
// allocated yet.
- frame_->Push(Factory::the_hole_value());
+ frame_->Push(Factory::arguments_marker());
} else {
ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
frame_->PushFunction();
@@ -655,9 +655,9 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
if (probe.is_constant()) {
// We have to skip updating the arguments object if it has
// been assigned a proper value.
- skip_arguments = !probe.handle()->IsTheHole();
+ skip_arguments = !probe.handle()->IsArgumentsMarker();
} else {
- __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(probe.reg(), Heap::kArgumentsMarkerRootIndex);
probe.Unuse();
done.Branch(not_equal);
}
@@ -2516,9 +2516,9 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
Label slow, done;
bool try_lazy = true;
if (probe.is_constant()) {
- try_lazy = probe.handle()->IsTheHole();
+ try_lazy = probe.handle()->IsArgumentsMarker();
} else {
- __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(probe.reg(), Heap::kArgumentsMarkerRootIndex);
probe.Unuse();
__ j(not_equal, &slow);
}
@@ -4417,7 +4417,7 @@ void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
// If the loaded value is a constant, we know if the arguments
// object has been lazily loaded yet.
if (value.is_constant()) {
- if (value.handle()->IsTheHole()) {
+ if (value.handle()->IsArgumentsMarker()) {
Result arguments = StoreArgumentsObject(false);
frame_->Push(&arguments);
} else {
@@ -4430,7 +4430,7 @@ void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
// indicates that we haven't loaded the arguments object yet, we
// need to do it now.
JumpTarget exit;
- __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(value.reg(), Heap::kArgumentsMarkerRootIndex);
frame_->Push(&value);
exit.Branch(not_equal);
Result arguments = StoreArgumentsObject(false);
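
This file's change swaps the hole value for a dedicated arguments marker as the lazy-allocation sentinel, so the sentinel can no longer be confused with other uses of the hole. A minimal sketch of the sentinel pattern itself (illustrative, not V8 code):

#include <cstdio>

struct Object {};

// A unique sentinel object, compared by address, standing in for
// the arguments marker root.
static Object kArgumentsMarker;

static Object* arguments_slot = &kArgumentsMarker;  // "not allocated yet"

Object* GetArgumentsObject() {
  if (arguments_slot == &kArgumentsMarker) {
    static Object real_arguments;
    arguments_slot = &real_arguments;  // allocate on first access
    std::puts("arguments object allocated lazily");
  }
  return arguments_slot;
}

int main() {
  GetArgumentsObject();  // allocates
  GetArgumentsObject();  // reuses
  return 0;
}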
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 66bc4ede..724a7c59 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -210,10 +210,17 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
__ j(above_equal, &ok);
StackCheckStub stub;
__ CallStub(&stub);
+ // Record a mapping of this PC offset to the OSR id. This is used to find
+ // the AST id from the unoptimized code in order to use it as a key into
+ // the deoptimization input data found in the optimized code.
+ RecordStackCheck(stmt->OsrEntryId());
+
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ // Record a mapping of the OSR id to this PC. This is used if the OSR
+ // entry becomes the target of a bailout. We don't expect it to be, but
+ // we want it to work if it is.
PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
- RecordStackCheck(stmt->OsrEntryId());
}
@@ -459,7 +466,10 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ true,
+ true_label_,
+ false_label_);
if (flag) {
if (true_label_ != fall_through_) __ jmp(true_label_);
} else {
@@ -555,6 +565,25 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
bool should_normalize,
Label* if_true,
Label* if_false) {
+ // Only prepare for bailouts before splits if we're in a test
+ // context. Otherwise, we let the Visit function deal with the
+ // preparation to avoid preparing with the same AST id twice.
+ if (!context()->IsTest() || !info_->IsOptimizable()) return;
+
+ NearLabel skip;
+ if (should_normalize) __ jmp(&skip);
+
+ ForwardBailoutStack* current = forward_bailout_stack_;
+ while (current != NULL) {
+ PrepareForBailout(current->expr(), state);
+ current = current->parent();
+ }
+
+ if (should_normalize) {
+ __ CompareRoot(rax, Heap::kTrueValueRootIndex);
+ Split(equal, if_true, if_false, NULL);
+ __ bind(&skip);
+ }
}
@@ -669,8 +698,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
SetStatementPosition(stmt);
+
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
ZoneList<CaseClause*>* clauses = stmt->cases();
CaseClause* default_clause = NULL; // Can occur anywhere in the list.
@@ -735,6 +766,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
}
__ bind(nested_statement.break_target());
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
}
@@ -1224,6 +1256,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ PrepareForBailoutForId(key->id(), NO_REGISTERS);
}
break;
}
@@ -1311,6 +1344,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Update the write barrier for the array store.
__ RecordWrite(rbx, offset, result_register(), rcx);
+
+ PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
if (result_saved) {
@@ -1355,17 +1390,34 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForStackValue(property->obj());
}
break;
- case KEYED_PROPERTY:
+ case KEYED_PROPERTY: {
if (expr->is_compound()) {
- VisitForStackValue(property->obj());
- VisitForAccumulatorValue(property->key());
+ if (property->is_arguments_access()) {
+ VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+ MemOperand slot_operand =
+ EmitSlotSearch(obj_proxy->var()->AsSlot(), rcx);
+ __ push(slot_operand);
+ __ Move(rax, property->key()->AsLiteral()->handle());
+ } else {
+ VisitForStackValue(property->obj());
+ VisitForAccumulatorValue(property->key());
+ }
__ movq(rdx, Operand(rsp, 0));
__ push(rax);
} else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
+ if (property->is_arguments_access()) {
+ VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+ MemOperand slot_operand =
+ EmitSlotSearch(obj_proxy->var()->AsSlot(), rcx);
+ __ push(slot_operand);
+ __ Push(property->key()->AsLiteral()->handle());
+ } else {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ }
}
break;
+ }
}
if (expr->is_compound()) {
@@ -1383,6 +1435,12 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
}
+ // For property compound assignments we need another deoptimization
+ // point after the property load.
+ if (property != NULL) {
+ PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+ }
+
Token::Value op = expr->binary_op();
ConstantOperand constant = ShouldInlineSmiCase(op)
? GetConstantOperand(op, expr->target(), expr->value())
@@ -1408,6 +1466,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
} else {
EmitBinaryOp(op, mode);
}
+ // Deoptimization point in case the binary operation may have side effects.
+ PrepareForBailout(expr->binary_operation(), TOS_REG);
} else {
VisitForAccumulatorValue(expr->value());
}
@@ -1420,6 +1480,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
expr->op());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
break;
case NAMED_PROPERTY:
@@ -1529,7 +1590,7 @@ void FullCodeGenerator::EmitBinaryOp(Token::Value op,
}
-void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_id) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
// Invalid left-hand sides are rewritten to have a 'throw
// ReferenceError' on the left-hand side.
if (!expr->IsValidLeftHandSide()) {
@@ -1577,6 +1638,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_id) {
break;
}
}
+ PrepareForBailoutForId(bailout_ast_id, TOS_REG);
context()->Plug(rax);
}
@@ -1688,6 +1750,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ pop(rax);
__ Drop(1);
}
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
}
@@ -1726,6 +1789,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ pop(rax);
}
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
}
@@ -1766,6 +1830,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop);
EmitCallIC(ic, mode);
+ RecordJSReturnSite(expr);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
context()->Plug(rax);
@@ -1799,6 +1864,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arg_count, in_loop);
__ movq(rcx, Operand(rsp, (arg_count + 1) * kPointerSize)); // Key.
EmitCallIC(ic, mode);
+ RecordJSReturnSite(expr);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, rax); // Drop the key still on the stack.
@@ -1819,6 +1885,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
__ CallStub(&stub);
+ RecordJSReturnSite(expr);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
@@ -1827,6 +1894,12 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
void FullCodeGenerator::VisitCall(Call* expr) {
+#ifdef DEBUG
+ // We want to verify that RecordJSReturnSite gets called on all paths
+ // through this function. Avoid early returns.
+ expr->return_is_recorded_ = false;
+#endif
+
Comment cmnt(masm_, "[ Call");
Expression* fun = expr->expression();
Variable* var = fun->AsVariableProxy()->AsVariable();
@@ -1834,7 +1907,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
if (var != NULL && var->is_possibly_eval()) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the
- // call. The we call the resolved function using the given
+ // call. Then we call the resolved function using the given
// arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -1871,6 +1944,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
__ CallStub(&stub);
+ RecordJSReturnSite(expr);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, rax);
@@ -1893,32 +1967,31 @@ void FullCodeGenerator::VisitCall(Call* expr) {
&done);
__ bind(&slow);
- // Call the runtime to find the function to call (returned in rax)
- // and the object holding it (returned in rdx).
- __ push(context_register());
- __ Push(var->name());
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ push(rax); // Function.
- __ push(rdx); // Receiver.
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- NearLabel call;
- __ jmp(&call);
- __ bind(&done);
- // Push function.
- __ push(rax);
- // Push global receiver.
+ }
+ // Call the runtime to find the function to call (returned in rax)
+ // and the object holding it (returned in rdx).
+ __ push(context_register());
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ push(rax); // Function.
+ __ push(rdx); // Receiver.
+
+ // If fast case code has been generated, emit code to push the
+ // function and receiver and have the slow path jump around this
+ // code.
+ if (done.is_linked()) {
+ NearLabel call;
+ __ jmp(&call);
+ __ bind(&done);
+ // Push function.
+ __ push(rax);
+ // Push global receiver.
__ movq(rbx, GlobalObjectOperand());
__ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
__ bind(&call);
- }
}
EmitCallWithStub(expr);
-
} else if (fun->AsProperty() != NULL) {
// Call to an object property.
Property* prop = fun->AsProperty();
@@ -1932,24 +2005,23 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else {
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
- // for a regular property use KeyedCallIC.
+ // for a regular property use keyed EmitCallIC.
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(prop->obj());
}
if (prop->is_synthetic()) {
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForAccumulatorValue(prop->key());
- __ movq(rdx, Operand(rsp, 0));
}
// Record source code position for IC call.
SetSourcePosition(prop->position());
+ __ pop(rdx); // We do not need to keep the receiver.
+
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
- // Pop receiver.
- __ pop(rbx);
// Push result (function).
__ push(rax);
- // Push receiver object on stack.
+ // Push Global receiver.
__ movq(rcx, GlobalObjectOperand());
__ push(FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
EmitCallWithStub(expr);
@@ -1960,7 +2032,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else {
// Call to some other expression. If the expression is an anonymous
// function literal not called in a loop, mark it as one that should
- // also use the fast code generator.
+ // also use the full code generator.
FunctionLiteral* lit = fun->AsFunctionLiteral();
if (lit != NULL &&
lit->name()->Equals(Heap::empty_string()) &&
@@ -1976,6 +2048,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Emit function call.
EmitCallWithStub(expr);
}
+
+#ifdef DEBUG
+ // RecordJSReturnSite should have been called.
+ ASSERT(expr->return_is_recorded_);
+#endif
}
@@ -2023,6 +2100,7 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ JumpIfSmi(rax, if_true);
__ jmp(if_false);
@@ -2042,6 +2120,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Condition non_negative_smi = masm()->CheckNonNegativeSmi(rax);
Split(non_negative_smi, if_true, if_false, fall_through);
@@ -2073,6 +2152,7 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
__ cmpq(rbx, Immediate(FIRST_JS_OBJECT_TYPE));
__ j(below, if_false);
__ cmpq(rbx, Immediate(LAST_JS_OBJECT_TYPE));
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(below_equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2093,6 +2173,7 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(above_equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2115,6 +2196,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
__ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(not_zero, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2137,6 +2219,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
// used in a few functions in runtime.js which should not normally be hit by
// this compiler.
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ jmp(if_false);
context()->Plug(if_true, if_false);
}
@@ -2156,6 +2239,7 @@ void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2176,6 +2260,7 @@ void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, JS_ARRAY_TYPE, rbx);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2196,6 +2281,7 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, JS_REGEXP_TYPE, rbx);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2227,6 +2313,7 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
__ bind(&check_frame_marker);
__ SmiCompare(Operand(rax, StandardFrameConstants::kMarkerOffset),
Smi::FromInt(StackFrame::CONSTRUCT));
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2249,6 +2336,7 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
__ pop(rbx);
__ cmpq(rax, rbx);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2822,6 +2910,7 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
__ testl(FieldOperand(rax, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ j(zero, if_true);
__ jmp(if_false);
@@ -2943,6 +3032,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// Notice that the labels are swapped.
context()->PrepareTest(&materialize_true, &materialize_false,
&if_false, &if_true, &fall_through);
+ if (context()->IsTest()) ForwardBailoutToChild(expr);
VisitForControl(expr->expression(), if_true, if_false, fall_through);
context()->Plug(if_false, if_true); // Labels swapped.
break;
@@ -3056,14 +3146,26 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ push(rax); // Copy of receiver, needed for later store.
EmitNamedPropertyLoad(prop);
} else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
+ if (prop->is_arguments_access()) {
+ VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
+ MemOperand slot_operand =
+ EmitSlotSearch(obj_proxy->var()->AsSlot(), rcx);
+ __ push(slot_operand);
+ __ Move(rax, prop->key()->AsLiteral()->handle());
+ } else {
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ }
__ movq(rdx, Operand(rsp, 0)); // Leave receiver on stack
__ push(rax); // Copy of key, needed for later store.
EmitKeyedPropertyLoad(prop);
}
}
+ // We need a second deoptimization point after loading the value
+ // in case evaluating the property load may have a side effect.
+ PrepareForBailout(expr->increment(), TOS_REG);
+
// Call ToNumber only if operand is not a smi.
NearLabel no_conversion;
Condition is_smi;
@@ -3133,6 +3235,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
{ EffectContext context(this);
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context.Plug(rax);
}
// For all contexts except kEffect: We have the result on
@@ -3144,6 +3247,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Perform the assignment as if via '='.
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
}
break;
@@ -3152,6 +3256,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ pop(rdx);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3166,6 +3271,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ pop(rdx);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3192,6 +3298,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
// Use a regular load, not a contextual load, to avoid a reference
// error.
EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ PrepareForBailout(expr, TOS_REG);
context()->Plug(rax);
} else if (proxy != NULL &&
proxy->var()->AsSlot() != NULL &&
@@ -3207,12 +3314,13 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
__ push(rsi);
__ Push(proxy->name());
__ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ PrepareForBailout(expr, TOS_REG);
__ bind(&done);
context()->Plug(rax);
} else {
// This expression cannot throw a reference error at the top level.
- Visit(expr);
+ context()->HandleExpression(expr);
}
}
@@ -3237,6 +3345,7 @@ bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
{ AccumulatorValueContext context(this);
VisitForTypeofValue(left_unary->expression());
}
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
if (check->Equals(Heap::number_symbol())) {
Condition is_smi = masm_->CheckSmi(rax);
@@ -3330,6 +3439,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::IN:
VisitForStackValue(expr->right());
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
Split(equal, if_true, if_false, fall_through);
break;
@@ -3338,6 +3448,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForStackValue(expr->right());
InstanceofStub stub(InstanceofStub::kNoFlags);
__ CallStub(&stub);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ testq(rax, rax);
// The stub returns 0 for true.
Split(zero, if_true, if_false, fall_through);
@@ -3396,6 +3507,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
: NO_COMPARE_FLAGS;
CompareStub stub(cc, strict, flags);
__ CallStub(&stub);
+
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ testq(rax, rax);
Split(cc, if_true, if_false, fall_through);
}
@@ -3417,6 +3530,7 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
&if_true, &if_false, &fall_through);
VisitForAccumulatorValue(expr->expression());
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ CompareRoot(rax, Heap::kNullValueRootIndex);
if (expr->is_strict()) {
Split(equal, if_true, if_false, fall_through);
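
Most of the additions above are PrepareForBailout*/RecordJSReturnSite/RecordStackCheck calls, which associate AST ids with PC offsets in the unoptimized code so that optimized code can deoptimize (or enter via OSR) at the matching point. A minimal sketch of the assumed shape of such a mapping (not V8's actual tables):

#include <cstdio>
#include <map>

int main() {
  // AST id -> PC offset in the unoptimized code (values are made up).
  std::map<int, int> pc_by_ast_id;
  pc_by_ast_id[17] = 0x42;   // e.g. recorded by RecordStackCheck(OsrEntryId)
  pc_by_ast_id[23] = 0x6e;   // e.g. recorded at an assignment's bailout id

  std::map<int, int>::const_iterator it = pc_by_ast_id.find(17);
  if (it != pc_by_ast_id.end()) {
    std::printf("resume AST id %d at pc offset %#x\n", it->first, it->second);
  }
  return 0;
}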
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
new file mode 100644
index 00000000..8afa9d47
--- /dev/null
+++ b/src/x64/lithium-x64.cc
@@ -0,0 +1,71 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "x64/lithium-x64.h"
+#include "x64/lithium-codegen-x64.h"
+
+namespace v8 {
+namespace internal {
+
+LChunk* LChunkBuilder::Build() {
+ ASSERT(is_unused());
+ chunk_ = new LChunk(graph());
+ HPhase phase("Building chunk", chunk_);
+ status_ = BUILDING;
+ const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+ for (int i = 0; i < blocks->length(); i++) {
+ HBasicBlock* next = NULL;
+ if (i < blocks->length() - 1) next = blocks->at(i + 1);
+ DoBasicBlock(blocks->at(i), next);
+ if (is_aborted()) return NULL;
+ }
+ status_ = DONE;
+ return chunk_;
+}
+
+
+void LChunkBuilder::Abort(const char* format, ...) {
+ if (FLAG_trace_bailout) {
+ SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
+ PrintF("Aborting LChunk building in @\"%s\": ", *debug_name);
+ va_list arguments;
+ va_start(arguments, format);
+ OS::VPrint(format, arguments);
+ va_end(arguments);
+ PrintF("\n");
+ }
+ status_ = ABORTED;
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+ ASSERT(is_building());
+ Abort("Lithium not implemented on x64.");
+}
+
+
+} } // namespace v8::internal
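
Build() above follows a small status protocol: it may run only once (UNUSED to BUILDING), and any Abort() during block processing, which is all DoBasicBlock does on x64 so far, makes it return NULL instead of a chunk. A minimal sketch of that protocol (illustrative names, not V8 code):

#include <cassert>

enum Status { UNUSED, BUILDING, DONE, ABORTED };

struct Builder {
  Status status;
  Builder() : status(UNUSED) {}
  void Abort() { status = ABORTED; }  // "Lithium not implemented on x64."
  bool Build() {
    assert(status == UNUSED);  // a builder is single-use
    status = BUILDING;
    Abort();                   // every block aborts in this port
    if (status == ABORTED) return false;  // caller receives NULL
    status = DONE;
    return true;
  }
};

int main() {
  Builder b;
  assert(!b.Build());  // Lithium is not implemented on x64 yet
  return 0;
}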
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index f66ec168..fcab2356 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,6 +30,7 @@
#include "hydrogen.h"
#include "lithium-allocator.h"
+#include "lithium.h"
#include "safepoint-table.h"
namespace v8 {
@@ -45,6 +46,9 @@ class LInstruction: public ZoneObject {
LInstruction() { }
virtual ~LInstruction() { }
+ virtual void PrintTo(StringStream* stream) const { UNIMPLEMENTED(); }
+ virtual void PrintDataTo(StringStream* stream) const { }
+
// Predicates should be generated by macro as in lithium-ia32.h.
virtual bool IsLabel() const {
UNIMPLEMENTED();
@@ -55,23 +59,43 @@ class LInstruction: public ZoneObject {
return false;
}
- LPointerMap* pointer_map() const {
- UNIMPLEMENTED();
- return NULL;
- }
+ void set_environment(LEnvironment* env) { environment_.set(env); }
+ LEnvironment* environment() const { return environment_.get(); }
+ bool HasEnvironment() const { return environment_.is_set(); }
- bool HasPointerMap() const {
- UNIMPLEMENTED();
- return false;
+ void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+ LPointerMap* pointer_map() const { return pointer_map_.get(); }
+ bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+ void set_result(LOperand* operand) { result_.set(operand); }
+ LOperand* result() const { return result_.get(); }
+ bool HasResult() const { return result_.is_set(); }
+
+ void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+ HValue* hydrogen_value() const { return hydrogen_value_; }
+
+ void set_deoptimization_environment(LEnvironment* env) {
+ deoptimization_environment_.set(env);
+ }
+ LEnvironment* deoptimization_environment() const {
+ return deoptimization_environment_.get();
+ }
+ bool HasDeoptimizationEnvironment() const {
+ return deoptimization_environment_.is_set();
}
- virtual void PrintTo(StringStream* stream) const { UNIMPLEMENTED(); }
+ private:
+ SetOncePointer<LEnvironment> environment_;
+ SetOncePointer<LPointerMap> pointer_map_;
+ SetOncePointer<LOperand> result_;
+ HValue* hydrogen_value_;
+ SetOncePointer<LEnvironment> deoptimization_environment_;
};
class LParallelMove : public ZoneObject {
public:
- LParallelMove() { }
+ LParallelMove() : move_operands_(4) { }
void AddMove(LOperand* from, LOperand* to) {
UNIMPLEMENTED();
@@ -81,6 +105,9 @@ class LParallelMove : public ZoneObject {
UNIMPLEMENTED();
return NULL;
}
+
+ private:
+ ZoneList<LMoveOperands> move_operands_;
};
@@ -111,12 +138,20 @@ class LGap: public LInstruction {
UNIMPLEMENTED();
return NULL;
}
+
+ private:
+ LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+ HBasicBlock* block_;
};
class LLabel: public LGap {
public:
explicit LLabel(HBasicBlock* block) : LGap(block) { }
+
+ private:
+ Label label_;
+ LLabel* replacement_;
};
@@ -144,12 +179,21 @@ class LOsrEntry: public LInstruction {
LOperand* spill_operand) {
UNIMPLEMENTED();
}
+
+ private:
+ // Arrays of spill slot operands for registers with an assigned spill
+ // slot, i.e., that must also be restored to the spill slot on OSR entry.
+ // NULL if the register has no assigned spill slot. Indexed by allocation
+ // index.
+ LOperand* register_spills_[Register::kNumAllocatableRegisters];
+ LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
};
class LPointerMap: public ZoneObject {
public:
- explicit LPointerMap(int position) { }
+ explicit LPointerMap(int position)
+ : pointer_operands_(8), position_(position), lithium_position_(-1) { }
int lithium_position() const {
UNIMPLEMENTED();
@@ -157,21 +201,80 @@ class LPointerMap: public ZoneObject {
}
void RecordPointer(LOperand* op) { UNIMPLEMENTED(); }
+
+ private:
+ ZoneList<LOperand*> pointer_operands_;
+ int position_;
+ int lithium_position_;
};
-class LChunk: public ZoneObject {
+class LEnvironment: public ZoneObject {
public:
- explicit LChunk(HGraph* graph) { }
-
- HGraph* graph() const {
- UNIMPLEMENTED();
- return NULL;
+ LEnvironment(Handle<JSFunction> closure,
+ int ast_id,
+ int parameter_count,
+ int argument_count,
+ int value_count,
+ LEnvironment* outer)
+ : closure_(closure),
+ arguments_stack_height_(argument_count),
+ deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
+ translation_index_(-1),
+ ast_id_(ast_id),
+ parameter_count_(parameter_count),
+ values_(value_count),
+ representations_(value_count),
+ spilled_registers_(NULL),
+ spilled_double_registers_(NULL),
+ outer_(outer) {
}
- const ZoneList<LPointerMap*>* pointer_maps() const {
- UNIMPLEMENTED();
- return NULL;
+ Handle<JSFunction> closure() const { return closure_; }
+ int arguments_stack_height() const { return arguments_stack_height_; }
+ int deoptimization_index() const { return deoptimization_index_; }
+ int translation_index() const { return translation_index_; }
+ int ast_id() const { return ast_id_; }
+ int parameter_count() const { return parameter_count_; }
+ const ZoneList<LOperand*>* values() const { return &values_; }
+ LEnvironment* outer() const { return outer_; }
+
+ private:
+ Handle<JSFunction> closure_;
+ int arguments_stack_height_;
+ int deoptimization_index_;
+ int translation_index_;
+ int ast_id_;
+ int parameter_count_;
+ ZoneList<LOperand*> values_;
+ ZoneList<Representation> representations_;
+
+ // Allocation index indexed arrays of spill slot operands for registers
+ // that are also in spill slots at an OSR entry. NULL for environments
+ // that do not correspond to an OSR entry.
+ LOperand** spilled_registers_;
+ LOperand** spilled_double_registers_;
+
+ LEnvironment* outer_;
+};
+
+
+class LChunkBuilder;
+class LChunk: public ZoneObject {
+ public:
+ explicit LChunk(HGraph* graph)
+ : spill_slot_count_(0),
+ graph_(graph),
+ instructions_(32),
+ pointer_maps_(8),
+ inlined_closures_(1) { }
+
+ int spill_slot_count() const { return spill_slot_count_; }
+ HGraph* graph() const { return graph_; }
+ const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
+ const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
+ const ZoneList<Handle<JSFunction> >* inlined_closures() const {
+ return &inlined_closures_;
}
LOperand* GetNextSpillSlot(bool double_slot) {
@@ -189,11 +292,6 @@ class LChunk: public ZoneObject {
return NULL;
}
- const ZoneList<LInstruction*>* instructions() const {
- UNIMPLEMENTED();
- return NULL;
- }
-
int GetParameterStackSlot(int index) const {
UNIMPLEMENTED();
return 0;
@@ -219,20 +317,35 @@ class LChunk: public ZoneObject {
void MarkEmptyBlocks() { UNIMPLEMENTED(); }
#ifdef DEBUG
- void Verify() { UNIMPLEMENTED(); }
+ void Verify() { }
#endif
+
+ private:
+ int spill_slot_count_;
+ HGraph* const graph_;
+ ZoneList<LInstruction*> instructions_;
+ ZoneList<LPointerMap*> pointer_maps_;
+ ZoneList<Handle<JSFunction> > inlined_closures_;
};
class LChunkBuilder BASE_EMBEDDED {
public:
- LChunkBuilder(HGraph* graph, LAllocator* allocator) { }
+ LChunkBuilder(HGraph* graph, LAllocator* allocator)
+ : chunk_(NULL),
+ graph_(graph),
+ status_(UNUSED),
+ current_instruction_(NULL),
+ current_block_(NULL),
+ next_block_(NULL),
+ argument_count_(0),
+ allocator_(allocator),
+ position_(RelocInfo::kNoPosition),
+ instructions_pending_deoptimization_environment_(NULL),
+ pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
// Build the sequence for the graph.
- LChunk* Build() {
- UNIMPLEMENTED();
- return NULL;
- };
+ LChunk* Build();
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node) { \
@@ -242,6 +355,38 @@ class LChunkBuilder BASE_EMBEDDED {
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
+ private:
+ enum Status {
+ UNUSED,
+ BUILDING,
+ DONE,
+ ABORTED
+ };
+
+ LChunk* chunk() const { return chunk_; }
+ HGraph* graph() const { return graph_; }
+
+ bool is_unused() const { return status_ == UNUSED; }
+ bool is_building() const { return status_ == BUILDING; }
+ bool is_done() const { return status_ == DONE; }
+ bool is_aborted() const { return status_ == ABORTED; }
+
+ void Abort(const char* format, ...);
+
+ void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+
+ LChunk* chunk_;
+ HGraph* const graph_;
+ Status status_;
+ HInstruction* current_instruction_;
+ HBasicBlock* current_block_;
+ HBasicBlock* next_block_;
+ int argument_count_;
+ LAllocator* allocator_;
+ int position_;
+ LInstruction* instructions_pending_deoptimization_environment_;
+ int pending_deoptimization_ast_id_;
+
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
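
LInstruction above keeps its environment, pointer map, and result in SetOncePointer fields. A minimal sketch of the contract assumed here, a pointer that is assigned at most once and can be queried for whether it has been set (assumed semantics, not V8's implementation):

#include <cassert>
#include <cstddef>

template <typename T>
class SetOncePointer {
 public:
  SetOncePointer() : ptr_(NULL) {}
  bool is_set() const { return ptr_ != NULL; }
  void set(T* value) {
    assert(ptr_ == NULL && value != NULL);  // at most one assignment
    ptr_ = value;
  }
  T* get() const {
    assert(ptr_ != NULL);  // must be set before reading
    return ptr_;
  }
 private:
  T* ptr_;
};

int main() {
  SetOncePointer<int> p;
  assert(!p.is_set());
  int env = 7;
  p.set(&env);
  assert(p.is_set() && *p.get() == 7);
  return 0;
}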
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 70a3dab6..2846fe26 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -288,7 +288,7 @@ void MacroAssembler::Abort(const char* msg) {
}
#endif
// Disable stub call restrictions to always allow calls to abort.
- set_allow_stub_calls(true);
+ AllowStubCallsScope allow_scope(this, true);
push(rax);
movq(kScratchRegister, p0, RelocInfo::NONE);
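
The fix above replaces a permanent set_allow_stub_calls(true) with an RAII scope, so the flag is restored to its previous value when Abort() returns. A minimal sketch of the assumed shape of AllowStubCallsScope (illustrative, not V8's declaration):

#include <cassert>

struct Assembler {
  bool allow_stub_calls;
  Assembler() : allow_stub_calls(false) {}
};

class AllowStubCallsScope {
 public:
  AllowStubCallsScope(Assembler* masm, bool allow)
      : masm_(masm), previous_(masm->allow_stub_calls) {
    masm_->allow_stub_calls = allow;  // override for the scope's lifetime
  }
  ~AllowStubCallsScope() {
    masm_->allow_stub_calls = previous_;  // restore on exit
  }
 private:
  Assembler* masm_;
  bool previous_;
};

int main() {
  Assembler masm;
  {
    AllowStubCallsScope scope(&masm, true);
    assert(masm.allow_stub_calls);  // stub calls permitted inside Abort()
  }
  assert(!masm.allow_stub_calls);   // flag restored afterwards
  return 0;
}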