path: root/src/x64
author     Leon Clarke <leonclarke@google.com>  2010-01-27 17:25:45 +0000
committer  Leon Clarke <leonclarke@google.com>  2010-01-27 17:31:21 +0000
commit     d91b9f7d46489a9ee00f9cb415630299c76a502b (patch)
tree       741552f95883bb7461cf7c1d36335cef68804a5b /src/x64
parent     eab96aab0834f21954b5d6aa6366bcfb348ed811 (diff)
download   v8-d91b9f7d46489a9ee00f9cb415630299c76a502b.tar.gz
Merge from v8 at revision 3723
Diffstat (limited to 'src/x64')
-rw-r--r--  src/x64/assembler-x64.cc                                                   44
-rw-r--r--  src/x64/assembler-x64.h                                                     8
-rw-r--r--  src/x64/codegen-x64.cc                                                   1383
-rw-r--r--  src/x64/codegen-x64.h                                                     126
-rw-r--r--  src/x64/disasm-x64.cc                                                      30
-rw-r--r--  src/x64/full-codegen-x64.cc (renamed from src/x64/fast-codegen-x64.cc)    201
-rw-r--r--  src/x64/ic-x64.cc                                                           7
-rw-r--r--  src/x64/macro-assembler-x64.cc                                             50
-rw-r--r--  src/x64/macro-assembler-x64.h                                              16
-rw-r--r--  src/x64/regexp-macro-assembler-x64.cc                                      16
-rw-r--r--  src/x64/regexp-macro-assembler-x64.h                                       14
-rw-r--r--  src/x64/simulator-x64.h                                                     4
12 files changed, 1253 insertions(+), 646 deletions(-)
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 4ac39339..9cfe98ab 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -1537,6 +1537,40 @@ void Assembler::movzxwl(Register dst, const Operand& src) {
}
+void Assembler::repmovsb() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF3);
+ emit(0xA4);
+}
+
+
+void Assembler::repmovsw() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66); // Operand size override.
+ emit(0xF3);
+ emit(0xA4);
+}
+
+
+void Assembler::repmovsl() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF3);
+ emit(0xA5);
+}
+
+
+void Assembler::repmovsq() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF3);
+ emit_rex_64();
+ emit(0xA5);
+}
+
+
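// ---- Editor's sketch (not part of this commit) -----------------------------
// The new emitters encode the REP-prefixed string moves. A caller is expected
// to set up the implicit operands itself; the hypothetical helper below shows
// one way a code generator might use repmovsq() to copy whole words, assuming
// the usual x64 convention of source in rsi, destination in rdi and count in
// rcx. The helper name and register shuffling are illustrative only.
static void EmitWordCopy(MacroAssembler* masm,
                         Register dst, Register src, Register count) {
  masm->movq(rdi, dst);    // REP MOVS writes through rdi...
  masm->movq(rsi, src);    // ...reads through rsi...
  masm->movq(rcx, count);  // ...and repeats rcx times.
  masm->repmovsq();        // Copy rcx quadwords from [rsi] to [rdi].
}
// -----------------------------------------------------------------------------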
void Assembler::mul(Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2079,6 +2113,16 @@ void Assembler::fisttp_s(const Operand& adr) {
}
+void Assembler::fisttp_d(const Operand& adr) {
+ ASSERT(CpuFeatures::IsEnabled(SSE3));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(adr);
+ emit(0xDD);
+ emit_operand(1, adr);
+}
+
+
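// ---- Editor's note (not part of this commit) --------------------------------
// fisttp_d stores the x87 top-of-stack as a truncated 64-bit signed integer
// and pops the register stack; it requires SSE3 (hence the ASSERT above).
// The IntegerConvert helper added to codegen-x64.cc later in this diff uses
// it exactly this way on the SSE3 path:
//
//   __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
//   __ subq(rsp, Immediate(sizeof(uint64_t)));   // Reserve space for result.
//   __ fisttp_d(Operand(rsp, 0));                // Truncate and store.
//   __ movl(rcx, Operand(rsp, 0));               // Low word of the answer.
//   __ addq(rsp, Immediate(sizeof(uint64_t)));
// -----------------------------------------------------------------------------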
void Assembler::fist_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 1bddb2fb..5d17edf8 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -574,6 +574,13 @@ class Assembler : public Malloced {
void movzxwq(Register dst, const Operand& src);
void movzxwl(Register dst, const Operand& src);
+ // Repeated moves.
+
+ void repmovsb();
+ void repmovsw();
+ void repmovsl();
+ void repmovsq();
+
// New x64 instruction to load from an immediate 64-bit pointer into RAX.
void load_rax(void* ptr, RelocInfo::Mode rmode);
void load_rax(ExternalReference ext);
@@ -1052,6 +1059,7 @@ class Assembler : public Malloced {
void fistp_d(const Operand& adr);
void fisttp_s(const Operand& adr);
+ void fisttp_d(const Operand& adr);
void fabs();
void fchs();
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 0cf68ebb..1a0138f9 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -224,20 +224,17 @@ class FloatingPointHelper : public AllStatic {
Register lhs,
Register rhs);
- // Code pattern for loading a floating point value and converting it
- // to a 32 bit integer. Input value must be either a smi or a heap number
- // object.
- // Returns operands as 32-bit sign extended integers in a general purpose
- // registers.
- static void LoadInt32Operand(MacroAssembler* masm,
- const Operand& src,
- Register dst);
-
// Test if operands are smi or number objects (fp). Requirements:
// operand_1 in rax, operand_2 in rdx; falls through on float or smi
// operands, jumps to the non_float label otherwise.
static void CheckNumberOperands(MacroAssembler* masm,
Label* non_float);
+
+ // Takes the operands in rdx and rax and loads them as integers in rax
+ // and rcx.
+ static void LoadAsIntegers(MacroAssembler* masm,
+ bool use_sse3,
+ Label* operand_conversion_failure);
};
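// ---- Editor's note (not part of this commit) --------------------------------
// LoadAsIntegers replaces the removed LoadInt32Operand: instead of converting
// one operand at a time, it takes the two operands in rdx and rax and leaves
// untagged 32-bit integers in rax and rcx. The bit-op path of
// GenericBinaryOpStub::Generate at the end of this diff calls it as
//
//   FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
//
// jumping to call_runtime if either operand cannot be converted.
// -----------------------------------------------------------------------------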
@@ -654,20 +651,29 @@ void DeferredReferenceSetKeyedValue::Generate() {
}
-void CodeGenerator::CallApplyLazy(Property* apply,
+void CodeGenerator::CallApplyLazy(Expression* applicand,
Expression* receiver,
VariableProxy* arguments,
int position) {
+ // An optimized implementation of expressions of the form
+ // x.apply(y, arguments).
+ // If the arguments object of the scope has not been allocated,
+ // and x.apply is Function.prototype.apply, this optimization
+ // just copies y and the arguments of the current function on the
+ // stack, as receiver and arguments, and calls x.
+ // In the implementation comments, we call x the applicand
+ // and y the receiver.
ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
ASSERT(arguments->IsArguments());
- JumpTarget slow, done;
-
- // Load the apply function onto the stack. This will usually
+ // Load applicand.apply onto the stack. This will usually
// give us a megamorphic load site. Not super, but it works.
- Reference ref(this, apply);
- ref.GetValue();
- ASSERT(ref.type() == Reference::NAMED);
+ Load(applicand);
+ Handle<String> name = Factory::LookupAsciiSymbol("apply");
+ frame()->Push(name);
+ Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
+ __ nop();
+ frame()->Push(&answer);
// Load the receiver and the existing arguments object onto the
// expression stack. Avoid allocating the arguments object here.
@@ -677,6 +683,11 @@ void CodeGenerator::CallApplyLazy(Property* apply,
// Emit the source position information after having loaded the
// receiver and the arguments.
CodeForSourcePosition(position);
+ // Contents of frame at this point:
+ // Frame[0]: arguments object of the current function or the hole.
+ // Frame[1]: receiver
+ // Frame[2]: applicand.apply
+ // Frame[3]: applicand.
// Check if the arguments object has been lazily allocated
// already. If so, just use that instead of copying the arguments
@@ -684,143 +695,149 @@ void CodeGenerator::CallApplyLazy(Property* apply,
// named 'arguments' has been introduced.
frame_->Dup();
Result probe = frame_->Pop();
- bool try_lazy = true;
- if (probe.is_constant()) {
- try_lazy = probe.handle()->IsTheHole();
- } else {
- __ Cmp(probe.reg(), Factory::the_hole_value());
- probe.Unuse();
- slow.Branch(not_equal);
- }
-
- if (try_lazy) {
- JumpTarget build_args;
-
- // Get rid of the arguments object probe.
- frame_->Drop();
-
- // Before messing with the execution stack, we sync all
- // elements. This is bound to happen anyway because we're
- // about to call a function.
- frame_->SyncRange(0, frame_->element_count() - 1);
+ { VirtualFrame::SpilledScope spilled_scope;
+ Label slow, done;
+ bool try_lazy = true;
+ if (probe.is_constant()) {
+ try_lazy = probe.handle()->IsTheHole();
+ } else {
+ __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
+ probe.Unuse();
+ __ j(not_equal, &slow);
+ }
- // Check that the receiver really is a JavaScript object.
- {
- frame_->PushElementAt(0);
- Result receiver = frame_->Pop();
- receiver.ToRegister();
- Condition is_smi = masm_->CheckSmi(receiver.reg());
- build_args.Branch(is_smi);
+ if (try_lazy) {
+ Label build_args;
+ // Get rid of the arguments object probe.
+ frame_->Drop(); // Can be called on a spilled frame.
+ // Stack now has 3 elements on it.
+ // Contents of stack at this point:
+ // rsp[0]: receiver
+ // rsp[1]: applicand.apply
+ // rsp[2]: applicand.
+
+ // Check that the receiver really is a JavaScript object.
+ __ movq(rax, Operand(rsp, 0));
+ Condition is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, &build_args);
// We allow all JSObjects including JSFunctions. As long as
// JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper
// bound.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister);
- build_args.Branch(below);
- }
-
- // Verify that we're invoking Function.prototype.apply.
- {
- frame_->PushElementAt(1);
- Result apply = frame_->Pop();
- apply.ToRegister();
- Condition is_smi = masm_->CheckSmi(apply.reg());
- build_args.Branch(is_smi);
- Result tmp = allocator_->Allocate();
- __ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
- build_args.Branch(not_equal);
- __ movq(tmp.reg(),
- FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(below, &build_args);
+
+ // Check that applicand.apply is Function.prototype.apply.
+ __ movq(rax, Operand(rsp, kPointerSize));
+ is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, &build_args);
+ __ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &build_args);
+ __ movq(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
- __ Cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
- apply_code);
- build_args.Branch(not_equal);
- }
-
- // Get the function receiver from the stack. Check that it
- // really is a function.
- __ movq(rdi, Operand(rsp, 2 * kPointerSize));
- Condition is_smi = masm_->CheckSmi(rdi);
- build_args.Branch(is_smi);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- build_args.Branch(not_equal);
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- Label invoke, adapted;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adapted);
-
- // No arguments adaptor frame. Copy fixed number of arguments.
- __ movq(rax, Immediate(scope_->num_parameters()));
- for (int i = 0; i < scope_->num_parameters(); i++) {
- __ push(frame_->ParameterAt(i));
+ __ Cmp(FieldOperand(rax, SharedFunctionInfo::kCodeOffset), apply_code);
+ __ j(not_equal, &build_args);
+
+ // Check that applicand is a function.
+ __ movq(rdi, Operand(rsp, 2 * kPointerSize));
+ is_smi = masm_->CheckSmi(rdi);
+ __ j(is_smi, &build_args);
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &build_args);
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ Label invoke, adapted;
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &adapted);
+
+ // No arguments adaptor frame. Copy fixed number of arguments.
+ __ movq(rax, Immediate(scope_->num_parameters()));
+ for (int i = 0; i < scope_->num_parameters(); i++) {
+ __ push(frame_->ParameterAt(i));
+ }
+ __ jmp(&invoke);
+
+ // Arguments adaptor frame present. Copy arguments from there, but
+ // avoid copying too many arguments to avoid stack overflows.
+ __ bind(&adapted);
+ static const uint32_t kArgumentsLimit = 1 * KB;
+ __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToInteger32(rax, rax);
+ __ movq(rcx, rax);
+ __ cmpq(rax, Immediate(kArgumentsLimit));
+ __ j(above, &build_args);
+
+ // Loop through the arguments pushing them onto the execution
+ // stack. We don't inform the virtual frame of the push, so we don't
+ // have to worry about getting rid of the elements from the virtual
+ // frame.
+ Label loop;
+ // rcx is a small non-negative integer, due to the test above.
+ __ testl(rcx, rcx);
+ __ j(zero, &invoke);
+ __ bind(&loop);
+ __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
+ __ decl(rcx);
+ __ j(not_zero, &loop);
+
+ // Invoke the function.
+ __ bind(&invoke);
+ ParameterCount actual(rax);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+ // Drop applicand.apply and applicand from the stack, and push
+ // the result of the function call, but leave the spilled frame
+ // unchanged, with 3 elements, so it is correct when we compile the
+ // slow-case code.
+ __ addq(rsp, Immediate(2 * kPointerSize));
+ __ push(rax);
+ // Stack now has 1 element:
+ // rsp[0]: result
+ __ jmp(&done);
+
+ // Slow-case: Allocate the arguments object since we know it isn't
+ // there, and fall-through to the slow-case where we call
+ // applicand.apply.
+ __ bind(&build_args);
+ // Stack now has 3 elements, because we have jumped here from a point where:
+ // rsp[0]: receiver
+ // rsp[1]: applicand.apply
+ // rsp[2]: applicand.
+
+ // StoreArgumentsObject requires a correct frame, and may modify it.
+ Result arguments_object = StoreArgumentsObject(false);
+ frame_->SpillAll();
+ arguments_object.ToRegister();
+ frame_->EmitPush(arguments_object.reg());
+ arguments_object.Unuse();
+ // Stack and frame now have 4 elements.
+ __ bind(&slow);
}
- __ jmp(&invoke);
-
- // Arguments adaptor frame present. Copy arguments from there, but
- // avoid copying too many arguments to avoid stack overflows.
- __ bind(&adapted);
- static const uint32_t kArgumentsLimit = 1 * KB;
- __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToInteger32(rax, rax);
- __ movq(rcx, rax);
- __ cmpq(rax, Immediate(kArgumentsLimit));
- build_args.Branch(above);
-
- // Loop through the arguments pushing them onto the execution
- // stack. We don't inform the virtual frame of the push, so we don't
- // have to worry about getting rid of the elements from the virtual
- // frame.
- Label loop;
- __ testl(rcx, rcx);
- __ j(zero, &invoke);
- __ bind(&loop);
- __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
- __ decl(rcx);
- __ j(not_zero, &loop);
-
- // Invoke the function. The virtual frame knows about the receiver
- // so make sure to forget that explicitly.
- __ bind(&invoke);
- ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION);
- frame_->Forget(1);
- Result result = allocator()->Allocate(rax);
- frame_->SetElementAt(0, &result);
- done.Jump();
-
- // Slow-case: Allocate the arguments object since we know it isn't
- // there, and fall-through to the slow-case where we call
- // Function.prototype.apply.
- build_args.Bind();
- Result arguments_object = StoreArgumentsObject(false);
- frame_->Push(&arguments_object);
- slow.Bind();
- }
- // Flip the apply function and the function to call on the stack, so
- // the function looks like the receiver of the apply call. This way,
- // the generic Function.prototype.apply implementation can deal with
- // the call like it usually does.
- Result a2 = frame_->Pop();
- Result a1 = frame_->Pop();
- Result ap = frame_->Pop();
- Result fn = frame_->Pop();
- frame_->Push(&ap);
- frame_->Push(&fn);
- frame_->Push(&a1);
- frame_->Push(&a2);
- CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
- Result res = frame_->CallStub(&call_function, 3);
- frame_->Push(&res);
-
- // All done. Restore context register after call.
- if (try_lazy) done.Bind();
+ // Generic computation of x.apply(y, args) with no special optimization.
+ // Flip applicand.apply and applicand on the stack, so
+ // applicand looks like the receiver of the applicand.apply call.
+ // Then process it as a normal function call.
+ __ movq(rax, Operand(rsp, 3 * kPointerSize));
+ __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+ __ movq(Operand(rsp, 2 * kPointerSize), rax);
+ __ movq(Operand(rsp, 3 * kPointerSize), rbx);
+
+ CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
+ Result res = frame_->CallStub(&call_function, 3);
+ // The function and its two arguments have been dropped.
+ frame_->Drop(1); // Drop the receiver as well.
+ res.ToRegister();
+ frame_->EmitPush(res.reg());
+ // Stack now has 1 element:
+ // rsp[0]: result
+ if (try_lazy) __ bind(&done);
+ } // End of spilled scope.
+ // Restore the context register after a call.
frame_->RestoreContextRegister();
}
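// ---- Editor's note (not part of this commit) --------------------------------
// The rewritten CallApplyLazy targets the pattern described in its opening
// comment, e.g. (JavaScript, illustrative only):
//
//   function f(a, b) { return target.apply(obj, arguments); }
//
// When the arguments object has not been materialized and target.apply is
// Function.prototype.apply, the generated code pushes obj and the current
// frame's parameters directly and invokes target, so no arguments object is
// allocated on the fast path.
// -----------------------------------------------------------------------------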
@@ -1817,28 +1834,20 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
if (!each.is_illegal()) {
if (each.size() > 0) {
frame_->EmitPush(frame_->ElementAt(each.size()));
- }
- // If the reference was to a slot we rely on the convenient property
- // that it doesn't matter whether a value (eg, ebx pushed above) is
- // right on top of or right underneath a zero-sized reference.
- each.SetValue(NOT_CONST_INIT);
- if (each.size() > 0) {
- // It's safe to pop the value lying on top of the reference before
- // unloading the reference itself (which preserves the top of stack,
- // ie, now the topmost value of the non-zero sized reference), since
- // we will discard the top of stack after unloading the reference
- // anyway.
- frame_->Drop();
+ each.SetValue(NOT_CONST_INIT);
+ frame_->Drop(2); // Drop the original and the copy of the element.
+ } else {
+ // If the reference has size zero then we can use the value below
+ // the reference as if it were above the reference, instead of pushing
+ // a new copy of it above the reference.
+ each.SetValue(NOT_CONST_INIT);
+ frame_->Drop(); // Drop the original of the element.
}
}
}
// Unloading a reference may leave the frame in an unspilled state.
frame_->SpillAll();
- // Discard the i'th entry pushed above or else the remainder of the
- // reference, whichever is currently on top of the stack.
- frame_->Drop();
-
// Body.
CheckStack(); // TODO(1222600): ignore if body contains calls.
VisitAndSpill(node->body());
@@ -2549,7 +2558,7 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
void CodeGenerator::VisitAssignment(Assignment* node) {
Comment cmnt(masm_, "[ Assignment");
- { Reference target(this, node->target());
+ { Reference target(this, node->target(), node->is_compound());
if (target.is_illegal()) {
// Fool the virtual frame into thinking that we left the assignment's
// value on the frame.
@@ -2571,12 +2580,27 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
frame_->PushElementAt(target.size() - 1);
Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
}
+ if (node->ends_initialization_block()) {
+ // Add an extra copy of the receiver to the frame, so that it can be
+ // converted back to fast case after the assignment.
+ ASSERT(target.type() == Reference::NAMED ||
+ target.type() == Reference::KEYED);
+ if (target.type() == Reference::NAMED) {
+ frame_->Dup();
+ // Dup target receiver on stack.
+ } else {
+ ASSERT(target.type() == Reference::KEYED);
+ Result temp = frame_->Pop();
+ frame_->Dup();
+ frame_->Push(&temp);
+ }
+ }
if (node->op() == Token::ASSIGN ||
node->op() == Token::INIT_VAR ||
node->op() == Token::INIT_CONST) {
Load(node->value());
- } else {
+ } else { // Assignment is a compound assignment.
Literal* literal = node->value()->AsLiteral();
bool overwrite_value =
(node->value()->AsBinaryOperation() != NULL &&
@@ -2602,6 +2626,7 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
var->mode() == Variable::CONST &&
node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
// Assignment ignored - leave the value on the stack.
+ UnloadReference(&target);
} else {
CodeForSourcePosition(node->position());
if (node->op() == Token::INIT_CONST) {
@@ -2613,13 +2638,15 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
target.SetValue(NOT_CONST_INIT);
}
if (node->ends_initialization_block()) {
- ASSERT(target.type() == Reference::NAMED ||
- target.type() == Reference::KEYED);
+ ASSERT(target.type() == Reference::UNLOADED);
// End of initialization block. Revert to fast case. The
- // argument to the runtime call is the receiver, which is the
- // first value pushed as part of the reference, which is below
- // the lhs value.
- frame_->PushElementAt(target.size());
+ // argument to the runtime call is the extra copy of the receiver,
+ // which is below the value of the assignment.
+ // Swap the receiver and the value of the assignment expression.
+ Result lhs = frame_->Pop();
+ Result receiver = frame_->Pop();
+ frame_->Push(&lhs);
+ frame_->Push(&receiver);
Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
}
}
@@ -2787,7 +2814,7 @@ void CodeGenerator::VisitCall(Call* node) {
args->at(1)->AsVariableProxy()->IsArguments()) {
// Use the optimized Function.prototype.apply that avoids
// allocating lazily allocated arguments objects.
- CallApplyLazy(property,
+ CallApplyLazy(property->obj(),
args->at(0),
args->at(1)->AsVariableProxy(),
node->position());
@@ -2819,16 +2846,24 @@ void CodeGenerator::VisitCall(Call* node) {
// -------------------------------------------
// Load the function to call from the property through a reference.
- Reference ref(this, property);
- ref.GetValue();
-
- // Pass receiver to called function.
if (property->is_synthetic()) {
+ Reference ref(this, property, false);
+ ref.GetValue();
// Use global object as receiver.
LoadGlobalReceiver();
} else {
- // The reference's size is non-negative.
- frame_->PushElementAt(ref.size());
+ Reference ref(this, property, false);
+ ASSERT(ref.size() == 2);
+ Result key = frame_->Pop();
+ frame_->Dup(); // Duplicate the receiver.
+ frame_->Push(&key);
+ ref.GetValue();
+ // Top of frame contains function to call, with duplicate copy of
+ // receiver below it. Swap them.
+ Result function = frame_->Pop();
+ Result receiver = frame_->Pop();
+ frame_->Push(&function);
+ frame_->Push(&receiver);
}
// Call the function.
@@ -3012,6 +3047,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
}
} else {
+ bool overwrite =
+ (node->expression()->AsBinaryOperation() != NULL &&
+ node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
Load(node->expression());
switch (op) {
case Token::NOT:
@@ -3021,9 +3059,6 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
break;
case Token::SUB: {
- bool overwrite =
- (node->expression()->AsBinaryOperation() != NULL &&
- node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
GenericUnaryOpStub stub(Token::SUB, overwrite);
// TODO(1222589): remove dependency of TOS being cached inside stub
Result operand = frame_->Pop();
@@ -3042,10 +3077,10 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
Condition is_smi = masm_->CheckSmi(operand.reg());
smi_label.Branch(is_smi, &operand);
- frame_->Push(&operand); // undo popping of TOS
- Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT,
- CALL_FUNCTION, 1);
+ GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+ Result answer = frame_->CallStub(&stub, &operand);
continue_label.Jump(&answer);
+
smi_label.Bind(&answer);
answer.ToRegister();
frame_->Spill(answer.reg());
@@ -3167,7 +3202,9 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
// value will be in the frame to be spilled.
if (is_postfix) frame_->Push(Smi::FromInt(0));
- { Reference target(this, node->expression());
+ // A constant reference is not saved to, so the reference is not a
+ // compound assignment reference.
+ { Reference target(this, node->expression(), !is_const);
if (target.is_illegal()) {
// Spoof the virtual frame to have the expected height (one higher
// than on entry).
@@ -3622,6 +3659,22 @@ void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ Condition is_smi = masm_->CheckSmi(obj.reg());
+ destination()->false_target()->Branch(is_smi);
+ __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ __ movzxbl(kScratchRegister,
+ FieldOperand(kScratchRegister, Map::kBitFieldOffset));
+ __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable));
+ obj.Unuse();
+ destination()->Split(not_zero);
+}
+
+
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
@@ -3926,7 +3979,8 @@ void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
Load(args->at(1));
Load(args->at(2));
- Result answer = frame_->CallRuntime(Runtime::kSubString, 3);
+ SubStringStub stub;
+ Result answer = frame_->CallStub(&stub, 3);
frame_->Push(&answer);
}
@@ -4239,14 +4293,19 @@ bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
//------------------------------------------------------------------------------
// CodeGenerator implementation of variables, lookups, and stores.
-Reference::Reference(CodeGenerator* cgen, Expression* expression)
- : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
+Reference::Reference(CodeGenerator* cgen,
+ Expression* expression,
+ bool persist_after_get)
+ : cgen_(cgen),
+ expression_(expression),
+ type_(ILLEGAL),
+ persist_after_get_(persist_after_get) {
cgen->LoadReference(this);
}
Reference::~Reference() {
- cgen_->UnloadReference(this);
+ ASSERT(is_unloaded() || is_illegal());
}
@@ -4296,6 +4355,7 @@ void CodeGenerator::UnloadReference(Reference* ref) {
// Pop a reference from the stack while preserving TOS.
Comment cmnt(masm_, "[ UnloadReference");
frame_->Nip(ref->size());
+ ref->set_unloaded();
}
@@ -5014,31 +5074,6 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
return;
}
- // Set the flags based on the operation, type and loop nesting level.
- GenericBinaryFlags flags;
- switch (op) {
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- // Bit operations always assume they likely operate on Smis. Still only
- // generate the inline Smi check code if this operation is part of a loop.
- flags = (loop_nesting() > 0)
- ? NO_SMI_CODE_IN_STUB
- : NO_GENERIC_BINARY_FLAGS;
- break;
-
- default:
- // By default only inline the Smi check code for likely smis if this
- // operation is part of a loop.
- flags = ((loop_nesting() > 0) && type->IsLikelySmi())
- ? NO_SMI_CODE_IN_STUB
- : NO_GENERIC_BINARY_FLAGS;
- break;
- }
-
Result right = frame_->Pop();
Result left = frame_->Pop();
@@ -5072,7 +5107,6 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
- bool generate_no_smi_code = false; // No smi code at all, inline or in stub.
if (left_is_smi && right_is_smi) {
// Compute the constant result at compile time, and leave it on the frame.
@@ -5081,34 +5115,35 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
if (FoldConstantSmis(op, left_int, right_int)) return;
}
+ Result answer;
if (left_is_non_smi || right_is_non_smi) {
- // Set flag so that we go straight to the slow case, with no smi code.
- generate_no_smi_code = true;
+ // Go straight to the slow case, with no smi code
+ frame_->Push(&left);
+ frame_->Push(&right);
+ GenericBinaryOpStub stub(op, overwrite_mode, NO_SMI_CODE_IN_STUB);
+ answer = frame_->CallStub(&stub, 2);
} else if (right_is_smi) {
- ConstantSmiBinaryOperation(op, &left, right.handle(),
- type, false, overwrite_mode);
- return;
+ answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
+ type, false, overwrite_mode);
} else if (left_is_smi) {
- ConstantSmiBinaryOperation(op, &right, left.handle(),
- type, true, overwrite_mode);
- return;
- }
-
- if ((flags & NO_SMI_CODE_IN_STUB) != 0 && !generate_no_smi_code) {
- LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
+ answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
+ type, true, overwrite_mode);
} else {
- frame_->Push(&left);
- frame_->Push(&right);
- // If we know the arguments aren't smis, use the binary operation stub
- // that does not check for the fast smi case.
- // The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED.
- if (generate_no_smi_code) {
- flags = NO_SMI_CODE_IN_STUB;
+ // Set the flags based on the operation, type and loop nesting level.
+ // Bit operations always assume they likely operate on Smis. Still only
+ // generate the inline Smi check code if this operation is part of a loop.
+ // For all other operations only inline the Smi check code for likely smis
+ // if the operation is part of a loop.
+ if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
+ answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
+ } else {
+ frame_->Push(&left);
+ frame_->Push(&right);
+ GenericBinaryOpStub stub(op, overwrite_mode, NO_GENERIC_BINARY_FLAGS);
+ answer = frame_->CallStub(&stub, 2);
}
- GenericBinaryOpStub stub(op, overwrite_mode, flags);
- Result answer = frame_->CallStub(&stub, 2);
- frame_->Push(&answer);
}
+ frame_->Push(&answer);
}
@@ -5189,12 +5224,12 @@ void DeferredInlineSmiOperation::Generate() {
}
-void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
- Result* operand,
- Handle<Object> value,
- StaticType* type,
- bool reversed,
- OverwriteMode overwrite_mode) {
+Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
+ Result* operand,
+ Handle<Object> value,
+ StaticType* type,
+ bool reversed,
+ OverwriteMode overwrite_mode) {
// NOTE: This is an attempt to inline (a bit) more of the code for
// some possible smi operations (like + and -) when (at least) one
// of the operands is a constant smi.
@@ -5205,20 +5240,19 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
if (IsUnsafeSmi(value)) {
Result unsafe_operand(value);
if (reversed) {
- LikelySmiBinaryOperation(op, &unsafe_operand, operand,
+ return LikelySmiBinaryOperation(op, &unsafe_operand, operand,
overwrite_mode);
} else {
- LikelySmiBinaryOperation(op, operand, &unsafe_operand,
+ return LikelySmiBinaryOperation(op, operand, &unsafe_operand,
overwrite_mode);
}
- ASSERT(!operand->is_valid());
- return;
}
// Get the literal value.
Smi* smi_value = Smi::cast(*value);
int int_value = smi_value->value();
+ Result answer;
switch (op) {
case Token::ADD: {
operand->ToRegister();
@@ -5239,15 +5273,15 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
smi_value,
deferred->entry_label());
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
break;
}
case Token::SUB: {
if (reversed) {
Result constant_operand(value);
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
operand->ToRegister();
frame_->Spill(operand->reg());
@@ -5261,7 +5295,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
smi_value,
deferred->entry_label());
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
}
break;
}
@@ -5269,8 +5303,8 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
case Token::SAR:
if (reversed) {
Result constant_operand(value);
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
@@ -5288,21 +5322,21 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
operand->reg(),
shift_value);
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
}
break;
case Token::SHR:
if (reversed) {
Result constant_operand(value);
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
int shift_value = int_value & 0x1f;
operand->ToRegister();
- Result answer = allocator()->Allocate();
+ answer = allocator()->Allocate();
ASSERT(answer.is_valid());
DeferredInlineSmiOperation* deferred =
new DeferredInlineSmiOperation(op,
@@ -5317,15 +5351,14 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
deferred->entry_label());
deferred->BindExit();
operand->Unuse();
- frame_->Push(&answer);
}
break;
case Token::SHL:
if (reversed) {
Result constant_operand(value);
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
@@ -5342,10 +5375,10 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
overwrite_mode);
__ JumpIfNotSmi(operand->reg(), deferred->entry_label());
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
} else {
// Use a fresh temporary for nonzero shift values.
- Result answer = allocator()->Allocate();
+ answer = allocator()->Allocate();
ASSERT(answer.is_valid());
DeferredInlineSmiOperation* deferred =
new DeferredInlineSmiOperation(op,
@@ -5360,7 +5393,6 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
deferred->entry_label());
deferred->BindExit();
operand->Unuse();
- frame_->Push(&answer);
}
}
break;
@@ -5395,7 +5427,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
}
}
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
break;
}
@@ -5423,7 +5455,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
Smi::FromInt(int_value - 1));
}
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
break; // This break only applies if we generated code for MOD.
}
// Fall through if we did not find a power of 2 on the right hand side!
@@ -5432,22 +5464,24 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
default: {
Result constant_operand(value);
if (reversed) {
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
- LikelySmiBinaryOperation(op, operand, &constant_operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
+ overwrite_mode);
}
break;
}
}
- ASSERT(!operand->is_valid());
+ ASSERT(answer.is_valid());
+ return answer;
}
-void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode) {
+Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
+ Result* left,
+ Result* right,
+ OverwriteMode overwrite_mode) {
+ Result answer;
// Special handling of div and mod because they use fixed registers.
if (op == Token::DIV || op == Token::MOD) {
// We need rax as the quotient register, rdx as the remainder
@@ -5529,16 +5563,17 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
deferred->BindExit();
left->Unuse();
right->Unuse();
- frame_->Push(&quotient);
+ answer = quotient;
} else {
ASSERT(op == Token::MOD);
__ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
deferred->BindExit();
left->Unuse();
right->Unuse();
- frame_->Push(&remainder);
+ answer = remainder;
}
- return;
+ ASSERT(answer.is_valid());
+ return answer;
}
// Special handling of shift operations because they use fixed
@@ -5559,7 +5594,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
frame_->Spill(rcx);
// Use a fresh answer register to avoid spilling the left operand.
- Result answer = allocator_->Allocate();
+ answer = allocator_->Allocate();
ASSERT(answer.is_valid());
// Check that both operands are smis using the answer register as a
// temporary.
@@ -5598,8 +5633,8 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
deferred->BindExit();
left->Unuse();
right->Unuse();
- frame_->Push(&answer);
- return;
+ ASSERT(answer.is_valid());
+ return answer;
}
// Handle the other binary operations.
@@ -5608,7 +5643,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
// A newly allocated register answer is used to hold the answer. The
// registers containing left and right are not modified so they don't
// need to be spilled in the fast case.
- Result answer = allocator_->Allocate();
+ answer = allocator_->Allocate();
ASSERT(answer.is_valid());
// Perform the smi tag check.
@@ -5662,7 +5697,122 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
deferred->BindExit();
left->Unuse();
right->Unuse();
- frame_->Push(&answer);
+ ASSERT(answer.is_valid());
+ return answer;
+}
+
+
+Result CodeGenerator::EmitKeyedLoad(bool is_global) {
+ Comment cmnt(masm_, "[ Load from keyed Property");
+ // Inline array load code if inside of a loop. We do not know
+ // the receiver map yet, so we initially generate the code with
+ // a check against an invalid map. In the inline cache code, we
+ // patch the map check if appropriate.
+ if (loop_nesting() > 0) {
+ Comment cmnt(masm_, "[ Inlined load from keyed Property");
+
+ Result key = frame_->Pop();
+ Result receiver = frame_->Pop();
+ key.ToRegister();
+ receiver.ToRegister();
+
+ // Use a fresh temporary to load the elements without destroying
+ // the receiver which is needed for the deferred slow case.
+ Result elements = allocator()->Allocate();
+ ASSERT(elements.is_valid());
+
+ // Use a fresh temporary for the index and later the loaded
+ // value.
+ Result index = allocator()->Allocate();
+ ASSERT(index.is_valid());
+
+ DeferredReferenceGetKeyedValue* deferred =
+ new DeferredReferenceGetKeyedValue(index.reg(),
+ receiver.reg(),
+ key.reg(),
+ is_global);
+
+ // Check that the receiver is not a smi (only needed if this
+ // is not a load from the global context) and that it has the
+ // expected map.
+ if (!is_global) {
+ __ JumpIfSmi(receiver.reg(), deferred->entry_label());
+ }
+
+ // Initially, use an invalid map. The map is patched in the IC
+ // initialization code.
+ __ bind(deferred->patch_site());
+ // Use masm-> here instead of the double underscore macro since extra
+ // coverage code can interfere with the patching. Do not use
+ // root array to load null_value, since it must be patched with
+ // the expected receiver map.
+ masm_->movq(kScratchRegister, Factory::null_value(),
+ RelocInfo::EMBEDDED_OBJECT);
+ masm_->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+ kScratchRegister);
+ deferred->Branch(not_equal);
+
+ // Check that the key is a non-negative smi.
+ __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
+
+ // Get the elements array from the receiver and check that it
+ // is not a dictionary.
+ __ movq(elements.reg(),
+ FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+ __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
+ Factory::fixed_array_map());
+ deferred->Branch(not_equal);
+
+ // Shift the key to get the actual index value and check that
+ // it is within bounds.
+ __ SmiToInteger32(index.reg(), key.reg());
+ __ cmpl(index.reg(),
+ FieldOperand(elements.reg(), FixedArray::kLengthOffset));
+ deferred->Branch(above_equal);
+
+ // The index register holds the un-smi-tagged key. It has been
+ // zero-extended to 64-bits, so it can be used directly as index in the
+ // operand below.
+ // Load and check that the result is not the hole. We could
+ // reuse the index or elements register for the value.
+ //
+ // TODO(206): Consider whether it makes sense to try some
+ // heuristic about which register to reuse. For example, if
+ // one is rax, then we can reuse that one because the value
+ // coming from the deferred code will be in rax.
+ Result value = index;
+ __ movq(value.reg(),
+ Operand(elements.reg(),
+ index.reg(),
+ times_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ elements.Unuse();
+ index.Unuse();
+ __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
+ deferred->Branch(equal);
+ __ IncrementCounter(&Counters::keyed_load_inline, 1);
+
+ deferred->BindExit();
+ // Restore the receiver and key to the frame and push the
+ // result on top of it.
+ frame_->Push(&receiver);
+ frame_->Push(&key);
+ return value;
+
+ } else {
+ Comment cmnt(masm_, "[ Load from keyed Property");
+ RelocInfo::Mode mode = is_global
+ ? RelocInfo::CODE_TARGET_CONTEXT
+ : RelocInfo::CODE_TARGET;
+ Result answer = frame_->CallKeyedLoadIC(mode);
+ // Make sure that we do not have a test instruction after the
+ // call. A test instruction after the call is used to
+ // indicate that we have generated an inline version of the
+ // keyed load. The explicit nop instruction is here because
+ // the push that follows might be peep-hole optimized away.
+ __ nop();
+ return answer;
+ }
}
@@ -5795,119 +5945,18 @@ void Reference::GetValue() {
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
- // Inline array load code if inside of a loop. We do not know
- // the receiver map yet, so we initially generate the code with
- // a check against an invalid map. In the inline cache code, we
- // patch the map check if appropriate.
- if (cgen_->loop_nesting() > 0) {
- Comment cmnt(masm, "[ Inlined load from keyed Property");
-
- Result key = cgen_->frame()->Pop();
- Result receiver = cgen_->frame()->Pop();
- key.ToRegister();
- receiver.ToRegister();
-
- // Use a fresh temporary to load the elements without destroying
- // the receiver which is needed for the deferred slow case.
- Result elements = cgen_->allocator()->Allocate();
- ASSERT(elements.is_valid());
-
- // Use a fresh temporary for the index and later the loaded
- // value.
- Result index = cgen_->allocator()->Allocate();
- ASSERT(index.is_valid());
-
- DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue(index.reg(),
- receiver.reg(),
- key.reg(),
- is_global);
-
- // Check that the receiver is not a smi (only needed if this
- // is not a load from the global context) and that it has the
- // expected map.
- if (!is_global) {
- __ JumpIfSmi(receiver.reg(), deferred->entry_label());
- }
-
- // Initially, use an invalid map. The map is patched in the IC
- // initialization code.
- __ bind(deferred->patch_site());
- // Use masm-> here instead of the double underscore macro since extra
- // coverage code can interfere with the patching.
- masm->movq(kScratchRegister, Factory::null_value(),
- RelocInfo::EMBEDDED_OBJECT);
- masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- kScratchRegister);
- deferred->Branch(not_equal);
-
- // Check that the key is a non-negative smi.
- __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
-
- // Get the elements array from the receiver and check that it
- // is not a dictionary.
- __ movq(elements.reg(),
- FieldOperand(receiver.reg(), JSObject::kElementsOffset));
- __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
- Factory::fixed_array_map());
- deferred->Branch(not_equal);
-
- // Shift the key to get the actual index value and check that
- // it is within bounds.
- __ SmiToInteger32(index.reg(), key.reg());
- __ cmpl(index.reg(),
- FieldOperand(elements.reg(), FixedArray::kLengthOffset));
- deferred->Branch(above_equal);
-
- // The index register holds the un-smi-tagged key. It has been
- // zero-extended to 64-bits, so it can be used directly as index in the
- // operand below.
- // Load and check that the result is not the hole. We could
- // reuse the index or elements register for the value.
- //
- // TODO(206): Consider whether it makes sense to try some
- // heuristic about which register to reuse. For example, if
- // one is rax, the we can reuse that one because the value
- // coming from the deferred code will be in rax.
- Result value = index;
- __ movq(value.reg(),
- Operand(elements.reg(),
- index.reg(),
- times_pointer_size,
- FixedArray::kHeaderSize - kHeapObjectTag));
- elements.Unuse();
- index.Unuse();
- __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
- deferred->Branch(equal);
- __ IncrementCounter(&Counters::keyed_load_inline, 1);
-
- deferred->BindExit();
- // Restore the receiver and key to the frame and push the
- // result on top of it.
- cgen_->frame()->Push(&receiver);
- cgen_->frame()->Push(&key);
- cgen_->frame()->Push(&value);
-
- } else {
- Comment cmnt(masm, "[ Load from keyed Property");
- RelocInfo::Mode mode = is_global
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed load. The explicit nop instruction is here because
- // the push that follows might be peep-hole optimized away.
- __ nop();
- cgen_->frame()->Push(&answer);
- }
+ Result value = cgen_->EmitKeyedLoad(is_global);
+ cgen_->frame()->Push(&value);
break;
}
default:
UNREACHABLE();
}
+
+ if (!persist_after_get_) {
+ cgen_->UnloadReference(this);
+ }
}
@@ -5944,6 +5993,9 @@ void Reference::TakeValue() {
ASSERT(slot->type() == Slot::LOCAL);
cgen_->frame()->TakeLocalAt(slot->index());
}
+
+ ASSERT(persist_after_get_);
+ // Do not unload the reference, because it is used in SetValue.
}
@@ -6072,6 +6124,7 @@ void Reference::SetValue(InitState init_state) {
default:
UNREACHABLE();
}
+ cgen_->UnloadReference(this);
}
@@ -6213,19 +6266,17 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
- // TODO(X64): This method is identical to the ia32 version.
- // Either find a reason to change it, or move it somewhere where it can be
- // shared. (Notice: It assumes that a Smi can fit in an int).
-
Object* answer_object = Heap::undefined_value();
switch (op) {
case Token::ADD:
- if (Smi::IsValid(left + right)) {
+ // Use intptr_t to detect overflow of 32-bit int.
+ if (Smi::IsValid(static_cast<intptr_t>(left) + right)) {
answer_object = Smi::FromInt(left + right);
}
break;
case Token::SUB:
- if (Smi::IsValid(left - right)) {
+ // Use intptr_t to detect overflow of 32-bit int.
+ if (Smi::IsValid(static_cast<intptr_t>(left) - right)) {
answer_object = Smi::FromInt(left - right);
}
break;
@@ -6299,56 +6350,216 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
// End of CodeGenerator implementation.
+// Get the integer part of a heap number. Surprisingly, all this bit twiddling
+// is faster than using the built-in instructions on floating point registers.
+// Trashes rdi and rbx. Dest is rcx. Source cannot be rcx or one of the
+// trashed registers.
+void IntegerConvert(MacroAssembler* masm,
+ Register source,
+ bool use_sse3,
+ Label* conversion_failure) {
+ ASSERT(!source.is(rcx) && !source.is(rdi) && !source.is(rbx));
+ Label done, right_exponent, normal_exponent;
+ Register scratch = rbx;
+ Register scratch2 = rdi;
+ // Get exponent word.
+ __ movl(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
+ // Get exponent alone in scratch2.
+ __ movl(scratch2, scratch);
+ __ and_(scratch2, Immediate(HeapNumber::kExponentMask));
+ if (use_sse3) {
+ CpuFeatures::Scope scope(SSE3);
+ // Check whether the exponent is too big for a 64 bit signed integer.
+ static const uint32_t kTooBigExponent =
+ (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+ __ cmpl(scratch2, Immediate(kTooBigExponent));
+ __ j(greater_equal, conversion_failure);
+ // Load x87 register with heap number.
+ __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
+ // Reserve space for 64 bit answer.
+ __ subq(rsp, Immediate(sizeof(uint64_t))); // Nolint.
+ // Do conversion, which cannot fail because we checked the exponent.
+ __ fisttp_d(Operand(rsp, 0));
+ __ movl(rcx, Operand(rsp, 0)); // Load low word of answer into rcx.
+ __ addq(rsp, Immediate(sizeof(uint64_t))); // Nolint.
+ } else {
+ // Load rcx with zero. We use this either for the final shift or
+ // for the answer.
+ __ xor_(rcx, rcx);
+ // Check whether the exponent matches a 32 bit signed int that cannot be
+ // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
+ // exponent is 30 (biased). This is the exponent that we are fastest at and
+ // also the highest exponent we can handle here.
+ const uint32_t non_smi_exponent =
+ (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+ __ cmpl(scratch2, Immediate(non_smi_exponent));
+ // If we have a match of the int32-but-not-Smi exponent then skip some
+ // logic.
+ __ j(equal, &right_exponent);
+ // If the exponent is higher than that then go to slow case. This catches
+ // numbers that don't fit in a signed int32, infinities and NaNs.
+ __ j(less, &normal_exponent);
+
+ {
+ // Handle a big exponent. The only reason we have this code is that the
+ // >>> operator has a tendency to generate numbers with an exponent of 31.
+ const uint32_t big_non_smi_exponent =
+ (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
+ __ cmpl(scratch2, Immediate(big_non_smi_exponent));
+ __ j(not_equal, conversion_failure);
+ // We have the big exponent, typically from >>>. This means the number is
+ // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
+ __ movl(scratch2, scratch);
+ __ and_(scratch2, Immediate(HeapNumber::kMantissaMask));
+ // Put back the implicit 1.
+ __ or_(scratch2, Immediate(1 << HeapNumber::kExponentShift));
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We just orred in the implicit bit so that took care of one and
+ // we want to use the full unsigned range so we subtract 1 bit from the
+ // shift distance.
+ const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
+ __ shl(scratch2, Immediate(big_shift_distance));
+ // Get the second half of the double.
+ __ movl(rcx, FieldOperand(source, HeapNumber::kMantissaOffset));
+ // Shift down 21 bits to get the most significant 11 bits or the low
+ // mantissa word.
+ __ shr(rcx, Immediate(32 - big_shift_distance));
+ __ or_(rcx, scratch2);
+ // We have the answer in rcx, but we may need to negate it.
+ __ testl(scratch, scratch);
+ __ j(positive, &done);
+ __ neg(rcx);
+ __ jmp(&done);
+ }
+
+ __ bind(&normal_exponent);
+ // Exponent word in scratch, exponent part of exponent word in scratch2.
+ // Zero in rcx.
+ // We know the exponent is smaller than 30 (biased). If it is less than
+ // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
+ // it rounds to zero.
+ const uint32_t zero_exponent =
+ (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
+ __ subl(scratch2, Immediate(zero_exponent));
+ // rcx already has a Smi zero.
+ __ j(less, &done);
+
+ // We have a shifted exponent between 0 and 30 in scratch2.
+ __ shr(scratch2, Immediate(HeapNumber::kExponentShift));
+ __ movl(rcx, Immediate(30));
+ __ subl(rcx, scratch2);
+
+ __ bind(&right_exponent);
+ // Here rcx is the shift, scratch is the exponent word.
+ // Get the top bits of the mantissa.
+ __ and_(scratch, Immediate(HeapNumber::kMantissaMask));
+ // Put back the implicit 1.
+ __ or_(scratch, Immediate(1 << HeapNumber::kExponentShift));
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We have kExponentShift + 1 significant bits in the low end of the
+ // word. Shift them to the top bits.
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ __ shl(scratch, Immediate(shift_distance));
+ // Get the second half of the double. For some exponents we don't
+ // actually need this because the bits get shifted out again, but
+ // it's probably slower to test than just to do it.
+ __ movl(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
+ // Shift down 22 bits to get the most significant 10 bits or the low
+ // mantissa word.
+ __ shr(scratch2, Immediate(32 - shift_distance));
+ __ or_(scratch2, scratch);
+ // Move down according to the exponent.
+ __ shr_cl(scratch2);
+ // Now the unsigned answer is in scratch2. We need to move it to rcx and
+ // we may need to fix the sign.
+ Label negative;
+ __ xor_(rcx, rcx);
+ __ cmpl(rcx, FieldOperand(source, HeapNumber::kExponentOffset));
+ __ j(greater, &negative);
+ __ movl(rcx, scratch2);
+ __ jmp(&done);
+ __ bind(&negative);
+ __ subl(rcx, scratch2);
+ __ bind(&done);
+ }
+}
+
+
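// ---- Editor's worked example (not part of this commit) ----------------------
// Assuming the usual HeapNumber layout (kExponentShift == 20,
// kExponentBias == 1023, kNonMantissaBitsInTopWord == 12, so
// shift_distance == 10), tracing the non-SSE3 path above for the value 6.0
// (bits 0x4018000000000000):
//   scratch  = 0x40180000 (high word), scratch2 = 0x40100000 (exponent bits)
//   scratch2 is below non_smi_exponent, so take the normal_exponent path
//   scratch2 - zero_exponent = 0x00200000, shifted right by 20 gives exponent 2
//   rcx = 30 - 2 = 28 (remaining shift)
//   scratch = (mantissa | implicit 1) = 0x00180000, << 10 = 0x60000000
//   the low mantissa word is 0, so scratch2 = 0x60000000, >> rcx = 6
//   the sign bit is clear, so rcx holds the final answer 6.
// -----------------------------------------------------------------------------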
void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- ASSERT(op_ == Token::SUB);
+ Label slow, done;
+
+ if (op_ == Token::SUB) {
+ // Check whether the value is a smi.
+ Label try_float;
+ __ JumpIfNotSmi(rax, &try_float);
+
+ // Enter runtime system if the value of the smi is zero
+ // to make sure that we switch between 0 and -0.
+ // Also enter it if the value of the smi is Smi::kMinValue.
+ __ SmiNeg(rax, rax, &done);
+
+ // Either zero or Smi::kMinValue, neither of which become a smi when
+ // negated.
+ __ SmiCompare(rax, Smi::FromInt(0));
+ __ j(not_equal, &slow);
+ __ Move(rax, Factory::minus_zero_value());
+ __ jmp(&done);
+
+ // Try floating point case.
+ __ bind(&try_float);
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &slow);
+ // Operand is a float, negate its value by flipping sign bit.
+ __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(kScratchRegister, Immediate(0x01));
+ __ shl(kScratchRegister, Immediate(63));
+ __ xor_(rdx, kScratchRegister); // Flip sign.
+ // rdx is value to store.
+ if (overwrite_) {
+ __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
+ } else {
+ __ AllocateHeapNumber(rcx, rbx, &slow);
+ // rcx: allocated 'empty' number
+ __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
+ __ movq(rax, rcx);
+ }
+ } else if (op_ == Token::BIT_NOT) {
+ // Check if the operand is a heap number.
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &slow);
- Label slow;
- Label done;
- Label try_float;
- // Check whether the value is a smi.
- __ JumpIfNotSmi(rax, &try_float);
+ // Convert the heap number in rax to an untagged integer in rcx.
+ IntegerConvert(masm, rax, CpuFeatures::IsSupported(SSE3), &slow);
- // Enter runtime system if the value of the smi is zero
- // to make sure that we switch between 0 and -0.
- // Also enter it if the value of the smi is Smi::kMinValue.
- __ SmiNeg(rax, rax, &done);
+ // Do the bitwise operation and check if the result fits in a smi.
+ Label try_float;
+ __ not_(rcx);
+ // Tag the result as a smi and we're done.
+ ASSERT(kSmiTagSize == 1);
+ __ Integer32ToSmi(rax, rcx);
+ }
- // Either zero or Smi::kMinValue, neither of which become a smi when negated.
- __ SmiCompare(rax, Smi::FromInt(0));
- __ j(not_equal, &slow);
- __ Move(rax, Factory::minus_zero_value());
- __ jmp(&done);
+ // Return from the stub.
+ __ bind(&done);
+ __ StubReturn(1);
- // Enter runtime system.
+ // Handle the slow case by jumping to the JavaScript builtin.
__ bind(&slow);
__ pop(rcx); // pop return address
__ push(rax);
__ push(rcx); // push return address
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- __ jmp(&done);
-
- // Try floating point case.
- __ bind(&try_float);
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ Cmp(rdx, Factory::heap_number_map());
- __ j(not_equal, &slow);
- // Operand is a float, negate its value by flipping sign bit.
- __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(kScratchRegister, Immediate(0x01));
- __ shl(kScratchRegister, Immediate(63));
- __ xor_(rdx, kScratchRegister); // Flip sign.
- // rdx is value to store.
- if (overwrite_) {
- __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
- } else {
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // rcx: allocated 'empty' number
- __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
- __ movq(rax, rcx);
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
}
-
- __ bind(&done);
- __ StubReturn(1);
}
@@ -7297,15 +7508,6 @@ void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
}
-void FloatingPointHelper::LoadInt32Operand(MacroAssembler* masm,
- const Operand& src,
- Register dst) {
- // TODO(X64): Convert number operands to int32 values.
- // Don't convert a Smi to a double first.
- UNIMPLEMENTED();
-}
-
-
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) {
Label load_smi_1, load_smi_2, done_load_1, done;
__ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
@@ -7335,6 +7537,61 @@ void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) {
}
+// Input: rdx, rax are the left and right objects of a bit op.
+// Output: rax, rcx are left and right integers for a bit op.
+void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
+ bool use_sse3,
+ Label* conversion_failure) {
+ // Check float operands.
+ Label arg1_is_object, check_undefined_arg1;
+ Label arg2_is_object, check_undefined_arg2;
+ Label load_arg2, done;
+
+ __ JumpIfNotSmi(rdx, &arg1_is_object);
+ __ SmiToInteger32(rdx, rdx);
+ __ jmp(&load_arg2);
+
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg1);
+ __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, conversion_failure);
+ __ movl(rdx, Immediate(0));
+ __ jmp(&load_arg2);
+
+ __ bind(&arg1_is_object);
+ __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &check_undefined_arg1);
+ // Get the untagged integer version of the rdx heap number in rcx.
+ IntegerConvert(masm, rdx, use_sse3, conversion_failure);
+ __ movl(rdx, rcx);
+
+ // Here rdx has the untagged integer, rax has a Smi or a heap number.
+ __ bind(&load_arg2);
+ // Test if arg2 is a Smi.
+ __ JumpIfNotSmi(rax, &arg2_is_object);
+ __ SmiToInteger32(rax, rax);
+ __ movl(rcx, rax);
+ __ jmp(&done);
+
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg2);
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, conversion_failure);
+ __ movl(rcx, Immediate(0));
+ __ jmp(&done);
+
+ __ bind(&arg2_is_object);
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &check_undefined_arg2);
+ // Get the untagged integer version of the rax heap number in rcx.
+ IntegerConvert(masm, rax, use_sse3, conversion_failure);
+ __ bind(&done);
+ __ movl(rax, rdx);
+}
+
+
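
A minimal sketch (editorial, not part of the patch) of the per-operand conversion LoadAsIntegers performs, using a toy tagged-value struct in place of real V8 objects: smis are untagged directly, heap numbers are reduced to 32 bits by IntegerConvert, undefined converts to zero (ECMA-262, section 9.5), and anything else takes the conversion_failure path. The generated code then leaves the left integer in rax and the right one in rcx, as the header comment states.

    #include <cstdint>
    #include <optional>

    // Toy operand: exactly one of the flags is expected to be set.
    struct Operand32 {
      bool is_smi;
      bool is_heap_number;
      bool is_undefined;
      int32_t smi_value;         // Untagged smi payload.
      int32_t converted_number;  // What IntegerConvert would produce.
    };

    std::optional<int32_t> LoadAsInteger(const Operand32& op) {
      if (op.is_smi) return op.smi_value;
      if (op.is_heap_number) return op.converted_number;
      if (op.is_undefined) return 0;  // ECMA-262, section 9.5.
      return std::nullopt;            // conversion_failure: handled by the runtime.
    }
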
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
Register lhs,
Register rhs) {
@@ -7575,7 +7832,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
case Token::SHL:
case Token::SHR:
case Token::SAR:
- // Move the second operand into register ecx.
+ // Move the second operand into register rcx.
__ movq(rcx, rbx);
// Perform the operation.
switch (op_) {
@@ -7671,44 +7928,8 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::SAR:
case Token::SHL:
case Token::SHR: {
- FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
- // TODO(X64): Don't convert a Smi to float and then back to int32
- // afterwards.
- FloatingPointHelper::LoadFloatOperands(masm);
-
- Label skip_allocation, non_smi_result, operand_conversion_failure;
-
- // Reserve space for converted numbers.
- __ subq(rsp, Immediate(2 * kPointerSize));
-
- if (use_sse3_) {
- // Truncate the operands to 32-bit integers and check for
- // exceptions in doing so.
- CpuFeatures::Scope scope(SSE3);
- __ fisttp_s(Operand(rsp, 0 * kPointerSize));
- __ fisttp_s(Operand(rsp, 1 * kPointerSize));
- __ fnstsw_ax();
- __ testl(rax, Immediate(1));
- __ j(not_zero, &operand_conversion_failure);
- } else {
- // Check if right operand is int32.
- __ fist_s(Operand(rsp, 0 * kPointerSize));
- __ fild_s(Operand(rsp, 0 * kPointerSize));
- __ FCmp();
- __ j(not_zero, &operand_conversion_failure);
- __ j(parity_even, &operand_conversion_failure);
-
- // Check if left operand is int32.
- __ fist_s(Operand(rsp, 1 * kPointerSize));
- __ fild_s(Operand(rsp, 1 * kPointerSize));
- __ FCmp();
- __ j(not_zero, &operand_conversion_failure);
- __ j(parity_even, &operand_conversion_failure);
- }
-
- // Get int32 operands and perform bitop.
- __ pop(rcx);
- __ pop(rax);
+ Label skip_allocation, non_smi_result;
+ FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
switch (op_) {
case Token::BIT_OR: __ orl(rax, rcx); break;
case Token::BIT_AND: __ andl(rax, rcx); break;
@@ -7756,22 +7977,6 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
GenerateReturn(masm);
}
- // Clear the FPU exception flag and reset the stack before calling
- // the runtime system.
- __ bind(&operand_conversion_failure);
- __ addq(rsp, Immediate(2 * kPointerSize));
- if (use_sse3_) {
- // If we've used the SSE3 instructions for truncating the
- // floating point values to integers and it failed, we have a
- // pending #IA exception. Clear it.
- __ fnclex();
- } else {
- // The non-SSE3 variant does early bailout if the right
- // operand isn't a 32-bit integer, so we may have a single
- // value on the FPU stack we need to get rid of.
- __ ffree(0);
- }
-
// SHR should return uint32 - go to runtime for non-smi/negative result.
if (op_ == Token::SHR) {
__ bind(&non_smi_result);
@@ -7991,8 +8196,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Both strings are non-empty.
// rax: first string
// rbx: length of first string
- // ecx: length of second string
- // edx: second string
+ // rcx: length of second string
+ // rdx: second string
// r8: instance type of first string if string check was performed above
// r9: instance type of first string if string check was performed above
Label string_add_flat_result;
@@ -8148,11 +8353,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
}
-void StringAddStub::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii) {
+void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ bool ascii) {
Label loop;
__ bind(&loop);
// This loop just copies one character at a time, as it is only used for very
@@ -8173,6 +8378,174 @@ void StringAddStub::GenerateCopyCharacters(MacroAssembler* masm,
}
+void StringStubBase::GenerateCopyCharactersREP(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ bool ascii) {
+ // Copy characters using rep movs of quadwords. Copy any remaining bytes one
+ // at a time after running rep movs.
+ ASSERT(dest.is(rdi)); // rep movs destination
+ ASSERT(src.is(rsi)); // rep movs source
+ ASSERT(count.is(rcx)); // rep movs count
+
+ // Nothing to do for zero characters.
+ Label done;
+ __ testq(count, count);
+ __ j(zero, &done);
+
+ // Make count the number of bytes to copy.
+ if (!ascii) {
+ ASSERT_EQ(2, sizeof(uc16)); // NOLINT
+ __ addq(count, count);
+ }
+
+ // Don't enter the rep movs if there are fewer than 8 bytes to copy.
+ Label last_bytes;
+ __ testq(count, Immediate(~7));
+ __ j(zero, &last_bytes);
+
+ // Copy from rsi to rdi using the rep movsq instruction.
+ __ movq(kScratchRegister, count);
+ __ sar(count, Immediate(3)); // Number of quadwords to copy.
+ __ repmovsq();
+
+ // Find number of bytes left.
+ __ movq(count, kScratchRegister);
+ __ and_(count, Immediate(7));
+
+ // Check if there are more bytes to copy.
+ __ bind(&last_bytes);
+ __ testq(count, count);
+ __ j(zero, &done);
+
+ // Copy remaining characters.
+ Label loop;
+ __ bind(&loop);
+ __ movb(kScratchRegister, Operand(src, 0));
+ __ movb(Operand(dest, 0), kScratchRegister);
+ __ addq(src, Immediate(1));
+ __ addq(dest, Immediate(1));
+ __ subq(count, Immediate(1));
+ __ j(not_zero, &loop);
+
+ __ bind(&done);
+}
+
+
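
The byte-level effect of GenerateCopyCharactersREP, as a plain C++ sketch (editorial, not part of the patch): whole quadwords are copied by rep movsq and the remaining 0..7 bytes by the trailing byte loop; overlapping regions are not supported, matching the note in codegen-x64.h.

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // chars is a character count; two-byte (uc16) strings occupy 2 * chars bytes.
    void CopyCharactersRep(uint8_t* dest, const uint8_t* src, size_t chars, bool ascii) {
      size_t bytes = ascii ? chars : 2 * chars;
      size_t quads = bytes >> 3;          // Copied by rep movsq.
      std::memcpy(dest, src, quads * 8);  // Whole quadwords.
      std::memcpy(dest + quads * 8, src + quads * 8, bytes & 7);  // 0..7 tail bytes.
    }
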
+void SubStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // rsp[0]: return address
+ // rsp[8]: to
+ // rsp[16]: from
+ // rsp[24]: string
+
+ const int kToOffset = 1 * kPointerSize;
+ const int kFromOffset = kToOffset + kPointerSize;
+ const int kStringOffset = kFromOffset + kPointerSize;
+ const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
+
+ // Make sure first argument is a string.
+ __ movq(rax, Operand(rsp, kStringOffset));
+ ASSERT_EQ(0, kSmiTag);
+ __ testl(rax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+ Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
+ __ j(NegateCondition(is_string), &runtime);
+
+ // rax: string
+ // rbx: instance type
+ // Calculate length of sub string using the smi values.
+ __ movq(rcx, Operand(rsp, kToOffset));
+ __ movq(rdx, Operand(rsp, kFromOffset));
+ __ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime);
+
+ __ SmiSub(rcx, rcx, rdx, NULL); // Overflow doesn't happen.
+ __ j(negative, &runtime);
+ // Handle sub-strings of length 2 or less in the runtime system.
+ __ SmiToInteger32(rcx, rcx);
+ __ cmpl(rcx, Immediate(2));
+ __ j(below_equal, &runtime);
+
+ // rax: string
+ // rbx: instance type
+ // rcx: result string length
+ // Check for flat ascii string
+ Label non_ascii_flat;
+ __ and_(rbx, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ __ cmpb(rbx, Immediate(kSeqStringTag | kAsciiStringTag));
+ __ j(not_equal, &non_ascii_flat);
+
+ // Allocate the result.
+ __ AllocateAsciiString(rax, rcx, rbx, rdx, rdi, &runtime);
+
+ // rax: result string
+ // rcx: result string length
+ __ movq(rdx, rsi); // rsi is used by the following code.
+ // Locate first character of result.
+ __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+ // Load string argument and locate character of sub string start.
+ __ movq(rsi, Operand(rsp, kStringOffset));
+ __ movq(rbx, Operand(rsp, kFromOffset));
+ {
+ SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_1);
+ __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
+ SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ }
+
+ // rax: result string
+ // rcx: result length
+ // rdx: original value of rsi
+ // rdi: first character of result
+ // rsi: character of sub string start
+ GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
+ __ movq(rsi, rdx); // Restore rsi.
+ __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ ret(kArgumentsSize);
+
+ __ bind(&non_ascii_flat);
+ // rax: string
+ // rbx: instance type & (kStringRepresentationMask | kStringEncodingMask)
+ // rcx: result string length
+ // Check for sequential two byte string
+ __ cmpb(rbx, Immediate(kSeqStringTag | kTwoByteStringTag));
+ __ j(not_equal, &runtime);
+
+ // Allocate the result.
+ __ AllocateTwoByteString(rax, rcx, rbx, rdx, rdi, &runtime);
+
+ // rax: result string
+ // rcx: result string length
+ __ movq(rdx, rsi); // rsi is used by the following code.
+ // Locate first character of result.
+ __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
+ // Load string argument and locate character of sub string start.
+ __ movq(rsi, Operand(rsp, kStringOffset));
+ __ movq(rbx, Operand(rsp, kFromOffset));
+ {
+ SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_2);
+ __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
+ SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ }
+
+ // rax: result string
+ // rcx: result length
+ // rdx: original value of rsi
+ // rdi: first character of result
+ // rsi: character of sub string start
+ GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
+ __ movq(rsi, rdx); // Restore rsi.
+ __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ ret(kArgumentsSize);
+
+ // Just jump to runtime to create the sub string.
+ __ bind(&runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kSubString), 3, 1);
+}
+
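
A quick check of the stack offsets used by SubStringStub::Generate (editorial sketch; kPointerSize is assumed to be 8 on x64): the three arguments live at rsp[8], rsp[16] and rsp[24], so ret(kArgumentsSize) pops exactly the three words pushed by the caller.

    constexpr int kPointerSize = 8;  // Assumption: x64 pointer size.
    constexpr int kToOffset = 1 * kPointerSize;                // rsp[8]
    constexpr int kFromOffset = kToOffset + kPointerSize;      // rsp[16]
    constexpr int kStringOffset = kFromOffset + kPointerSize;  // rsp[24]
    constexpr int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
    static_assert(kArgumentsSize == 3 * kPointerSize,
                  "ret(kArgumentsSize) pops the to, from and string arguments");
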
void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register left,
@@ -8241,7 +8614,6 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Result is EQUAL.
__ Move(rax, Smi::FromInt(EQUAL));
- __ IncrementCounter(&Counters::string_compare_native, 1);
__ ret(2 * kPointerSize);
Label result_greater;
@@ -8251,13 +8623,11 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Result is LESS.
__ Move(rax, Smi::FromInt(LESS));
- __ IncrementCounter(&Counters::string_compare_native, 1);
__ ret(2 * kPointerSize);
// Result is GREATER.
__ bind(&result_greater);
__ Move(rax, Smi::FromInt(GREATER));
- __ IncrementCounter(&Counters::string_compare_native, 1);
__ ret(2 * kPointerSize);
}
@@ -8287,6 +8657,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
// Inline comparison of ascii strings.
+ __ IncrementCounter(&Counters::string_compare_native, 1);
GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 50bb0231..72c84162 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -43,57 +43,70 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// -------------------------------------------------------------------------
// Reference support
-// A reference is a C++ stack-allocated object that keeps an ECMA
-// reference on the execution stack while in scope. For variables
-// the reference is empty, indicating that it isn't necessary to
-// store state on the stack for keeping track of references to those.
-// For properties, we keep either one (named) or two (indexed) values
-// on the execution stack to represent the reference.
-
+// A reference is a C++ stack-allocated object that puts a
+// reference on the virtual frame. The reference may be consumed
+// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
+// When the lifetime (scope) of a valid reference ends, it must have
+// been consumed, and be in state UNLOADED.
class Reference BASE_EMBEDDED {
public:
 // The values of the types are important, see size().
- enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
- Reference(CodeGenerator* cgen, Expression* expression);
+ enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+
+ Reference(CodeGenerator* cgen,
+ Expression* expression,
+ bool persist_after_get = false);
~Reference();
Expression* expression() const { return expression_; }
Type type() const { return type_; }
void set_type(Type value) {
- ASSERT(type_ == ILLEGAL);
+ ASSERT_EQ(ILLEGAL, type_);
type_ = value;
}
+ void set_unloaded() {
+ ASSERT_NE(ILLEGAL, type_);
+ ASSERT_NE(UNLOADED, type_);
+ type_ = UNLOADED;
+ }
// The size the reference takes up on the stack.
- int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
+ int size() const {
+ return (type_ < SLOT) ? 0 : type_;
+ }
bool is_illegal() const { return type_ == ILLEGAL; }
bool is_slot() const { return type_ == SLOT; }
bool is_property() const { return type_ == NAMED || type_ == KEYED; }
+ bool is_unloaded() const { return type_ == UNLOADED; }
// Return the name. Only valid for named property references.
Handle<String> GetName();
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
- // the expression stack, and it is left in place with its value above it.
+ // the expression stack, and it is consumed by the call unless the
+ // reference is for a compound assignment.
+ // If the reference is not consumed, it is left in place under its value.
void GetValue();
// Like GetValue except that the slot is expected to be written to before
- // being read from again. Thae value of the reference may be invalidated,
+ // being read from again. The value of the reference may be invalidated,
// causing subsequent attempts to read it to fail.
void TakeValue();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
- // on the expression stack. The stored value is left in place (with the
- // reference intact below it) to support chained assignments.
+ // on the expression stack. The value is stored in the location specified
+ // by the reference, and is left on top of the stack, after the reference
+ // is popped from beneath it (unloaded).
void SetValue(InitState init_state);
private:
CodeGenerator* cgen_;
Expression* expression_;
Type type_;
+ bool persist_after_get_;
};
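
The updated comment describes a small state machine; the following stand-alone toy model (editorial, not V8 code) captures the transitions and the size() convention: slots keep nothing on the expression stack, named references one value and keyed references two, and a consumed reference must end its scope in state UNLOADED.

    #include <cassert>

    enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };

    struct ReferenceModel {
      Type type = ILLEGAL;
      void set_type(Type value) { assert(type == ILLEGAL); type = value; }
      void set_unloaded() { assert(type != ILLEGAL && type != UNLOADED); type = UNLOADED; }
      int size() const { return (type < SLOT) ? 0 : type; }  // Values on the stack.
    };

    int main() {
      ReferenceModel ref;
      ref.set_type(KEYED);
      assert(ref.size() == 2);  // Receiver and key are kept on the stack.
      ref.set_unloaded();       // Done by GetValue/SetValue when they consume it.
      assert(ref.size() == 0);  // Nothing left to clean up at end of scope.
      return 0;
    }
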
@@ -422,6 +435,11 @@ class CodeGenerator: public AstVisitor {
// value in place.
void StoreToSlot(Slot* slot, InitState init_state);
+ // Load a property of an object, returning it in a Result.
+ // The object and the property name are passed on the stack, and
+ // not changed.
+ Result EmitKeyedLoad(bool is_global);
+
// Special code for typeof expressions: Unfortunately, we must
// be careful when loading the expression in 'typeof'
// expressions. We are not allowed to throw reference errors for
@@ -446,20 +464,20 @@ class CodeGenerator: public AstVisitor {
// Emit code to perform a binary operation on a constant
// smi and a likely smi. Consumes the Result *operand.
- void ConstantSmiBinaryOperation(Token::Value op,
- Result* operand,
- Handle<Object> constant_operand,
- StaticType* type,
- bool reversed,
- OverwriteMode overwrite_mode);
+ Result ConstantSmiBinaryOperation(Token::Value op,
+ Result* operand,
+ Handle<Object> constant_operand,
+ StaticType* type,
+ bool reversed,
+ OverwriteMode overwrite_mode);
// Emit code to perform a binary operation on two likely smis.
// The code to handle smi arguments is produced inline.
// Consumes the Results *left and *right.
- void LikelySmiBinaryOperation(Token::Value op,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode);
+ Result LikelySmiBinaryOperation(Token::Value op,
+ Result* left,
+ Result* right,
+ OverwriteMode overwrite_mode);
void Comparison(Condition cc,
bool strict,
@@ -478,10 +496,10 @@ class CodeGenerator: public AstVisitor {
CallFunctionFlags flags,
int position);
- // Use an optimized version of Function.prototype.apply that avoid
- // allocating the arguments object and just copies the arguments
- // from the stack.
- void CallApplyLazy(Property* apply,
+ // An optimized implementation of expressions of the form
+ // x.apply(y, arguments). We call x the applicand and y the receiver.
+ // The optimization avoids allocating an arguments object if possible.
+ void CallApplyLazy(Expression* applicand,
Expression* receiver,
VariableProxy* arguments,
int position);
@@ -514,6 +532,7 @@ class CodeGenerator: public AstVisitor {
void GenerateIsArray(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
+ void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -610,8 +629,8 @@ class CodeGenerator: public AstVisitor {
friend class JumpTarget;
friend class Reference;
friend class Result;
- friend class FastCodeGenerator;
- friend class CodeGenSelector;
+ friend class FullCodeGenerator;
+ friend class FullCodeGenSyntaxChecker;
friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc
@@ -712,6 +731,29 @@ class GenericBinaryOpStub: public CodeStub {
};
+class StringStubBase: public CodeStub {
+ public:
+ // Generate code for copying characters using a simple loop. This should only
+ // be used in places where the number of characters is small and the
+ // additional setup and checking in GenerateCopyCharactersREP adds too much
+ // overhead. Copying of overlapping regions is not supported.
+ void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ bool ascii);
+
+ // Generate code for copying characters using the rep movs instruction.
+ // Copies rcx characters from rsi to rdi. Copying of overlapping regions is
+ // not supported.
+ void GenerateCopyCharactersREP(MacroAssembler* masm,
+ Register dest, // Must be rdi.
+ Register src, // Must be rsi.
+ Register count, // Must be rcx.
+ bool ascii);
+};
+
+
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
NO_STRING_ADD_FLAGS = 0,
@@ -719,7 +761,7 @@ enum StringAddFlags {
};
-class StringAddStub: public CodeStub {
+class StringAddStub: public StringStubBase {
public:
explicit StringAddStub(StringAddFlags flags) {
string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
@@ -731,17 +773,23 @@ class StringAddStub: public CodeStub {
void Generate(MacroAssembler* masm);
- void GenerateCopyCharacters(MacroAssembler* masm,
- Register desc,
- Register src,
- Register count,
- bool ascii);
-
// Should the stub check whether arguments are strings?
bool string_check_;
};
+class SubStringStub: public StringStubBase {
+ public:
+ SubStringStub() {}
+
+ private:
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
class StringCompareStub: public CodeStub {
public:
explicit StringCompareStub() {}
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 0b43e766..ce3aae8a 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -114,6 +114,10 @@ static ByteMnemonic zero_operands_instr[] = {
{ 0x9E, UNSET_OP_ORDER, "sahf" },
{ 0x99, UNSET_OP_ORDER, "cdq" },
{ 0x9B, UNSET_OP_ORDER, "fwait" },
+ { 0xA4, UNSET_OP_ORDER, "movs" },
+ { 0xA5, UNSET_OP_ORDER, "movs" },
+ { 0xA6, UNSET_OP_ORDER, "cmps" },
+ { 0xA7, UNSET_OP_ORDER, "cmps" },
{ -1, UNSET_OP_ORDER, "" }
};
@@ -157,6 +161,16 @@ enum InstructionType {
};
+enum Prefixes {
+ ESCAPE_PREFIX = 0x0F,
+ OPERAND_SIZE_OVERRIDE_PREFIX = 0x66,
+ ADDRESS_SIZE_OVERRIDE_PREFIX = 0x67,
+ REPNE_PREFIX = 0xF2,
+ REP_PREFIX = 0xF3,
+ REPEQ_PREFIX = REP_PREFIX
+};
+
+
struct InstructionDesc {
const char* mnem;
InstructionType type;
@@ -1128,12 +1142,12 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
// Scan for prefixes.
while (true) {
current = *data;
- if (current == 0x66) { // Group 3 prefix.
+ if (current == OPERAND_SIZE_OVERRIDE_PREFIX) { // Group 3 prefix.
operand_size_ = current;
} else if ((current & 0xF0) == 0x40) { // REX prefix.
setRex(current);
if (rex_w()) AppendToBuffer("REX.W ");
- } else if ((current & 0xFE) == 0xF2) { // Group 1 prefix.
+ } else if ((current & 0xFE) == 0xF2) { // Group 1 prefix (0xF2 or 0xF3).
group_1_prefix_ = current;
} else { // Not a prefix - an opcode.
break;
@@ -1145,7 +1159,17 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
byte_size_operand_ = idesc.byte_size_operation;
switch (idesc.type) {
case ZERO_OPERANDS_INSTR:
- AppendToBuffer(idesc.mnem);
+ if (current >= 0xA4 && current <= 0xA7) {
+ // String move or compare operations.
+ if (group_1_prefix_ == REP_PREFIX) {
+ // REP.
+ AppendToBuffer("rep ");
+ }
+ if (rex_w()) AppendToBuffer("REX.W ");
+ AppendToBuffer("%s%c", idesc.mnem, operand_size_code());
+ } else {
+ AppendToBuffer("%s", idesc.mnem);
+ }
data++;
break;
diff --git a/src/x64/fast-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 0f284332..37551092 100644
--- a/src/x64/fast-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -30,7 +30,7 @@
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
-#include "fast-codegen.h"
+#include "full-codegen.h"
#include "parser.h"
namespace v8 {
@@ -51,7 +51,7 @@ namespace internal {
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-x64.h for its layout.
-void FastCodeGenerator::Generate(FunctionLiteral* fun) {
+void FullCodeGenerator::Generate(FunctionLiteral* fun) {
function_ = fun;
SetFunctionPosition(fun);
@@ -161,7 +161,7 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun) {
}
-void FastCodeGenerator::EmitReturnSequence(int position) {
+void FullCodeGenerator::EmitReturnSequence(int position) {
Comment cmnt(masm_, "[ Return sequence");
if (return_label_.is_bound()) {
__ jmp(&return_label_);
@@ -200,7 +200,7 @@ void FastCodeGenerator::EmitReturnSequence(int position) {
}
-void FastCodeGenerator::Apply(Expression::Context context, Register reg) {
+void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -243,7 +243,7 @@ void FastCodeGenerator::Apply(Expression::Context context, Register reg) {
}
-void FastCodeGenerator::Apply(Expression::Context context, Slot* slot) {
+void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -285,7 +285,7 @@ void FastCodeGenerator::Apply(Expression::Context context, Slot* slot) {
}
-void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
+void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -324,7 +324,7 @@ void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
}
-void FastCodeGenerator::ApplyTOS(Expression::Context context) {
+void FullCodeGenerator::ApplyTOS(Expression::Context context) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -364,7 +364,7 @@ void FastCodeGenerator::ApplyTOS(Expression::Context context) {
}
-void FastCodeGenerator::DropAndApply(int count,
+void FullCodeGenerator::DropAndApply(int count,
Expression::Context context,
Register reg) {
ASSERT(count > 0);
@@ -415,7 +415,7 @@ void FastCodeGenerator::DropAndApply(int count,
}
-void FastCodeGenerator::Apply(Expression::Context context,
+void FullCodeGenerator::Apply(Expression::Context context,
Label* materialize_true,
Label* materialize_false) {
switch (context) {
@@ -480,7 +480,7 @@ void FastCodeGenerator::Apply(Expression::Context context,
}
-void FastCodeGenerator::DoTest(Expression::Context context) {
+void FullCodeGenerator::DoTest(Expression::Context context) {
// The value to test is in the accumulator. If the value might be needed
// on the stack (value/test and test/value contexts with a stack location
// desired), then the value is already duplicated on the stack.
@@ -614,7 +614,7 @@ void FastCodeGenerator::DoTest(Expression::Context context) {
}
-MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
+MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
switch (slot->type()) {
case Slot::PARAMETER:
case Slot::LOCAL:
@@ -633,13 +633,13 @@ MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
}
-void FastCodeGenerator::Move(Register destination, Slot* source) {
+void FullCodeGenerator::Move(Register destination, Slot* source) {
MemOperand location = EmitSlotSearch(source, destination);
__ movq(destination, location);
}
-void FastCodeGenerator::Move(Slot* dst,
+void FullCodeGenerator::Move(Slot* dst,
Register src,
Register scratch1,
Register scratch2) {
@@ -655,7 +655,7 @@ void FastCodeGenerator::Move(Slot* dst,
}
-void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
Comment cmnt(masm_, "[ Declaration");
Variable* var = decl->proxy()->var();
ASSERT(var != NULL); // Must have been resolved.
@@ -754,7 +754,7 @@ void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
}
-void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ push(rsi); // The context is the first argument.
__ Push(pairs);
@@ -764,7 +764,7 @@ void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
-void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
@@ -782,17 +782,21 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
}
-void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
EmitVariableLoad(expr->var(), context_);
}
-void FastCodeGenerator::EmitVariableLoad(Variable* var,
+void FullCodeGenerator::EmitVariableLoad(Variable* var,
Expression::Context context) {
- Expression* rewrite = var->rewrite();
- if (rewrite == NULL) {
- ASSERT(var->is_global());
+ // Four cases: non-this global variables, lookup slots, all other
+ // types of slots, and parameters that rewrite to explicit property
+ // accesses on the arguments object.
+ Slot* slot = var->slot();
+ Property* property = var->AsProperty();
+
+ if (var->is_global() && !var->is_this()) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in rcx and the global
// object on the stack.
@@ -805,34 +809,24 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
// is no test rax instruction here.
__ nop();
DropAndApply(1, context, rax);
- } else if (rewrite->AsSlot() != NULL) {
- Slot* slot = rewrite->AsSlot();
- if (FLAG_debug_code) {
- switch (slot->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL: {
- Comment cmnt(masm_, "Stack slot");
- break;
- }
- case Slot::CONTEXT: {
- Comment cmnt(masm_, "Context slot");
- break;
- }
- case Slot::LOOKUP:
- UNIMPLEMENTED();
- break;
- }
- }
+
+ } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ Comment cmnt(masm_, "Lookup slot");
+ __ push(rsi); // Context.
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ Apply(context, rax);
+
+ } else if (slot != NULL) {
+ Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
+ ? "Context slot"
+ : "Stack slot");
Apply(context, slot);
+
} else {
- Comment cmnt(masm_, "Variable rewritten to property");
- // A variable has been rewritten into an explicit access to an object
- // property.
- Property* property = rewrite->AsProperty();
+ Comment cmnt(masm_, "Rewritten parameter");
ASSERT_NOT_NULL(property);
-
- // The only property expressions that can occur are of the form
- // "slot[literal]".
+ // Rewritten parameter accesses are of the form "slot[literal]".
// Assert that the object is in a slot.
Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
@@ -864,7 +858,7 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
}
-void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
Label done;
// Registers will be used as follows:
@@ -890,7 +884,7 @@ void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
}
-void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
__ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
@@ -960,7 +954,7 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
-void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
__ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
@@ -1010,7 +1004,7 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
-void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
__ Move(rcx, key->handle());
@@ -1020,7 +1014,7 @@ void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
}
-void FastCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
@@ -1028,7 +1022,7 @@ void FastCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
}
-void FastCodeGenerator::EmitBinaryOp(Token::Value op,
+void FullCodeGenerator::EmitBinaryOp(Token::Value op,
Expression::Context context) {
__ push(result_register());
GenericBinaryOpStub stub(op,
@@ -1039,11 +1033,16 @@ void FastCodeGenerator::EmitBinaryOp(Token::Value op,
}
-void FastCodeGenerator::EmitVariableAssignment(Variable* var,
+void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Expression::Context context) {
+ // Three main cases: non-this global variables, lookup slots, and
+ // all other types of slots. Left-hand-side parameters that rewrite
+ // to explicit property accesses do not reach here.
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
+ Slot* slot = var->slot();
if (var->is_global()) {
+ ASSERT(!var->is_this());
// Assignment to a global variable. Use inline caching for the
// assignment. Right-hand-side value is passed in rax, variable name in
// rcx, and the global object on the stack.
@@ -1054,8 +1053,14 @@ void FastCodeGenerator::EmitVariableAssignment(Variable* var,
// Overwrite the global object on the stack with the result if needed.
DropAndApply(1, context, rax);
+ } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ __ push(result_register()); // Value.
+ __ push(rsi); // Context.
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kStoreContextSlot, 3);
+ Apply(context, rax);
+
} else if (var->slot() != NULL) {
- Slot* slot = var->slot();
switch (slot->type()) {
case Slot::LOCAL:
case Slot::PARAMETER:
@@ -1078,6 +1083,7 @@ void FastCodeGenerator::EmitVariableAssignment(Variable* var,
break;
}
Apply(context, result_register());
+
} else {
// Variables rewritten as properties are not treated as variables in
// assignments.
@@ -1086,7 +1092,7 @@ void FastCodeGenerator::EmitVariableAssignment(Variable* var,
}
-void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a named store IC.
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
@@ -1121,7 +1127,7 @@ void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
}
-void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
// If the assignment starts a block of assignments to the same object,
@@ -1157,7 +1163,7 @@ void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
}
-void FastCodeGenerator::VisitProperty(Property* expr) {
+void FullCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
Expression* key = expr->key();
@@ -1177,7 +1183,7 @@ void FastCodeGenerator::VisitProperty(Property* expr) {
}
-void FastCodeGenerator::EmitCallWithIC(Call* expr,
+void FullCodeGenerator::EmitCallWithIC(Call* expr,
Handle<Object> ignored,
RelocInfo::Mode mode) {
// Code common for calls using the IC.
@@ -1200,7 +1206,7 @@ void FastCodeGenerator::EmitCallWithIC(Call* expr,
}
-void FastCodeGenerator::EmitCallWithStub(Call* expr) {
+void FullCodeGenerator::EmitCallWithStub(Call* expr) {
// Code common for calls using the call stub.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -1218,7 +1224,7 @@ void FastCodeGenerator::EmitCallWithStub(Call* expr) {
}
-void FastCodeGenerator::VisitCall(Call* expr) {
+void FullCodeGenerator::VisitCall(Call* expr) {
Comment cmnt(masm_, "[ Call");
Expression* fun = expr->expression();
Variable* var = fun->AsVariableProxy()->AsVariable();
@@ -1280,7 +1286,7 @@ void FastCodeGenerator::VisitCall(Call* expr) {
if (lit != NULL &&
lit->name()->Equals(Heap::empty_string()) &&
loop_depth() == 0) {
- lit->set_try_fast_codegen(true);
+ lit->set_try_full_codegen(true);
}
VisitForValue(fun, kStack);
// Load global receiver object.
@@ -1292,7 +1298,7 @@ void FastCodeGenerator::VisitCall(Call* expr) {
}
-void FastCodeGenerator::VisitCallNew(CallNew* expr) {
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
Comment cmnt(masm_, "[ CallNew");
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
@@ -1327,7 +1333,7 @@ void FastCodeGenerator::VisitCallNew(CallNew* expr) {
}
-void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
@@ -1360,7 +1366,7 @@ void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
}
-void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
switch (expr->op()) {
case Token::VOID: {
Comment cmnt(masm_, "[ UnaryOperation (VOID)");
@@ -1464,13 +1470,27 @@ void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
+ case Token::ADD: {
+ Comment cmt(masm_, "[ UnaryOperation (ADD)");
+ VisitForValue(expr->expression(), kAccumulator);
+ Label no_conversion;
+ Condition is_smi;
+ is_smi = masm_->CheckSmi(result_register());
+ __ j(is_smi, &no_conversion);
+ __ push(result_register());
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ __ bind(&no_conversion);
+ Apply(context_, result_register());
+ break;
+ }
+
default:
UNREACHABLE();
}
}
-void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
// Expression can only be a property, a global or a (parameter or local)
@@ -1489,7 +1509,7 @@ void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == VARIABLE) {
ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
Location saved_location = location_;
- location_ = kStack;
+ location_ = kAccumulator;
EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
Expression::kValue);
location_ = saved_location;
@@ -1505,11 +1525,16 @@ void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
VisitForValue(prop->key(), kStack);
EmitKeyedPropertyLoad(prop);
}
- __ push(rax);
}
- // Convert to number.
+ // Call ToNumber only if operand is not a smi.
+ Label no_conversion;
+ Condition is_smi;
+ is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, &no_conversion);
+ __ push(rax);
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ __ bind(&no_conversion);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -1541,6 +1566,27 @@ void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
+ // Inline smi case if we are in a loop.
+ Label stub_call, done;
+ if (loop_depth() > 0) {
+ if (expr->op() == Token::INC) {
+ __ SmiAddConstant(rax, rax, Smi::FromInt(1));
+ } else {
+ __ SmiSubConstant(rax, rax, Smi::FromInt(1));
+ }
+ __ j(overflow, &stub_call);
+ // We could eliminate this smi check if we split the code at
+ // the first smi check before calling ToNumber.
+ is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, &done);
+ __ bind(&stub_call);
+ // Call stub. Undo operation first.
+ if (expr->op() == Token::INC) {
+ __ SmiSubConstant(rax, rax, Smi::FromInt(1));
+ } else {
+ __ SmiAddConstant(rax, rax, Smi::FromInt(1));
+ }
+ }
// Call stub for +1/-1.
__ push(rax);
__ Push(Smi::FromInt(1));
@@ -1548,6 +1594,7 @@ void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
NO_OVERWRITE,
NO_GENERIC_BINARY_FLAGS);
__ CallStub(&stub);
+ __ bind(&done);
// Store the value returned in rax.
switch (assign_type) {
@@ -1601,7 +1648,7 @@ void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
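
The rewritten count operation applies two guards: ToNumber is invoked only when the operand is not already a smi, and inside a loop the ++/-- itself is inlined, reaching GenericBinaryOpStub only when the smi arithmetic overflows (after undoing the speculative add). A rough stand-alone model of that overflow check, editorial and assuming a 32-bit smi payload:

    #include <cstdint>

    // Returns true and updates *value when the increment/decrement stays within
    // the payload range; returns false where the generated code undoes the
    // operation and calls the generic stub instead.
    bool TrySmiCount(int32_t* value, int32_t delta /* +1 or -1 */) {
      int64_t result = static_cast<int64_t>(*value) + delta;
      if (result < INT32_MIN || result > INT32_MAX) return false;
      *value = static_cast<int32_t>(result);
      return true;
    }
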
-void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
Comment cmnt(masm_, "[ BinaryOperation");
switch (expr->op()) {
case Token::COMMA:
@@ -1636,7 +1683,7 @@ void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
}
-void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
// Always perform the comparison for its control flow. Pack the result
@@ -1748,25 +1795,25 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
-void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
Apply(context_, rax);
}
-Register FastCodeGenerator::result_register() { return rax; }
+Register FullCodeGenerator::result_register() { return rax; }
-Register FastCodeGenerator::context_register() { return rsi; }
+Register FullCodeGenerator::context_register() { return rsi; }
-void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT(IsAligned(frame_offset, kPointerSize));
__ movq(Operand(rbp, frame_offset), value);
}
-void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
__ movq(dst, CodeGenerator::ContextOperand(rsi, context_index));
}
@@ -1775,7 +1822,7 @@ void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
// Non-local control flow support.
-void FastCodeGenerator::EnterFinallyBlock() {
+void FullCodeGenerator::EnterFinallyBlock() {
ASSERT(!result_register().is(rdx));
ASSERT(!result_register().is(rcx));
// Cook return address on top of stack (smi encoded Code* delta)
@@ -1789,7 +1836,7 @@ void FastCodeGenerator::EnterFinallyBlock() {
}
-void FastCodeGenerator::ExitFinallyBlock() {
+void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(rdx));
ASSERT(!result_register().is(rcx));
// Restore result register from stack.
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 457ece58..e293247d 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -271,11 +271,10 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ CmpObjectType(rcx, JS_OBJECT_TYPE, rdx);
__ j(below, &slow);
- // Check that the receiver does not require access checks. We need
- // to check this explicitly since this generic stub does not perform
- // map checks. The map is already in rdx.
+
+ // Check bit field.
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
+ Immediate(kSlowCaseBitFieldMask));
__ j(not_zero, &slow);
// Check that the key is a smi.
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 65a408b4..b06b8c8a 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -581,6 +581,20 @@ Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
}
+Condition MacroAssembler::CheckBothPositiveSmi(Register first,
+ Register second) {
+ if (first.is(second)) {
+ return CheckPositiveSmi(first);
+ }
+ movl(kScratchRegister, first);
+ orl(kScratchRegister, second);
+ rol(kScratchRegister, Immediate(1));
+ testl(kScratchRegister, Immediate(0x03));
+ return zero;
+}
+
+
+
Condition MacroAssembler::CheckEitherSmi(Register first, Register second) {
if (first.is(second)) {
return CheckSmi(first);
@@ -660,7 +674,17 @@ void MacroAssembler::SmiSub(Register dst,
Register src2,
Label* on_not_smi_result) {
ASSERT(!dst.is(src2));
- if (dst.is(src1)) {
+ if (on_not_smi_result == NULL) {
+ // No overflow checking. Use only when it's known that
+ // overflowing is impossible (e.g., subtracting two positive smis).
+ if (dst.is(src1)) {
+ subq(dst, src2);
+ } else {
+ movq(dst, src1);
+ subq(dst, src2);
+ }
+ Assert(no_overflow, "Smi subtraction overflow");
+ } else if (dst.is(src1)) {
subq(dst, src2);
Label smi_result;
j(no_overflow, &smi_result);
@@ -1292,6 +1316,14 @@ void MacroAssembler::JumpIfNotBothSmi(Register src1, Register src2,
}
+void MacroAssembler::JumpIfNotBothPositiveSmi(Register src1, Register src2,
+ Label* on_not_both_smi) {
+ Condition both_smi = CheckBothPositiveSmi(src1, src2);
+ j(NegateCondition(both_smi), on_not_both_smi);
+}
+
+
+
void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
Register second_object,
Register scratch1,
@@ -1311,8 +1343,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
ASSERT(kNotStringTag != 0);
const int kFlatAsciiStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringBits =
- kNotStringTag | kSeqStringTag | kAsciiStringTag;
+ const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
andl(scratch1, Immediate(kFlatAsciiStringMask));
andl(scratch2, Immediate(kFlatAsciiStringMask));
@@ -1320,7 +1351,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
cmpl(scratch1,
- Immediate(kFlatAsciiStringBits + (kFlatAsciiStringBits << 3)));
+ Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
j(not_equal, on_fail);
}
@@ -1518,6 +1549,17 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
}
+Condition MacroAssembler::IsObjectStringType(Register heap_object,
+ Register map,
+ Register instance_type) {
+ movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movzxbq(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+ ASSERT(kNotStringTag != 0);
+ testb(instance_type, Immediate(kIsNotStringMask));
+ return zero;
+}
+
+
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Label* miss) {
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index ce2848c0..8d4a8f2e 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -207,6 +207,9 @@ class MacroAssembler: public Assembler {
// Are both values tagged smis.
Condition CheckBothSmi(Register first, Register second);
+ // Are both values tagged positive smis.
+ Condition CheckBothPositiveSmi(Register first, Register second);
+
// Are either value a tagged smi.
Condition CheckEitherSmi(Register first, Register second);
@@ -248,6 +251,10 @@ class MacroAssembler: public Assembler {
// Jump if either or both register are not smi values.
void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);
+ // Jump if either or both registers are not positive smi values.
+ void JumpIfNotBothPositiveSmi(Register src1, Register src2,
+ Label* on_not_both_smi);
+
// Operations on tagged smi values.
// Smis represent a subset of integers. The subset is always equivalent to
@@ -452,6 +459,15 @@ class MacroAssembler: public Assembler {
// Always use unsigned comparisons: above and below, not less and greater.
void CmpInstanceType(Register map, InstanceType type);
+ // Check if the object in register heap_object is a string. Afterwards the
+ // register map contains the object map and the register instance_type
+ // contains the instance_type. The registers map and instance_type can be the
+ // same in which case it contains the instance type afterwards. Either of the
+ // registers map and instance_type can be the same as heap_object.
+ Condition IsObjectStringType(Register heap_object,
+ Register map,
+ Register instance_type);
+
// FCmp is similar to integer cmp, but requires unsigned
// jcc instructions (je, ja, jae, jb, jbe, je, and jz).
void FCmp();
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 75bbf3e2..6142ce3c 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -71,8 +71,6 @@ namespace internal {
* through the runtime system)
* - stack_area_base (High end of the memory area to use as
* backtracking stack)
- * - at_start (if 1, we are starting at the start of the
- * string, otherwise 0)
* - int* capture_array (int[num_saved_registers_], for output).
* - end of input (Address of end of string)
* - start of input (Address of first character in string)
@@ -82,6 +80,8 @@ namespace internal {
* - backup of callee save registers (rbx, possibly rsi and rdi).
* - Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a non-position.
+ * - At start of string (if 1, we are starting at the start of the
+ * string, otherwise 0)
* - register 0 rbp[-n] (Only positions must be stored in the first
* - register 1 rbp[-n-8] num_saved_registers_ registers)
* - ...
@@ -661,7 +661,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
ASSERT_EQ(kInputStart, -3 * kPointerSize);
ASSERT_EQ(kInputEnd, -4 * kPointerSize);
ASSERT_EQ(kRegisterOutput, -5 * kPointerSize);
- ASSERT_EQ(kAtStart, -6 * kPointerSize);
+ ASSERT_EQ(kStackHighEnd, -6 * kPointerSize);
__ push(rdi);
__ push(rsi);
__ push(rdx);
@@ -672,6 +672,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ push(rbx); // Callee-save
#endif
__ push(Immediate(0)); // Make room for "input start - 1" constant.
+ __ push(Immediate(0)); // Make room for "at start" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -716,6 +717,15 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Store this value in a local variable, for use when clearing
// position registers.
__ movq(Operand(rbp, kInputStartMinusOne), rax);
+
+ // Determine whether the start index is zero, that is, whether we are at the
+ // start of the string, and store that value in a local variable.
+ __ movq(rbx, Operand(rbp, kStartIndex));
+ __ xor_(rcx, rcx); // setcc only operates on cl (lower byte of rcx).
+ __ testq(rbx, rbx);
+ __ setcc(zero, rcx); // 1 if 0 (start of string), 0 if positive.
+ __ movq(Operand(rbp, kAtStart), rcx);
+
if (num_saved_registers_ > 0) {
// Fill saved registers with initial value = start offset - 1
// Fill in stack push order, to avoid accessing across an unwritten
diff --git a/src/x64/regexp-macro-assembler-x64.h b/src/x64/regexp-macro-assembler-x64.h
index 694cba00..c17f2b87 100644
--- a/src/x64/regexp-macro-assembler-x64.h
+++ b/src/x64/regexp-macro-assembler-x64.h
@@ -138,9 +138,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
static const int kInputStart = kStartIndex + kPointerSize;
static const int kInputEnd = kInputStart + kPointerSize;
static const int kRegisterOutput = kInputEnd + kPointerSize;
- // AtStart is passed as 32 bit int (values 0 or 1).
- static const int kAtStart = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kAtStart + kPointerSize;
+ static const int kStackHighEnd = kRegisterOutput + kPointerSize;
// DirectCall is passed as 32 bit int (values 0 or 1).
static const int kDirectCall = kStackHighEnd + kPointerSize;
#else
@@ -152,9 +150,8 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
static const int kInputStart = kStartIndex - kPointerSize;
static const int kInputEnd = kInputStart - kPointerSize;
static const int kRegisterOutput = kInputEnd - kPointerSize;
- static const int kAtStart = kRegisterOutput - kPointerSize;
- static const int kStackHighEnd = kFrameAlign;
- static const int kDirectCall = kStackHighEnd + kPointerSize;
+ static const int kStackHighEnd = kRegisterOutput - kPointerSize;
+ static const int kDirectCall = kFrameAlign;
#endif
#ifdef _WIN64
@@ -168,7 +165,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// AMD64 Calling Convention has only one callee-save register that
// we use. We push this after the frame pointer (and after the
// parameters).
- static const int kBackup_rbx = kAtStart - kPointerSize;
+ static const int kBackup_rbx = kStackHighEnd - kPointerSize;
static const int kLastCalleeSaveRegister = kBackup_rbx;
#endif
@@ -176,9 +173,10 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// the frame in GetCode.
static const int kInputStartMinusOne =
kLastCalleeSaveRegister - kPointerSize;
+ static const int kAtStart = kInputStartMinusOne - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kAtStart - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
diff --git a/src/x64/simulator-x64.h b/src/x64/simulator-x64.h
index 015ba131..a0fc3cbf 100644
--- a/src/x64/simulator-x64.h
+++ b/src/x64/simulator-x64.h
@@ -54,8 +54,8 @@ class SimulatorStack : public v8::internal::AllStatic {
// Call the generated regexp code directly. The entry function pointer should
// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
- entry(p0, p1, p2, p3, p4, p5, p6, p7)
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+ entry(p0, p1, p2, p3, p4, p5, p6)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)