Diffstat (limited to 'src/x64')
-rw-r--r--  src/x64/assembler-x64.cc     |  54
-rw-r--r--  src/x64/assembler-x64.h      |  15
-rw-r--r--  src/x64/codegen-x64.cc       | 113
-rw-r--r--  src/x64/codegen-x64.h        |   2
-rw-r--r--  src/x64/full-codegen-x64.cc  | 174
-rw-r--r--  src/x64/ic-x64.cc            |   1
-rw-r--r--  src/x64/stub-cache-x64.cc    |  10
7 files changed, 141 insertions(+), 228 deletions(-)
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index bf5ee5bb..caed7c8a 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -296,7 +296,7 @@ static void InitCoverageLog();
byte* Assembler::spare_buffer_ = NULL;
Assembler::Assembler(void* buffer, int buffer_size)
- : code_targets_(100) {
+ : code_targets_(100), positions_recorder_(this) {
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
@@ -337,10 +337,7 @@ Assembler::Assembler(void* buffer, int buffer_size)
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
last_pc_ = NULL;
- current_statement_position_ = RelocInfo::kNoPosition;
- current_position_ = RelocInfo::kNoPosition;
- written_statement_position_ = current_statement_position_;
- written_position_ = current_position_;
+
#ifdef GENERATED_CODE_COVERAGE
InitCoverageLog();
#endif
@@ -845,7 +842,7 @@ void Assembler::call(Label* L) {
void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
- WriteRecordedPositions();
+ positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
last_pc_ = pc_;
// 1110 1000 #32-bit disp.
@@ -2935,14 +2932,14 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
void Assembler::RecordJSReturn() {
- WriteRecordedPositions();
+ positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::JS_RETURN);
}
void Assembler::RecordDebugBreakSlot() {
- WriteRecordedPositions();
+ positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}
@@ -2956,47 +2953,6 @@ void Assembler::RecordComment(const char* msg) {
}
-void Assembler::RecordPosition(int pos) {
- ASSERT(pos != RelocInfo::kNoPosition);
- ASSERT(pos >= 0);
- current_position_ = pos;
-}
-
-
-void Assembler::RecordStatementPosition(int pos) {
- ASSERT(pos != RelocInfo::kNoPosition);
- ASSERT(pos >= 0);
- current_statement_position_ = pos;
-}
-
-
-bool Assembler::WriteRecordedPositions() {
- bool written = false;
-
- // Write the statement position if it is different from what was written last
- // time.
- if (current_statement_position_ != written_statement_position_) {
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
- written_statement_position_ = current_statement_position_;
- written = true;
- }
-
- // Write the position if it is different from what was written last time and
- // also different from the written statement position.
- if (current_position_ != written_position_ &&
- current_position_ != written_statement_position_) {
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::POSITION, current_position_);
- written_position_ = current_position_;
- written = true;
- }
-
- // Return whether something was written.
- return written;
-}
-
-
const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
1 << RelocInfo::INTERNAL_REFERENCE;
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index bbc10106..c7f76322 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -1174,13 +1174,9 @@ class Assembler : public Malloced {
// Use --debug_code to enable.
void RecordComment(const char* msg);
- void RecordPosition(int pos);
- void RecordStatementPosition(int pos);
- bool WriteRecordedPositions();
-
int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
- int current_statement_position() const { return current_statement_position_; }
- int current_position() const { return current_position_; }
+
+ PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting
@@ -1404,11 +1400,8 @@ class Assembler : public Malloced {
// push-pop elimination
byte* last_pc_;
- // source position information
- int current_statement_position_;
- int current_position_;
- int written_statement_position_;
- int written_position_;
+ PositionsRecorder positions_recorder_;
+ friend class PositionsRecorder;
};
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 9e6ef3b5..e0e40950 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -2956,7 +2956,7 @@ void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
CodeForStatementPosition(node);
Load(node->expression());
Result return_value = frame_->Pop();
- masm()->WriteRecordedPositions();
+ masm()->positions_recorder()->WriteRecordedPositions();
if (function_return_is_shadowed_) {
function_return_.Jump(&return_value);
} else {
@@ -4866,6 +4866,11 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
}
frame_->Push(&clone);
+ // Mark all computed expressions that are bound to a key that
+ // is shadowed by a later occurrence of the same key. For the
+ // marked expressions, no store code is emitted.
+ node->CalculateEmitStore();
+
for (int i = 0; i < node->properties()->length(); i++) {
ObjectLiteral::Property* property = node->properties()->at(i);
switch (property->kind()) {
@@ -4880,13 +4885,17 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
// Duplicate the object as the IC receiver.
frame_->Dup();
Load(property->value());
- Result ignored =
- frame_->CallStoreIC(Handle<String>::cast(key), false);
- // A test rax instruction following the store IC call would
- // indicate the presence of an inlined version of the
- // store. Add a nop to indicate that there is no such
- // inlined version.
- __ nop();
+ if (property->emit_store()) {
+ Result ignored =
+ frame_->CallStoreIC(Handle<String>::cast(key), false);
+ // A test rax instruction following the store IC call would
+ // indicate the presence of an inlined version of the
+ // store. Add a nop to indicate that there is no such
+ // inlined version.
+ __ nop();
+ } else {
+ frame_->Drop(2);
+ }
break;
}
// Fall through
@@ -4896,8 +4905,12 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
frame_->Dup();
Load(property->key());
Load(property->value());
- Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3);
- // Ignore the result.
+ if (property->emit_store()) {
+ // Ignore the result.
+ Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3);
+ } else {
+ frame_->Drop(3);
+ }
break;
}
case ObjectLiteral::Property::SETTER: {
@@ -6551,86 +6564,6 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
}
-void CodeGenerator::GenerateRegExpCloneResult(ZoneList<Expression*>* args) {
- ASSERT_EQ(1, args->length());
-
- Load(args->at(0));
- Result object_result = frame_->Pop();
- object_result.ToRegister(rax);
- object_result.Unuse();
- {
- VirtualFrame::SpilledScope spilled_scope;
-
- Label done;
- __ JumpIfSmi(rax, &done);
-
- // Load JSRegExpResult map into rdx.
- // Arguments to this function should be results of calling RegExp exec,
- // which is either an unmodified JSRegExpResult or null. Anything not having
- // the unmodified JSRegExpResult map is returned unmodified.
- // This also ensures that elements are fast.
-
- __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_INDEX));
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
- __ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
- __ cmpq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ j(not_equal, &done);
-
- if (FLAG_debug_code) {
- // Check that object really has empty properties array, as the map
- // should guarantee.
- __ CompareRoot(FieldOperand(rax, JSObject::kPropertiesOffset),
- Heap::kEmptyFixedArrayRootIndex);
- __ Check(equal, "JSRegExpResult: default map but non-empty properties.");
- }
-
- DeferredAllocateInNewSpace* allocate_fallback =
- new DeferredAllocateInNewSpace(JSRegExpResult::kSize,
- rbx,
- rdx.bit() | rax.bit());
-
- // All set, copy the contents to a new object.
- __ AllocateInNewSpace(JSRegExpResult::kSize,
- rbx,
- no_reg,
- no_reg,
- allocate_fallback->entry_label(),
- TAG_OBJECT);
- __ bind(allocate_fallback->exit_label());
-
- STATIC_ASSERT(JSRegExpResult::kSize % (2 * kPointerSize) == 0);
- // There is an even number of fields, so unroll the loop once
- // for efficiency.
- for (int i = 0; i < JSRegExpResult::kSize; i += 2 * kPointerSize) {
- STATIC_ASSERT(JSObject::kMapOffset % (2 * kPointerSize) == 0);
- if (i != JSObject::kMapOffset) {
- // The map was already loaded into edx.
- __ movq(rdx, FieldOperand(rax, i));
- }
- __ movq(rcx, FieldOperand(rax, i + kPointerSize));
-
- STATIC_ASSERT(JSObject::kElementsOffset % (2 * kPointerSize) == 0);
- if (i == JSObject::kElementsOffset) {
- // If the elements array isn't empty, make it copy-on-write
- // before copying it.
- Label empty;
- __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
- __ j(equal, &empty);
- __ LoadRoot(kScratchRegister, Heap::kFixedCOWArrayMapRootIndex);
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), kScratchRegister);
- __ bind(&empty);
- }
- __ movq(FieldOperand(rbx, i), rdx);
- __ movq(FieldOperand(rbx, i + kPointerSize), rcx);
- }
- __ movq(rax, rbx);
-
- __ bind(&done);
- }
- frame_->Push(rax);
-}
-
-
class DeferredSearchCache: public DeferredCode {
public:
DeferredSearchCache(Register dst,
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 79573245..1853c832 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -656,8 +656,6 @@ class CodeGenerator: public AstVisitor {
void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
- void GenerateRegExpCloneResult(ZoneList<Expression*>* args);
-
// Support for fast native caches.
void GenerateGetFromCache(ZoneList<Expression*>* args);
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 32d62426..00ea6845 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1158,6 +1158,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// result_saved is false the result is in rax.
bool result_saved = false;
+ // Mark all computed expressions that are bound to a key that
+ // is shadowed by a later occurrence of the same key. For the
+ // marked expressions, no store code is emitted.
+ expr->CalculateEmitStore();
+
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
@@ -1179,8 +1184,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ Move(rcx, key->handle());
__ movq(rdx, Operand(rsp, 0));
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ if (property->emit_store()) {
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ }
break;
}
// Fall through.
@@ -1188,7 +1195,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(key);
VisitForStackValue(value);
- __ CallRuntime(Runtime::kSetProperty, 3);
+ if (property->emit_store()) {
+ __ CallRuntime(Runtime::kSetProperty, 3);
+ } else {
+ __ Drop(3);
+ }
break;
case ObjectLiteral::Property::SETTER:
case ObjectLiteral::Property::GETTER:
@@ -1706,12 +1717,14 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
// Code common for calls using the IC.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
+ { PreserveStatementPositionScope scope(masm()->positions_recorder());
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ __ Move(rcx, name);
}
- __ Move(rcx, name);
// Record source position for debugger.
- SetSourcePosition(expr->position());
+ SetSourcePosition(expr->position(), FORCED_POSITION);
// Call the IC initialization code.
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
@@ -1729,13 +1742,15 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
// Code common for calls using the IC.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
+ { PreserveStatementPositionScope scope(masm()->positions_recorder());
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ VisitForAccumulatorValue(key);
+ __ movq(rcx, rax);
}
- VisitForAccumulatorValue(key);
- __ movq(rcx, rax);
// Record source position for debugger.
- SetSourcePosition(expr->position());
+ SetSourcePosition(expr->position(), FORCED_POSITION);
// Call the IC initialization code.
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = CodeGenerator::ComputeKeyedCallInitialize(arg_count,
@@ -1751,11 +1766,13 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
// Code common for calls using the call stub.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
+ { PreserveStatementPositionScope scope(masm()->positions_recorder());
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
}
// Record source position for debugger.
- SetSourcePosition(expr->position());
+ SetSourcePosition(expr->position(), FORCED_POSITION);
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
__ CallStub(&stub);
@@ -1776,37 +1793,38 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
// arguments.
- VisitForStackValue(fun);
- __ PushRoot(Heap::kUndefinedValueRootIndex); // Reserved receiver slot.
-
- // Push the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ { PreserveStatementPositionScope pos_scope(masm()->positions_recorder());
+ VisitForStackValue(fun);
+ __ PushRoot(Heap::kUndefinedValueRootIndex); // Reserved receiver slot.
- // Push copy of the function - found below the arguments.
- __ push(Operand(rsp, (arg_count + 1) * kPointerSize));
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- // Push copy of the first argument or undefined if it doesn't exist.
- if (arg_count > 0) {
- __ push(Operand(rsp, arg_count * kPointerSize));
- } else {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- }
+ // Push copy of the function - found below the arguments.
+ __ push(Operand(rsp, (arg_count + 1) * kPointerSize));
- // Push the receiver of the enclosing function and do runtime call.
- __ push(Operand(rbp, (2 + scope()->num_parameters()) * kPointerSize));
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+ // Push copy of the first argument or undefined if it doesn't exist.
+ if (arg_count > 0) {
+ __ push(Operand(rsp, arg_count * kPointerSize));
+ } else {
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ }
- // The runtime call returns a pair of values in rax (function) and
- // rdx (receiver). Touch up the stack with the right values.
- __ movq(Operand(rsp, (arg_count + 0) * kPointerSize), rdx);
- __ movq(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
+ // Push the receiver of the enclosing function and do runtime call.
+ __ push(Operand(rbp, (2 + scope()->num_parameters()) * kPointerSize));
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+ // The runtime call returns a pair of values in rax (function) and
+ // rdx (receiver). Touch up the stack with the right values.
+ __ movq(Operand(rsp, (arg_count + 0) * kPointerSize), rdx);
+ __ movq(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
+ }
// Record source position for debugger.
- SetSourcePosition(expr->position());
+ SetSourcePosition(expr->position(), FORCED_POSITION);
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
__ CallStub(&stub);
@@ -1823,35 +1841,37 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Call to a lookup slot (dynamically introduced variable).
Label slow, done;
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
- NOT_INSIDE_TYPEOF,
- &slow,
- &done);
-
- __ bind(&slow);
- // Call the runtime to find the function to call (returned in rax)
- // and the object holding it (returned in rdx).
- __ push(context_register());
- __ Push(var->name());
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ push(rax); // Function.
- __ push(rdx); // Receiver.
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- NearLabel call;
- __ jmp(&call);
- __ bind(&done);
- // Push function.
- __ push(rax);
- // Push global receiver.
- __ movq(rbx, CodeGenerator::GlobalObject());
- __ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
- __ bind(&call);
+ { PreserveStatementPositionScope scope(masm()->positions_recorder());
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
+ NOT_INSIDE_TYPEOF,
+ &slow,
+ &done);
+
+ __ bind(&slow);
+ // Call the runtime to find the function to call (returned in rax)
+ // and the object holding it (returned in rdx).
+ __ push(context_register());
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ push(rax); // Function.
+ __ push(rdx); // Receiver.
+
+ // If fast case code has been generated, emit code to push the
+ // function and receiver and have the slow path jump around this
+ // code.
+ if (done.is_linked()) {
+ NearLabel call;
+ __ jmp(&call);
+ __ bind(&done);
+ // Push function.
+ __ push(rax);
+ // Push global receiver.
+ __ movq(rbx, CodeGenerator::GlobalObject());
+ __ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+ __ bind(&call);
+ }
}
EmitCallWithStub(expr);
@@ -1862,18 +1882,24 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Literal* key = prop->key()->AsLiteral();
if (key != NULL && key->handle()->IsSymbol()) {
// Call to a named property, use call IC.
- VisitForStackValue(prop->obj());
+ { PreserveStatementPositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(prop->obj());
+ }
EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
} else {
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
// for a regular property use KeyedCallIC.
- VisitForStackValue(prop->obj());
+ { PreserveStatementPositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(prop->obj());
+ }
if (prop->is_synthetic()) {
- VisitForAccumulatorValue(prop->key());
- __ movq(rdx, Operand(rsp, 0));
+ { PreserveStatementPositionScope scope(masm()->positions_recorder());
+ VisitForAccumulatorValue(prop->key());
+ __ movq(rdx, Operand(rsp, 0));
+ }
// Record source code position for IC call.
- SetSourcePosition(prop->position());
+ SetSourcePosition(prop->position(), FORCED_POSITION);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
// Pop receiver.
@@ -1898,7 +1924,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
loop_depth() == 0) {
lit->set_try_full_codegen(true);
}
- VisitForStackValue(fun);
+ { PreserveStatementPositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(fun);
+ }
// Load global receiver object.
__ movq(rbx, CodeGenerator::GlobalObject());
__ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 1d95b7f6..9ec78148 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -33,7 +33,6 @@
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"
-#include "utils.h"
namespace v8 {
namespace internal {
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 3891e1d5..24609bf6 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -273,9 +273,11 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
Register receiver,
Register name,
Register scratch,
- Register extra) {
+ Register extra,
+ Register extra2) {
Label miss;
- USE(extra); // The register extra is not used on the X64 platform.
+ USE(extra); // The register extra is not used on the X64 platform.
+ USE(extra2); // The register extra2 is not used on the X64 platform.
// Make sure that code is valid. The shifting code relies on the
// entry size being 16.
ASSERT(sizeof(Entry) == 16);
@@ -287,6 +289,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
ASSERT(!scratch.is(receiver));
ASSERT(!scratch.is(name));
+ // Check scratch register is valid, extra and extra2 are unused.
+ ASSERT(!scratch.is(no_reg));
+ ASSERT(extra2.is(no_reg));
+
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, &miss);