author     Leon Clarke <leonclarke@google.com>  2010-02-03 11:58:03 +0000
committer  Leon Clarke <leonclarke@google.com>  2010-02-03 11:58:03 +0000
commit     4515c472dc3e5ed2448a564600976759e569a0a8
tree       67d539a5e9fa0e72e2490426693bf73d1e36173f
parent     d91b9f7d46489a9ee00f9cb415630299c76a502b
Update v8 to bleeding_edge revision 3784
-rw-r--r--  Android.libv8.mk | 3
-rw-r--r--  Android.v8common.mk | 2
-rw-r--r--  ChangeLog | 29
-rw-r--r--  V8_MERGE_REVISION | 2
-rwxr-xr-x  src/SConscript | 4
-rw-r--r--  src/accessors.cc | 6
-rw-r--r--  src/arm/assembler-arm-inl.h | 14
-rw-r--r--  src/arm/assembler-arm.h | 2
-rw-r--r--  src/arm/codegen-arm.cc | 394
-rw-r--r--  src/arm/codegen-arm.h | 19
-rw-r--r--  src/arm/debug-arm.cc | 2
-rw-r--r--  src/arm/fast-codegen-arm.cc | 140
-rw-r--r--  src/arm/full-codegen-arm.cc | 197
-rw-r--r--  src/arm/ic-arm.cc | 18
-rw-r--r--  src/arm/macro-assembler-arm.cc | 23
-rw-r--r--  src/arm/stub-cache-arm.cc | 462
-rw-r--r--  src/arm/stub-cache-arm.cc.rej | 153
-rw-r--r--  src/arm/virtual-frame-arm.cc | 101
-rw-r--r--  src/arm/virtual-frame-arm.h | 34
-rw-r--r--  src/assembler.cc | 2
-rw-r--r--  src/ast.cc | 21
-rw-r--r--  src/ast.h | 22
-rw-r--r--  src/bootstrapper.cc | 20
-rw-r--r--  src/builtins.cc | 22
-rw-r--r--  src/code-stubs.h | 4
-rw-r--r--  src/codegen.cc | 39
-rw-r--r--  src/codegen.h | 70
-rwxr-xr-x [-rw-r--r--]  src/compiler.cc | 90
-rw-r--r--  src/compiler.h | 37
-rw-r--r--  src/data-flow.cc | 267
-rw-r--r--  src/data-flow.h | 67
-rw-r--r--  src/dateparser.cc | 32
-rw-r--r--  src/debug.cc | 15
-rw-r--r--  src/debug.h | 2
-rw-r--r--  src/disassembler.cc | 8
-rw-r--r--  src/fast-codegen.cc | 293
-rw-r--r--  src/fast-codegen.h | 72
-rw-r--r--  src/flag-definitions.h | 1
-rw-r--r--  src/frames.cc | 14
-rw-r--r--  src/frames.h | 3
-rw-r--r--  src/full-codegen.cc | 25
-rw-r--r--  src/full-codegen.h | 13
-rw-r--r--  src/globals.h | 1
-rw-r--r--  src/handles.cc | 40
-rw-r--r--  src/handles.h | 15
-rw-r--r--  src/heap-inl.h | 6
-rw-r--r--  src/heap.cc | 32
-rw-r--r--  src/heap.h | 2
-rw-r--r--  src/ia32/assembler-ia32.h | 5
-rw-r--r--  src/ia32/codegen-ia32.cc | 273
-rw-r--r--  src/ia32/codegen-ia32.h | 19
-rw-r--r--  src/ia32/debug-ia32.cc | 9
-rw-r--r--  src/ia32/fast-codegen-ia32.cc | 141
-rw-r--r--  src/ia32/full-codegen-ia32.cc | 199
-rw-r--r--  src/ia32/ic-ia32.cc | 26
-rw-r--r--  src/ia32/macro-assembler-ia32.cc | 37
-rw-r--r--  src/ia32/macro-assembler-ia32.h | 2
-rw-r--r--  src/ia32/stub-cache-ia32.cc | 55
-rw-r--r--  src/ia32/virtual-frame-ia32.cc | 36
-rw-r--r--  src/ic.cc | 14
-rw-r--r--  src/ic.h | 4
-rw-r--r--  src/json-delay.js | 2
-rw-r--r--  src/mirror-delay.js | 6
-rw-r--r--  src/objects-inl.h | 17
-rw-r--r--  src/objects.cc | 75
-rw-r--r--  src/objects.h | 15
-rw-r--r--  src/parser.cc | 370
-rw-r--r--  src/parser.h | 3
-rw-r--r--  src/prettyprinter.cc | 55
-rw-r--r--  src/prettyprinter.h | 3
-rw-r--r--  src/runtime.cc | 115
-rw-r--r--  src/runtime.h | 3
-rwxr-xr-x [-rw-r--r--]  src/scanner.cc | 211
-rw-r--r--  src/scanner.h | 58
-rw-r--r--  src/serialize.cc | 4
-rw-r--r--  src/spaces.cc | 29
-rw-r--r--  src/spaces.h | 5
-rw-r--r--  src/stub-cache.cc | 13
-rw-r--r--  src/stub-cache.h | 4
-rw-r--r--  src/version.cc | 2
-rw-r--r--  src/x64/assembler-x64.h | 3
-rw-r--r--  src/x64/codegen-x64.cc | 860
-rw-r--r--  src/x64/codegen-x64.h | 33
-rw-r--r--  src/x64/debug-x64.cc | 9
-rw-r--r--  src/x64/fast-codegen-x64.cc | 140
-rw-r--r--  src/x64/full-codegen-x64.cc | 215
-rw-r--r--  src/x64/ic-x64.cc | 26
-rw-r--r--  src/x64/macro-assembler-x64.cc | 91
-rw-r--r--  src/x64/macro-assembler-x64.h | 23
-rw-r--r--  src/x64/regexp-macro-assembler-x64.cc | 61
-rw-r--r--  src/x64/regexp-macro-assembler-x64.h | 15
-rw-r--r--  src/x64/stub-cache-x64.cc | 139
-rw-r--r--  src/x64/virtual-frame-x64.cc | 38
-rw-r--r--  test/cctest/test-api.cc | 121
-rw-r--r--  test/cctest/test-debug.cc | 27
-rw-r--r--  test/message/bugs/.svn/entries | 28
-rw-r--r--  test/mjsunit/compiler/thisfunction.js | 2
-rw-r--r--  test/mjsunit/debug-compile-event-newfunction.js | 68
-rw-r--r--  test/mjsunit/debug-compile-event.js | 2
-rw-r--r--  test/mjsunit/for.js | 32
-rw-r--r--  test/mjsunit/json.js | 98
-rw-r--r--  test/mjsunit/math-round.js | 52
-rw-r--r--  test/mjsunit/mirror-script.js | 4
-rw-r--r--  test/mjsunit/mjsunit.js | 1
-rw-r--r--  test/mjsunit/regress/regress-crbug-3867.js | 77
-rw-r--r--  test/mjsunit/tools/csvparser.js | 4
-rw-r--r--  tools/csvparser.js | 59
-rw-r--r--  tools/gyp/v8.gyp | 5
-rw-r--r--  tools/visual_studio/v8_base.vcproj | 20
-rw-r--r--  tools/visual_studio/v8_base_arm.vcproj | 20
-rw-r--r--  tools/visual_studio/v8_base_x64.vcproj | 26
111 files changed, 5154 insertions, 1795 deletions
diff --git a/Android.libv8.mk b/Android.libv8.mk
index 3dd9c43b..cced26e0 100644
--- a/Android.libv8.mk
+++ b/Android.libv8.mk
@@ -9,6 +9,9 @@ LOCAL_MODULE := libv8
LOCAL_MODULE_CLASS := STATIC_LIBRARIES
intermediates := $(call local-intermediates-dir)
+#ENABLE_V8_SNAPSHOT:=true
+#include $(LOCAL_PATH)/Android.mksnapshot.mk
+
# Android.v8common.mk defines common V8_LOCAL_SRC_FILES
# and V8_LOCAL_JS_LIBRARY_FILES
V8_LOCAL_SRC_FILES :=
diff --git a/Android.v8common.mk b/Android.v8common.mk
index 15c1c20c..75aa4bcb 100644
--- a/Android.v8common.mk
+++ b/Android.v8common.mk
@@ -16,6 +16,7 @@ V8_LOCAL_SRC_FILES := \
src/contexts.cc \
src/conversions.cc \
src/counters.cc \
+ src/data-flow.cc \
src/dateparser.cc \
src/disassembler.cc \
src/execution.cc \
@@ -77,6 +78,7 @@ ifeq ($(TARGET_ARCH),arm)
src/arm/cpu-arm.cc \
src/arm/disasm-arm.cc \
src/arm/frames-arm.cc \
+ src/arm/fast-codegen-arm.cc \
src/arm/full-codegen-arm.cc \
src/arm/ic-arm.cc \
src/arm/jump-target-arm.cc \
diff --git a/ChangeLog b/ChangeLog
index 192dd250..29ecccd7 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,32 @@
+2010-02-03: Version 2.1.0
+
+ Values are now always wrapped in objects when used as a receiver
+ (issue 223).
+
+ [ES5] Implemented Object.getOwnPropertyNames.
+
+ [ES5] Restrict JSON.parse to only accept strings that conform to the
+ JSON grammar.
+
+ Improvements to the debugger agent (issues 549 and 554).
+
+ Fixed problem with skipped stack frame in profiles (issue 553).
+
+ Solaris support by Erich Ocean <erich.ocean@me.com> and Ryan Dahl
+ <ry@tinyclouds.org>.
+
+ Fixed a bug where Math.round() returned incorrect results for huge
+ integers.
+
+ Fixed enumeration order for objects created from some constructor
+ functions (issue http://crbug.com/3867).
+
+ Fixed arithmetic on some integer constants (issue 580).
+
+ Numerous performance improvements including porting of previous IA-32
+ optimizations to x64 and ARM architectures.
+
+
2010-01-14: Version 2.0.6
Added ES5 Object.getPrototypeOf, GetOwnPropertyDescriptor,
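
The ES5, JSON, and Math.round entries above are directly observable from script. A minimal sketch of the new behavior (illustrative only, not part of this patch):

    // Primitive receivers are wrapped in objects (issue 223).
    Number.prototype.kind = function() { return typeof this; };
    (42).kind();                        // "object": 42 is boxed for the call

    // ES5: all own property names, enumerable or not.
    Object.getOwnPropertyNames({a: 1, b: 2});   // ["a", "b"]

    // JSON.parse now rejects input outside the JSON grammar.
    JSON.parse('{"a": 1}');             // ok
    JSON.parse("{'a': 1}");             // SyntaxError: single-quoted strings

    // Math.round must return huge integers unchanged
    // (see test/mjsunit/math-round.js).
    Math.round(9007199254740990);       // 9007199254740990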
diff --git a/V8_MERGE_REVISION b/V8_MERGE_REVISION
index e1e84eff..af5b2350 100644
--- a/V8_MERGE_REVISION
+++ b/V8_MERGE_REVISION
@@ -1,4 +1,4 @@
-http://v8.googlecode.com/svn/branches/bleeding_edge@3723
+http://v8.googlecode.com/svn/branches/bleeding_edge@3784
Note: We are currently not syncing with chromium release revisions
while we identify v8 performance problems.
diff --git a/src/SConscript b/src/SConscript
index 94428f2c..ebda77ac 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -50,6 +50,7 @@ SOURCES = {
contexts.cc
conversions.cc
counters.cc
+ data-flow.cc
dateparser.cc
debug-agent.cc
debug.cc
@@ -113,6 +114,7 @@ SOURCES = {
arm/cpu-arm.cc
arm/debug-arm.cc
arm/disasm-arm.cc
+ arm/fast-codegen-arm.cc
arm/frames-arm.cc
arm/full-codegen-arm.cc
arm/ic-arm.cc
@@ -136,6 +138,7 @@ SOURCES = {
ia32/cpu-ia32.cc
ia32/debug-ia32.cc
ia32/disasm-ia32.cc
+ ia32/fast-codegen-ia32.cc
ia32/frames-ia32.cc
ia32/full-codegen-ia32.cc
ia32/ic-ia32.cc
@@ -153,6 +156,7 @@ SOURCES = {
x64/cpu-x64.cc
x64/debug-x64.cc
x64/disasm-x64.cc
+ x64/fast-codegen-x64.cc
x64/frames-x64.cc
x64/full-codegen-x64.cc
x64/ic-x64.cc
diff --git a/src/accessors.cc b/src/accessors.cc
index 56cf1359..5a029285 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -493,11 +493,11 @@ Object* Accessors::FunctionGetLength(Object* object, void*) {
// If the function isn't compiled yet, the length is not computed
// correctly yet. Compile it now and return the right length.
HandleScope scope;
- Handle<JSFunction> function_handle(function);
- if (!CompileLazy(function_handle, KEEP_EXCEPTION)) {
+ Handle<SharedFunctionInfo> shared(function->shared());
+ if (!CompileLazyShared(shared, KEEP_EXCEPTION)) {
return Failure::Exception();
}
- return Smi::FromInt(function_handle->shared()->length());
+ return Smi::FromInt(shared->length());
} else {
return Smi::FromInt(function->shared()->length());
}
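
The accessor above now computes a lazy function's length from its SharedFunctionInfo instead of compiling the whole closure. The script-level contract it preserves is simply (illustrative):

    function f(a, b, c) { return a; }
    f.length;   // 3, correct even before f is ever called or compiled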
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index fd2fcd30..354436cb 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -174,20 +174,6 @@ Operand::Operand(const ExternalReference& f) {
}
-Operand::Operand(Object** opp) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(opp);
- rmode_ = RelocInfo::NONE;
-}
-
-
-Operand::Operand(Context** cpp) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(cpp);
- rmode_ = RelocInfo::NONE;
-}
-
-
Operand::Operand(Smi* value) {
rm_ = no_reg;
imm32_ = reinterpret_cast<intptr_t>(value);
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 8b65b7cd..208d583c 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -398,8 +398,6 @@ class Operand BASE_EMBEDDED {
RelocInfo::Mode rmode = RelocInfo::NONE));
INLINE(explicit Operand(const ExternalReference& f));
INLINE(explicit Operand(const char* s));
- INLINE(explicit Operand(Object** opp));
- INLINE(explicit Operand(Context** cpp));
explicit Operand(Handle<Object> handle);
INLINE(explicit Operand(Smi* value));
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 38f08d1f..7bee98d8 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -121,12 +121,13 @@ CodeGenState::~CodeGenState() {
// -------------------------------------------------------------------------
// CodeGenerator implementation
-CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script,
+CodeGenerator::CodeGenerator(MacroAssembler* masm,
+ Handle<Script> script,
bool is_eval)
: is_eval_(is_eval),
script_(script),
deferred_(8),
- masm_(new MacroAssembler(NULL, buffer_size)),
+ masm_(masm),
scope_(NULL),
frame_(NULL),
allocator_(NULL),
@@ -142,7 +143,9 @@ CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script,
// r1: called JS function
// cp: callee's context
-void CodeGenerator::GenCode(FunctionLiteral* fun) {
+void CodeGenerator::Generate(FunctionLiteral* fun,
+ Mode mode,
+ CompilationInfo* info) {
// Record the position for debugging purposes.
CodeForFunctionPosition(fun);
@@ -168,8 +171,7 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
// r1: called JS function
// cp: callee's context
allocator_->Initialize();
- frame_->Enter();
- // tos: code slot
+
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -178,104 +180,118 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
}
#endif
- // Allocate space for locals and initialize them. This also checks
- // for stack overflow.
- frame_->AllocateStackSlots();
- // Initialize the function return target after the locals are set
- // up, because it needs the expected frame height from the frame.
- function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
- function_return_is_shadowed_ = false;
+ if (mode == PRIMARY) {
+ frame_->Enter();
+ // tos: code slot
- VirtualFrame::SpilledScope spilled_scope;
- int heap_slots = scope_->num_heap_slots();
- if (heap_slots > 0) {
- // Allocate local context.
- // Get outer context and create a new context based on it.
- __ ldr(r0, frame_->Function());
- frame_->EmitPush(r0);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- frame_->CallStub(&stub, 1);
- } else {
- frame_->CallRuntime(Runtime::kNewContext, 1);
- }
+ // Allocate space for locals and initialize them. This also checks
+ // for stack overflow.
+ frame_->AllocateStackSlots();
+
+ VirtualFrame::SpilledScope spilled_scope;
+ int heap_slots = scope_->num_heap_slots();
+ if (heap_slots > 0) {
+ // Allocate local context.
+ // Get outer context and create a new context based on it.
+ __ ldr(r0, frame_->Function());
+ frame_->EmitPush(r0);
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ frame_->CallStub(&stub, 1);
+ } else {
+ frame_->CallRuntime(Runtime::kNewContext, 1);
+ }
#ifdef DEBUG
- JumpTarget verified_true;
- __ cmp(r0, Operand(cp));
- verified_true.Branch(eq);
- __ stop("NewContext: r0 is expected to be the same as cp");
- verified_true.Bind();
+ JumpTarget verified_true;
+ __ cmp(r0, Operand(cp));
+ verified_true.Branch(eq);
+ __ stop("NewContext: r0 is expected to be the same as cp");
+ verified_true.Bind();
#endif
- // Update context local.
- __ str(cp, frame_->Context());
- }
+ // Update context local.
+ __ str(cp, frame_->Context());
+ }
- // TODO(1241774): Improve this code:
- // 1) only needed if we have a context
- // 2) no need to recompute context ptr every single time
- // 3) don't copy parameter operand code from SlotOperand!
- {
- Comment cmnt2(masm_, "[ copy context parameters into .context");
-
- // Note that iteration order is relevant here! If we have the same
- // parameter twice (e.g., function (x, y, x)), and that parameter
- // needs to be copied into the context, it must be the last argument
- // passed to the parameter that needs to be copied. This is a rare
- // case so we don't check for it, instead we rely on the copying
- // order: such a parameter is copied repeatedly into the same
- // context location and thus the last value is what is seen inside
- // the function.
- for (int i = 0; i < scope_->num_parameters(); i++) {
- Variable* par = scope_->parameter(i);
- Slot* slot = par->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- ASSERT(!scope_->is_global_scope()); // no parameters in global scope
- __ ldr(r1, frame_->ParameterAt(i));
- // Loads r2 with context; used below in RecordWrite.
- __ str(r1, SlotOperand(slot, r2));
- // Load the offset into r3.
- int slot_offset =
- FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ mov(r3, Operand(slot_offset));
- __ RecordWrite(r2, r3, r1);
+ // TODO(1241774): Improve this code:
+ // 1) only needed if we have a context
+ // 2) no need to recompute context ptr every single time
+ // 3) don't copy parameter operand code from SlotOperand!
+ {
+ Comment cmnt2(masm_, "[ copy context parameters into .context");
+
+ // Note that iteration order is relevant here! If we have the same
+ // parameter twice (e.g., function (x, y, x)), and that parameter
+ // needs to be copied into the context, it must be the last argument
+ // passed to the parameter that needs to be copied. This is a rare
+ // case so we don't check for it, instead we rely on the copying
+ // order: such a parameter is copied repeatedly into the same
+ // context location and thus the last value is what is seen inside
+ // the function.
+ for (int i = 0; i < scope_->num_parameters(); i++) {
+ Variable* par = scope_->parameter(i);
+ Slot* slot = par->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ // No parameters in global scope.
+ ASSERT(!scope_->is_global_scope());
+ __ ldr(r1, frame_->ParameterAt(i));
+ // Loads r2 with context; used below in RecordWrite.
+ __ str(r1, SlotOperand(slot, r2));
+ // Load the offset into r3.
+ int slot_offset =
+ FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ __ mov(r3, Operand(slot_offset));
+ __ RecordWrite(r2, r3, r1);
+ }
}
}
- }
- // Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in the
- // context.
- if (scope_->arguments() != NULL) {
- Comment cmnt(masm_, "[ allocate arguments object");
- ASSERT(scope_->arguments_shadow() != NULL);
- Variable* arguments = scope_->arguments()->var();
- Variable* shadow = scope_->arguments_shadow()->var();
- ASSERT(arguments != NULL && arguments->slot() != NULL);
- ASSERT(shadow != NULL && shadow->slot() != NULL);
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- __ ldr(r2, frame_->Function());
- // The receiver is below the arguments, the return address, and the
- // frame pointer on the stack.
- const int kReceiverDisplacement = 2 + scope_->num_parameters();
- __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
- __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
- frame_->Adjust(3);
- __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
- frame_->CallStub(&stub, 3);
- frame_->EmitPush(r0);
- StoreToSlot(arguments->slot(), NOT_CONST_INIT);
- StoreToSlot(shadow->slot(), NOT_CONST_INIT);
- frame_->Drop(); // Value is no longer needed.
- }
+ // Store the arguments object. This must happen after context
+ // initialization because the arguments object may be stored in the
+ // context.
+ if (scope_->arguments() != NULL) {
+ Comment cmnt(masm_, "[ allocate arguments object");
+ ASSERT(scope_->arguments_shadow() != NULL);
+ Variable* arguments = scope_->arguments()->var();
+ Variable* shadow = scope_->arguments_shadow()->var();
+ ASSERT(arguments != NULL && arguments->slot() != NULL);
+ ASSERT(shadow != NULL && shadow->slot() != NULL);
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ ldr(r2, frame_->Function());
+ // The receiver is below the arguments, the return address, and the
+ // frame pointer on the stack.
+ const int kReceiverDisplacement = 2 + scope_->num_parameters();
+ __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
+ __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
+ frame_->Adjust(3);
+ __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
+ frame_->CallStub(&stub, 3);
+ frame_->EmitPush(r0);
+ StoreToSlot(arguments->slot(), NOT_CONST_INIT);
+ StoreToSlot(shadow->slot(), NOT_CONST_INIT);
+ frame_->Drop(); // Value is no longer needed.
+ }
- // Initialize ThisFunction reference if present.
- if (scope_->is_function_scope() && scope_->function() != NULL) {
- __ mov(ip, Operand(Factory::the_hole_value()));
- frame_->EmitPush(ip);
- StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+ // Initialize ThisFunction reference if present.
+ if (scope_->is_function_scope() && scope_->function() != NULL) {
+ __ mov(ip, Operand(Factory::the_hole_value()));
+ frame_->EmitPush(ip);
+ StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+ }
+ } else {
+ // When used as the secondary compiler for splitting, r1, cp,
+ // fp, and lr have been pushed on the stack. Adjust the virtual
+ // frame to match this state.
+ frame_->Adjust(4);
+ allocator_->Unuse(r1);
+ allocator_->Unuse(lr);
}
+ // Initialize the function return target after the locals are set
+ // up, because it needs the expected frame height from the frame.
+ function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
+ function_return_is_shadowed_ = false;
+
// Generate code to 'execute' declarations and initialize functions
// (source elements). In case of an illegal redeclaration we need to
// handle that instead of processing the declarations.
@@ -2286,7 +2302,8 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
Comment cmnt(masm_, "[ DebuggerStatament");
CodeForStatementPosition(node);
#ifdef ENABLE_DEBUGGER_SUPPORT
- frame_->CallRuntime(Runtime::kDebugBreak, 0);
+ DebuggerStatementStub ces;
+ frame_->CallStub(&ces, 0);
#endif
// Ignore the return value.
ASSERT(frame_->height() == original_height);
@@ -2589,13 +2606,12 @@ void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
// Load the global object.
LoadGlobal();
// Setup the name register.
- Result name(r2);
__ mov(r2, Operand(slot->var()->name()));
// Call IC stub.
if (typeof_state == INSIDE_TYPEOF) {
- frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, &name, 0);
+ frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
} else {
- frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET_CONTEXT, &name, 0);
+ frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET_CONTEXT, 0);
}
// Drop the global object. The result is in r0.
@@ -3158,22 +3174,15 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
}
// r0: the number of arguments.
- Result num_args(r0);
__ mov(r0, Operand(arg_count));
-
// Load the function into r1 as per calling convention.
- Result function(r1);
__ ldr(r1, frame_->ElementAt(arg_count + 1));
// Call the construct call builtin that handles allocation and
// constructor invocation.
CodeForSourcePosition(node->position());
Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
- frame_->CallCodeObject(ic,
- RelocInfo::CONSTRUCT_CALL,
- &num_args,
- &function,
- arg_count + 1);
+ frame_->CallCodeObject(ic, RelocInfo::CONSTRUCT_CALL, arg_count + 1);
// Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)).
__ str(r0, frame_->Top());
@@ -3723,6 +3732,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->EmitPush(r0); // r0 has result
} else {
+ bool overwrite =
+ (node->expression()->AsBinaryOperation() != NULL &&
+ node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
LoadAndSpill(node->expression());
frame_->EmitPop(r0);
switch (op) {
@@ -3733,9 +3745,6 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
break;
case Token::SUB: {
- bool overwrite =
- (node->expression()->AsBinaryOperation() != NULL &&
- node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
GenericUnaryOpStub stub(Token::SUB, overwrite);
frame_->CallStub(&stub, 0);
break;
@@ -3748,10 +3757,10 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
__ tst(r0, Operand(kSmiTagMask));
smi_label.Branch(eq);
- frame_->EmitPush(r0);
- frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, 1);
-
+ GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+ frame_->CallStub(&stub, 0);
continue_label.Jump();
+
smi_label.Bind();
__ mvn(r0, Operand(r0));
__ bic(r0, r0, Operand(kSmiTagMask)); // bit-clear inverted smi-tag
@@ -4330,13 +4339,12 @@ void Reference::GetValue() {
Variable* var = expression_->AsVariableProxy()->AsVariable();
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Setup the name register.
- Result name_reg(r2);
__ mov(r2, Operand(name));
ASSERT(var == NULL || var->is_global());
RelocInfo::Mode rmode = (var == NULL)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
- frame->CallCodeObject(ic, rmode, &name_reg, 0);
+ frame->CallCodeObject(ic, rmode, 0);
frame->EmitPush(r0);
break;
}
@@ -4377,6 +4385,7 @@ void Reference::SetValue(InitState init_state) {
Comment cmnt(masm, "[ Store to Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
cgen_->StoreToSlot(slot, init_state);
+ cgen_->UnloadReference(this);
break;
}
@@ -4386,18 +4395,12 @@ void Reference::SetValue(InitState init_state) {
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Handle<String> name(GetName());
- Result value(r0);
frame->EmitPop(r0);
-
// Setup the name register.
- Result property_name(r2);
__ mov(r2, Operand(name));
- frame->CallCodeObject(ic,
- RelocInfo::CODE_TARGET,
- &value,
- &property_name,
- 0);
+ frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
frame->EmitPush(r0);
+ cgen_->UnloadReference(this);
break;
}
@@ -4410,17 +4413,16 @@ void Reference::SetValue(InitState init_state) {
// Call IC code.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
// TODO(1222589): Make the IC grab the values from the stack.
- Result value(r0);
frame->EmitPop(r0); // value
- frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, &value, 0);
+ frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
frame->EmitPush(r0);
+ cgen_->UnloadReference(this);
break;
}
default:
UNREACHABLE();
}
- cgen_->UnloadReference(this);
}
@@ -6102,59 +6104,96 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- ASSERT(op_ == Token::SUB);
-
- Label undo;
- Label slow;
- Label not_smi;
-
- // Enter runtime system if the value is not a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(ne, &not_smi);
+ Label slow, done;
- // Enter runtime system if the value of the expression is zero
- // to make sure that we switch between 0 and -0.
- __ cmp(r0, Operand(0));
- __ b(eq, &slow);
+ if (op_ == Token::SUB) {
+ // Check whether the value is a smi.
+ Label try_float;
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(ne, &try_float);
+
+ // Go slow case if the value of the expression is zero
+ // to make sure that we switch between 0 and -0.
+ __ cmp(r0, Operand(0));
+ __ b(eq, &slow);
+
+ // The value of the expression is a smi that is not zero. Try
+ // optimistic subtraction '0 - value'.
+ __ rsb(r1, r0, Operand(0), SetCC);
+ __ b(vs, &slow);
+
+ __ mov(r0, Operand(r1)); // Set r0 to result.
+ __ b(&done);
+
+ __ bind(&try_float);
+ __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+ __ b(ne, &slow);
+ // r0 is a heap number. Get a new heap number in r1.
+ if (overwrite_) {
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
+ __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ } else {
+ AllocateHeapNumber(masm, &slow, r1, r2, r3);
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
+ __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
+ __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
+ __ mov(r0, Operand(r1));
+ }
+ } else if (op_ == Token::BIT_NOT) {
+ // Check if the operand is a heap number.
+ __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+ __ b(ne, &slow);
+
+ // Convert the heap number in r0 to an untagged integer in r1.
+ GetInt32(masm, r0, r1, r2, r3, &slow);
+
+ // Do the bitwise operation (move negated) and check if the result
+ // fits in a smi.
+ Label try_float;
+ __ mvn(r1, Operand(r1));
+ __ add(r2, r1, Operand(0x40000000), SetCC);
+ __ b(mi, &try_float);
+ __ mov(r0, Operand(r1, LSL, kSmiTagSize));
+ __ b(&done);
+
+ __ bind(&try_float);
+ if (!overwrite_) {
+ // Allocate a fresh heap number, but don't overwrite r0 until
+ // we're sure we can do it without going through the slow case
+ // that needs the value in r0.
+ AllocateHeapNumber(masm, &slow, r2, r3, r4);
+ __ mov(r0, Operand(r2));
+ }
- // The value of the expression is a smi that is not zero. Try
- // optimistic subtraction '0 - value'.
- __ rsb(r1, r0, Operand(0), SetCC);
- __ b(vs, &slow);
+ // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
+ // have to set up a frame.
+ WriteInt32ToHeapNumberStub stub(r1, r0, r2);
+ __ push(lr);
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ } else {
+ UNIMPLEMENTED();
+ }
- __ mov(r0, Operand(r1)); // Set r0 to result.
+ __ bind(&done);
__ StubReturn(1);
- // Enter runtime system.
+ // Handle the slow case by jumping to the JavaScript builtin.
__ bind(&slow);
__ push(r0);
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
-
- __ bind(&not_smi);
- __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
- __ b(ne, &slow);
- // r0 is a heap number. Get a new heap number in r1.
- if (overwrite_) {
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- } else {
- AllocateHeapNumber(masm, &slow, r1, r2, r3);
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
- __ mov(r0, Operand(r1));
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
+ break;
+ default:
+ UNREACHABLE();
}
- __ StubReturn(1);
-}
-
-
-int CEntryStub::MinorKey() {
- ASSERT(result_size_ <= 2);
- // Result returned in r0 or r0+r1 by default.
- return 0;
}
@@ -6265,7 +6304,6 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
- ExitFrame::Mode mode,
bool do_gc,
bool always_allocate) {
// r0: result parameter for PerformGC, if any
@@ -6325,7 +6363,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// r0:r1: result
// sp: stack pointer
// fp: frame pointer
- __ LeaveExitFrame(mode);
+ __ LeaveExitFrame(mode_);
// check if we should retry or throw exception
Label retry;
@@ -6358,7 +6396,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
}
-void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
+void CEntryStub::Generate(MacroAssembler* masm) {
// Called from JavaScript; parameters are on stack as if calling JS function
// r0: number of arguments including receiver
// r1: pointer to builtin function
@@ -6366,17 +6404,15 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
+ // Result returned in r0 or r0+r1 by default.
+
// NOTE: Invocations of builtins may return failure objects
// instead of a proper result. The builtin entry handles
// this by performing a garbage collection and retrying the
// builtin once.
- ExitFrame::Mode mode = is_debug_break
- ? ExitFrame::MODE_DEBUG
- : ExitFrame::MODE_NORMAL;
-
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(mode);
+ __ EnterExitFrame(mode_);
// r4: number of arguments (C callee-saved)
// r5: pointer to builtin function (C callee-saved)
@@ -6391,7 +6427,6 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- mode,
false,
false);
@@ -6400,7 +6435,6 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- mode,
true,
false);
@@ -6411,7 +6445,6 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- mode,
true,
true);
@@ -6445,8 +6478,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// r1: function
// r2: receiver
// r3: argc
- __ add(r4, sp, Operand((kNumCalleeSaved + 1)*kPointerSize));
- __ ldr(r4, MemOperand(r4)); // argv
+ __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize)); // argv
// Push a frame with special values setup to mark it as an entry frame.
// r0: code entry
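
The rewritten GenericUnaryOpStub keeps two classic unary-minus corner cases on the slow path. In script terms (a sketch, assuming the 31-bit smi encoding used on ARM):

    // Zero must negate to -0, a heap number, so the smi path bails out.
    1 / -0;           // -Infinity
    // The optimistic '0 - value' overflows for the minimum smi (V flag set).
    -(-1073741824);   // 1073741824 does not fit in a 31-bit smi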
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index ccca2e9e..0384485f 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -32,6 +32,7 @@ namespace v8 {
namespace internal {
// Forward declarations
+class CompilationInfo;
class DeferredCode;
class RegisterAllocator;
class RegisterFile;
@@ -149,11 +150,21 @@ class CodeGenState BASE_EMBEDDED {
class CodeGenerator: public AstVisitor {
public:
+ // Compilation mode. Either the compiler is used as the primary
+ // compiler and needs to setup everything or the compiler is used as
+ // the secondary compiler for split compilation and has to handle
+ // bailouts.
+ enum Mode {
+ PRIMARY,
+ SECONDARY
+ };
+
// Takes a function literal, generates code for it. This function should only
// be called by compiler.cc.
static Handle<Code> MakeCode(FunctionLiteral* fun,
Handle<Script> script,
- bool is_eval);
+ bool is_eval,
+ CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
static void MakeCodePrologue(FunctionLiteral* fun);
@@ -201,8 +212,7 @@ class CodeGenerator: public AstVisitor {
private:
// Construction/Destruction
- CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
- virtual ~CodeGenerator() { delete masm_; }
+ CodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval);
// Accessors
Scope* scope() const { return scope_; }
@@ -239,7 +249,7 @@ class CodeGenerator: public AstVisitor {
inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
// Main code generation function
- void GenCode(FunctionLiteral* fun);
+ void Generate(FunctionLiteral* fun, Mode mode, CompilationInfo* info);
// The following are used by class Reference.
void LoadReference(Reference* ref);
@@ -443,6 +453,7 @@ class CodeGenerator: public AstVisitor {
friend class VirtualFrame;
friend class JumpTarget;
friend class Reference;
+ friend class FastCodeGenerator;
friend class FullCodeGenerator;
friend class FullCodeGenSyntaxChecker;
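
The new Mode enum encodes the split-compilation scheme: the fast compiler emits code for the common case and, when an assumption fails, jumps into code emitted by this generator in SECONDARY mode (which is why the secondary entry only adjusts the virtual frame for the already-pushed r1, cp, fp, and lr). A hypothetical script-level scenario:

    function Box(v) { this.v = v; }
    new Box(1);        // expected receiver map: stays in the fast code
    Box.call({}, 1);   // unexpected receiver: bails out into code generated
                       // by the secondary (classic) compiler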
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index fc9808d5..6eb5239b 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -98,7 +98,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ mov(r0, Operand(0)); // no arguments
__ mov(r1, Operand(ExternalReference::debug_break()));
- CEntryDebugBreakStub ceb;
+ CEntryStub ceb(1, ExitFrame::MODE_DEBUG);
__ CallStub(&ceb);
// Restore the register values containing object pointers from the expression
diff --git a/src/arm/fast-codegen-arm.cc b/src/arm/fast-codegen-arm.cc
new file mode 100644
index 00000000..1aeea7ab
--- /dev/null
+++ b/src/arm/fast-codegen-arm.cc
@@ -0,0 +1,140 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "fast-codegen.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm())
+
+void FastCodeGenerator::EmitLoadReceiver(Register reg) {
+ // Offset 2 is due to return address and saved frame pointer.
+ int index = 2 + function()->scope()->num_parameters();
+ __ ldr(reg, MemOperand(sp, index * kPointerSize));
+}
+
+
+void FastCodeGenerator::EmitReceiverMapCheck() {
+ Comment cmnt(masm(), ";; MapCheck(this)");
+ if (FLAG_print_ir) {
+ PrintF("MapCheck(this)\n");
+ }
+
+ EmitLoadReceiver(r1);
+ __ BranchOnSmi(r1, bailout());
+
+ ASSERT(has_receiver() && receiver()->IsHeapObject());
+ Handle<HeapObject> object = Handle<HeapObject>::cast(receiver());
+ Handle<Map> map(object->map());
+ __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ mov(ip, Operand(map));
+ __ cmp(r3, ip);
+ __ b(ne, bailout());
+}
+
+
+void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
+ // Compile global variable accesses as load IC calls. The only live
+ // registers are cp (context) and possibly r1 (this). Both are also saved
+ // on the stack, and cp is preserved by the call.
+ __ ldr(ip, CodeGenerator::GlobalObject());
+ __ push(ip);
+ __ mov(r2, Operand(name));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ if (has_this_properties()) {
+ // Restore this.
+ EmitLoadReceiver(r1);
+ }
+}
+
+
+void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
+ LookupResult lookup;
+ receiver()->Lookup(*name, &lookup);
+
+ ASSERT(lookup.holder() == *receiver());
+ ASSERT(lookup.type() == FIELD);
+ Handle<Map> map(Handle<HeapObject>::cast(receiver())->map());
+ int index = lookup.GetFieldIndex() - map->inobject_properties();
+ int offset = index * kPointerSize;
+
+ // Negative offsets are inobject properties.
+ if (offset < 0) {
+ offset += map->instance_size();
+ __ mov(r2, r1); // Copy receiver for write barrier.
+ } else {
+ offset += FixedArray::kHeaderSize;
+ __ ldr(r2, FieldMemOperand(r1, JSObject::kPropertiesOffset));
+ }
+ // Perform the store.
+ __ str(r0, FieldMemOperand(r2, offset));
+ __ mov(r3, Operand(offset));
+ __ RecordWrite(r2, r3, ip);
+}
+
+
+void FastCodeGenerator::Generate(FunctionLiteral* fun, CompilationInfo* info) {
+ ASSERT(function_ == NULL);
+ ASSERT(info_ == NULL);
+ function_ = fun;
+ info_ = info;
+
+ // Save the caller's frame pointer and set up our own.
+ Comment prologue_cmnt(masm(), ";; Prologue");
+ __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ __ add(fp, sp, Operand(2 * kPointerSize));
+ // Note that we keep a live register reference to cp (context) at
+ // this point.
+
+ // Receiver (this) is allocated to r1 if there are this properties.
+ if (has_this_properties()) EmitReceiverMapCheck();
+
+ VisitStatements(fun->body());
+
+ Comment return_cmnt(masm(), ";; Return(<undefined>)");
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+
+ Comment epilogue_cmnt(masm(), ";; Epilogue");
+ __ mov(sp, fp);
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ int32_t sp_delta = (fun->scope()->num_parameters() + 1) * kPointerSize;
+ __ add(sp, sp, Operand(sp_delta));
+ __ Jump(lr);
+
+ __ bind(&bailout_);
+}
+
+
+#undef __
+
+
+} } // namespace v8::internal
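
EmitThisPropertyStore distinguishes in-object slots from slots in the external properties array via the sign of the adjusted field index. The kind of function this new compiler targets, inferred from the map check and store helpers above (a sketch):

    function Point(x, y) {
      this.x = x;   // negative adjusted index: stored inside the object
      this.y = y;   // stores go through EmitThisPropertyStore + write barrier
    }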
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 8d1cfebc..9f240dd8 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -52,80 +52,90 @@ namespace internal {
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-arm.h for its layout.
-void FullCodeGenerator::Generate(FunctionLiteral* fun) {
+void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
function_ = fun;
SetFunctionPosition(fun);
- int locals_count = fun->scope()->num_stack_slots();
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- if (locals_count > 0) {
- // Load undefined value here, so the value is ready for the loop below.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- }
- // Adjust fp to point to caller's fp.
- __ add(fp, sp, Operand(2 * kPointerSize));
+ if (mode == PRIMARY) {
+ int locals_count = fun->scope()->num_stack_slots();
- { Comment cmnt(masm_, "[ Allocate locals");
- for (int i = 0; i < locals_count; i++) {
- __ push(ip);
+ __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ if (locals_count > 0) {
+ // Load undefined value here, so the value is ready for the loop
+ // below.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
}
- }
+ // Adjust fp to point to caller's fp.
+ __ add(fp, sp, Operand(2 * kPointerSize));
- bool function_in_register = true;
+ { Comment cmnt(masm_, "[ Allocate locals");
+ for (int i = 0; i < locals_count; i++) {
+ __ push(ip);
+ }
+ }
- // Possibly allocate a local context.
- if (fun->scope()->num_heap_slots() > 0) {
- Comment cmnt(masm_, "[ Allocate local context");
- // Argument to NewContext is the function, which is in r1.
- __ push(r1);
- __ CallRuntime(Runtime::kNewContext, 1);
- function_in_register = false;
- // Context is returned in both r0 and cp. It replaces the context
- // passed to us. It's saved in the stack and kept live in cp.
- __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Copy any necessary parameters into the context.
- int num_parameters = fun->scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Slot* slot = fun->scope()->parameter(i)->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ ldr(r0, MemOperand(fp, parameter_offset));
- // Store it in the context
- __ str(r0, MemOperand(cp, Context::SlotOffset(slot->index())));
+ bool function_in_register = true;
+
+ // Possibly allocate a local context.
+ if (fun->scope()->num_heap_slots() > 0) {
+ Comment cmnt(masm_, "[ Allocate local context");
+ // Argument to NewContext is the function, which is in r1.
+ __ push(r1);
+ __ CallRuntime(Runtime::kNewContext, 1);
+ function_in_register = false;
+ // Context is returned in both r0 and cp. It replaces the context
+ // passed to us. It's saved in the stack and kept live in cp.
+ __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = fun->scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = fun->scope()->parameter(i)->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ ldr(r0, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ __ mov(r1, Operand(Context::SlotOffset(slot->index())));
+ __ str(r0, MemOperand(cp, r1));
+ // Update the write barrier. This clobbers all involved
+ // registers, so we have to use a third register to avoid
+ // clobbering cp.
+ __ mov(r2, Operand(cp));
+ __ RecordWrite(r2, r1, r0);
+ }
}
}
- }
- Variable* arguments = fun->scope()->arguments()->AsVariable();
- if (arguments != NULL) {
- // Function uses arguments object.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (!function_in_register) {
- // Load this again, if it's used by the local context below.
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ mov(r3, r1);
+ Variable* arguments = fun->scope()->arguments()->AsVariable();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (!function_in_register) {
+ // Load this again, if it's used by the local context below.
+ __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ mov(r3, r1);
+ }
+ // Receiver is just before the parameters on the caller's stack.
+ __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset +
+ fun->num_parameters() * kPointerSize));
+ __ mov(r1, Operand(Smi::FromInt(fun->num_parameters())));
+ __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
+
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+ // The stub will rewrite receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ CallStub(&stub);
+ // Duplicate the value; move-to-slot operation might clobber registers.
+ __ mov(r3, r0);
+ Move(arguments->slot(), r0, r1, r2);
+ Slot* dot_arguments_slot =
+ fun->scope()->arguments_shadow()->AsVariable()->slot();
+ Move(dot_arguments_slot, r3, r1, r2);
}
- // Receiver is just before the parameters on the caller's stack.
- __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset +
- fun->num_parameters() * kPointerSize));
- __ mov(r1, Operand(Smi::FromInt(fun->num_parameters())));
- __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
-
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiever and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- __ CallStub(&stub);
- // Duplicate the value; move-to-slot operation might clobber registers.
- __ mov(r3, r0);
- Move(arguments->slot(), r0, r1, r2);
- Slot* dot_arguments_slot =
- fun->scope()->arguments_shadow()->AsVariable()->slot();
- Move(dot_arguments_slot, r3, r1, r2);
}
// Check the stack for overflow or break request.
@@ -133,15 +143,15 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun) {
// added to the implicit 8 byte offset that always applies to operations
// with pc and gives a return address 12 bytes down.
{ Comment cmnt(masm_, "[ Stack check");
- __ LoadRoot(r2, Heap::kStackLimitRootIndex);
- __ add(lr, pc, Operand(Assembler::kInstrSize));
- __ cmp(sp, Operand(r2));
- StackCheckStub stub;
- __ mov(pc,
- Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
- RelocInfo::CODE_TARGET),
- LeaveCC,
- lo);
+ __ LoadRoot(r2, Heap::kStackLimitRootIndex);
+ __ add(lr, pc, Operand(Assembler::kInstrSize));
+ __ cmp(sp, Operand(r2));
+ StackCheckStub stub;
+ __ mov(pc,
+ Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+ RelocInfo::CODE_TARGET),
+ LeaveCC,
+ lo);
}
{ Comment cmnt(masm_, "[ Declarations");
@@ -581,7 +591,8 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
int offset = Context::SlotOffset(slot->index());
__ mov(r2, Operand(offset));
// We know that we have written a function, which is not a smi.
- __ RecordWrite(cp, r2, result_register());
+ __ mov(r1, Operand(cp));
+ __ RecordWrite(r1, r2, result_register());
}
break;
@@ -1372,6 +1383,46 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
+ case Token::SUB: {
+ Comment cmt(masm_, "[ UnaryOperation (SUB)");
+ bool overwrite =
+ (expr->expression()->AsBinaryOperation() != NULL &&
+ expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ GenericUnaryOpStub stub(Token::SUB, overwrite);
+ // GenericUnaryOpStub expects the argument to be in the
+ // accumulator register r0.
+ VisitForValue(expr->expression(), kAccumulator);
+ __ CallStub(&stub);
+ Apply(context_, r0);
+ break;
+ }
+
+ case Token::BIT_NOT: {
+ Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
+ bool overwrite =
+ (expr->expression()->AsBinaryOperation() != NULL &&
+ expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+ // GenericUnaryOpStub expects the argument to be in the
+ // accumulator register r0.
+ VisitForValue(expr->expression(), kAccumulator);
+ // Avoid calling the stub for Smis.
+ Label smi, done;
+ __ tst(result_register(), Operand(kSmiTagMask));
+ __ b(eq, &smi);
+ // Non-smi: call stub leaving result in accumulator register.
+ __ CallStub(&stub);
+ __ b(&done);
+ // Perform operation directly on Smis.
+ __ bind(&smi);
+ __ mvn(result_register(), Operand(result_register()));
+ // Bit-clear inverted smi-tag.
+ __ bic(result_register(), result_register(), Operand(kSmiTagMask));
+ __ bind(&done);
+ Apply(context_, result_register());
+ break;
+ }
+
default:
UNREACHABLE();
}
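
The smi fast path above applies bitwise NOT directly to the tagged word. With smis encoded as value << 1, mvn gives ~(2v) = -2v - 1, and clearing the tag bit yields -2v - 2 = (~v) << 1, the smi for ~v. A worked example:

    ~5;   // -6: smi 5 is the word 10; mvn -> -11; bic tag bit -> -12 = smi -6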
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index b59c3f04..bae1e967 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -170,7 +170,6 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
-
Label miss;
__ ldr(r0, MemOperand(sp, 0));
@@ -204,7 +203,6 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
-
Label miss;
// Load receiver.
@@ -318,7 +316,6 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- lr: return address
// -----------------------------------
-
Label miss, global_object, non_global_object;
// Get the receiver of the function from the stack into r1.
@@ -451,7 +448,6 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
-
Label miss, probe, global;
__ ldr(r0, MemOperand(sp, 0));
@@ -543,6 +539,8 @@ void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
// -- lr : return address
// -- sp[0] : key
// -- sp[4] : receiver
+ // -----------------------------------
+
__ ldm(ia, sp, r2.bit() | r3.bit());
__ stm(db_w, sp, r2.bit() | r3.bit());
@@ -555,6 +553,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- lr : return address
// -- sp[0] : key
// -- sp[4] : receiver
+ // -----------------------------------
Label slow, fast;
// Get the key and receiver object from the stack.
@@ -622,6 +621,8 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// -- lr : return address
// -- sp[0] : key
// -- sp[4] : receiver
+ // -----------------------------------
+
GenerateGeneric(masm);
}
@@ -640,6 +641,7 @@ void KeyedStoreIC::Generate(MacroAssembler* masm,
// -- lr : return address
// -- sp[0] : key
// -- sp[1] : receiver
+ // -----------------------------------
__ ldm(ia, sp, r2.bit() | r3.bit());
__ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
@@ -654,7 +656,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// -- lr : return address
// -- sp[0] : key
// -- sp[1] : receiver
+ // -----------------------------------
Label slow, fast, array, extra, exit;
+
// Get the key and the object from the stack.
__ ldm(ia, sp, r1.bit() | r3.bit()); // r1 = key, r3 = receiver
// Check that the key is a smi.
@@ -806,7 +810,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);
// Cache miss: Jump to runtime.
- Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
+ GenerateMiss(masm);
}
@@ -827,7 +831,7 @@ void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
}
-void StoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r2 : name
@@ -839,7 +843,7 @@ void StoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
__ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
// Perform tail call to the entry.
- __ TailCallRuntime(f, 3, 1);
+ __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
}
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 6c3bbbb8..b39404e7 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -205,6 +205,11 @@ void MacroAssembler::LoadRoot(Register destination,
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object, Register offset,
Register scratch) {
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are cp.
+ ASSERT(!object.is(cp) && !offset.is(cp) && !scratch.is(cp));
+
// This is how much we shift the remembered set bit offset to get the
// offset of the word in the remembered set. We divide by kBitsPerInt (32,
// shift right 5) and then multiply by kIntSize (4, shift left 2).
@@ -272,6 +277,14 @@ void MacroAssembler::RecordWrite(Register object, Register offset,
str(scratch, MemOperand(object));
bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (FLAG_debug_code) {
+ mov(object, Operand(bit_cast<int32_t>(kZapValue)));
+ mov(offset, Operand(bit_cast<int32_t>(kZapValue)));
+ mov(scratch, Operand(bit_cast<int32_t>(kZapValue)));
+ }
}
@@ -1035,9 +1048,13 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
return;
}
- Runtime::FunctionId function_id =
- static_cast<Runtime::FunctionId>(f->stub_id);
- RuntimeStub stub(function_id, num_arguments);
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ mov(r0, Operand(num_arguments));
+ mov(r1, Operand(ExternalReference(f)));
+ CEntryStub stub(1);
CallStub(&stub);
}
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 687fb1e7..d19a683d 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -362,6 +362,369 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
}
+static void GenerateCallFunction(MacroAssembler* masm,
+ Object* object,
+ const ParameterCount& arguments,
+ Label* miss) {
+ // ----------- S t a t e -------------
+ // -- r0: receiver
+ // -- r1: function to call
+ // -----------------------------------
+
+ // Check that the function really is a function.
+ __ BranchOnSmi(r1, miss);
+ __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+ __ b(ne, miss);
+
+ // Patch the receiver on the stack with the global proxy if
+ // necessary.
+ if (object->IsGlobalObject()) {
+ __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
+ __ str(r3, MemOperand(sp, arguments.immediate() * kPointerSize));
+ }
+
+ // Invoke the function.
+ __ InvokeFunction(r1, arguments, JUMP_FUNCTION);
+}
+
+
+static void GenerateCallConstFunction(MacroAssembler* masm,
+ JSFunction* function,
+ const ParameterCount& arguments) {
+ ASSERT(function->is_compiled());
+
+ // Get the function and setup the context.
+ __ mov(r1, Operand(Handle<JSFunction>(function)));
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+ // Jump to the cached code (tail call).
+ Handle<Code> code(function->code());
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ __ InvokeCode(code, expected, arguments,
+ RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+}
+
+
+template <class Compiler>
+static void CompileLoadInterceptor(Compiler* compiler,
+ StubCompiler* stub_compiler,
+ MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss) {
+ ASSERT(holder->HasNamedInterceptor());
+ ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Check that the receiver isn't a smi.
+ __ BranchOnSmi(receiver, miss);
+
+ // Check that the maps haven't changed.
+ Register reg =
+ stub_compiler->CheckPrototypes(object, receiver, holder,
+ scratch1, scratch2, name, miss);
+
+ if (lookup->IsValid() && lookup->IsCacheable()) {
+ compiler->CompileCacheable(masm,
+ stub_compiler,
+ receiver,
+ reg,
+ scratch1,
+ scratch2,
+ holder,
+ lookup,
+ name,
+ miss);
+ } else {
+ compiler->CompileRegular(masm,
+ receiver,
+ reg,
+ scratch2,
+ holder,
+ miss);
+ }
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ JSObject* holder_obj) {
+ __ push(receiver);
+ __ push(holder);
+ __ push(name);
+ InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+ ASSERT(!Heap::InNewSpace(interceptor));
+
+ Register scratch = receiver;
+ __ mov(scratch, Operand(Handle<Object>(interceptor)));
+ __ push(scratch);
+ __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
+ __ push(scratch);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ JSObject* holder_obj) {
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly));
+ __ mov(r0, Operand(5));
+ __ mov(r1, Operand(ref));
+
+ CEntryStub stub(1);
+ __ CallStub(&stub);
+}
+
+
+class LoadInterceptorCompiler BASE_EMBEDDED {
+ public:
+ explicit LoadInterceptorCompiler(Register name) : name_(name) {}
+
+ void CompileCacheable(MacroAssembler* masm,
+ StubCompiler* stub_compiler,
+ Register receiver,
+ Register holder,
+ Register scratch1,
+ Register scratch2,
+ JSObject* holder_obj,
+ LookupResult* lookup,
+ String* name,
+ Label* miss_label) {
+ AccessorInfo* callback = 0;
+ bool optimize = false;
+    // So far the most popular follow-ups for interceptor loads are FIELD
+    // and CALLBACKS, so we inline only those; other cases may be added
+    // later.
+ if (lookup->type() == FIELD) {
+ optimize = true;
+ } else if (lookup->type() == CALLBACKS) {
+ Object* callback_object = lookup->GetCallbackObject();
+ if (callback_object->IsAccessorInfo()) {
+ callback = AccessorInfo::cast(callback_object);
+ optimize = callback->getter() != NULL;
+ }
+ }
+
+ if (!optimize) {
+ CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+ return;
+ }
+
+ // Note: starting a frame here makes GC aware of pointers pushed below.
+ __ EnterInternalFrame();
+
+ if (lookup->type() == CALLBACKS) {
+ __ push(receiver);
+ }
+ __ push(holder);
+ __ push(name_);
+
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ Label interceptor_failed;
+ // Compare with no_interceptor_result_sentinel.
+ __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ cmp(r0, scratch1);
+ __ b(eq, &interceptor_failed);
+ __ LeaveInternalFrame();
+ __ Ret();
+
+ __ bind(&interceptor_failed);
+ __ pop(name_);
+ __ pop(holder);
+
+ if (lookup->type() == CALLBACKS) {
+ __ pop(receiver);
+ }
+
+ __ LeaveInternalFrame();
+
+ if (lookup->type() == FIELD) {
+ holder = stub_compiler->CheckPrototypes(holder_obj,
+ holder,
+ lookup->holder(),
+ scratch1,
+ scratch2,
+ name,
+ miss_label);
+ stub_compiler->GenerateFastPropertyLoad(masm,
+ r0,
+ holder,
+ lookup->holder(),
+ lookup->GetFieldIndex());
+ __ Ret();
+ } else {
+ ASSERT(lookup->type() == CALLBACKS);
+ ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+ ASSERT(callback != NULL);
+ ASSERT(callback->getter() != NULL);
+
+ Label cleanup;
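+      // Slip the receiver onto the stack one slot below the current
+      // top element; the cleanup code undoes this on the miss path.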
+ __ pop(scratch2);
+ __ push(receiver);
+ __ push(scratch2);
+
+ holder = stub_compiler->CheckPrototypes(holder_obj, holder,
+ lookup->holder(), scratch1,
+ scratch2,
+ name,
+ &cleanup);
+
+ __ push(holder);
+ __ Move(holder, Handle<AccessorInfo>(callback));
+ __ push(holder);
+ __ ldr(scratch1, FieldMemOperand(holder, AccessorInfo::kDataOffset));
+ __ push(scratch1);
+ __ push(name_);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+ __ TailCallRuntime(ref, 5, 1);
+
+ __ bind(&cleanup);
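+      // Remove the receiver slipped in above, keeping the element on
+      // top of it in place.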
+ __ pop(scratch1);
+ __ pop(scratch2);
+ __ push(scratch1);
+ }
+ }
+
+
+ void CompileRegular(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register scratch,
+ JSObject* holder_obj,
+ Label* miss_label) {
+ PushInterceptorArguments(masm, receiver, holder, name_, holder_obj);
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+ __ TailCallRuntime(ref, 5, 1);
+ }
+
+ private:
+ Register name_;
+};
+
+
+class CallInterceptorCompiler BASE_EMBEDDED {
+ public:
+ CallInterceptorCompiler(const ParameterCount& arguments, Register name)
+ : arguments_(arguments), argc_(arguments.immediate()), name_(name) {}
+
+ void CompileCacheable(MacroAssembler* masm,
+ StubCompiler* stub_compiler,
+ Register receiver,
+ Register holder,
+ Register scratch1,
+ Register scratch2,
+ JSObject* holder_obj,
+ LookupResult* lookup,
+ String* name,
+ Label* miss_label) {
+    JSFunction* function = NULL;
+    bool optimize = false;
+    // So far the most common case when the interceptor fails is a
+    // CONSTANT_FUNCTION found behind it.
+ if (lookup->type() == CONSTANT_FUNCTION) {
+ function = lookup->GetConstantFunction();
+ // JSArray holder is a special case for call constant function
+ // (see the corresponding code).
+ if (function->is_compiled() && !holder_obj->IsJSArray()) {
+ optimize = true;
+ }
+ }
+
+ if (!optimize) {
+ CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+ return;
+ }
+
+    // Constant functions cannot sit on a global object.
+ ASSERT(!lookup->holder()->IsGlobalObject());
+
+ __ EnterInternalFrame();
+ __ push(holder); // Save the holder.
+ __ push(name_); // Save the name.
+
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ ASSERT(!r0.is(name_));
+ ASSERT(!r0.is(scratch1));
+ __ pop(name_); // Restore the name.
+ __ pop(scratch1); // Restore the holder.
+ __ LeaveInternalFrame();
+
+    // If r0 is not the no_interceptor_result_sentinel root, the
+    // interceptor handled the lookup; skip the constant-function path
+    // and fall through to the caller's regular call sequence.
+ __ LoadRoot(scratch2, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ cmp(r0, scratch2);
+ Label invoke;
+ __ b(ne, &invoke);
+
+ stub_compiler->CheckPrototypes(holder_obj, scratch1,
+ lookup->holder(), scratch1,
+ scratch2,
+ name,
+ miss_label);
+ GenerateCallConstFunction(masm, function, arguments_);
+
+ __ bind(&invoke);
+ }
+
+ void CompileRegular(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register scratch,
+ JSObject* holder_obj,
+ Label* miss_label) {
+ __ EnterInternalFrame();
+ // Save the name_ register across the call.
+ __ push(name_);
+
+ PushInterceptorArguments(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForCall));
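+    // CEntryStub convention: argument count in r0, target function in r1.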
+ __ mov(r0, Operand(5));
+ __ mov(r1, Operand(ref));
+
+ CEntryStub stub(1);
+ __ CallStub(&stub);
+
+ // Restore the name_ register.
+ __ pop(name_);
+ __ LeaveInternalFrame();
+ }
+
+ private:
+ const ParameterCount& arguments_;
+ int argc_;
+ Register name_;
+};
+
+
#undef __
#define __ ACCESS_MASM(masm())
@@ -491,30 +854,18 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
Register scratch2,
String* name,
Label* miss) {
- // Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, miss);
-
- // Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
-
- // Push the arguments on the JS stack of the caller.
- __ push(receiver); // receiver
- __ push(reg); // holder
- __ push(name_reg); // name
-
- InterceptorInfo* interceptor = holder->GetNamedInterceptor();
- ASSERT(!Heap::InNewSpace(interceptor));
- __ mov(scratch1, Operand(Handle<Object>(interceptor)));
- __ push(scratch1);
- __ ldr(scratch2, FieldMemOperand(scratch1, InterceptorInfo::kDataOffset));
- __ push(scratch2);
-
- // Do tail-call to the runtime system.
- ExternalReference load_ic_property =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
- __ TailCallRuntime(load_ic_property, 5, 1);
+ LoadInterceptorCompiler compiler(name_reg);
+ CompileLoadInterceptor(&compiler,
+ this,
+ masm(),
+ object,
+ holder,
+ name,
+ lookup,
+ receiver,
+ scratch1,
+ scratch2,
+ miss);
}
@@ -572,22 +923,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
CheckPrototypes(JSObject::cast(object), r0, holder, r3, r2, name, &miss);
GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
- // Check that the function really is a function.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
- // Get the map.
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, argc * kPointerSize));
- }
-
- // Invoke the function.
- __ InvokeFunction(r1, arguments(), JUMP_FUNCTION);
+ GenerateCallFunction(masm(), object, arguments(), &miss);
// Handle call cache miss.
__ bind(&miss);
@@ -715,16 +1051,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
UNREACHABLE();
}
- // Get the function and setup the context.
- __ mov(r1, Operand(Handle<JSFunction>(function)));
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
- ASSERT(function->is_compiled());
- Handle<Code> code(function->code());
- ParameterCount expected(function->shared()->formal_parameter_count());
- __ InvokeCode(code, expected, arguments(),
- RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+ GenerateCallConstFunction(masm(), function, arguments());
// Handle call cache miss.
__ bind(&miss);
@@ -748,7 +1075,34 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
// -----------------------------------
Label miss;
- // TODO(1224669): Implement.
+ // Get the number of arguments.
+ const int argc = arguments().immediate();
+
+ LookupResult lookup;
+ LookupPostInterceptor(holder, name, &lookup);
+
+ // Get the receiver from the stack into r0.
+ __ ldr(r0, MemOperand(sp, argc * kPointerSize));
+ // Load the name from the stack into r1.
+ __ ldr(r1, MemOperand(sp, (argc + 1) * kPointerSize));
+
+ CallInterceptorCompiler compiler(arguments(), r1);
+ CompileLoadInterceptor(&compiler,
+ this,
+ masm(),
+ JSObject::cast(object),
+ holder,
+ name,
+ &lookup,
+ r0,
+ r2,
+ r3,
+ &miss);
+
+ // Restore receiver.
+ __ ldr(r0, MemOperand(sp, argc * kPointerSize));
+
+ GenerateCallFunction(masm(), object, arguments(), &miss);
// Handle call cache miss.
__ bind(&miss);
@@ -921,7 +1275,6 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
// Handle store cache miss.
__ bind(&miss);
- __ mov(r2, Operand(Handle<String>(name))); // restore name
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -973,7 +1326,6 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
// Handle store cache miss.
__ bind(&miss);
- __ mov(r2, Operand(Handle<String>(name))); // restore name
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -1099,7 +1451,7 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
__ ldr(r0, MemOperand(sp, 0));
LookupResult lookup;
- holder->LocalLookupRealNamedProperty(name, &lookup);
+ LookupPostInterceptor(holder, name, &lookup);
GenerateLoadInterceptor(object,
holder,
&lookup,
@@ -1265,7 +1617,7 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
__ b(ne, &miss);
LookupResult lookup;
- holder->LocalLookupRealNamedProperty(name, &lookup);
+ LookupPostInterceptor(holder, name, &lookup);
GenerateLoadInterceptor(receiver,
holder,
&lookup,
diff --git a/src/arm/stub-cache-arm.cc.rej b/src/arm/stub-cache-arm.cc.rej
new file mode 100644
index 00000000..f8baa413
--- /dev/null
+++ b/src/arm/stub-cache-arm.cc.rej
@@ -0,0 +1,153 @@
+*************** void StubCompiler::GenerateLoadInterceptor(JSObject* object,
+*** 491,520 ****
+ Register scratch2,
+ String* name,
+ Label* miss) {
+- // Check that the receiver isn't a smi.
+- __ tst(receiver, Operand(kSmiTagMask));
+- __ b(eq, miss);
+-
+- // Check that the maps haven't changed.
+- Register reg =
+- CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
+-
+- // Push the arguments on the JS stack of the caller.
+- __ push(receiver); // receiver
+- __ push(reg); // holder
+- __ push(name_reg); // name
+-
+- InterceptorInfo* interceptor = holder->GetNamedInterceptor();
+- ASSERT(!Heap::InNewSpace(interceptor));
+- __ mov(scratch1, Operand(Handle<Object>(interceptor)));
+- __ push(scratch1);
+- __ ldr(scratch2, FieldMemOperand(scratch1, InterceptorInfo::kDataOffset));
+- __ push(scratch2);
+-
+- // Do tail-call to the runtime system.
+- ExternalReference load_ic_property =
+- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+- __ TailCallRuntime(load_ic_property, 5, 1);
+ }
+
+
+--- 854,871 ----
+ Register scratch2,
+ String* name,
+ Label* miss) {
++ LoadInterceptorCompiler compiler(name_reg);
++ CompileLoadInterceptor(&compiler,
++ this,
++ masm(),
++ object,
++ holder,
++ name,
++ lookup,
++ receiver,
++ scratch1,
++ scratch2,
++ miss);
+ }
+
+
+*************** Object* CallStubCompiler::CompileCallField(Object* object,
+*** 572,593 ****
+ CheckPrototypes(JSObject::cast(object), r0, holder, r3, r2, name, &miss);
+ GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
+
+- // Check that the function really is a function.
+- __ tst(r1, Operand(kSmiTagMask));
+- __ b(eq, &miss);
+- // Get the map.
+- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+- __ b(ne, &miss);
+-
+- // Patch the receiver on the stack with the global proxy if
+- // necessary.
+- if (object->IsGlobalObject()) {
+- __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
+- __ str(r3, MemOperand(sp, argc * kPointerSize));
+- }
+-
+- // Invoke the function.
+- __ InvokeFunction(r1, arguments(), JUMP_FUNCTION);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+--- 923,929 ----
+ CheckPrototypes(JSObject::cast(object), r0, holder, r3, r2, name, &miss);
+ GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
+
++ GenerateCallFunction(masm(), object, arguments(), &miss);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+*************** Object* CallStubCompiler::CompileCallConstant(Object* object,
+*** 715,730 ****
+ UNREACHABLE();
+ }
+
+- // Get the function and setup the context.
+- __ mov(r1, Operand(Handle<JSFunction>(function)));
+- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+-
+- // Jump to the cached code (tail call).
+- ASSERT(function->is_compiled());
+- Handle<Code> code(function->code());
+- ParameterCount expected(function->shared()->formal_parameter_count());
+- __ InvokeCode(code, expected, arguments(),
+- RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+--- 1051,1057 ----
+ UNREACHABLE();
+ }
+
++ GenerateCallConstFunction(masm(), function, arguments());
+
+ // Handle call cache miss.
+ __ bind(&miss);
+*************** Object* CallStubCompiler::CompileCallInterceptor(Object* object,
+*** 748,754 ****
+ // -----------------------------------
+ Label miss;
+
+- // TODO(1224669): Implement.
+
+ // Handle call cache miss.
+ __ bind(&miss);
+--- 1075,1108 ----
+ // -----------------------------------
+ Label miss;
+
++ // Get the number of arguments.
++ const int argc = arguments().immediate();
++
++ LookupResult lookup;
++ LookupPostInterceptor(holder, name, &lookup);
++
++ // Get the receiver from the stack into r0.
++ __ ldr(r0, MemOperand(sp, argc * kPointerSize));
++ // Load the name from the stack into r1.
++ __ ldr(r1, MemOperand(sp, (argc + 1) * kPointerSize));
++
++ CallInterceptorCompiler compiler(arguments(), r1);
++ CompileLoadInterceptor(&compiler,
++ this,
++ masm(),
++ JSObject::cast(object),
++ holder,
++ name,
++ &lookup,
++ r0,
++ r2,
++ r3,
++ &miss);
++
++ // Restore receiver.
++ __ ldr(r0, MemOperand(sp, argc * kPointerSize));
++
++ GenerateCallFunction(masm(), object, arguments(), &miss);
+
+ // Handle call cache miss.
+ __ bind(&miss);
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index a33ebd42..7a8ac726 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -219,36 +219,15 @@ void VirtualFrame::PushTryHandler(HandlerType type) {
}
-void VirtualFrame::RawCallStub(CodeStub* stub) {
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallStub(stub);
-}
-
-
-void VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
- PrepareForCall(0, 0);
- arg->Unuse();
- RawCallStub(stub);
-}
-
-
-void VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
- PrepareForCall(0, 0);
- arg0->Unuse();
- arg1->Unuse();
- RawCallStub(stub);
-}
-
-
void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
- PrepareForCall(arg_count, arg_count);
+ Forget(arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(f, arg_count);
}
void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
- PrepareForCall(arg_count, arg_count);
+ Forget(arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(id, arg_count);
}
@@ -257,102 +236,34 @@ void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
int arg_count) {
- PrepareForCall(arg_count, arg_count);
+ Forget(arg_count);
__ InvokeBuiltin(id, flags);
}
-void VirtualFrame::RawCallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode) {
- ASSERT(cgen()->HasValidEntryRegisters());
- __ Call(code, rmode);
-}
-
-
void VirtualFrame::CallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode,
int dropped_args) {
- int spilled_args = 0;
switch (code->kind()) {
case Code::CALL_IC:
- spilled_args = dropped_args + 1;
- break;
case Code::FUNCTION:
- spilled_args = dropped_args + 1;
break;
case Code::KEYED_LOAD_IC:
- ASSERT(dropped_args == 0);
- spilled_args = 2;
- break;
- default:
- // The other types of code objects are called with values
- // in specific registers, and are handled in functions with
- // a different signature.
- UNREACHABLE();
- break;
- }
- PrepareForCall(spilled_args, dropped_args);
- RawCallCodeObject(code, rmode);
-}
-
-
-void VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- Result* arg,
- int dropped_args) {
- int spilled_args = 0;
- switch (code->kind()) {
case Code::LOAD_IC:
- ASSERT(arg->reg().is(r2));
- ASSERT(dropped_args == 0);
- spilled_args = 1;
- break;
case Code::KEYED_STORE_IC:
- ASSERT(arg->reg().is(r0));
- ASSERT(dropped_args == 0);
- spilled_args = 2;
- break;
- default:
- // No other types of code objects are called with values
- // in exactly one register.
- UNREACHABLE();
- break;
- }
- PrepareForCall(spilled_args, dropped_args);
- arg->Unuse();
- RawCallCodeObject(code, rmode);
-}
-
-
-void VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- Result* arg0,
- Result* arg1,
- int dropped_args) {
- int spilled_args = 1;
- switch (code->kind()) {
case Code::STORE_IC:
- ASSERT(arg0->reg().is(r0));
- ASSERT(arg1->reg().is(r2));
ASSERT(dropped_args == 0);
- spilled_args = 1;
break;
case Code::BUILTIN:
ASSERT(*code == Builtins::builtin(Builtins::JSConstructCall));
- ASSERT(arg0->reg().is(r0));
- ASSERT(arg1->reg().is(r1));
- spilled_args = dropped_args + 1;
break;
default:
- // No other types of code objects are called with values
- // in exactly two registers.
UNREACHABLE();
break;
}
- PrepareForCall(spilled_args, dropped_args);
- arg0->Unuse();
- arg1->Unuse();
- RawCallCodeObject(code, rmode);
+ Forget(dropped_args);
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ Call(code, rmode);
}
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
index b2f0eea6..9a2f7d36 100644
--- a/src/arm/virtual-frame-arm.h
+++ b/src/arm/virtual-frame-arm.h
@@ -287,18 +287,11 @@ class VirtualFrame : public ZoneObject {
// Call stub given the number of arguments it expects on (and
// removes from) the stack.
void CallStub(CodeStub* stub, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- RawCallStub(stub);
+ Forget(arg_count);
+ ASSERT(cgen()->HasValidEntryRegisters());
+ masm()->CallStub(stub);
}
- // Call stub that expects its argument in r0. The argument is given
- // as a result which must be the register r0.
- void CallStub(CodeStub* stub, Result* arg);
-
- // Call stub that expects its arguments in r1 and r0. The arguments
- // are given as results which must be the appropriate registers.
- void CallStub(CodeStub* stub, Result* arg0, Result* arg1);
-
// Call runtime given the number of arguments expected on (and
// removed from) the stack.
void CallRuntime(Runtime::Function* f, int arg_count);
@@ -311,19 +304,10 @@ class VirtualFrame : public ZoneObject {
int arg_count);
// Call into an IC stub given the number of arguments it removes
- // from the stack. Register arguments are passed as results and
- // consumed by the call.
- void CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- int dropped_args);
+ // from the stack. Register arguments to the IC stub are implicit,
+ // and depend on the type of IC stub.
void CallCodeObject(Handle<Code> ic,
RelocInfo::Mode rmode,
- Result* arg,
- int dropped_args);
- void CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- Result* arg0,
- Result* arg1,
int dropped_args);
// Drop a number of elements from the top of the expression stack. May
@@ -511,14 +495,6 @@ class VirtualFrame : public ZoneObject {
// Register counts are correctly updated.
int InvalidateFrameSlotAt(int index);
- // Call a code stub that has already been prepared for calling (via
- // PrepareForCall).
- void RawCallStub(CodeStub* stub);
-
- // Calls a code object which has already been prepared for calling
- // (via PrepareForCall).
- void RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
-
bool Equals(VirtualFrame* other);
// Classes that need raw access to the elements_ array.
diff --git a/src/assembler.cc b/src/assembler.cc
index fcdb14ae..dbf2742b 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -655,7 +655,7 @@ ExternalReference ExternalReference::re_check_stack_guard_state() {
#elif V8_TARGET_ARCH_ARM
function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
#else
- UNREACHABLE("Unexpected architecture");
+ UNREACHABLE();
#endif
return ExternalReference(Redirect(function));
}
diff --git a/src/ast.cc b/src/ast.cc
index 4edcf6d8..7cb55783 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -146,27 +146,6 @@ bool ObjectLiteral::Property::IsCompileTimeValue() {
}
-bool ObjectLiteral::IsValidJSON() {
- int length = properties()->length();
- for (int i = 0; i < length; i++) {
- Property* prop = properties()->at(i);
- if (!prop->value()->IsValidJSON())
- return false;
- }
- return true;
-}
-
-
-bool ArrayLiteral::IsValidJSON() {
- int length = values()->length();
- for (int i = 0; i < length; i++) {
- if (!values()->at(i)->IsValidJSON())
- return false;
- }
- return true;
-}
-
-
void TargetCollector::AddTarget(BreakTarget* target) {
// Add the label to the collector, but discard duplicates.
int length = targets_->length();
diff --git a/src/ast.h b/src/ast.h
index 22e096f2..48d0bfac 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -180,9 +180,12 @@ class Expression: public AstNode {
kTestValue
};
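+  // num_ keeps this value until an AstLabeler numbers the node.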
+ static const int kNoLabel = -1;
+
+ Expression() : num_(kNoLabel) {}
+
virtual Expression* AsExpression() { return this; }
- virtual bool IsValidJSON() { return false; }
virtual bool IsValidLeftHandSide() { return false; }
// Symbols that cannot be parsed as array indices are considered property
@@ -198,8 +201,14 @@ class Expression: public AstNode {
// Static type information for this expression.
StaticType* type() { return &type_; }
+ int num() { return num_; }
+
+  // AST node numbers are assigned in evaluation order.
+ void set_num(int n) { num_ = n; }
+
private:
StaticType type_;
+ int num_;
};
@@ -703,8 +712,6 @@ class Literal: public Expression {
return handle_.is_identical_to(other->handle_);
}
- virtual bool IsValidJSON() { return true; }
-
virtual bool IsPropertyName() {
if (handle_->IsSymbol()) {
uint32_t ignored;
@@ -741,8 +748,6 @@ class MaterializedLiteral: public Expression {
// constants and simple object and array literals.
bool is_simple() const { return is_simple_; }
- virtual bool IsValidJSON() { return true; }
-
int depth() const { return depth_; }
private:
@@ -796,7 +801,6 @@ class ObjectLiteral: public MaterializedLiteral {
virtual ObjectLiteral* AsObjectLiteral() { return this; }
virtual void Accept(AstVisitor* v);
- virtual bool IsValidJSON();
Handle<FixedArray> constant_properties() const {
return constant_properties_;
@@ -844,7 +848,6 @@ class ArrayLiteral: public MaterializedLiteral {
virtual void Accept(AstVisitor* v);
virtual ArrayLiteral* AsArrayLiteral() { return this; }
- virtual bool IsValidJSON();
Handle<FixedArray> constant_elements() const { return constant_elements_; }
ZoneList<Expression*>* values() const { return values_; }
@@ -1321,7 +1324,6 @@ class FunctionLiteral: public Expression {
start_position_(start_position),
end_position_(end_position),
is_expression_(is_expression),
- loop_nesting_(0),
function_token_position_(RelocInfo::kNoPosition),
inferred_name_(Heap::empty_string()),
try_full_codegen_(false) {
@@ -1356,9 +1358,6 @@ class FunctionLiteral: public Expression {
bool AllowsLazyCompilation();
- bool loop_nesting() const { return loop_nesting_; }
- void set_loop_nesting(int nesting) { loop_nesting_ = nesting; }
-
Handle<String> inferred_name() const { return inferred_name_; }
void set_inferred_name(Handle<String> inferred_name) {
inferred_name_ = inferred_name;
@@ -1386,7 +1385,6 @@ class FunctionLiteral: public Expression {
int start_position_;
int end_position_;
bool is_expression_;
- int loop_nesting_;
int function_token_position_;
Handle<String> inferred_name_;
bool try_full_codegen_;
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 9eacf57a..78d09952 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -249,26 +249,24 @@ bool PendingFixups::Process(Handle<JSBuiltinsObject> builtins) {
V8_Fatal(__FILE__, __LINE__, "Cannot resolve call to builtin %s", name);
}
#endif
- Handle<JSFunction> f = Handle<JSFunction>(JSFunction::cast(o));
+ Handle<SharedFunctionInfo> shared(JSFunction::cast(o)->shared());
// Make sure the number of parameters match the formal parameter count.
int argc = Bootstrapper::FixupFlagsArgumentsCount::decode(flags);
USE(argc);
- ASSERT(f->shared()->formal_parameter_count() == argc);
- if (!f->is_compiled()) {
- // Do lazy compilation and check for stack overflows.
- if (!CompileLazy(f, CLEAR_EXCEPTION)) {
- Clear();
- return false;
- }
+ ASSERT(shared->formal_parameter_count() == argc);
+ // Do lazy compilation if necessary and check for stack overflows.
+ if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) {
+ Clear();
+ return false;
}
Code* code = Code::cast(code_[i]);
Address pc = code->instruction_start() + pc_[i];
RelocInfo target(pc, RelocInfo::CODE_TARGET, 0);
bool use_code_object = Bootstrapper::FixupFlagsUseCodeObject::decode(flags);
if (use_code_object) {
- target.set_target_object(f->code());
+ target.set_target_object(shared->code());
} else {
- target.set_target_address(f->code()->instruction_start());
+ target.set_target_address(shared->code()->instruction_start());
}
LOG(StringEvent("resolved", name));
}
@@ -960,7 +958,7 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
Handle<JSFunction> fun =
Factory::NewFunctionFromBoilerplate(boilerplate, context);
- // Call function using the either the runtime object or the global
+ // Call function using either the runtime object or the global
// object as the receiver. Provide no parameters.
Handle<Object> receiver =
Handle<Object>(use_runtime_context
diff --git a/src/builtins.cc b/src/builtins.cc
index 9db22303..db0770f3 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -175,12 +175,12 @@ Handle<Code> Builtins::GetCode(JavaScript id, bool* resolved) {
if (Top::context() != NULL) {
Object* object = Top::builtins()->javascript_builtin(id);
if (object->IsJSFunction()) {
- Handle<JSFunction> function(JSFunction::cast(object));
+ Handle<SharedFunctionInfo> shared(JSFunction::cast(object)->shared());
// Make sure the number of parameters match the formal parameter count.
- ASSERT(function->shared()->formal_parameter_count() ==
+ ASSERT(shared->formal_parameter_count() ==
Builtins::GetArgumentsCount(id));
- if (function->is_compiled() || CompileLazy(function, CLEAR_EXCEPTION)) {
- code = function->code();
+ if (EnsureCompiled(shared, CLEAR_EXCEPTION)) {
+ code = shared->code();
*resolved = true;
}
}
@@ -247,8 +247,10 @@ BUILTIN(ArrayCodeGeneric) {
Smi* len = Smi::FromInt(number_of_elements);
Object* obj = Heap::AllocateFixedArrayWithHoles(len->value());
if (obj->IsFailure()) return obj;
+
+ AssertNoAllocation no_gc;
FixedArray* elms = FixedArray::cast(obj);
- WriteBarrierMode mode = elms->GetWriteBarrierMode();
+ WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
// Fill in the content
for (int index = 0; index < number_of_elements; index++) {
elms->set(index, args[index+1], mode);
@@ -256,7 +258,7 @@ BUILTIN(ArrayCodeGeneric) {
// Set length and elements on the array.
array->set_elements(FixedArray::cast(obj));
- array->set_length(len, SKIP_WRITE_BARRIER);
+ array->set_length(len);
return array;
}
@@ -283,8 +285,10 @@ BUILTIN(ArrayPush) {
int capacity = new_length + (new_length >> 1) + 16;
Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
if (obj->IsFailure()) return obj;
+
+ AssertNoAllocation no_gc;
FixedArray* new_elms = FixedArray::cast(obj);
- WriteBarrierMode mode = new_elms->GetWriteBarrierMode();
+ WriteBarrierMode mode = new_elms->GetWriteBarrierMode(no_gc);
// Fill out the new array with old elements.
for (int i = 0; i < len; i++) new_elms->set(i, elms->get(i), mode);
// Add the provided values.
@@ -295,7 +299,7 @@ BUILTIN(ArrayPush) {
array->set_elements(new_elms);
}
// Set the length.
- array->set_length(Smi::FromInt(new_length), SKIP_WRITE_BARRIER);
+ array->set_length(Smi::FromInt(new_length));
return array->length();
}
@@ -313,7 +317,7 @@ BUILTIN(ArrayPop) {
Object* top = elms->get(len - 1);
// Set the length.
- array->set_length(Smi::FromInt(len - 1), SKIP_WRITE_BARRIER);
+ array->set_length(Smi::FromInt(len - 1));
if (!top->IsTheHole()) {
// Delete the top element.
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 052c1caf..16267f64 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -55,9 +55,9 @@ namespace internal {
V(CounterOp) \
V(ArgumentsAccess) \
V(RegExpExec) \
- V(Runtime) \
V(CEntry) \
- V(JSEntry)
+ V(JSEntry) \
+ V(DebuggerStatement)
// List of code stubs only used on ARM platforms.
#ifdef V8_TARGET_ARCH_ARM
diff --git a/src/codegen.cc b/src/codegen.cc
index aa2a2b82..8822eddb 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -216,7 +216,8 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(FunctionLiteral* fun,
// the compiler.cc code.
Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* fun,
Handle<Script> script,
- bool is_eval) {
+ bool is_eval,
+ CompilationInfo* info) {
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
int len = String::cast(script->source())->length();
Counters::total_old_codegen_source_size.Increment(len);
@@ -224,9 +225,10 @@ Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* fun,
MakeCodePrologue(fun);
// Generate code.
const int kInitialBufferSize = 4 * KB;
- CodeGenerator cgen(kInitialBufferSize, script, is_eval);
+ MacroAssembler masm(NULL, kInitialBufferSize);
+ CodeGenerator cgen(&masm, script, is_eval);
CodeGeneratorScope scope(&cgen);
- cgen.GenCode(fun);
+ cgen.Generate(fun, PRIMARY, info);
if (cgen.HasStackOverflow()) {
ASSERT(!Top::has_pending_exception());
return Handle<Code>::null();
@@ -451,11 +453,6 @@ void CodeGenerator::CodeForSourcePosition(int pos) {
}
-const char* RuntimeStub::GetName() {
- return Runtime::FunctionForId(id_)->stub_name;
-}
-
-
const char* GenericUnaryOpStub::GetName() {
switch (op_) {
case Token::SUB:
@@ -473,14 +470,6 @@ const char* GenericUnaryOpStub::GetName() {
}
-void RuntimeStub::Generate(MacroAssembler* masm) {
- Runtime::Function* f = Runtime::FunctionForId(id_);
- masm->TailCallRuntime(ExternalReference(f),
- num_arguments_,
- f->result_size);
-}
-
-
void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
switch (type_) {
case READ_LENGTH: GenerateReadLength(masm); break;
@@ -490,6 +479,17 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
}
+int CEntryStub::MinorKey() {
+ ASSERT(result_size_ <= 2);
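+  // On Win64, two-word results are returned indirectly through a hidden
+  // argument, so such stubs need a distinct minor key.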
+#ifdef _WIN64
+ return ExitFrameModeBits::encode(mode_)
+ | IndirectResultBits::encode(result_size_ > 1);
+#else
+ return ExitFrameModeBits::encode(mode_);
+#endif
+}
+
+
bool ApiGetterEntryStub::GetCustomCache(Code** code_out) {
Object* cache = info()->load_stub_cache();
if (cache->IsUndefined()) {
@@ -505,5 +505,12 @@ void ApiGetterEntryStub::SetCustomCache(Code* value) {
info()->set_load_stub_cache(value);
}
+#ifdef ENABLE_DEBUGGER_SUPPORT
+void DebuggerStatementStub::Generate(MacroAssembler* masm) {
+ Runtime::Function* f = Runtime::FunctionForId(Runtime::kDebugBreak);
+ masm->TailCallRuntime(ExternalReference(f), 0, f->result_size);
+}
+#endif
+
} } // namespace v8::internal
diff --git a/src/codegen.h b/src/codegen.h
index 76cc4914..d0be5f1b 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -55,7 +55,7 @@
// CodeGenerator
// ~CodeGenerator
// ProcessDeferred
-// GenCode
+// Generate
// ComputeLazyCompile
// BuildBoilerplate
// ComputeCallInitialize
@@ -181,43 +181,6 @@ class DeferredCode: public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(DeferredCode);
};
-
-// RuntimeStub models code stubs calling entry points in the Runtime class.
-class RuntimeStub : public CodeStub {
- public:
- explicit RuntimeStub(Runtime::FunctionId id, int num_arguments)
- : id_(id), num_arguments_(num_arguments) { }
-
- void Generate(MacroAssembler* masm);
-
- // Disassembler support. It is useful to be able to print the name
- // of the runtime function called through this stub.
- static const char* GetNameFromMinorKey(int minor_key) {
- return Runtime::FunctionForId(IdField::decode(minor_key))->stub_name;
- }
-
- private:
- Runtime::FunctionId id_;
- int num_arguments_;
-
- class ArgumentField: public BitField<int, 0, 16> {};
- class IdField: public BitField<Runtime::FunctionId, 16, kMinorBits - 16> {};
-
- Major MajorKey() { return Runtime; }
- int MinorKey() {
- return IdField::encode(id_) | ArgumentField::encode(num_arguments_);
- }
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("RuntimeStub (id %s)\n", Runtime::FunctionForId(id_)->name);
- }
-#endif
-};
-
-
class StackCheckStub : public CodeStub {
public:
StackCheckStub() { }
@@ -367,25 +330,30 @@ class CompareStub: public CodeStub {
class CEntryStub : public CodeStub {
public:
- explicit CEntryStub(int result_size) : result_size_(result_size) { }
+ explicit CEntryStub(int result_size,
+ ExitFrame::Mode mode = ExitFrame::MODE_NORMAL)
+ : result_size_(result_size), mode_(mode) { }
- void Generate(MacroAssembler* masm) { GenerateBody(masm, false); }
+ void Generate(MacroAssembler* masm);
- protected:
- void GenerateBody(MacroAssembler* masm, bool is_debug_break);
+ private:
void GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
- ExitFrame::Mode mode,
bool do_gc,
bool always_allocate_scope);
void GenerateThrowTOS(MacroAssembler* masm);
void GenerateThrowUncatchable(MacroAssembler* masm,
UncatchableExceptionType type);
- private:
+
// Number of pointers/values returned.
- int result_size_;
+ const int result_size_;
+ const ExitFrame::Mode mode_;
+
+ // Minor key encoding
+ class ExitFrameModeBits: public BitField<ExitFrame::Mode, 0, 1> {};
+ class IndirectResultBits: public BitField<bool, 1, 1> {};
Major MajorKey() { return CEntry; }
  // Minor key must differ if different result_size_ values mean different
@@ -422,16 +390,18 @@ class ApiGetterEntryStub : public CodeStub {
};
-class CEntryDebugBreakStub : public CEntryStub {
+// Marks a debugger statement so the debugger can recognize it by MajorKey.
+class DebuggerStatementStub : public CodeStub {
public:
- CEntryDebugBreakStub() : CEntryStub(1) { }
+ DebuggerStatementStub() { }
- void Generate(MacroAssembler* masm) { GenerateBody(masm, true); }
+ void Generate(MacroAssembler* masm);
private:
- int MinorKey() { return 1; }
+ Major MajorKey() { return DebuggerStatement; }
+ int MinorKey() { return 0; }
- const char* GetName() { return "CEntryDebugBreakStub"; }
+ const char* GetName() { return "DebuggerStatementStub"; }
};
diff --git a/src/compiler.cc b/src/compiler.cc
index 7482ae1a..a5e1e5c8 100644..100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -47,7 +47,7 @@ static Handle<Code> MakeCode(FunctionLiteral* literal,
Handle<Script> script,
Handle<Context> context,
bool is_eval,
- Handle<SharedFunctionInfo> shared) {
+ CompilationInfo* info) {
ASSERT(literal != NULL);
// Rewrite the AST by introducing .result assignments where needed.
@@ -96,6 +96,7 @@ static Handle<Code> MakeCode(FunctionLiteral* literal,
// incompatible.
CHECK(!FLAG_always_full_compiler || !FLAG_always_fast_compiler);
+ Handle<SharedFunctionInfo> shared = info->shared_info();
bool is_run_once = (shared.is_null())
? literal->scope()->is_global_scope()
: (shared->is_toplevel() || shared->try_full_codegen());
@@ -109,22 +110,13 @@ static Handle<Code> MakeCode(FunctionLiteral* literal,
} else if (FLAG_always_fast_compiler ||
(FLAG_fast_compiler && !is_run_once)) {
FastCodeGenSyntaxChecker checker;
- checker.Check(literal);
- // Does not yet generate code.
+ checker.Check(literal, info);
+ if (checker.has_supported_syntax()) {
+ return FastCodeGenerator::MakeCode(literal, script, is_eval, info);
+ }
}
- return CodeGenerator::MakeCode(literal, script, is_eval);
-}
-
-
-static bool IsValidJSON(FunctionLiteral* lit) {
- if (lit->body()->length() != 1)
- return false;
- Statement* stmt = lit->body()->at(0);
- if (stmt->AsExpressionStatement() == NULL)
- return false;
- Expression* expr = stmt->AsExpressionStatement()->expression();
- return expr->IsValidJSON();
+ return CodeGenerator::MakeCode(literal, script, is_eval, info);
}
@@ -142,8 +134,8 @@ static Handle<JSFunction> MakeFunction(bool is_global,
ASSERT(!i::Top::global_context().is_null());
script->set_context_data((*i::Top::global_context())->data());
-#ifdef ENABLE_DEBUGGER_SUPPORT
bool is_json = (validate == Compiler::VALIDATE_JSON);
+#ifdef ENABLE_DEBUGGER_SUPPORT
if (is_eval || is_json) {
script->set_compilation_type(
is_json ? Smi::FromInt(Script::COMPILATION_TYPE_JSON) :
@@ -151,12 +143,14 @@ static Handle<JSFunction> MakeFunction(bool is_global,
// For eval scripts add information on the function from which eval was
// called.
if (is_eval) {
- JavaScriptFrameIterator it;
- script->set_eval_from_shared(
- JSFunction::cast(it.frame()->function())->shared());
- int offset = static_cast<int>(
- it.frame()->pc() - it.frame()->code()->instruction_start());
- script->set_eval_from_instructions_offset(Smi::FromInt(offset));
+ StackTraceFrameIterator it;
+ if (!it.done()) {
+ script->set_eval_from_shared(
+ JSFunction::cast(it.frame()->function())->shared());
+ int offset = static_cast<int>(
+ it.frame()->pc() - it.frame()->code()->instruction_start());
+ script->set_eval_from_instructions_offset(Smi::FromInt(offset));
+ }
}
}
@@ -168,7 +162,8 @@ static Handle<JSFunction> MakeFunction(bool is_global,
ASSERT(is_eval || is_global);
// Build AST.
- FunctionLiteral* lit = MakeAST(is_global, script, extension, pre_data);
+ FunctionLiteral* lit =
+ MakeAST(is_global, script, extension, pre_data, is_json);
// Check for parse errors.
if (lit == NULL) {
@@ -176,19 +171,6 @@ static Handle<JSFunction> MakeFunction(bool is_global,
return Handle<JSFunction>::null();
}
- // When parsing JSON we do an ordinary parse and then afterwards
- // check the AST to ensure it was well-formed. If not we give a
- // syntax error.
- if (validate == Compiler::VALIDATE_JSON && !IsValidJSON(lit)) {
- HandleScope scope;
- Handle<JSArray> args = Factory::NewJSArray(1);
- Handle<Object> source(script->source());
- SetElement(args, 0, source);
- Handle<Object> result = Factory::NewSyntaxError("invalid_json", args);
- Top::Throw(*result, NULL);
- return Handle<JSFunction>::null();
- }
-
// Measure how long it takes to do the compilation; only take the
// rest of the function into account to avoid overlap with the
// parsing statistics.
@@ -198,8 +180,10 @@ static Handle<JSFunction> MakeFunction(bool is_global,
HistogramTimerScope timer(rate);
// Compile the code.
- Handle<Code> code = MakeCode(lit, script, context, is_eval,
- Handle<SharedFunctionInfo>::null());
+ CompilationInfo info(Handle<SharedFunctionInfo>::null(),
+ Handle<Object>::null(), // No receiver.
+ 0); // Not nested in a loop.
+ Handle<Code> code = MakeCode(lit, script, context, is_eval, &info);
// Check for stack-overflow exceptions.
if (code.is_null()) {
@@ -360,8 +344,7 @@ Handle<JSFunction> Compiler::CompileEval(Handle<String> source,
}
-bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared,
- int loop_nesting) {
+bool Compiler::CompileLazy(CompilationInfo* info) {
CompilationZoneScope zone_scope(DELETE_ON_EXIT);
// The VM is in the COMPILER state until exiting this function.
@@ -370,6 +353,7 @@ bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared,
PostponeInterruptsScope postpone;
// Compute name, source code and script data.
+ Handle<SharedFunctionInfo> shared = info->shared_info();
Handle<String> name(String::cast(shared->name()));
Handle<Script> script(Script::cast(shared->script()));
@@ -391,17 +375,17 @@ bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared,
return false;
}
- // Update the loop nesting in the function literal.
- lit->set_loop_nesting(loop_nesting);
-
// Measure how long it takes to do the lazy compilation; only take
// the rest of the function into account to avoid overlap with the
// lazy parsing statistics.
HistogramTimerScope timer(&Counters::compile_lazy);
// Compile the code.
- Handle<Code> code = MakeCode(lit, script, Handle<Context>::null(), false,
- shared);
+ Handle<Code> code = MakeCode(lit,
+ script,
+ Handle<Context>::null(),
+ false,
+ info);
// Check for stack-overflow exception.
if (code.is_null()) {
@@ -482,6 +466,10 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
// Generate code and return it. The way that the compilation mode
// is controlled by the command-line flags is described in
// the static helper function MakeCode.
+ CompilationInfo info(Handle<SharedFunctionInfo>::null(),
+ Handle<Object>::null(), // No receiver.
+ 0); // Not nested in a loop.
+
CHECK(!FLAG_always_full_compiler || !FLAG_always_fast_compiler);
bool is_run_once = literal->try_full_codegen();
bool is_compiled = false;
@@ -496,16 +484,22 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
}
} else if (FLAG_always_fast_compiler ||
(FLAG_fast_compiler && !is_run_once)) {
+    // Since we are not lazily compiling, we do not have a receiver to
+ // specialize for.
FastCodeGenSyntaxChecker checker;
- checker.Check(literal);
- // Generate no code.
+ checker.Check(literal, &info);
+ if (checker.has_supported_syntax()) {
+ code = FastCodeGenerator::MakeCode(literal, script, false, &info);
+ is_compiled = true;
+ }
}
if (!is_compiled) {
// We fall back to the classic V8 code generator.
code = CodeGenerator::MakeCode(literal,
script,
- false); // Not eval.
+ false, // Not eval.
+ &info);
}
// Check for stack-overflow exception.
diff --git a/src/compiler.h b/src/compiler.h
index 546e446b..19499de7 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -35,6 +35,41 @@
namespace v8 {
namespace internal {
+// CompilationInfo encapsulates the information known at compile time about
+// the function being compiled: its shared function info, the receiver to
+// specialize for (if any), and the loop nesting depth.
+class CompilationInfo BASE_EMBEDDED {
+ public:
+ CompilationInfo(Handle<SharedFunctionInfo> shared_info,
+ Handle<Object> receiver,
+ int loop_nesting)
+ : shared_info_(shared_info),
+ receiver_(receiver),
+ loop_nesting_(loop_nesting),
+ has_this_properties_(false),
+ has_globals_(false) {
+ }
+
+ Handle<SharedFunctionInfo> shared_info() { return shared_info_; }
+
+ bool has_receiver() { return !receiver_.is_null(); }
+ Handle<Object> receiver() { return receiver_; }
+
+ int loop_nesting() { return loop_nesting_; }
+
+ bool has_this_properties() { return has_this_properties_; }
+ void set_has_this_properties(bool flag) { has_this_properties_ = flag; }
+
+ bool has_globals() { return has_globals_; }
+ void set_has_globals(bool flag) { has_globals_ = flag; }
+
+ private:
+ Handle<SharedFunctionInfo> shared_info_;
+ Handle<Object> receiver_;
+ int loop_nesting_;
+ bool has_this_properties_;
+ bool has_globals_;
+};
+
+
// The V8 compiler
//
// General strategy: Source code is translated into an anonymous function w/o
@@ -70,7 +105,7 @@ class Compiler : public AllStatic {
// Compile from function info (used for lazy compilation). Returns
// true on success and false if the compilation resulted in a stack
// overflow.
- static bool CompileLazy(Handle<SharedFunctionInfo> shared, int loop_nesting);
+ static bool CompileLazy(CompilationInfo* info);
// Compile a function boilerplate object (the function is possibly
// lazily compiled). Called recursively from a backend code
diff --git a/src/data-flow.cc b/src/data-flow.cc
new file mode 100644
index 00000000..0e30b315
--- /dev/null
+++ b/src/data-flow.cc
@@ -0,0 +1,267 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "data-flow.h"
+
+namespace v8 {
+namespace internal {
+
+
+void AstLabeler::Label(FunctionLiteral* fun) {
+ VisitStatements(fun->body());
+}
+
+
+void AstLabeler::VisitStatements(ZoneList<Statement*>* stmts) {
+ for (int i = 0, len = stmts->length(); i < len; i++) {
+ Visit(stmts->at(i));
+ }
+}
+
+
+void AstLabeler::VisitDeclarations(ZoneList<Declaration*>* decls) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitBlock(Block* stmt) {
+ VisitStatements(stmt->statements());
+}
+
+
+void AstLabeler::VisitExpressionStatement(ExpressionStatement* stmt) {
+ Visit(stmt->expression());
+}
+
+
+void AstLabeler::VisitEmptyStatement(EmptyStatement* stmt) {
+ // Do nothing.
+}
+
+
+void AstLabeler::VisitIfStatement(IfStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitContinueStatement(ContinueStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitBreakStatement(BreakStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitReturnStatement(ReturnStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitWithEnterStatement(WithEnterStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitWithExitStatement(WithExitStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitSwitchStatement(SwitchStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitDoWhileStatement(DoWhileStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitWhileStatement(WhileStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitForStatement(ForStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitForInStatement(ForInStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitDebuggerStatement(DebuggerStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitFunctionLiteral(FunctionLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitFunctionBoilerplateLiteral(
+ FunctionBoilerplateLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitConditional(Conditional* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitSlot(Slot* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitVariableProxy(VariableProxy* expr) {
+ expr->set_num(next_number_++);
+}
+
+
+void AstLabeler::VisitLiteral(Literal* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitRegExpLiteral(RegExpLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitObjectLiteral(ObjectLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitArrayLiteral(ArrayLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitCatchExtensionObject(CatchExtensionObject* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitAssignment(Assignment* expr) {
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
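+  // The syntax checker only admits named-property assignments. Stores to
+  // properties of 'this' are recorded for the code generator instead of
+  // visiting the receiver.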
+ if (prop != NULL) {
+ ASSERT(prop->key()->IsPropertyName());
+ VariableProxy* proxy = prop->obj()->AsVariableProxy();
+ if (proxy != NULL && proxy->var()->is_this()) {
+ has_this_properties_ = true;
+ } else {
+ Visit(prop->obj());
+ }
+ }
+ Visit(expr->value());
+ expr->set_num(next_number_++);
+}
+
+
+void AstLabeler::VisitThrow(Throw* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitProperty(Property* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitCall(Call* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitCallNew(CallNew* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitCallRuntime(CallRuntime* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitUnaryOperation(UnaryOperation* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitCountOperation(CountOperation* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitBinaryOperation(BinaryOperation* expr) {
+ Visit(expr->left());
+ Visit(expr->right());
+ expr->set_num(next_number_++);
+}
+
+
+void AstLabeler::VisitCompareOperation(CompareOperation* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitThisFunction(ThisFunction* expr) {
+ UNREACHABLE();
+}
+
+
+void AstLabeler::VisitDeclaration(Declaration* decl) {
+ UNREACHABLE();
+}
+
+} } // namespace v8::internal
diff --git a/src/data-flow.h b/src/data-flow.h
new file mode 100644
index 00000000..ac835031
--- /dev/null
+++ b/src/data-flow.h
@@ -0,0 +1,67 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_DATAFLOW_H_
+#define V8_DATAFLOW_H_
+
+#include "ast.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+// This class is used to number all expressions in the AST according to
+// their evaluation order (post-order left-to-right traversal).
+class AstLabeler: public AstVisitor {
+ public:
+ AstLabeler() : next_number_(0), has_this_properties_(false) {}
+
+ void Label(FunctionLiteral* fun);
+
+ bool has_this_properties() { return has_this_properties_; }
+
+ private:
+ void VisitDeclarations(ZoneList<Declaration*>* decls);
+ void VisitStatements(ZoneList<Statement*>* stmts);
+
+ // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  // Next number to assign in the labelling traversal.
+ int next_number_;
+
+ bool has_this_properties_;
+
+ DISALLOW_COPY_AND_ASSIGN(AstLabeler);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_DATAFLOW_H_
diff --git a/src/dateparser.cc b/src/dateparser.cc
index 1cc9aa16..51a63e1a 100644
--- a/src/dateparser.cc
+++ b/src/dateparser.cc
@@ -72,15 +72,9 @@ bool DateParser::DayComposer::Write(FixedArray* output) {
if (!Smi::IsValid(year) || !IsMonth(month) || !IsDay(day)) return false;
- output->set(YEAR,
- Smi::FromInt(year),
- SKIP_WRITE_BARRIER);
- output->set(MONTH,
- Smi::FromInt(month - 1),
- SKIP_WRITE_BARRIER); // 0-based
- output->set(DAY,
- Smi::FromInt(day),
- SKIP_WRITE_BARRIER);
+ output->set(YEAR, Smi::FromInt(year));
+ output->set(MONTH, Smi::FromInt(month - 1)); // 0-based
+ output->set(DAY, Smi::FromInt(day));
return true;
}
@@ -103,15 +97,9 @@ bool DateParser::TimeComposer::Write(FixedArray* output) {
if (!IsHour(hour) || !IsMinute(minute) || !IsSecond(second)) return false;
- output->set(HOUR,
- Smi::FromInt(hour),
- SKIP_WRITE_BARRIER);
- output->set(MINUTE,
- Smi::FromInt(minute),
- SKIP_WRITE_BARRIER);
- output->set(SECOND,
- Smi::FromInt(second),
- SKIP_WRITE_BARRIER);
+ output->set(HOUR, Smi::FromInt(hour));
+ output->set(MINUTE, Smi::FromInt(minute));
+ output->set(SECOND, Smi::FromInt(second));
return true;
}
@@ -121,13 +109,9 @@ bool DateParser::TimeZoneComposer::Write(FixedArray* output) {
if (minute_ == kNone) minute_ = 0;
int total_seconds = sign_ * (hour_ * 3600 + minute_ * 60);
if (!Smi::IsValid(total_seconds)) return false;
- output->set(UTC_OFFSET,
- Smi::FromInt(total_seconds),
- SKIP_WRITE_BARRIER);
+ output->set(UTC_OFFSET, Smi::FromInt(total_seconds));
} else {
- output->set(UTC_OFFSET,
- Heap::null_value(),
- SKIP_WRITE_BARRIER);
+ output->set_null(UTC_OFFSET);
}
return true;
}
diff --git a/src/debug.cc b/src/debug.cc
index fc809c56..fb9b23eb 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -75,9 +75,6 @@ BreakLocationIterator::BreakLocationIterator(Handle<DebugInfo> debug_info,
BreakLocatorType type) {
debug_info_ = debug_info;
type_ = type;
- // Get the stub early to avoid possible GC during iterations. We may need
- // this stub to detect debugger calls generated from debugger statements.
- debug_break_stub_ = RuntimeStub(Runtime::kDebugBreak, 0).GetCode();
reloc_iterator_ = NULL;
reloc_iterator_original_ = NULL;
Reset(); // Initialize the rest of the member variables.
@@ -461,9 +458,7 @@ bool BreakLocationIterator::IsDebuggerStatement() {
Code* code = Code::GetCodeFromTargetAddress(target);
if (code->kind() == Code::STUB) {
CodeStub::Major major_key = code->major_key();
- if (major_key == CodeStub::Runtime) {
- return (*debug_break_stub_ == code);
- }
+ return (major_key == CodeStub::DebuggerStatement);
}
}
return false;
@@ -1526,19 +1521,13 @@ void Debug::ClearStepNext() {
}
-bool Debug::EnsureCompiled(Handle<SharedFunctionInfo> shared) {
- if (shared->is_compiled()) return true;
- return CompileLazyShared(shared, CLEAR_EXCEPTION, 0);
-}
-
-
// Ensures the debug information is present for shared.
bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared) {
// Return if we already have the debug info for shared.
if (HasDebugInfo(shared)) return true;
  // Ensure shared is compiled. Return false if this fails.
- if (!EnsureCompiled(shared)) return false;
+ if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
// Create the debug info object.
Handle<DebugInfo> debug_info = Factory::NewDebugInfo(shared);
diff --git a/src/debug.h b/src/debug.h
index 5ea2e522..cab9e8e4 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -132,7 +132,6 @@ class BreakLocationIterator {
int position_;
int statement_position_;
Handle<DebugInfo> debug_info_;
- Handle<Code> debug_break_stub_;
RelocIterator* reloc_iterator_;
RelocIterator* reloc_iterator_original_;
@@ -391,7 +390,6 @@ class Debug {
static void ClearStepOut();
static void ClearStepNext();
// Returns whether the compile succeeded.
- static bool EnsureCompiled(Handle<SharedFunctionInfo> shared);
static void RemoveDebugInfo(Handle<DebugInfo> debug_info);
static void SetAfterBreakTarget(JavaScriptFrame* frame);
static Handle<Object> CheckBreakPoints(Handle<Object> break_point);
diff --git a/src/disassembler.cc b/src/disassembler.cc
index 524dbe67..50f3eb99 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -266,13 +266,7 @@ static int DecodeIt(FILE* f,
case CodeStub::CallFunction:
out.AddFormatted("argc = %d", minor_key);
break;
- case CodeStub::Runtime: {
- const char* name =
- RuntimeStub::GetNameFromMinorKey(minor_key);
- out.AddFormatted("%s", name);
- break;
- }
- default:
+ default:
out.AddFormatted("minor: %d", minor_key);
}
}
diff --git a/src/fast-codegen.cc b/src/fast-codegen.cc
index 4e2df74f..4e6f259c 100644
--- a/src/fast-codegen.cc
+++ b/src/fast-codegen.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#include "codegen-inl.h"
+#include "data-flow.h"
#include "fast-codegen.h"
#include "scopes.h"
@@ -49,11 +51,20 @@ namespace internal {
} while (false)
-void FastCodeGenSyntaxChecker::Check(FunctionLiteral* fun) {
- Scope* scope = fun->scope();
+void FastCodeGenSyntaxChecker::Check(FunctionLiteral* fun,
+ CompilationInfo* info) {
+ info_ = info;
+
+ // We do not specialize if we do not have a receiver or if it is not a
+ // JS object with fast mode properties.
+ if (!info->has_receiver()) BAILOUT("No receiver");
+ if (!info->receiver()->IsJSObject()) BAILOUT("Receiver is not an object");
+ Handle<JSObject> object = Handle<JSObject>::cast(info->receiver());
+ if (!object->HasFastProperties()) BAILOUT("Receiver is in dictionary mode");
// We do not support stack or heap slots (both of which require
// allocation).
+ Scope* scope = fun->scope();
if (scope->num_stack_slots() > 0) {
BAILOUT("Function has stack-allocated locals");
}
@@ -246,6 +257,22 @@ void FastCodeGenSyntaxChecker::VisitAssignment(Assignment* expr) {
BAILOUT("Non-named-property assignment");
}
+ // We will only specialize for fields on the object itself.
+ // Expression::IsPropertyName implies that the name is a literal
+  // symbol, but we do not assume that.
+ Literal* key = prop->key()->AsLiteral();
+ if (key != NULL && key->handle()->IsString()) {
+ Handle<Object> receiver = info()->receiver();
+ Handle<String> name = Handle<String>::cast(key->handle());
+ LookupResult lookup;
+ receiver->Lookup(*name, &lookup);
+ if (lookup.holder() != *receiver) BAILOUT("Non-own property assignment");
+    if (lookup.type() != FIELD) BAILOUT("Non-field property assignment");
+ } else {
+ UNREACHABLE();
+ BAILOUT("Unexpected non-string-literal property key");
+ }
+
Visit(expr->value());
}
@@ -303,4 +330,266 @@ void FastCodeGenSyntaxChecker::VisitThisFunction(ThisFunction* expr) {
#undef CHECK_BAILOUT
+#define __ ACCESS_MASM(masm())
+
+Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
+ Handle<Script> script,
+ bool is_eval,
+ CompilationInfo* info) {
+ // Label the AST before calling MakeCodePrologue, so AST node numbers are
+ // printed with the AST.
+ AstLabeler labeler;
+ labeler.Label(fun);
+ info->set_has_this_properties(labeler.has_this_properties());
+
+ CodeGenerator::MakeCodePrologue(fun);
+
+ const int kInitialBufferSize = 4 * KB;
+ MacroAssembler masm(NULL, kInitialBufferSize);
+
+ // Generate the fast-path code.
+ FastCodeGenerator fast_cgen(&masm, script, is_eval);
+ fast_cgen.Generate(fun, info);
+ if (fast_cgen.HasStackOverflow()) {
+ ASSERT(!Top::has_pending_exception());
+ return Handle<Code>::null();
+ }
+
+ // Generate the full code for the function in bailout mode, using the same
+ // macro assembler.
+ CodeGenerator cgen(&masm, script, is_eval);
+ CodeGeneratorScope scope(&cgen);
+ cgen.Generate(fun, CodeGenerator::SECONDARY, info);
+ if (cgen.HasStackOverflow()) {
+ ASSERT(!Top::has_pending_exception());
+ return Handle<Code>::null();
+ }
+
+ Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
+ return CodeGenerator::MakeCodeEpilogue(fun, &masm, flags, script);
+}
+
+
+void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitBlock(Block* stmt) {
+ VisitStatements(stmt->statements());
+}
+
+
+void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
+ Visit(stmt->expression());
+}
+
+
+void FastCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
+ // Nothing to do.
+}
+
+
+void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitForStatement(ForStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitFunctionBoilerplateLiteral(
+ FunctionBoilerplateLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitConditional(Conditional* expr) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitSlot(Slot* expr) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+ ASSERT(expr->var()->is_global() && !expr->var()->is_this());
+ Comment cmnt(masm(), ";; Global");
+ if (FLAG_print_ir) {
+ SmartPointer<char> name = expr->name()->ToCString();
+ PrintF("%d: t%d = Global(%s)\n", expr->num(), expr->num(), *name);
+ }
+ EmitGlobalVariableLoad(expr->name());
+}
+
+
+void FastCodeGenerator::VisitLiteral(Literal* expr) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitAssignment(Assignment* expr) {
+ // Known to be a simple this property assignment.
+ Visit(expr->value());
+
+ Property* prop = expr->target()->AsProperty();
+ ASSERT_NOT_NULL(prop);
+ ASSERT_NOT_NULL(prop->obj()->AsVariableProxy());
+ ASSERT(prop->obj()->AsVariableProxy()->var()->is_this());
+ ASSERT(prop->key()->IsPropertyName());
+ Handle<String> name =
+ Handle<String>::cast(prop->key()->AsLiteral()->handle());
+
+ Comment cmnt(masm(), ";; Store(this)");
+ if (FLAG_print_ir) {
+ SmartPointer<char> name_string = name->ToCString();
+ PrintF("%d: t%d = Store(this, \"%s\", t%d)\n",
+ expr->num(), expr->num(), *name_string, expr->value()->num());
+ }
+
+ EmitThisPropertyStore(name);
+}
+
+
+void FastCodeGenerator::VisitThrow(Throw* expr) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitProperty(Property* expr) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitCall(Call* expr) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitCallNew(CallNew* expr) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+ UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+ UNREACHABLE();
+}
+
+#undef __
+
+
} } // namespace v8::internal
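Taken together, MakeCode above is a two-pass "split" compile into a single
buffer: the fast path is emitted first, its bailout label is bound at the end,
and the SECONDARY-mode full code follows immediately, so a failed map check
simply jumps forward into the general implementation. A sketch of the driver
shape, using only the constructors shown above:

    MacroAssembler masm(NULL, 4 * KB);
    FastCodeGenerator fast_cgen(&masm, script, is_eval);
    fast_cgen.Generate(fun, info);                       // fast path + bind(bailout_)
    CodeGenerator cgen(&masm, script, is_eval);
    CodeGeneratorScope scope(&cgen);
    cgen.Generate(fun, CodeGenerator::SECONDARY, info);  // bailout target
    // one Code object ends up holding both paths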
diff --git a/src/fast-codegen.h b/src/fast-codegen.h
index 3e0bb417..b40f6fb7 100644
--- a/src/fast-codegen.h
+++ b/src/fast-codegen.h
@@ -31,16 +31,20 @@
#include "v8.h"
#include "ast.h"
+#include "compiler.h"
namespace v8 {
namespace internal {
class FastCodeGenSyntaxChecker: public AstVisitor {
public:
- FastCodeGenSyntaxChecker() : has_supported_syntax_(true) {}
+  FastCodeGenSyntaxChecker()
+      : info_(NULL), has_supported_syntax_(true) {
+  }
- void Check(FunctionLiteral* fun);
+ void Check(FunctionLiteral* fun, CompilationInfo* info);
+ CompilationInfo* info() { return info_; }
bool has_supported_syntax() { return has_supported_syntax_; }
private:
@@ -52,12 +56,76 @@ class FastCodeGenSyntaxChecker: public AstVisitor {
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
+ CompilationInfo* info_;
bool has_supported_syntax_;
DISALLOW_COPY_AND_ASSIGN(FastCodeGenSyntaxChecker);
};
+class FastCodeGenerator: public AstVisitor {
+ public:
+ FastCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval)
+ : masm_(masm),
+ script_(script),
+ is_eval_(is_eval),
+ function_(NULL),
+ info_(NULL) {
+ }
+
+ static Handle<Code> MakeCode(FunctionLiteral* fun,
+ Handle<Script> script,
+ bool is_eval,
+ CompilationInfo* info);
+
+ void Generate(FunctionLiteral* fun, CompilationInfo* info);
+
+ private:
+ MacroAssembler* masm() { return masm_; }
+ FunctionLiteral* function() { return function_; }
+ Label* bailout() { return &bailout_; }
+
+ bool has_receiver() { return !info_->receiver().is_null(); }
+ Handle<Object> receiver() { return info_->receiver(); }
+ bool has_this_properties() { return info_->has_this_properties(); }
+
+ // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ // Emit code to load the receiver from the stack into a given register.
+ void EmitLoadReceiver(Register reg);
+
+ // Emit code to check that the receiver has the same map as the
+ // compile-time receiver. Receiver is expected in {ia32-edx, x64-rdx,
+ // arm-r1}. Emit a branch to the (single) bailout label if check fails.
+ void EmitReceiverMapCheck();
+
+  // Emit code to load a global variable value into {ia32-eax, x64-rax,
+ // arm-r0}. Register {ia32-edx, x64-rdx, arm-r1} is preserved if it is
+  // holding the receiver and {ia32-ecx, x64-rcx, arm-r2} is always
+ // clobbered.
+ void EmitGlobalVariableLoad(Handle<String> name);
+
+ // Emit a store to an own property of this. The stored value is expected
+  // in {ia32-eax, x64-rax, arm-r0} and the receiver in {ia32-edx, x64-rdx,
+  // arm-r1}. Both are preserved.
+ void EmitThisPropertyStore(Handle<String> name);
+
+ MacroAssembler* masm_;
+ Handle<Script> script_;
+ bool is_eval_;
+
+ FunctionLiteral* function_;
+ CompilationInfo* info_;
+
+ Label bailout_;
+
+ DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator);
+};
+
+
} } // namespace v8::internal
#endif // V8_FAST_CODEGEN_H_
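A hypothetical call sequence tying the two classes together (how compiler.cc
is expected to drive them; names as declared above):

    FastCodeGenSyntaxChecker checker;
    checker.Check(fun, info);
    if (checker.has_supported_syntax()) {
      Handle<Code> code = FastCodeGenerator::MakeCode(fun, script, is_eval, info);
      // a null handle here means stack overflow during code generation
    }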
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 02088133..b57f2cb6 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -301,6 +301,7 @@ DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
// compiler.cc
DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
DEFINE_bool(print_scopes, false, "print scopes")
+DEFINE_bool(print_ir, false, "print the AST as seen by the backend")
// contexts.cc
DEFINE_bool(trace_contexts, false, "trace contexts operations")
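Given the PrintF formats in fast-codegen.cc above, --print_ir output for a
constructor body like `this.x = g;` would look roughly like this (illustrative
numbering):

    MapCheck(this)
    1: t1 = Global(g)
    2: t2 = Store(this, "x", t1)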
diff --git a/src/frames.cc b/src/frames.cc
index 2f90a316..e56a2c83 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -176,7 +176,7 @@ StackFrame* StackFrameIterator::SingletonFor(StackFrame::Type type) {
StackTraceFrameIterator::StackTraceFrameIterator() {
- if (!done() && !frame()->function()->IsJSFunction()) Advance();
+ if (!done() && !IsValidFrame()) Advance();
}
@@ -184,10 +184,18 @@ void StackTraceFrameIterator::Advance() {
while (true) {
JavaScriptFrameIterator::Advance();
if (done()) return;
- if (frame()->function()->IsJSFunction()) return;
+ if (IsValidFrame()) return;
}
}
+bool StackTraceFrameIterator::IsValidFrame() {
+ if (!frame()->function()->IsJSFunction()) return false;
+ Object* script = JSFunction::cast(frame()->function())->shared()->script();
+  // Don't show functions from native scripts to the user.
+ return (script->IsScript() &&
+ Script::TYPE_NATIVE != Script::cast(script)->type()->value());
+}
+
// -------------------------------------------------------------------------
@@ -402,7 +410,7 @@ Object*& ExitFrame::code_slot() const {
Code* ExitFrame::code() const {
Object* code = code_slot();
if (code->IsSmi()) {
- return Heap::c_entry_debug_break_code();
+ return Heap::debugger_statement_code();
} else {
return Code::cast(code);
}
diff --git a/src/frames.h b/src/frames.h
index 19860adb..8cbbc626 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -589,6 +589,9 @@ class StackTraceFrameIterator: public JavaScriptFrameIterator {
public:
StackTraceFrameIterator();
void Advance();
+
+ private:
+ bool IsValidFrame();
};
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 5bd294fe..01714cbb 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -54,18 +54,6 @@ namespace internal {
void FullCodeGenSyntaxChecker::Check(FunctionLiteral* fun) {
Scope* scope = fun->scope();
-
- if (scope->num_heap_slots() > 0) {
- // We support functions with a local context if they do not have
- // parameters that need to be copied into the context.
- for (int i = 0, len = scope->num_parameters(); i < len; i++) {
- Slot* slot = scope->parameter(i)->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- BAILOUT("Function has context-allocated parameters.");
- }
- }
- }
-
VisitDeclarations(scope->declarations());
CHECK_BAILOUT;
@@ -387,17 +375,15 @@ void FullCodeGenSyntaxChecker::VisitCallRuntime(CallRuntime* expr) {
void FullCodeGenSyntaxChecker::VisitUnaryOperation(UnaryOperation* expr) {
switch (expr->op()) {
case Token::ADD:
+ case Token::BIT_NOT:
case Token::NOT:
+ case Token::SUB:
case Token::TYPEOF:
case Token::VOID:
Visit(expr->expression());
break;
- case Token::BIT_NOT:
- BAILOUT("UnaryOperation: BIT_NOT");
case Token::DELETE:
BAILOUT("UnaryOperation: DELETE");
- case Token::SUB:
- BAILOUT("UnaryOperation: SUB");
default:
UNREACHABLE();
}
@@ -464,7 +450,7 @@ Handle<Code> FullCodeGenerator::MakeCode(FunctionLiteral* fun,
const int kInitialBufferSize = 4 * KB;
MacroAssembler masm(NULL, kInitialBufferSize);
FullCodeGenerator cgen(&masm, script, is_eval);
- cgen.Generate(fun);
+ cgen.Generate(fun, PRIMARY);
if (cgen.HasStackOverflow()) {
ASSERT(!Top::has_pending_exception());
return Handle<Code>::null();
@@ -1000,7 +986,9 @@ void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
#ifdef ENABLE_DEBUGGER_SUPPORT
Comment cmnt(masm_, "[ DebuggerStatement");
SetStatementPosition(stmt);
- __ CallRuntime(Runtime::kDebugBreak, 0);
+
+ DebuggerStatementStub ces;
+ __ CallStub(&ces);
// Ignore the return value.
#endif
}
@@ -1161,7 +1149,6 @@ int FullCodeGenerator::TryCatch::Exit(int stack_depth) {
return 0;
}
-
#undef __
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 3c8e198f..6688ff7c 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -63,11 +63,16 @@ class FullCodeGenSyntaxChecker: public AstVisitor {
class FullCodeGenerator: public AstVisitor {
public:
+ enum Mode {
+ PRIMARY,
+ SECONDARY
+ };
+
FullCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval)
: masm_(masm),
- function_(NULL),
script_(script),
is_eval_(is_eval),
+ function_(NULL),
nesting_stack_(NULL),
loop_depth_(0),
location_(kStack),
@@ -79,7 +84,7 @@ class FullCodeGenerator: public AstVisitor {
Handle<Script> script,
bool is_eval);
- void Generate(FunctionLiteral* fun);
+ void Generate(FunctionLiteral* fun, Mode mode);
private:
class Breakable;
@@ -422,9 +427,11 @@ class FullCodeGenerator: public AstVisitor {
void EmitLogicalOperation(BinaryOperation* expr);
MacroAssembler* masm_;
- FunctionLiteral* function_;
Handle<Script> script_;
bool is_eval_;
+
+ FunctionLiteral* function_;
+
Label return_label_;
NestedStatement* nesting_stack_;
int loop_depth_;
diff --git a/src/globals.h b/src/globals.h
index f5cb1c09..39f6bcb2 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -204,6 +204,7 @@ class AccessorInfo;
class Allocation;
class Arguments;
class Assembler;
+class AssertNoAllocation;
class BreakableStatement;
class Code;
class CodeGenerator;
diff --git a/src/handles.cc b/src/handles.cc
index 3156670d..c66056eb 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -31,6 +31,7 @@
#include "api.h"
#include "arguments.h"
#include "bootstrapper.h"
+#include "codegen.h"
#include "compiler.h"
#include "debug.h"
#include "execution.h"
@@ -666,35 +667,52 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
}
-bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
- ClearExceptionFlag flag,
- int loop_nesting) {
+bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
+ ClearExceptionFlag flag) {
+ return shared->is_compiled() || CompileLazyShared(shared, flag);
+}
+
+
+static bool CompileLazyHelper(CompilationInfo* info,
+ ClearExceptionFlag flag) {
// Compile the source information to a code object.
- ASSERT(!shared->is_compiled());
- bool result = Compiler::CompileLazy(shared, loop_nesting);
+ ASSERT(!info->shared_info()->is_compiled());
+ bool result = Compiler::CompileLazy(info);
ASSERT(result != Top::has_pending_exception());
if (!result && flag == CLEAR_EXCEPTION) Top::clear_pending_exception();
return result;
}
-bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag) {
- // Compile the source information to a code object.
+bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
+ ClearExceptionFlag flag) {
+ CompilationInfo info(shared, Handle<Object>::null(), 0);
+ return CompileLazyHelper(&info, flag);
+}
+
+
+bool CompileLazy(Handle<JSFunction> function,
+ Handle<Object> receiver,
+ ClearExceptionFlag flag) {
Handle<SharedFunctionInfo> shared(function->shared());
- bool result = CompileLazyShared(shared, flag, 0);
+ CompilationInfo info(shared, receiver, 0);
+ bool result = CompileLazyHelper(&info, flag);
LOG(FunctionCreateEvent(*function));
return result;
}
-bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag) {
- // Compile the source information to a code object.
+bool CompileLazyInLoop(Handle<JSFunction> function,
+ Handle<Object> receiver,
+ ClearExceptionFlag flag) {
Handle<SharedFunctionInfo> shared(function->shared());
- bool result = CompileLazyShared(shared, flag, 1);
+ CompilationInfo info(shared, receiver, 1);
+ bool result = CompileLazyHelper(&info, flag);
LOG(FunctionCreateEvent(*function));
return result;
}
+
OptimizedObjectForAddingMultipleProperties::
OptimizedObjectForAddingMultipleProperties(Handle<JSObject> object,
int expected_additional_properties,
diff --git a/src/handles.h b/src/handles.h
index fe820d59..04f087bd 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -313,12 +313,19 @@ Handle<Object> SetPrototype(Handle<JSFunction> function,
// false if the compilation resulted in a stack overflow.
enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };
+bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
+ ClearExceptionFlag flag);
+
bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
- ClearExceptionFlag flag,
- int loop_nesting);
+ ClearExceptionFlag flag);
+
+bool CompileLazy(Handle<JSFunction> function,
+ Handle<Object> receiver,
+ ClearExceptionFlag flag);
-bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag);
-bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag);
+bool CompileLazyInLoop(Handle<JSFunction> function,
+ Handle<Object> receiver,
+ ClearExceptionFlag flag);
// Returns the lazy compilation stub for argc arguments.
Handle<Code> ComputeLazyCompile(int argc);
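An illustrative call site for the reworked entry points (mirroring the
debug.cc change above):

    Handle<SharedFunctionInfo> shared = ...;
    if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
    // EnsureCompiled is a no-op when shared->is_compiled() already holds;
    // otherwise it funnels through CompileLazyShared and a CompilationInfo.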
diff --git a/src/heap-inl.h b/src/heap-inl.h
index bd4f86bb..f18bf0f6 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -152,7 +152,11 @@ Object* Heap::AllocateRawCell() {
bool Heap::InNewSpace(Object* object) {
- return new_space_.Contains(object);
+ bool result = new_space_.Contains(object);
+ ASSERT(!result || // Either not in new space
+ gc_state_ != NOT_IN_GC || // ... or in the middle of GC
+ InToSpace(object)); // ... or in to-space (where we allocate).
+ return result;
}
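The assert above encodes, in propositional form:

    // InNewSpace(object)  =>  gc_state_ != NOT_IN_GC  ||  InToSpace(object)

i.e. outside of a GC every live new-space object must be in to-space, since
that is where allocation happens; from-space only holds objects transiently
during a collection.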
diff --git a/src/heap.cc b/src/heap.cc
index 6be1dafe..7263e230 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1185,7 +1185,10 @@ Object* Heap::AllocatePartialMap(InstanceType instance_type,
reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
reinterpret_cast<Map*>(result)->set_inobject_properties(0);
+ reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
+ reinterpret_cast<Map*>(result)->set_bit_field(0);
+ reinterpret_cast<Map*>(result)->set_bit_field2(0);
return result;
}
@@ -1495,10 +1498,12 @@ void Heap::CreateRegExpCEntryStub() {
#endif
+#ifdef ENABLE_DEBUGGER_SUPPORT
void Heap::CreateCEntryDebugBreakStub() {
- CEntryDebugBreakStub stub;
- set_c_entry_debug_break_code(*stub.GetCode());
+ DebuggerStatementStub stub;
+ set_debugger_statement_code(*stub.GetCode());
}
+#endif
void Heap::CreateJSEntryStub() {
@@ -1523,12 +1528,14 @@ void Heap::CreateFixedStubs() {
// { CEntryStub stub;
// c_entry_code_ = *stub.GetCode();
// }
- // { CEntryDebugBreakStub stub;
- // c_entry_debug_break_code_ = *stub.GetCode();
+ // { DebuggerStatementStub stub;
+ // debugger_statement_code_ = *stub.GetCode();
// }
// To workaround the problem, make separate functions without inlining.
Heap::CreateCEntryStub();
+#ifdef ENABLE_DEBUGGER_SUPPORT
Heap::CreateCEntryDebugBreakStub();
+#endif
Heap::CreateJSEntryStub();
Heap::CreateJSConstructEntryStub();
#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
@@ -1726,7 +1733,7 @@ void Heap::SetNumberStringCache(Object* number, String* string) {
int mask = (number_string_cache()->length() >> 1) - 1;
if (number->IsSmi()) {
hash = smi_get_hash(Smi::cast(number)) & mask;
- number_string_cache()->set(hash * 2, number, SKIP_WRITE_BARRIER);
+ number_string_cache()->set(hash * 2, Smi::cast(number));
} else {
hash = double_get_hash(number->Number()) & mask;
number_string_cache()->set(hash * 2, number);
@@ -1983,8 +1990,10 @@ Object* Heap::AllocateConsString(String* first, String* second) {
Object* result = Allocate(map, NEW_SPACE);
if (result->IsFailure()) return result;
+
+ AssertNoAllocation no_gc;
ConsString* cons_string = ConsString::cast(result);
- WriteBarrierMode mode = cons_string->GetWriteBarrierMode();
+ WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
cons_string->set_length(length);
cons_string->set_hash_field(String::kEmptyHashField);
cons_string->set_first(first, mode);
@@ -2282,7 +2291,7 @@ Object* Heap::InitializeFunction(JSFunction* function,
function->set_shared(shared);
function->set_prototype_or_initial_map(prototype);
function->set_context(undefined_value());
- function->set_literals(empty_fixed_array(), SKIP_WRITE_BARRIER);
+ function->set_literals(empty_fixed_array());
return function;
}
@@ -2401,8 +2410,10 @@ Object* Heap::AllocateInitialMap(JSFunction* fun) {
String* name = fun->shared()->GetThisPropertyAssignmentName(i);
ASSERT(name->IsSymbol());
FieldDescriptor field(name, i, NONE);
+ field.SetEnumerationIndex(i);
descriptors->Set(i, &field);
}
+ descriptors->SetNextEnumerationIndex(count);
descriptors->Sort();
map->set_instance_descriptors(descriptors);
map->set_pre_allocated_property_fields(count);
@@ -2883,8 +2894,10 @@ Object* Heap::CopyFixedArray(FixedArray* src) {
HeapObject::cast(obj)->set_map(src->map());
FixedArray* result = FixedArray::cast(obj);
result->set_length(len);
+
// Copy the content
- WriteBarrierMode mode = result->GetWriteBarrierMode();
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
return result;
}
@@ -2902,6 +2915,7 @@ Object* Heap::AllocateFixedArray(int length) {
Object* value = undefined_value();
// Initialize body.
for (int index = 0; index < length; index++) {
+ ASSERT(!Heap::InNewSpace(value)); // value = undefined
array->set(index, value, SKIP_WRITE_BARRIER);
}
}
@@ -2957,6 +2971,7 @@ Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
array->set_length(length);
Object* value = undefined_value();
for (int index = 0; index < length; index++) {
+ ASSERT(!Heap::InNewSpace(value)); // value = undefined
array->set(index, value, SKIP_WRITE_BARRIER);
}
return array;
@@ -2974,6 +2989,7 @@ Object* Heap::AllocateFixedArrayWithHoles(int length) {
// Initialize body.
Object* value = the_hole_value();
for (int index = 0; index < length; index++) {
+ ASSERT(!Heap::InNewSpace(value)); // value = the hole
array->set(index, value, SKIP_WRITE_BARRIER);
}
}
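The number-string cache touched in SetNumberStringCache above is a flat
FixedArray of [key, value] pairs; a sketch of the matching lookup side,
assuming that layout:

    int mask = (cache->length() >> 1) - 1;        // length/2 buckets
    int hash = number->IsSmi()
                   ? smi_get_hash(Smi::cast(number)) & mask
                   : double_get_hash(number->Number()) & mask;
    Object* key   = cache->get(hash * 2);         // cached number
    Object* value = cache->get(hash * 2 + 1);     // cached string, valid if key matches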
diff --git a/src/heap.h b/src/heap.h
index 0dd20c08..cbf0b73e 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -101,7 +101,7 @@ namespace internal {
V(Code, js_entry_code, JsEntryCode) \
V(Code, js_construct_entry_code, JsConstructEntryCode) \
V(Code, c_entry_code, CEntryCode) \
- V(Code, c_entry_debug_break_code, CEntryDebugBreakCode) \
+ V(Code, debugger_statement_code, DebuggerStatementCode) \
V(FixedArray, number_string_cache, NumberStringCache) \
V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
V(FixedArray, natives_source_cache, NativesSourceCache) \
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index da27fd09..9ce07343 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -229,8 +229,9 @@ enum ScaleFactor {
times_2 = 1,
times_4 = 2,
times_8 = 3,
- times_pointer_size = times_4,
- times_half_pointer_size = times_2
+ times_int_size = times_4,
+ times_half_pointer_size = times_2,
+ times_pointer_size = times_4
};
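On ia32, times_int_size and times_pointer_size are both times_4, so this
reordering only separates the two intents; on x64 they diverge (4-byte ints
vs. 8-byte pointers). The distinction is used below when indexing the static
offsets vector, which holds 32-bit ints:

    __ mov(edi, Operand(ecx, edx, times_int_size, 0));  // ecx: base of an int32 array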
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 121e1552..fe91903e 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -103,13 +103,13 @@ CodeGenState::~CodeGenState() {
// -------------------------------------------------------------------------
// CodeGenerator implementation
-CodeGenerator::CodeGenerator(int buffer_size,
+CodeGenerator::CodeGenerator(MacroAssembler* masm,
Handle<Script> script,
bool is_eval)
: is_eval_(is_eval),
script_(script),
deferred_(8),
- masm_(new MacroAssembler(NULL, buffer_size)),
+ masm_(masm),
scope_(NULL),
frame_(NULL),
allocator_(NULL),
@@ -126,7 +126,9 @@ CodeGenerator::CodeGenerator(int buffer_size,
// edi: called JS function
// esi: callee's context
-void CodeGenerator::GenCode(FunctionLiteral* fun) {
+void CodeGenerator::Generate(FunctionLiteral* fun,
+ Mode mode,
+ CompilationInfo* info) {
// Record the position for debugging purposes.
CodeForFunctionPosition(fun);
@@ -143,7 +145,7 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
set_in_spilled_code(false);
// Adjust for function-level loop nesting.
- loop_nesting_ += fun->loop_nesting();
+ loop_nesting_ += info->loop_nesting();
JumpTarget::set_compiling_deferred_code(false);
@@ -167,96 +169,106 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
// edi: called JS function
// esi: callee's context
allocator_->Initialize();
- frame_->Enter();
- // Allocate space for locals and initialize them.
- frame_->AllocateStackSlots();
- // Initialize the function return target after the locals are set
- // up, because it needs the expected frame height from the frame.
- function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
- function_return_is_shadowed_ = false;
-
- // Allocate the local context if needed.
- int heap_slots = scope_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- Comment cmnt(masm_, "[ allocate local context");
- // Allocate local context.
- // Get outer context and create a new context based on it.
- frame_->PushFunction();
- Result context;
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- context = frame_->CallStub(&stub, 1);
- } else {
- context = frame_->CallRuntime(Runtime::kNewContext, 1);
- }
+ if (mode == PRIMARY) {
+ frame_->Enter();
+
+ // Allocate space for locals and initialize them.
+ frame_->AllocateStackSlots();
+
+ // Allocate the local context if needed.
+ int heap_slots = scope_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment cmnt(masm_, "[ allocate local context");
+ // Allocate local context.
+ // Get outer context and create a new context based on it.
+ frame_->PushFunction();
+ Result context;
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ context = frame_->CallStub(&stub, 1);
+ } else {
+ context = frame_->CallRuntime(Runtime::kNewContext, 1);
+ }
- // Update context local.
- frame_->SaveContextRegister();
+ // Update context local.
+ frame_->SaveContextRegister();
- // Verify that the runtime call result and esi agree.
- if (FLAG_debug_code) {
- __ cmp(context.reg(), Operand(esi));
- __ Assert(equal, "Runtime::NewContext should end up in esi");
+ // Verify that the runtime call result and esi agree.
+ if (FLAG_debug_code) {
+ __ cmp(context.reg(), Operand(esi));
+ __ Assert(equal, "Runtime::NewContext should end up in esi");
+ }
}
- }
- // TODO(1241774): Improve this code:
- // 1) only needed if we have a context
- // 2) no need to recompute context ptr every single time
- // 3) don't copy parameter operand code from SlotOperand!
- {
- Comment cmnt2(masm_, "[ copy context parameters into .context");
-
- // Note that iteration order is relevant here! If we have the same
- // parameter twice (e.g., function (x, y, x)), and that parameter
- // needs to be copied into the context, it must be the last argument
- // passed to the parameter that needs to be copied. This is a rare
- // case so we don't check for it, instead we rely on the copying
- // order: such a parameter is copied repeatedly into the same
- // context location and thus the last value is what is seen inside
- // the function.
- for (int i = 0; i < scope_->num_parameters(); i++) {
- Variable* par = scope_->parameter(i);
- Slot* slot = par->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- // The use of SlotOperand below is safe in unspilled code
- // because the slot is guaranteed to be a context slot.
- //
- // There are no parameters in the global scope.
- ASSERT(!scope_->is_global_scope());
- frame_->PushParameterAt(i);
- Result value = frame_->Pop();
- value.ToRegister();
-
- // SlotOperand loads context.reg() with the context object
- // stored to, used below in RecordWrite.
- Result context = allocator_->Allocate();
- ASSERT(context.is_valid());
- __ mov(SlotOperand(slot, context.reg()), value.reg());
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- frame_->Spill(context.reg());
- frame_->Spill(value.reg());
- __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
+ // TODO(1241774): Improve this code:
+ // 1) only needed if we have a context
+ // 2) no need to recompute context ptr every single time
+ // 3) don't copy parameter operand code from SlotOperand!
+ {
+ Comment cmnt2(masm_, "[ copy context parameters into .context");
+
+ // Note that iteration order is relevant here! If we have the same
+ // parameter twice (e.g., function (x, y, x)), and that parameter
+ // needs to be copied into the context, it must be the last argument
+ // passed to the parameter that needs to be copied. This is a rare
+ // case so we don't check for it, instead we rely on the copying
+ // order: such a parameter is copied repeatedly into the same
+ // context location and thus the last value is what is seen inside
+ // the function.
+ for (int i = 0; i < scope_->num_parameters(); i++) {
+ Variable* par = scope_->parameter(i);
+ Slot* slot = par->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ // The use of SlotOperand below is safe in unspilled code
+ // because the slot is guaranteed to be a context slot.
+ //
+ // There are no parameters in the global scope.
+ ASSERT(!scope_->is_global_scope());
+ frame_->PushParameterAt(i);
+ Result value = frame_->Pop();
+ value.ToRegister();
+
+ // SlotOperand loads context.reg() with the context object
+ // stored to, used below in RecordWrite.
+ Result context = allocator_->Allocate();
+ ASSERT(context.is_valid());
+ __ mov(SlotOperand(slot, context.reg()), value.reg());
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ frame_->Spill(context.reg());
+ frame_->Spill(value.reg());
+ __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
+ }
}
}
- }
- // Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in
- // the context.
- if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
- StoreArgumentsObject(true);
- }
+ // Store the arguments object. This must happen after context
+ // initialization because the arguments object may be stored in
+ // the context.
+ if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
+ StoreArgumentsObject(true);
+ }
- // Initialize ThisFunction reference if present.
- if (scope_->is_function_scope() && scope_->function() != NULL) {
- frame_->Push(Factory::the_hole_value());
- StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+ // Initialize ThisFunction reference if present.
+ if (scope_->is_function_scope() && scope_->function() != NULL) {
+ frame_->Push(Factory::the_hole_value());
+ StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+ }
+ } else {
+ // When used as the secondary compiler for splitting, ebp, esi,
+ // and edi have been pushed on the stack. Adjust the virtual
+ // frame to match this state.
+ frame_->Adjust(3);
+ allocator_->Unuse(edi);
}
+ // Initialize the function return target after the locals are set
+ // up, because it needs the expected frame height from the frame.
+ function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
+ function_return_is_shadowed_ = false;
+
// Generate code to 'execute' declarations and initialize functions
// (source elements). In case of an illegal redeclaration we need to
// handle that instead of processing the declarations.
@@ -321,7 +333,7 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
}
// Adjust for function-level loop nesting.
- loop_nesting_ -= fun->loop_nesting();
+ loop_nesting_ -= info->loop_nesting();
// Code generation state must be reset.
ASSERT(state_ == NULL);
@@ -3901,7 +3913,9 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Spill everything, even constants, to the frame.
frame_->SpillAll();
- frame_->CallRuntime(Runtime::kDebugBreak, 0);
+
+ DebuggerStatementStub ces;
+ frame_->CallStub(&ces, 0);
// Ignore the return value.
#endif
}
@@ -4470,8 +4484,6 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
Load(property->value());
frame_->Push(key);
Result ignored = frame_->CallStoreIC();
- // Drop the duplicated receiver and ignore the result.
- frame_->Drop();
break;
}
// Fall through
@@ -5134,7 +5146,7 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
// flat string in a cons string). If that is not the case we would rather go
// to the runtime system now, to flatten the string.
__ mov(temp.reg(), FieldOperand(object.reg(), ConsString::kSecondOffset));
- __ cmp(Operand(temp.reg()), Immediate(Handle<String>(Heap::empty_string())));
+ __ cmp(Operand(temp.reg()), Factory::empty_string());
__ j(not_equal, &slow_case);
// Get the first of the two strings.
__ mov(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
@@ -6704,6 +6716,7 @@ void Reference::SetValue(InitState init_state) {
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
cgen_->StoreToSlot(slot, init_state);
+ cgen_->UnloadReference(this);
break;
}
@@ -6712,6 +6725,7 @@ void Reference::SetValue(InitState init_state) {
cgen_->frame()->Push(GetName());
Result answer = cgen_->frame()->CallStoreIC();
cgen_->frame()->Push(&answer);
+ set_unloaded();
break;
}
@@ -6814,13 +6828,13 @@ void Reference::SetValue(InitState init_state) {
__ nop();
cgen_->frame()->Push(&answer);
}
+ cgen_->UnloadReference(this);
break;
}
default:
UNREACHABLE();
}
- cgen_->UnloadReference(this);
}
@@ -8397,8 +8411,12 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
void RegExpExecStub::Generate(MacroAssembler* masm) {
- // Just jump directly to runtime if regexp entry in generated code is turned
- // off.
+  // Just jump directly to runtime if native RegExp is not selected at compile
+  // time or if the regexp entry in generated code has been turned off by the
+  // runtime flag.
+#ifndef V8_NATIVE_REGEXP
+ __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
+#else // V8_NATIVE_REGEXP
if (!FLAG_regexp_entry_native) {
__ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
return;
@@ -8436,12 +8454,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &runtime);
// Check that the RegExp has been compiled (data contains a fixed array).
__ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
-#ifdef DEBUG
- __ test(ecx, Immediate(kSmiTagMask));
- __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
- __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
- __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
-#endif
+ if (FLAG_debug_code) {
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
+ __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
+ __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
+ }
// ecx: RegExp data (FixedArray)
// Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
@@ -8476,13 +8494,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// ecx: RegExp data (FixedArray)
// edx: Number of capture registers
// Check that the third argument is a positive smi.
+ // Check that the third argument is a positive smi less than the subject
+  // string length. A negative value will be greater (unsigned comparison).
__ mov(eax, Operand(esp, kPreviousIndexOffset));
- __ test(eax, Immediate(kSmiTagMask | 0x80000000));
- __ j(not_zero, &runtime);
- // Check that it is not greater than the subject string length.
__ SmiUntag(eax);
__ cmp(eax, Operand(ebx));
- __ j(greater, &runtime);
+ __ j(above, &runtime);
// ecx: RegExp data (FixedArray)
// edx: Number of capture registers
@@ -8524,17 +8541,20 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// A flat cons string is a cons string where the second part is the empty
// string. In that case the subject string is just the first part of the cons
// string. Also in this case the first part of the cons string is known to be
- // a sequential string.
+ // a sequential string or an external string.
__ mov(edx, ebx);
__ and_(edx, kStringRepresentationMask);
__ cmp(edx, kConsStringTag);
__ j(not_equal, &runtime);
__ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
- __ cmp(Operand(edx), Immediate(Handle<String>(Heap::empty_string())));
+ __ cmp(Operand(edx), Factory::empty_string());
__ j(not_equal, &runtime);
__ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ ASSERT_EQ(0, kSeqStringTag);
+ __ test(ebx, Immediate(kStringRepresentationMask));
+ __ j(not_zero, &runtime);
__ and_(ebx, kStringRepresentationEncodingMask);
__ bind(&seq_string);
@@ -8545,10 +8565,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
  // it has, the field contains a code object; otherwise it contains the hole.
__ cmp(ebx, kStringTag | kSeqStringTag | kTwoByteStringTag);
__ j(equal, &seq_two_byte_string);
-#ifdef DEBUG
- __ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
- __ Check(equal, "Expected sequential ascii string");
-#endif
+ if (FLAG_debug_code) {
+ __ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
+ __ Check(equal, "Expected sequential ascii string");
+ }
__ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
__ Set(edi, Immediate(1)); // Type is ascii.
__ jmp(&check_code);
@@ -8560,23 +8580,24 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Set(edi, Immediate(0)); // Type is two byte.
__ bind(&check_code);
- // Check that the irregexp code has been generated for If it has, the field
- // contains a code object otherwise it contains the hole.
+  // Check that the irregexp code has been generated for the actual string
+  // encoding. If it has, the field contains a code object; otherwise it
+  // contains the hole.
__ CmpObjectType(edx, CODE_TYPE, ebx);
__ j(not_equal, &runtime);
// eax: subject string
// edx: code
- // edi: encoding of subject string (1 if ascii 0 if two_byte);
+ // edi: encoding of subject string (1 if ascii, 0 if two_byte);
// Load used arguments before starting to push arguments for call to native
// RegExp code to avoid handling changing stack height.
__ mov(ebx, Operand(esp, kPreviousIndexOffset));
- __ mov(ecx, Operand(esp, kJSRegExpOffset));
__ SmiUntag(ebx); // Previous index from smi.
// eax: subject string
// ebx: previous index
// edx: code
+  // edi: encoding of subject string (1 if ascii, 0 if two_byte);
// All checks done. Now push arguments for native regexp code.
__ IncrementCounter(&Counters::regexp_entry_native, 1);
@@ -8604,7 +8625,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ jmp(&push_rest);
__ bind(&push_two_byte);
- ASSERT(kShortSize == 2);
__ lea(ecx, FieldOperand(eax, edi, times_2, SeqTwoByteString::kHeaderSize));
__ push(ecx); // Argument 4.
__ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
@@ -8637,6 +8657,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Result must now be exception. If there is no pending exception already a
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
+  // TODO(592): Rerun the RegExp to get the stack overflow exception.
ExternalReference pending_exception(Top::k_pending_exception_address);
__ mov(eax,
Operand::StaticVariable(ExternalReference::the_hole_value_location()));
@@ -8653,6 +8674,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
__ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
__ add(Operand(edx), Immediate(2)); // edx was a smi.
// edx: Number of capture registers
@@ -8692,7 +8715,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ sub(Operand(edx), Immediate(1));
__ j(negative, &done);
// Read the value from the static offsets vector buffer.
- __ mov(edi, Operand(ecx, edx, times_pointer_size, 0));
+ __ mov(edi, Operand(ecx, edx, times_int_size, 0));
// Perform explicit shift
ASSERT_EQ(0, kSmiTag);
__ shl(edi, kSmiTagSize);
@@ -8718,6 +8741,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
__ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
+#endif // V8_NATIVE_REGEXP
}
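The cons-string handling above relies on the "flat cons" invariant spelled out
in the comments: a cons whose second part is the empty string, whose first
part is then sequential or external. In plain C++ terms (illustrative only;
constants from the instance-type encoding):

    if ((type & kStringRepresentationMask) == kConsStringTag &&
        cons->second() == Heap::empty_string()) {
      subject = cons->first();  // still checked for sequential representation
    }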
@@ -9061,13 +9085,6 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
}
-int CEntryStub::MinorKey() {
- ASSERT(result_size_ <= 2);
- // Result returned in eax, or eax+edx if result_size_ is 2.
- return 0;
-}
-
-
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// eax holds the exception.
@@ -9177,7 +9194,6 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
- ExitFrame::Mode mode,
bool do_gc,
bool always_allocate_scope) {
// eax: result parameter for PerformGC, if any
@@ -9187,6 +9203,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// edi: number of arguments including receiver (C callee-saved)
// esi: pointer to the first argument (C callee-saved)
+ // Result returned in eax, or eax+edx if result_size_ is 2.
+
if (do_gc) {
__ mov(Operand(esp, 0 * kPointerSize), eax); // Result.
__ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
@@ -9227,7 +9245,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ j(zero, &failure_returned, not_taken);
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(mode);
+ __ LeaveExitFrame(mode_);
__ ret(0);
// Handling of failure.
@@ -9314,7 +9332,7 @@ void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
}
-void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
+void CEntryStub::Generate(MacroAssembler* masm) {
// eax: number of arguments including receiver
// ebx: pointer to C function (C callee-saved)
// ebp: frame pointer (restored after C call)
@@ -9326,12 +9344,8 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
// of a proper result. The builtin entry handles this by performing
// a garbage collection and retrying the builtin (twice).
- ExitFrame::Mode mode = is_debug_break
- ? ExitFrame::MODE_DEBUG
- : ExitFrame::MODE_NORMAL;
-
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(mode);
+ __ EnterExitFrame(mode_);
// eax: result parameter for PerformGC, if any (setup below)
// ebx: pointer to builtin function (C callee-saved)
@@ -9349,7 +9363,6 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- mode,
false,
false);
@@ -9358,7 +9371,6 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- mode,
true,
false);
@@ -9369,7 +9381,6 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- mode,
true,
true);
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index a81a7d1d..956f4243 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -32,6 +32,7 @@ namespace v8 {
namespace internal {
// Forward declarations
+class CompilationInfo;
class DeferredCode;
class RegisterAllocator;
class RegisterFile;
@@ -293,11 +294,21 @@ enum ArgumentsAllocationMode {
class CodeGenerator: public AstVisitor {
public:
+ // Compilation mode. Either the compiler is used as the primary
+  // compiler and needs to set up everything, or the compiler is used as
+ // the secondary compiler for split compilation and has to handle
+ // bailouts.
+ enum Mode {
+ PRIMARY,
+ SECONDARY
+ };
+
// Takes a function literal, generates code for it. This function should only
// be called by compiler.cc.
static Handle<Code> MakeCode(FunctionLiteral* fun,
Handle<Script> script,
- bool is_eval);
+ bool is_eval,
+ CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
static void MakeCodePrologue(FunctionLiteral* fun);
@@ -341,8 +352,7 @@ class CodeGenerator: public AstVisitor {
private:
// Construction/Destruction
- CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
- virtual ~CodeGenerator() { delete masm_; }
+ CodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval);
// Accessors
Scope* scope() const { return scope_; }
@@ -378,7 +388,7 @@ class CodeGenerator: public AstVisitor {
void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
// Main code generation function
- void GenCode(FunctionLiteral* fun);
+ void Generate(FunctionLiteral* fun, Mode mode, CompilationInfo* info);
// Generate the return sequence code. Should be called no more than
// once per compiled function, immediately after binding the return
@@ -632,6 +642,7 @@ class CodeGenerator: public AstVisitor {
friend class JumpTarget;
friend class Reference;
friend class Result;
+ friend class FastCodeGenerator;
friend class FullCodeGenerator;
friend class FullCodeGenSyntaxChecker;
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index 5ebe1e07..1f34b302 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -94,7 +94,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ Set(eax, Immediate(0)); // no arguments
__ mov(ebx, Immediate(ExternalReference::debug_break()));
- CEntryDebugBreakStub ceb;
+ CEntryStub ceb(1, ExitFrame::MODE_DEBUG);
__ CallStub(&ceb);
// Restore the register values containing object pointers from the expression
@@ -132,12 +132,13 @@ void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- // REgister state for IC store call (from ic-ia32.cc).
+ // Register state for IC store call (from ic-ia32.cc).
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
+ // -- edx : receiver
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit(), false);
+ Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit() | edx.bit(), false);
}
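With CEntryDebugBreakStub gone, the exit-frame mode travels in the stub
itself; assuming the mode parameter defaults to MODE_NORMAL in code-stubs.h,
the two uses look like:

    CEntryStub normal(1);                        // ordinary runtime entry
    CEntryStub debug(1, ExitFrame::MODE_DEBUG);  // debug-break entry, as above

This also removes the is_debug_break flag that GenerateBody used to thread
through GenerateCore in codegen-ia32.cc.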
diff --git a/src/ia32/fast-codegen-ia32.cc b/src/ia32/fast-codegen-ia32.cc
new file mode 100644
index 00000000..2a15733a
--- /dev/null
+++ b/src/ia32/fast-codegen-ia32.cc
@@ -0,0 +1,141 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "fast-codegen.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm())
+
+void FastCodeGenerator::EmitLoadReceiver(Register reg) {
+ // Offset 2 is due to return address and saved frame pointer.
+ int index = 2 + function()->scope()->num_parameters();
+ __ mov(reg, Operand(ebp, index * kPointerSize));
+}
+
+
+void FastCodeGenerator::EmitReceiverMapCheck() {
+ Comment cmnt(masm(), ";; MapCheck(this)");
+ if (FLAG_print_ir) {
+ PrintF("MapCheck(this)\n");
+ }
+
+ EmitLoadReceiver(edx);
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, bailout());
+
+ ASSERT(has_receiver() && receiver()->IsHeapObject());
+ Handle<HeapObject> object = Handle<HeapObject>::cast(receiver());
+ Handle<Map> map(object->map());
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Immediate(map));
+ __ j(not_equal, bailout());
+}
+
+
+void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
+ // Compile global variable accesses as load IC calls. The only live
+ // registers are esi (context) and possibly edx (this). Both are also
+  // saved on the stack, and esi is preserved by the call.
+ __ push(CodeGenerator::GlobalObject());
+ __ mov(ecx, name);
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ if (has_this_properties()) {
+ // Restore this.
+ EmitLoadReceiver(edx);
+ } else {
+    __ nop();  // Anything but 'test eax' tells the IC there is no inlined code here.
+ }
+}
+
+
+void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
+ LookupResult lookup;
+ receiver()->Lookup(*name, &lookup);
+
+ ASSERT(lookup.holder() == *receiver());
+ ASSERT(lookup.type() == FIELD);
+ Handle<Map> map(Handle<HeapObject>::cast(receiver())->map());
+ int index = lookup.GetFieldIndex() - map->inobject_properties();
+ int offset = index * kPointerSize;
+
+ // Negative offsets are inobject properties.
+ if (offset < 0) {
+ offset += map->instance_size();
+ __ mov(ecx, edx); // Copy receiver for write barrier.
+ } else {
+ offset += FixedArray::kHeaderSize;
+ __ mov(ecx, FieldOperand(edx, JSObject::kPropertiesOffset));
+ }
+ // Perform the store.
+ __ mov(FieldOperand(ecx, offset), eax);
+ // Preserve value from write barrier in case it's needed.
+ __ mov(ebx, eax);
+ __ RecordWrite(ecx, offset, ebx, edi);
+}
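+
+  // Worked example of the offset arithmetic above (hypothetical numbers,
+  // ia32 kPointerSize == 4):
+  //   inobject_properties() == 2, GetFieldIndex() == 1:
+  //     index = 1 - 2 = -1, offset = -4
+  //     -> in-object store at map->instance_size() - 4
+  //   inobject_properties() == 2, GetFieldIndex() == 3:
+  //     index = 3 - 2 = 1, offset = +4
+  //     -> store into the properties FixedArray at FixedArray::kHeaderSize + 4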
+
+
+void FastCodeGenerator::Generate(FunctionLiteral* fun, CompilationInfo* info) {
+ ASSERT(function_ == NULL);
+ ASSERT(info_ == NULL);
+ function_ = fun;
+ info_ = info;
+
+ // Save the caller's frame pointer and set up our own.
+ Comment prologue_cmnt(masm(), ";; Prologue");
+ __ push(ebp);
+ __ mov(ebp, esp);
+ __ push(esi); // Context.
+ __ push(edi); // Closure.
+ // Note that we keep a live register reference to esi (context) at this
+ // point.
+
+ // Receiver (this) is allocated to edx if there are this properties.
+ if (has_this_properties()) EmitReceiverMapCheck();
+
+ VisitStatements(fun->body());
+
+ Comment return_cmnt(masm(), ";; Return(<undefined>)");
+ __ mov(eax, Factory::undefined_value());
+
+ Comment epilogue_cmnt(masm(), ";; Epilogue");
+ __ mov(esp, ebp);
+ __ pop(ebp);
+ __ ret((fun->scope()->num_parameters() + 1) * kPointerSize);
+
+ __ bind(&bailout_);
+}
+
+
+#undef __
+
+
+} } // namespace v8::internal
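For orientation, the code layout Generate above produces, with the bailout
falling through into the SECONDARY-mode full code emitted into the same
buffer (a sketch):

    ;; prologue: push ebp / mov ebp, esp / push esi / push edi
    ;;           (three pushes -- matching frame_->Adjust(3) in SECONDARY mode)
    ;; map-check edx, j not_equal -> bailout_
    ;; ...statement code...
    ;; mov eax, undefined; restore esp/ebp; ret (argc + 1) * kPointerSize
    ;; bailout_:            <- the full code generator continues from here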
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 03fe54da..9f9ac56c 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -51,80 +51,88 @@ namespace internal {
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-ia32.h for its layout.
-void FullCodeGenerator::Generate(FunctionLiteral* fun) {
+void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
function_ = fun;
SetFunctionPosition(fun);
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
- __ push(esi); // Callee's context.
- __ push(edi); // Callee's JS Function.
-
- { Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = fun->scope()->num_stack_slots();
- if (locals_count == 1) {
- __ push(Immediate(Factory::undefined_value()));
- } else if (locals_count > 1) {
- __ mov(eax, Immediate(Factory::undefined_value()));
- for (int i = 0; i < locals_count; i++) {
- __ push(eax);
+ if (mode == PRIMARY) {
+ __ push(ebp); // Caller's frame pointer.
+ __ mov(ebp, esp);
+ __ push(esi); // Callee's context.
+ __ push(edi); // Callee's JS Function.
+
+ { Comment cmnt(masm_, "[ Allocate locals");
+ int locals_count = fun->scope()->num_stack_slots();
+ if (locals_count == 1) {
+ __ push(Immediate(Factory::undefined_value()));
+ } else if (locals_count > 1) {
+ __ mov(eax, Immediate(Factory::undefined_value()));
+ for (int i = 0; i < locals_count; i++) {
+ __ push(eax);
+ }
}
}
- }
- bool function_in_register = true;
-
- // Possibly allocate a local context.
- if (fun->scope()->num_heap_slots() > 0) {
- Comment cmnt(masm_, "[ Allocate local context");
- // Argument to NewContext is the function, which is still in edi.
- __ push(edi);
- __ CallRuntime(Runtime::kNewContext, 1);
- function_in_register = false;
- // Context is returned in both eax and esi. It replaces the context
- // passed to us. It's saved in the stack and kept live in esi.
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
-
- // Copy parameters into context if necessary.
- int num_parameters = fun->scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Slot* slot = fun->scope()->parameter(i)->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ mov(eax, Operand(ebp, parameter_offset));
- // Store it in the context
- __ mov(Operand(esi, Context::SlotOffset(slot->index())), eax);
+ bool function_in_register = true;
+
+ // Possibly allocate a local context.
+ if (fun->scope()->num_heap_slots() > 0) {
+ Comment cmnt(masm_, "[ Allocate local context");
+ // Argument to NewContext is the function, which is still in edi.
+ __ push(edi);
+ __ CallRuntime(Runtime::kNewContext, 1);
+ function_in_register = false;
+ // Context is returned in both eax and esi. It replaces the context
+ // passed to us. It's saved in the stack and kept live in esi.
+ __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
+
+ // Copy parameters into context if necessary.
+ int num_parameters = fun->scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = fun->scope()->parameter(i)->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ mov(eax, Operand(ebp, parameter_offset));
+ // Store it in the context.
+ int context_offset = Context::SlotOffset(slot->index());
+ __ mov(Operand(esi, context_offset), eax);
+ // Update the write barrier. This clobbers all involved
+ // registers, so we have to use a third register to avoid
+ // clobbering esi.
+ __ mov(ecx, esi);
+ __ RecordWrite(ecx, context_offset, eax, ebx);
+ }
}
}
- }
- Variable* arguments = fun->scope()->arguments()->AsVariable();
- if (arguments != NULL) {
- // Function uses arguments object.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (function_in_register) {
- __ push(edi);
- } else {
- __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ Variable* arguments = fun->scope()->arguments()->AsVariable();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (function_in_register) {
+ __ push(edi);
+ } else {
+ __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ // Receiver is just before the parameters on the caller's stack.
+ __ lea(edx, Operand(ebp, StandardFrameConstants::kCallerSPOffset +
+ fun->num_parameters() * kPointerSize));
+ __ push(edx);
+ __ push(Immediate(Smi::FromInt(fun->num_parameters())));
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+ // The stub will rewrite receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ CallStub(&stub);
+ __ mov(ecx, eax); // Duplicate result.
+ Move(arguments->slot(), eax, ebx, edx);
+ Slot* dot_arguments_slot =
+ fun->scope()->arguments_shadow()->AsVariable()->slot();
+ Move(dot_arguments_slot, ecx, ebx, edx);
}
- // Receiver is just before the parameters on the caller's stack.
- __ lea(edx, Operand(ebp, StandardFrameConstants::kCallerSPOffset +
- fun->num_parameters() * kPointerSize));
- __ push(edx);
- __ push(Immediate(Smi::FromInt(fun->num_parameters())));
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiver and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- __ CallStub(&stub);
- __ mov(ecx, eax); // Duplicate result.
- Move(arguments->slot(), eax, ebx, edx);
- Slot* dot_arguments_slot =
- fun->scope()->arguments_shadow()->AsVariable()->slot();
- Move(dot_arguments_slot, ecx, ebx, edx);
}
{ Comment cmnt(masm_, "[ Declarations");
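
The context-slot copy above is the one store in the prologue that needs a write barrier, and esi must survive it because it is the context register. A minimal sketch of the pattern, using the same MacroAssembler calls as the hunk above:

    int context_offset = Context::SlotOffset(slot->index());
    __ mov(Operand(esi, context_offset), eax);  // Store parameter into slot.
    __ mov(ecx, esi);  // RecordWrite clobbers every register handed to it,
    __ RecordWrite(ecx, context_offset, eax, ebx);  // so pass a copy of esi.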
@@ -695,7 +703,8 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
__ mov(CodeGenerator::ContextOperand(esi, slot->index()),
result_register());
int offset = Context::SlotOffset(slot->index());
- __ RecordWrite(esi, offset, result_register(), ecx);
+ __ mov(ebx, esi);
+ __ RecordWrite(ebx, offset, result_register(), ecx);
}
break;
@@ -917,10 +926,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (key->handle()->IsSymbol()) {
VisitForValue(value, kAccumulator);
__ mov(ecx, Immediate(key->handle()));
+ __ mov(edx, Operand(esp, 0));
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
__ nop();
- // StoreIC leaves the receiver on the stack.
break;
}
// Fall through.
@@ -1046,12 +1055,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// assignment. Right-hand-side value is passed in eax, variable name in
// ecx, and the global object in edx.
__ mov(ecx, var->name());
- __ push(CodeGenerator::GlobalObject());
+ __ mov(edx, CodeGenerator::GlobalObject());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
__ nop();
- // Overwrite the receiver on the stack with the result if needed.
- DropAndApply(1, context, eax);
+ Apply(context, eax);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
__ push(result_register()); // Value.
@@ -1111,6 +1119,11 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
__ mov(ecx, prop->key()->AsLiteral()->handle());
+ if (expr->ends_initialization_block()) {
+ __ mov(edx, Operand(esp, 0));
+ } else {
+ __ pop(edx);
+ }
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
__ nop();
@@ -1121,9 +1134,10 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ push(Operand(esp, kPointerSize)); // Receiver is under value.
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(eax);
+ DropAndApply(1, context_, eax);
+ } else {
+ Apply(context_, eax);
}
-
- DropAndApply(1, context_, eax);
}
@@ -1476,6 +1490,45 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
+ case Token::SUB: {
+ Comment cmt(masm_, "[ UnaryOperation (SUB)");
+ bool overwrite =
+ (expr->expression()->AsBinaryOperation() != NULL &&
+ expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ GenericUnaryOpStub stub(Token::SUB, overwrite);
+ // GenericUnaryOpStub expects the argument to be in the
+ // accumulator register eax.
+ VisitForValue(expr->expression(), kAccumulator);
+ __ CallStub(&stub);
+ Apply(context_, eax);
+ break;
+ }
+
+ case Token::BIT_NOT: {
+ Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
+ bool overwrite =
+ (expr->expression()->AsBinaryOperation() != NULL &&
+ expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+ // GenericUnaryOpStub expects the argument to be in the
+ // accumulator register eax.
+ VisitForValue(expr->expression(), kAccumulator);
+ // Avoid calling the stub for Smis.
+ Label smi, done;
+ __ test(result_register(), Immediate(kSmiTagMask));
+ __ j(zero, &smi);
+ // Non-smi: call stub leaving result in accumulator register.
+ __ CallStub(&stub);
+ __ jmp(&done);
+ // Perform operation directly on Smis.
+ __ bind(&smi);
+ __ not_(result_register());
+ __ and_(result_register(), ~kSmiTagMask); // Remove inverted smi-tag.
+ __ bind(&done);
+ Apply(context_, result_register());
+ break;
+ }
+
default:
UNREACHABLE();
}
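
The smi fast path for BIT_NOT above works because ia32 smis encode a value v as v << 1 with a zero tag bit: inverting the encoding yields the encoding of ~v with the tag bit set, so clearing the tag bit (the `and_` with ~kSmiTagMask) is all that is needed. A worked example with v = 5, assuming 32-bit registers:

    smi(5)    = 0...01010          (5 << 1)
    not_      = 1...10101          (tag bit now 1: not a valid smi)
    and_ ~1   = 1...10100          = -12 = (-6) << 1 = smi(~5)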
@@ -1603,18 +1656,18 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
case NAMED_PROPERTY: {
__ mov(ecx, prop->key()->AsLiteral()->handle());
+ __ pop(edx);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
// This nop signals to the IC that there is no inlined code at the call
// site for it to patch.
__ nop();
if (expr->is_postfix()) {
- __ Drop(1); // Result is on the stack under the receiver.
if (context_ != Expression::kEffect) {
ApplyTOS(context_);
}
} else {
- DropAndApply(1, context_, eax);
+ Apply(context_, eax);
}
break;
}
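
All of the store sites rewritten above now follow the same register-based StoreIC convention instead of leaving the receiver on the stack. A call-site sketch (the `name` and `receiver` operands are illustrative):

    __ mov(ecx, Immediate(name));   // Property name.
    __ mov(edx, receiver);          // Receiver, no longer read from esp.
    // Value is already in eax.
    Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
    __ call(ic, RelocInfo::CODE_TARGET);
    __ nop();  // Marks the site as having no inlined code to patch.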
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index ebc2cfa9..44dae3b4 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -180,7 +180,6 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// -- esp[0] : return address
// -- esp[4] : receiver
// -----------------------------------
-
Label miss;
__ mov(eax, Operand(esp, kPointerSize));
@@ -197,7 +196,6 @@ void LoadIC::GenerateStringLength(MacroAssembler* masm) {
// -- esp[0] : return address
// -- esp[4] : receiver
// -----------------------------------
-
Label miss;
__ mov(eax, Operand(esp, kPointerSize));
@@ -214,7 +212,6 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
// -- esp[0] : return address
// -- esp[4] : receiver
// -----------------------------------
-
Label miss;
__ mov(eax, Operand(esp, kPointerSize));
@@ -1039,7 +1036,6 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
-
Label miss, global_object, non_global_object;
// Get the receiver of the function from the stack; 1 ~ return address.
@@ -1178,7 +1174,6 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -- esp[0] : return address
// -- esp[4] : receiver
// -----------------------------------
-
Label miss, probe, global;
__ mov(eax, Operand(esp, kPointerSize));
@@ -1384,19 +1379,17 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : receiver
// -----------------------------------
- // Get the receiver from the stack and probe the stub cache.
- __ mov(edx, Operand(esp, 4));
Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
NOT_IN_LOOP,
MONOMORPHIC);
StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
// Cache miss: Jump to runtime.
- Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
+ GenerateMiss(masm);
}
@@ -1404,12 +1397,12 @@ void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : transition map
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : receiver
// -----------------------------------
__ pop(ebx);
- __ push(Operand(esp, 0)); // receiver
+ __ push(edx); // receiver
__ push(ecx); // transition map
__ push(eax); // value
__ push(ebx); // return address
@@ -1420,23 +1413,22 @@ void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
}
-void StoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : receiver
// -----------------------------------
- // Move the return address below the arguments.
__ pop(ebx);
- __ push(Operand(esp, 0));
+ __ push(edx);
__ push(ecx);
__ push(eax);
__ push(ebx);
// Perform tail call to the entry.
- __ TailCallRuntime(f, 3, 1);
+ __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
}
@@ -1451,7 +1443,6 @@ void KeyedStoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
// -- esp[8] : receiver
// -----------------------------------
- // Move the return address below the arguments.
__ pop(ecx);
__ push(Operand(esp, 1 * kPointerSize));
__ push(Operand(esp, 1 * kPointerSize));
@@ -1472,7 +1463,6 @@ void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
// -- esp[8] : receiver
// -----------------------------------
- // Move the return address below the arguments.
__ pop(ebx);
__ push(Operand(esp, 1 * kPointerSize));
__ push(ecx);
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index a16c1033..19a380b0 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -147,6 +147,11 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
// All registers are clobbered by the operation.
void MacroAssembler::RecordWrite(Register object, int offset,
Register value, Register scratch) {
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are esi.
+ ASSERT(!object.is(esi) && !value.is(esi) && !scratch.is(esi));
+
// First, check if a remembered set write is even needed. The tests below
// catch stores of Smis and stores into young gen (which does not have space
// for the remembered set bits).
@@ -210,6 +215,14 @@ void MacroAssembler::RecordWrite(Register object, int offset,
}
bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (FLAG_debug_code) {
+ mov(object, Immediate(bit_cast<int32_t>(kZapValue)));
+ mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
+ mov(scratch, Immediate(bit_cast<int32_t>(kZapValue)));
+ }
}
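
The debug-mode zapping exists to turn a silent clobber into a loud one: any code that keeps using an input register after RecordWrite now reads kZapValue instead of a stale-but-plausible pointer. A sketch of the kind of bug it catches (kSomeOffset is illustrative):

    __ RecordWrite(ecx, offset, eax, ebx);
    // Bug: eax was an input to RecordWrite and is clobbered; in debug
    // builds it now holds kZapValue, so this store fails fast.
    __ mov(Operand(ebp, kSomeOffset), eax);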
@@ -1098,10 +1111,14 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
return;
}
- Runtime::FunctionId function_id =
- static_cast<Runtime::FunctionId>(f->stub_id);
- RuntimeStub stub(function_id, num_arguments);
- CallStub(&stub);
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ Set(eax, Immediate(num_arguments));
+ mov(ebx, Immediate(ExternalReference(f)));
+ CEntryStub ces(1);
+ CallStub(&ces);
}
@@ -1114,10 +1131,14 @@ Object* MacroAssembler::TryCallRuntime(Runtime::Function* f,
return Heap::undefined_value();
}
- Runtime::FunctionId function_id =
- static_cast<Runtime::FunctionId>(f->stub_id);
- RuntimeStub stub(function_id, num_arguments);
- return TryCallStub(&stub);
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ Set(eax, Immediate(num_arguments));
+ mov(ebx, Immediate(ExternalReference(f)));
+ CEntryStub ces(1);
+ return TryCallStub(&ces);
}
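
With the RuntimeStub path gone, a call such as __ CallRuntime(Runtime::kNewContext, 1) in the prologue earlier in this patch now expands to the CEntryStub sequence: eax carries the argument count, ebx the C entry point, and the stub does the rest. A sketch, with f the Runtime::Function* for kNewContext:

    Set(eax, Immediate(1));                      // Number of arguments.
    mov(ebx, Immediate(ExternalReference(f)));   // Runtime entry point.
    CEntryStub ces(1);                           // Result size of one.
    CallStub(&ces);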
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 3f000ee4..cc245602 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -331,7 +331,7 @@ class MacroAssembler: public Assembler {
// Eventually this should be used for all C calls.
void CallRuntime(Runtime::Function* f, int num_arguments);
- // Call a runtime function, returning the RuntimeStub object called.
+ // Call a runtime function, returning the CodeStub object called.
// Try to generate the stub code if necessary. Do not perform a GC
// but instead return a retry after GC failure.
Object* TryCallRuntime(Runtime::Function* f, int num_arguments);
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 5961294a..7acf81c9 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -161,6 +161,7 @@ static void PushInterceptorArguments(MacroAssembler* masm,
__ push(holder);
__ push(name);
InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+ ASSERT(!Heap::InNewSpace(interceptor));
__ mov(receiver, Immediate(Handle<Object>(interceptor)));
__ push(receiver);
__ push(FieldOperand(receiver, InterceptorInfo::kDataOffset));
@@ -347,19 +348,6 @@ static void CompileLoadInterceptor(Compiler* compiler,
}
-static void LookupPostInterceptor(JSObject* holder,
- String* name,
- LookupResult* lookup) {
- holder->LocalLookupRealNamedProperty(name, lookup);
- if (lookup->IsNotFound()) {
- Object* proto = holder->GetPrototype();
- if (proto != Heap::null_value()) {
- proto->Lookup(name, lookup);
- }
- }
-}
-
-
class LoadInterceptorCompiler BASE_EMBEDDED {
public:
explicit LoadInterceptorCompiler(Register name) : name_(name) {}
@@ -559,7 +547,6 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
- ASSERT(function->is_compiled());
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
__ InvokeCode(code, expected, arguments_,
@@ -1255,21 +1242,18 @@ Object* StoreStubCompiler::CompileStoreField(JSObject* object,
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : receiver
// -----------------------------------
Label miss;
- // Get the object from the stack.
- __ mov(ebx, Operand(esp, 1 * kPointerSize));
-
// Generate store field code. Trashes the name register.
GenerateStoreField(masm(),
Builtins::StoreIC_ExtendStorage,
object,
index,
transition,
- ebx, ecx, edx,
+ edx, ecx, ebx,
&miss);
// Handle store cache miss.
@@ -1289,26 +1273,23 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : receiver
// -----------------------------------
Label miss;
- // Get the object from the stack.
- __ mov(ebx, Operand(esp, 1 * kPointerSize));
-
// Check that the object isn't a smi.
- __ test(ebx, Immediate(kSmiTagMask));
+ __ test(edx, Immediate(kSmiTagMask));
__ j(zero, &miss, not_taken);
// Check that the map of the object hasn't changed.
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(Handle<Map>(object->map())));
__ j(not_equal, &miss, not_taken);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(ebx, edx, &miss);
+ __ CheckAccessGlobalProxy(edx, ebx, &miss);
}
// Stub never generated for non-global objects that require access
@@ -1316,7 +1297,7 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
__ pop(ebx); // remove the return address
- __ push(Operand(esp, 0)); // receiver
+ __ push(edx); // receiver
__ push(Immediate(Handle<AccessorInfo>(callback))); // callback info
__ push(ecx); // name
__ push(eax); // value
@@ -1329,7 +1310,6 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
// Handle store cache miss.
__ bind(&miss);
- __ mov(ecx, Immediate(Handle<String>(name))); // restore name
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ jmp(ic, RelocInfo::CODE_TARGET);
@@ -1343,26 +1323,23 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : receiver
// -----------------------------------
Label miss;
- // Get the object from the stack.
- __ mov(ebx, Operand(esp, 1 * kPointerSize));
-
// Check that the object isn't a smi.
- __ test(ebx, Immediate(kSmiTagMask));
+ __ test(edx, Immediate(kSmiTagMask));
__ j(zero, &miss, not_taken);
// Check that the map of the object hasn't changed.
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(Handle<Map>(receiver->map())));
__ j(not_equal, &miss, not_taken);
// Perform global security token check if needed.
if (receiver->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(ebx, edx, &miss);
+ __ CheckAccessGlobalProxy(edx, ebx, &miss);
}
// Stub never generated for non-global objects that require access
@@ -1370,7 +1347,7 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
__ pop(ebx); // remove the return address
- __ push(Operand(esp, 0)); // receiver
+ __ push(edx); // receiver
__ push(ecx); // name
__ push(eax); // value
__ push(ebx); // restore return address
@@ -1382,7 +1359,6 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
// Handle store cache miss.
__ bind(&miss);
- __ mov(ecx, Immediate(Handle<String>(name))); // restore name
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ jmp(ic, RelocInfo::CODE_TARGET);
@@ -1397,14 +1373,13 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
+ // -- edx : receiver
// -- esp[0] : return address
- // -- esp[4] : receiver
// -----------------------------------
Label miss;
// Check that the map of the global has not changed.
- __ mov(ebx, Operand(esp, kPointerSize));
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(Handle<Map>(object->map())));
__ j(not_equal, &miss, not_taken);
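
The stub compilers above all absorb the same convention change: the receiver arrives in edx, which removes the old load from the stack but costs edx as a scratch register, hence the swapped register arguments. For the field store, as in the first hunk of this file:

    GenerateStoreField(masm(),
                       Builtins::StoreIC_ExtendStorage,
                       object, index, transition,
                       edx,  // receiver: now passed in, not loaded from esp
                       ecx,  // name
                       ebx,  // scratch: replaces edx, which is live now
                       &miss);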
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
index 104d1875..9267507c 100644
--- a/src/ia32/virtual-frame-ia32.cc
+++ b/src/ia32/virtual-frame-ia32.cc
@@ -899,31 +899,45 @@ Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
Result VirtualFrame::CallStoreIC() {
// Name, value, and receiver are on top of the frame. The IC
- // expects name in ecx, value in eax, and receiver on the stack. It
- // does not drop the receiver.
+ // expects name in ecx, value in eax, and receiver in edx.
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Result name = Pop();
Result value = Pop();
- PrepareForCall(1, 0); // One stack arg, not callee-dropped.
+ Result receiver = Pop();
+ PrepareForCall(0, 0);
- if (value.is_register() && value.reg().is(ecx)) {
- if (name.is_register() && name.reg().is(eax)) {
+ // Optimized for the case in which name is a constant value.
+ if (name.is_register() && (name.reg().is(edx) || name.reg().is(eax))) {
+ if (!is_used(ecx)) {
+ name.ToRegister(ecx);
+ } else if (!is_used(ebx)) {
+ name.ToRegister(ebx);
+ } else {
+ ASSERT(!is_used(edi)); // Only three results are live, so edi is free.
+ name.ToRegister(edi);
+ }
+ }
+ // Now name is not in edx or eax, so we can fix them, then move name to ecx.
+ if (value.is_register() && value.reg().is(edx)) {
+ if (receiver.is_register() && receiver.reg().is(eax)) {
// Wrong registers.
- __ xchg(eax, ecx);
+ __ xchg(eax, edx);
} else {
- // Register eax is free for value, which frees ecx for name.
+ // Register eax is free for value, which frees edx for receiver.
value.ToRegister(eax);
- name.ToRegister(ecx);
+ receiver.ToRegister(edx);
}
} else {
- // Register ecx is free for name, which guarantees eax is free for
+ // Register edx is free for receiver, which guarantees eax is free for
// value.
- name.ToRegister(ecx);
+ receiver.ToRegister(edx);
value.ToRegister(eax);
}
-
+ // Receiver and value are in the right place, so ecx is free for name.
+ name.ToRegister(ecx);
name.Unuse();
value.Unuse();
+ receiver.Unuse();
return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
}
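
The extra shuffling in CallStoreIC above is about aliasing: three live Results must reach eax, ecx, and edx without stepping on each other. Name is evicted from edx/eax first (into whichever of ecx, ebx, or edi is free), and then the one genuinely circular case, value in edx with receiver in eax, is resolved with a single xchg. A worked case with that illustrative allocation:

    // value is in edx, receiver is in eax:
    __ xchg(eax, edx);     // One instruction fixes both registers.
    name.ToRegister(ecx);  // ecx was freed by the earlier eviction.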
diff --git a/src/ic.cc b/src/ic.cc
index d823c910..8fc9ddb8 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1302,9 +1302,9 @@ Object* CallIC_Miss(Arguments args) {
Handle<JSFunction> function = Handle<JSFunction>(JSFunction::cast(result));
InLoopFlag in_loop = ic.target()->ic_in_loop();
if (in_loop == IN_LOOP) {
- CompileLazyInLoop(function, CLEAR_EXCEPTION);
+ CompileLazyInLoop(function, args.at<Object>(0), CLEAR_EXCEPTION);
} else {
- CompileLazy(function, CLEAR_EXCEPTION);
+ CompileLazy(function, args.at<Object>(0), CLEAR_EXCEPTION);
}
return *function;
}
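
CompileLazy and CompileLazyInLoop now receive the call's receiver (args.at<Object>(0)); presumably this is what lets lazy compilation specialize on the receiver, matching the EmitReceiverMapCheck emitted by the fast code generator earlier in this patch. The call shape is otherwise unchanged:

    // IN_LOOP variant; CLEAR_EXCEPTION as before.
    CompileLazyInLoop(function, args.at<Object>(0), CLEAR_EXCEPTION);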
@@ -1395,16 +1395,6 @@ Object* SharedStoreIC_ExtendStorage(Arguments args) {
}
-void StoreIC::GenerateInitialize(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
-}
-
-
// Used from ic_<arch>.cc.
Object* KeyedStoreIC_Miss(Arguments args) {
NoHandleAllocation na;
diff --git a/src/ic.h b/src/ic.h
index 8f0eb376..a991e30a 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -348,14 +348,12 @@ class StoreIC: public IC {
Handle<Object> value);
// Code generators for stub routines. Only called once at startup.
- static void GenerateInitialize(MacroAssembler* masm);
+ static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
static void GenerateMiss(MacroAssembler* masm);
static void GenerateMegamorphic(MacroAssembler* masm);
static void GenerateExtendStorage(MacroAssembler* masm);
private:
- static void Generate(MacroAssembler* masm, const ExternalReference& f);
-
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupResult* lookup,
diff --git a/src/json-delay.js b/src/json-delay.js
index 1a6f0085..7788f516 100644
--- a/src/json-delay.js
+++ b/src/json-delay.js
@@ -29,7 +29,7 @@ var $JSON = global.JSON;
function ParseJSONUnfiltered(text) {
var s = $String(text);
- var f = %CompileString("(" + text + ")", true);
+ var f = %CompileString(text, true);
return f();
}
diff --git a/src/mirror-delay.js b/src/mirror-delay.js
index 0269f1f6..1487ce57 100644
--- a/src/mirror-delay.js
+++ b/src/mirror-delay.js
@@ -2089,8 +2089,10 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
content.evalFromScript =
this.serializeReference(mirror.evalFromScript());
var evalFromLocation = mirror.evalFromLocation()
- content.evalFromLocation = { line: evalFromLocation.line,
- column: evalFromLocation.column}
+ if (evalFromLocation) {
+ content.evalFromLocation = { line: evalFromLocation.line,
+ column: evalFromLocation.column };
+ }
if (mirror.evalFromFunctionName()) {
content.evalFromFunctionName = mirror.evalFromFunctionName();
}
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 6d48b5b0..4355fe9e 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1349,7 +1349,7 @@ void FixedArray::set(int index, Object* value) {
}
-WriteBarrierMode HeapObject::GetWriteBarrierMode() {
+WriteBarrierMode HeapObject::GetWriteBarrierMode(const AssertNoAllocation&) {
if (Heap::InNewSpace(this)) return SKIP_WRITE_BARRIER;
return UPDATE_WRITE_BARRIER;
}
@@ -1367,6 +1367,7 @@ void FixedArray::set(int index,
void FixedArray::fast_set(FixedArray* array, int index, Object* value) {
ASSERT(index >= 0 && index < array->length());
+ ASSERT(!Heap::InNewSpace(value));
WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
}
@@ -1547,9 +1548,7 @@ uint32_t NumberDictionary::max_number_key() {
}
void NumberDictionary::set_requires_slow_elements() {
- set(kMaxNumberKeyIndex,
- Smi::FromInt(kRequiresSlowElementsMask),
- SKIP_WRITE_BARRIER);
+ set(kMaxNumberKeyIndex, Smi::FromInt(kRequiresSlowElementsMask));
}
@@ -2972,7 +2971,8 @@ void Dictionary<Shape, Key>::SetEntry(int entry,
PropertyDetails details) {
ASSERT(!key->IsString() || details.IsDeleted() || details.index() > 0);
int index = HashTable<Shape, Key>::EntryToIndex(entry);
- WriteBarrierMode mode = FixedArray::GetWriteBarrierMode();
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = FixedArray::GetWriteBarrierMode(no_gc);
FixedArray::set(index, key, mode);
FixedArray::set(index+1, value, mode);
FixedArray::fast_set(this, index+2, details.AsSmi());
@@ -3006,8 +3006,13 @@ void JSArray::EnsureSize(int required_size) {
}
+void JSArray::set_length(Smi* length) {
+ set_length(static_cast<Object*>(length), SKIP_WRITE_BARRIER);
+}
+
+
void JSArray::SetContent(FixedArray* storage) {
- set_length(Smi::FromInt(storage->length()), SKIP_WRITE_BARRIER);
+ set_length(Smi::FromInt(storage->length()));
set_elements(storage);
}
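
The new GetWriteBarrierMode signature threads an AssertNoAllocation token through every caller, so a cached SKIP_WRITE_BARRIER cannot outlive the no-GC region that justified it. The canonical copy loop, as used repeatedly in objects.cc below:

    AssertNoAllocation no_gc;
    WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
    for (int i = 0; i < len; i++) {
      result->set(i, get(i), mode);  // Barrier skipped for new-space arrays.
    }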
diff --git a/src/objects.cc b/src/objects.cc
index c76fc833..6dd1d492 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -3200,8 +3200,9 @@ Object* FixedArray::UnionOfKeys(FixedArray* other) {
Object* obj = Heap::AllocateFixedArray(len0 + extra);
if (obj->IsFailure()) return obj;
// Fill in the content
+ AssertNoAllocation no_gc;
FixedArray* result = FixedArray::cast(obj);
- WriteBarrierMode mode = result->GetWriteBarrierMode();
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
for (int i = 0; i < len0; i++) {
result->set(i, get(i), mode);
}
@@ -3225,10 +3226,11 @@ Object* FixedArray::CopySize(int new_length) {
if (obj->IsFailure()) return obj;
FixedArray* result = FixedArray::cast(obj);
// Copy the content
+ AssertNoAllocation no_gc;
int len = length();
if (new_length < len) len = new_length;
result->set_map(map());
- WriteBarrierMode mode = result->GetWriteBarrierMode();
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
for (int i = 0; i < len; i++) {
result->set(i, get(i), mode);
}
@@ -3237,7 +3239,8 @@ Object* FixedArray::CopySize(int new_length) {
void FixedArray::CopyTo(int pos, FixedArray* dest, int dest_pos, int len) {
- WriteBarrierMode mode = dest->GetWriteBarrierMode();
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = dest->GetWriteBarrierMode(no_gc);
for (int index = 0; index < len; index++) {
dest->set(dest_pos+index, get(pos+index), mode);
}
@@ -3271,8 +3274,7 @@ Object* DescriptorArray::Allocate(int number_of_descriptors) {
if (array->IsFailure()) return array;
result->set(kContentArrayIndex, array);
result->set(kEnumerationIndexIndex,
- Smi::FromInt(PropertyDetails::kInitialIndex),
- SKIP_WRITE_BARRIER);
+ Smi::FromInt(PropertyDetails::kInitialIndex));
return result;
}
@@ -4700,8 +4702,8 @@ void Map::ClearNonLiveTransitions(Object* real_prototype) {
ASSERT(target->IsHeapObject());
if (!target->IsMarked()) {
ASSERT(target->IsMap());
- contents->set(i + 1, NullDescriptorDetails, SKIP_WRITE_BARRIER);
- contents->set(i, Heap::null_value(), SKIP_WRITE_BARRIER);
+ contents->set(i + 1, NullDescriptorDetails);
+ contents->set_null(i);
ASSERT(target->prototype() == this ||
target->prototype() == real_prototype);
// Getter prototype() is read-only, set_prototype() has side effects.
@@ -5161,7 +5163,8 @@ void JSObject::SetFastElements(FixedArray* elems) {
uint32_t len = static_cast<uint32_t>(elems->length());
for (uint32_t i = 0; i < len; i++) ASSERT(elems->get(i)->IsTheHole());
#endif
- WriteBarrierMode mode = elems->GetWriteBarrierMode();
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = elems->GetWriteBarrierMode(no_gc);
switch (GetElementsKind()) {
case FAST_ELEMENTS: {
FixedArray* old_elements = FixedArray::cast(elements());
@@ -5228,7 +5231,7 @@ Object* JSObject::SetSlowElements(Object* len) {
Object* JSArray::Initialize(int capacity) {
ASSERT(capacity >= 0);
- set_length(Smi::FromInt(0), SKIP_WRITE_BARRIER);
+ set_length(Smi::FromInt(0));
FixedArray* new_elements;
if (capacity == 0) {
new_elements = Heap::empty_fixed_array();
@@ -5288,7 +5291,7 @@ Object* JSObject::SetElementsLength(Object* len) {
for (int i = value; i < old_length; i++) {
FixedArray::cast(elements())->set_the_hole(i);
}
- JSArray::cast(this)->set_length(smi_length, SKIP_WRITE_BARRIER);
+ JSArray::cast(this)->set_length(Smi::cast(smi_length));
}
return this;
}
@@ -5298,8 +5301,9 @@ Object* JSObject::SetElementsLength(Object* len) {
!ShouldConvertToSlowElements(new_capacity)) {
Object* obj = Heap::AllocateFixedArrayWithHoles(new_capacity);
if (obj->IsFailure()) return obj;
- if (IsJSArray()) JSArray::cast(this)->set_length(smi_length,
- SKIP_WRITE_BARRIER);
+ if (IsJSArray()) {
+ JSArray::cast(this)->set_length(Smi::cast(smi_length));
+ }
SetFastElements(FixedArray::cast(obj));
return this;
}
@@ -5318,7 +5322,7 @@ Object* JSObject::SetElementsLength(Object* len) {
static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
element_dictionary()->RemoveNumberEntries(value, old_length);
}
- JSArray::cast(this)->set_length(smi_length, SKIP_WRITE_BARRIER);
+ JSArray::cast(this)->set_length(Smi::cast(smi_length));
}
return this;
}
@@ -5343,8 +5347,7 @@ Object* JSObject::SetElementsLength(Object* len) {
Object* obj = Heap::AllocateFixedArray(1);
if (obj->IsFailure()) return obj;
FixedArray::cast(obj)->set(0, len);
- if (IsJSArray()) JSArray::cast(this)->set_length(Smi::FromInt(1),
- SKIP_WRITE_BARRIER);
+ if (IsJSArray()) JSArray::cast(this)->set_length(Smi::FromInt(1));
set_elements(FixedArray::cast(obj));
return this;
}
@@ -5614,8 +5617,7 @@ Object* JSObject::SetFastElement(uint32_t index, Object* value) {
CHECK(Array::IndexFromObject(JSArray::cast(this)->length(),
&array_length));
if (index >= array_length) {
- JSArray::cast(this)->set_length(Smi::FromInt(index + 1),
- SKIP_WRITE_BARRIER);
+ JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
}
}
return value;
@@ -5631,8 +5633,9 @@ Object* JSObject::SetFastElement(uint32_t index, Object* value) {
Object* obj = Heap::AllocateFixedArrayWithHoles(new_capacity);
if (obj->IsFailure()) return obj;
SetFastElements(FixedArray::cast(obj));
- if (IsJSArray()) JSArray::cast(this)->set_length(Smi::FromInt(index + 1),
- SKIP_WRITE_BARRIER);
+ if (IsJSArray()) {
+ JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
+ }
FixedArray::cast(elements())->set(index, value);
return value;
}
@@ -6129,7 +6132,8 @@ template<typename Shape, typename Key>
void Dictionary<Shape, Key>::CopyValuesTo(FixedArray* elements) {
int pos = 0;
int capacity = HashTable<Shape, Key>::Capacity();
- WriteBarrierMode mode = elements->GetWriteBarrierMode();
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
for (int i = 0; i < capacity; i++) {
Object* k = Dictionary<Shape, Key>::KeyAt(i);
if (Dictionary<Shape, Key>::IsKey(k)) {
@@ -6500,7 +6504,7 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
for (int i = 0; i < length; i++) {
if (!FixedArray::cast(elements())->get(i)->IsTheHole()) {
if (storage != NULL) {
- storage->set(counter, Smi::FromInt(i), SKIP_WRITE_BARRIER);
+ storage->set(counter, Smi::FromInt(i));
}
counter++;
}
@@ -6512,7 +6516,7 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
int length = PixelArray::cast(elements())->length();
while (counter < length) {
if (storage != NULL) {
- storage->set(counter, Smi::FromInt(counter), SKIP_WRITE_BARRIER);
+ storage->set(counter, Smi::FromInt(counter));
}
counter++;
}
@@ -6529,7 +6533,7 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
int length = ExternalArray::cast(elements())->length();
while (counter < length) {
if (storage != NULL) {
- storage->set(counter, Smi::FromInt(counter), SKIP_WRITE_BARRIER);
+ storage->set(counter, Smi::FromInt(counter));
}
counter++;
}
@@ -6554,7 +6558,7 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
String* str = String::cast(val);
if (storage) {
for (int i = 0; i < str->length(); i++) {
- storage->set(counter + i, Smi::FromInt(i), SKIP_WRITE_BARRIER);
+ storage->set(counter + i, Smi::FromInt(i));
}
}
counter += str->length();
@@ -6886,8 +6890,10 @@ Object* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
Object* obj = Allocate(nof * 2);
if (obj->IsFailure()) return obj;
+
+ AssertNoAllocation no_gc;
HashTable* table = HashTable::cast(obj);
- WriteBarrierMode mode = table->GetWriteBarrierMode();
+ WriteBarrierMode mode = table->GetWriteBarrierMode(no_gc);
// Copy prefix to new array.
for (int i = kPrefixStartIndex;
@@ -7134,7 +7140,7 @@ Object* JSObject::PrepareElementsForSort(uint32_t limit) {
// Split elements into defined, undefined and the_hole, in that order.
// Only count locations for undefined and the hole, and fill them afterwards.
- WriteBarrierMode write_barrier = elements->GetWriteBarrierMode();
+ WriteBarrierMode write_barrier = elements->GetWriteBarrierMode(no_alloc);
unsigned int undefs = limit;
unsigned int holes = limit;
// Assume most arrays contain no holes and undefined values, so minimize the
@@ -7629,7 +7635,7 @@ Object* Dictionary<Shape, Key>::GenerateNewEnumerationIndices() {
if (obj->IsFailure()) return obj;
FixedArray* iteration_order = FixedArray::cast(obj);
for (int i = 0; i < length; i++) {
- iteration_order->set(i, Smi::FromInt(i), SKIP_WRITE_BARRIER);
+ iteration_order->set(i, Smi::FromInt(i));
}
// Allocate array with enumeration order.
@@ -7642,9 +7648,7 @@ Object* Dictionary<Shape, Key>::GenerateNewEnumerationIndices() {
int pos = 0;
for (int i = 0; i < capacity; i++) {
if (Dictionary<Shape, Key>::IsKey(Dictionary<Shape, Key>::KeyAt(i))) {
- enumeration_order->set(pos++,
- Smi::FromInt(DetailsAt(i).index()),
- SKIP_WRITE_BARRIER);
+ enumeration_order->set(pos++, Smi::FromInt(DetailsAt(i).index()));
}
}
@@ -7655,9 +7659,7 @@ Object* Dictionary<Shape, Key>::GenerateNewEnumerationIndices() {
for (int i = 0; i < length; i++) {
int index = Smi::cast(iteration_order->get(i))->value();
int enum_index = PropertyDetails::kInitialIndex + i;
- enumeration_order->set(index,
- Smi::FromInt(enum_index),
- SKIP_WRITE_BARRIER);
+ enumeration_order->set(index, Smi::FromInt(enum_index));
}
// Update the dictionary with new indices.
@@ -7805,8 +7807,7 @@ void NumberDictionary::UpdateMaxNumberKey(uint32_t key) {
Object* max_index_object = get(kMaxNumberKeyIndex);
if (!max_index_object->IsSmi() || max_number_key() < key) {
FixedArray::set(kMaxNumberKeyIndex,
- Smi::FromInt(key << kRequiresSlowElementsTagSize),
- SKIP_WRITE_BARRIER);
+ Smi::FromInt(key << kRequiresSlowElementsTagSize));
}
}
@@ -7897,9 +7898,7 @@ void StringDictionary::CopyEnumKeysTo(FixedArray* storage,
PropertyDetails details = DetailsAt(i);
if (details.IsDeleted() || details.IsDontEnum()) continue;
storage->set(index, k);
- sort_array->set(index,
- Smi::FromInt(details.index()),
- SKIP_WRITE_BARRIER);
+ sort_array->set(index, Smi::FromInt(details.index()));
index++;
}
}
diff --git a/src/objects.h b/src/objects.h
index 0b22b0e6..f6411965 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1023,8 +1023,12 @@ class HeapObject: public Object {
// Casting.
static inline HeapObject* cast(Object* obj);
- // Return the write barrier mode for this.
- inline WriteBarrierMode GetWriteBarrierMode();
+ // Return the write barrier mode for this. Callers must present a
+ // reference to an AssertNoAllocation object as proof that they will
+ // not allocate (and thus invalidate the returned write barrier
+ // mode) while the mode is in use.
+ inline WriteBarrierMode GetWriteBarrierMode(const AssertNoAllocation&);
// Dispatched behavior.
void HeapObjectShortPrint(StringStream* accumulator);
@@ -1669,7 +1673,8 @@ class FixedArray: public Array {
void SortPairs(FixedArray* numbers, uint32_t len);
protected:
- // Set operation on FixedArray without using write barriers.
+ // Set operation on FixedArray without using write barriers. Can
+ // only be used for storing old space objects or smis.
static inline void fast_set(FixedArray* array, int index, Object* value);
private:
@@ -4474,6 +4479,10 @@ class JSArray: public JSObject {
// [length]: The length property.
DECL_ACCESSORS(length, Object)
+ // Overload the length setter to skip the write barrier when the length
+ // is set to a smi. This matches the set function on FixedArray.
+ inline void set_length(Smi* length);
+
Object* JSArrayUpdateLengthFromIndex(uint32_t index, Object* value);
// Initialize the array with the given capacity. The function may
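
The Smi* overload means overload resolution, not the caller, now decides when the barrier can be skipped; the many SKIP_WRITE_BARRIER call sites removed in objects.cc collapse to the short form. A sketch (length_object is illustrative):

    array->set_length(Smi::FromInt(0));  // Smi* overload: barrier skipped.
    array->set_length(length_object);    // Object* overload: barrier kept.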
diff --git a/src/parser.cc b/src/parser.cc
index 3ae85774..b06d86f5 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -91,7 +91,7 @@ class PositionStack {
class Parser {
public:
Parser(Handle<Script> script, bool allow_natives_syntax,
- v8::Extension* extension, bool is_pre_parsing,
+ v8::Extension* extension, ParserMode is_pre_parsing,
ParserFactory* factory, ParserLog* log, ScriptDataImpl* pre_data);
virtual ~Parser() { }
@@ -112,6 +112,8 @@ class Parser {
FunctionLiteral* ParseLazy(Handle<String> source,
Handle<String> name,
int start_position, bool is_expression);
+ FunctionLiteral* ParseJson(Handle<String> source,
+ unibrow::CharacterStream* stream);
// The minimum number of contiguous assignments that will
// be treated as an initialization block. Benchmarks show that
@@ -202,7 +204,21 @@ class Parser {
Expression* ParseObjectLiteral(bool* ok);
Expression* ParseRegExpLiteral(bool seen_equal, bool* ok);
- // Decide if a property should be the object boilerplate.
+ // Populate the constant properties fixed array for a materialized object
+ // literal.
+ void BuildObjectLiteralConstantProperties(
+ ZoneList<ObjectLiteral::Property*>* properties,
+ Handle<FixedArray> constants,
+ bool* is_simple,
+ int* depth);
+
+ // Populate the literals fixed array for a materialized array literal.
+ void BuildArrayLiteralBoilerplateLiterals(ZoneList<Expression*>* properties,
+ Handle<FixedArray> constants,
+ bool* is_simple,
+ int* depth);
+
+ // Decide if a property should be in the object boilerplate.
bool IsBoilerplateProperty(ObjectLiteral::Property* property);
// If the expression is a literal, return the literal value;
// if the expression is a materialized literal and is simple return a
@@ -231,6 +247,7 @@ class Parser {
INLINE(Token::Value Next()) { return scanner_.Next(); }
INLINE(void Consume(Token::Value token));
void Expect(Token::Value token, bool* ok);
+ bool Check(Token::Value token);
void ExpectSemicolon(bool* ok);
// Get odd-ball literals.
@@ -277,6 +294,29 @@ class Parser {
Handle<String> type,
Vector< Handle<Object> > arguments);
+ // JSON is a subset of JavaScript, as specified in, e.g., the ECMAScript 5
+ // specification section 15.12.1 (and appendix A.8).
+  // The grammar is given in section 15.12.1.2 (and appendix A.8.2).
+
+ // Parse JSON input as a single JSON value.
+ Expression* ParseJson(bool* ok);
+
+ // Parse a single JSON value from input (grammar production JSONValue).
+ // A JSON value is either a (double-quoted) string literal, a number literal,
+ // one of "true", "false", or "null", or an object or array literal.
+ Expression* ParseJsonValue(bool* ok);
+ // Parse a JSON object literal (grammar production JSONObject).
+  // Parse a JSON object literal (grammar production JSONObject).
+  // An object literal is a curly-braced and comma-separated sequence
+  // (possibly empty) of key/value pairs, where the key is a JSON string
+  // literal, the value is a JSON value, and the two are separated by a colon.
+ // A JavaScript object also allows numbers and identifiers as keys.
+ Expression* ParseJsonObject(bool* ok);
+  // Parse a JSON array literal (grammar production JSONArray). An array
+  // literal is a square-bracketed and comma-separated sequence (possibly
+  // empty) of JSON values.
+ // A JavaScript array allows leaving out values from the sequence.
+ Expression* ParseJsonArray(bool* ok);
+
friend class Target;
friend class TargetScope;
friend class LexicalScope;
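
For reference, the productions that ParseJsonValue, ParseJsonObject, and ParseJsonArray implement, paraphrased from ES5 section 15.12.1.2:

    JSONValue  : JSONString | JSONNumber | 'true' | 'false' | 'null'
               | JSONObject | JSONArray
    JSONObject : '{' ( JSONString ':' JSONValue
                     ( ',' JSONString ':' JSONValue )* )? '}'
    JSONArray  : '[' ( JSONValue ( ',' JSONValue )* )? ']'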
@@ -983,7 +1023,7 @@ class AstBuildingParser : public Parser {
public:
AstBuildingParser(Handle<Script> script, bool allow_natives_syntax,
v8::Extension* extension, ScriptDataImpl* pre_data)
- : Parser(script, allow_natives_syntax, extension, false,
+ : Parser(script, allow_natives_syntax, extension, PARSE,
factory(), log(), pre_data) { }
virtual void ReportMessageAt(Scanner::Location loc, const char* message,
Vector<const char*> args);
@@ -1002,9 +1042,9 @@ class PreParser : public Parser {
public:
PreParser(Handle<Script> script, bool allow_natives_syntax,
v8::Extension* extension)
- : Parser(script, allow_natives_syntax, extension, true,
- factory(), recorder(), NULL)
- , factory_(true) { }
+ : Parser(script, allow_natives_syntax, extension, PREPARSE,
+ factory(), recorder(), NULL),
+ factory_(true) { }
virtual void ReportMessageAt(Scanner::Location loc, const char* message,
Vector<const char*> args);
virtual VariableProxy* Declare(Handle<String> name, Variable::Mode mode,
@@ -1147,7 +1187,7 @@ class LexicalScope BASE_EMBEDDED {
Parser::Parser(Handle<Script> script,
bool allow_natives_syntax,
v8::Extension* extension,
- bool is_pre_parsing,
+ ParserMode is_pre_parsing,
ParserFactory* factory,
ParserLog* log,
ScriptDataImpl* pre_data)
@@ -1161,7 +1201,7 @@ Parser::Parser(Handle<Script> script,
extension_(extension),
factory_(factory),
log_(log),
- is_pre_parsing_(is_pre_parsing),
+ is_pre_parsing_(is_pre_parsing == PREPARSE),
pre_data_(pre_data) {
}
@@ -1172,7 +1212,7 @@ bool Parser::PreParseProgram(Handle<String> source,
AssertNoZoneAllocation assert_no_zone_allocation;
AssertNoAllocation assert_no_allocation;
NoHandleAllocation no_handle_allocation;
- scanner_.Init(source, stream, 0);
+ scanner_.Init(source, stream, 0, JAVASCRIPT);
ASSERT(target_stack_ == NULL);
mode_ = PARSE_EAGERLY;
DummyScope top_scope;
@@ -1195,7 +1235,7 @@ FunctionLiteral* Parser::ParseProgram(Handle<String> source,
// Initialize parser state.
source->TryFlattenIfNotFlat();
- scanner_.Init(source, stream, 0);
+ scanner_.Init(source, stream, 0, JAVASCRIPT);
ASSERT(target_stack_ == NULL);
// Compute the parsing mode.
@@ -1254,7 +1294,7 @@ FunctionLiteral* Parser::ParseLazy(Handle<String> source,
SafeStringInputBuffer buffer(source.location());
// Initialize parser state.
- scanner_.Init(source, &buffer, start_position);
+ scanner_.Init(source, &buffer, start_position, JAVASCRIPT);
ASSERT(target_stack_ == NULL);
mode_ = PARSE_EAGERLY;
@@ -1290,6 +1330,55 @@ FunctionLiteral* Parser::ParseLazy(Handle<String> source,
return result;
}
+FunctionLiteral* Parser::ParseJson(Handle<String> source,
+ unibrow::CharacterStream* stream) {
+ CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
+
+ HistogramTimerScope timer(&Counters::parse);
+ Counters::total_parse_size.Increment(source->length());
+
+ // Initialize parser state.
+ source->TryFlattenIfNotFlat();
+ scanner_.Init(source, stream, 0, JSON);
+ ASSERT(target_stack_ == NULL);
+
+ FunctionLiteral* result = NULL;
+ Handle<String> no_name = factory()->EmptySymbol();
+
+ {
+ Scope* scope = factory()->NewScope(top_scope_, Scope::GLOBAL_SCOPE, false);
+ LexicalScope lexical_scope(this, scope);
+ TemporaryScope temp_scope(this);
+ bool ok = true;
+ Expression* expression = ParseJson(&ok);
+ if (ok) {
+ ZoneListWrapper<Statement> statement = factory()->NewList<Statement>(1);
+ statement.Add(new ExpressionStatement(expression));
+ result = NEW(FunctionLiteral(
+ no_name,
+ top_scope_,
+ statement.elements(),
+ temp_scope.materialized_literal_count(),
+ temp_scope.expected_property_count(),
+ temp_scope.only_simple_this_property_assignments(),
+ temp_scope.this_property_assignments(),
+ 0,
+ 0,
+ source->length(),
+ false));
+ } else if (scanner().stack_overflow()) {
+ Top::StackOverflow();
+ }
+ }
+
+ // Make sure the target stack is empty.
+ ASSERT(target_stack_ == NULL);
+
+ // If there was a syntax error we have to get rid of the AST
+ // and it is not safe to do so before the scope has been deleted.
+ if (result == NULL) zone_scope.DeleteOnExit();
+ return result;
+}
void Parser::ReportMessage(const char* type, Vector<const char*> args) {
Scanner::Location source_location = scanner_.location();
@@ -3122,7 +3211,7 @@ DebuggerStatement* Parser::ParseDebuggerStatement(bool* ok) {
void Parser::ReportUnexpectedToken(Token::Value token) {
// We don't report stack overflows here, to avoid increasing the
// stack depth even further. Instead we report it after parsing is
- // over, in ParseProgram.
+ // over, in ParseProgram/ParseJson.
if (token == Token::ILLEGAL && scanner().stack_overflow())
return;
// Four of the tokens are treated specially
@@ -3262,6 +3351,33 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
}
+void Parser::BuildArrayLiteralBoilerplateLiterals(ZoneList<Expression*>* values,
+ Handle<FixedArray> literals,
+ bool* is_simple,
+ int* depth) {
+ // Fill in the literals.
+ // Accumulate output values in local variables.
+ bool is_simple_acc = true;
+ int depth_acc = 1;
+ for (int i = 0; i < values->length(); i++) {
+ MaterializedLiteral* m_literal = values->at(i)->AsMaterializedLiteral();
+ if (m_literal != NULL && m_literal->depth() >= depth_acc) {
+ depth_acc = m_literal->depth() + 1;
+ }
+ Handle<Object> boilerplate_value = GetBoilerplateValue(values->at(i));
+ if (boilerplate_value->IsUndefined()) {
+ literals->set_the_hole(i);
+ is_simple_acc = false;
+ } else {
+ literals->set(i, *boilerplate_value);
+ }
+ }
+
+ *is_simple = is_simple_acc;
+ *depth = depth_acc;
+}
+
+
Expression* Parser::ParseArrayLiteral(bool* ok) {
// ArrayLiteral ::
// '[' Expression? (',' Expression?)* ']'
@@ -3364,6 +3480,43 @@ Handle<Object> Parser::GetBoilerplateValue(Expression* expression) {
}
+void Parser::BuildObjectLiteralConstantProperties(
+ ZoneList<ObjectLiteral::Property*>* properties,
+ Handle<FixedArray> constant_properties,
+ bool* is_simple,
+ int* depth) {
+ int position = 0;
+  // Accumulate the output values in local variables and store them at the end.
+ bool is_simple_acc = true;
+ int depth_acc = 1;
+ for (int i = 0; i < properties->length(); i++) {
+ ObjectLiteral::Property* property = properties->at(i);
+ if (!IsBoilerplateProperty(property)) {
+ is_simple_acc = false;
+ continue;
+ }
+ MaterializedLiteral* m_literal = property->value()->AsMaterializedLiteral();
+ if (m_literal != NULL && m_literal->depth() >= depth_acc) {
+ depth_acc = m_literal->depth() + 1;
+ }
+
+ // Add CONSTANT and COMPUTED properties to boilerplate. Use undefined
+ // value for COMPUTED properties, the real value is filled in at
+ // runtime. The enumeration order is maintained.
+ Handle<Object> key = property->key()->handle();
+ Handle<Object> value = GetBoilerplateValue(property->value());
+ is_simple_acc = is_simple_acc && !value->IsUndefined();
+
+ // Add name, value pair to the fixed array.
+ constant_properties->set(position++, *key);
+ constant_properties->set(position++, *value);
+ }
+
+ *is_simple = is_simple_acc;
+ *depth = depth_acc;
+}
+
+
Expression* Parser::ParseObjectLiteral(bool* ok) {
// ObjectLiteral ::
// '{' (
@@ -3454,32 +3607,13 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
Handle<FixedArray> constant_properties =
Factory::NewFixedArray(number_of_boilerplate_properties * 2, TENURED);
- int position = 0;
+
bool is_simple = true;
int depth = 1;
- for (int i = 0; i < properties.length(); i++) {
- ObjectLiteral::Property* property = properties.at(i);
- if (!IsBoilerplateProperty(property)) {
- is_simple = false;
- continue;
- }
- MaterializedLiteral* m_literal = property->value()->AsMaterializedLiteral();
- if (m_literal != NULL && m_literal->depth() + 1 > depth) {
- depth = m_literal->depth() + 1;
- }
-
- // Add CONSTANT and COMPUTED properties to boilerplate. Use undefined
- // value for COMPUTED properties, the real value is filled in at
- // runtime. The enumeration order is maintained.
- Handle<Object> key = property->key()->handle();
- Handle<Object> value = GetBoilerplateValue(property->value());
- is_simple = is_simple && !value->IsUndefined();
-
- // Add name, value pair to the fixed array.
- constant_properties->set(position++, *key);
- constant_properties->set(position++, *value);
- }
-
+ BuildObjectLiteralConstantProperties(properties.elements(),
+ constant_properties,
+ &is_simple,
+ &depth);
return new ObjectLiteral(constant_properties,
properties.elements(),
literal_index,
@@ -3720,6 +3854,16 @@ void Parser::Expect(Token::Value token, bool* ok) {
}
+bool Parser::Check(Token::Value token) {
+ Token::Value next = peek();
+ if (next == token) {
+ Consume(next);
+ return true;
+ }
+ return false;
+}
+
+
void Parser::ExpectSemicolon(bool* ok) {
// Check for automatic semicolon insertion according to
// the rules given in ECMA-262, section 7.9, page 21.
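
Check is the consume-if-match counterpart to Expect, and it is what gives the JSON list loops in this patch their shape:

    do {
      Expression* exp = ParseJsonValue(CHECK_OK);
      values.Add(exp);
    } while (Check(Token::COMMA));  // Consumes the comma only when present.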
@@ -3886,6 +4030,145 @@ Expression* Parser::NewThrowError(Handle<String> constructor,
scanner().location().beg_pos);
}
+// ----------------------------------------------------------------------------
+// JSON
+
+Expression* Parser::ParseJson(bool* ok) {
+ Expression* result = ParseJsonValue(CHECK_OK);
+ Expect(Token::EOS, CHECK_OK);
+ return result;
+}
+
+
+// Parse any JSON value.
+Expression* Parser::ParseJsonValue(bool* ok) {
+ Token::Value token = peek();
+ switch (token) {
+ case Token::STRING: {
+ Consume(Token::STRING);
+ int literal_length = scanner_.literal_length();
+ const char* literal_string = scanner_.literal_string();
+ if (literal_length == 0) {
+ return NEW(Literal(Factory::empty_string()));
+ }
+ Vector<const char> literal(literal_string, literal_length);
+ return NEW(Literal(Factory::NewStringFromUtf8(literal, TENURED)));
+ }
+ case Token::NUMBER: {
+ Consume(Token::NUMBER);
+ ASSERT(scanner_.literal_length() > 0);
+ double value = StringToDouble(scanner_.literal_string(),
+ NO_FLAGS, // Hex, octal or trailing junk.
+ OS::nan_value());
+ return NewNumberLiteral(value);
+ }
+ case Token::FALSE_LITERAL:
+ Consume(Token::FALSE_LITERAL);
+ return NEW(Literal(Factory::false_value()));
+ case Token::TRUE_LITERAL:
+ Consume(Token::TRUE_LITERAL);
+ return NEW(Literal(Factory::true_value()));
+ case Token::NULL_LITERAL:
+ Consume(Token::NULL_LITERAL);
+ return NEW(Literal(Factory::null_value()));
+ case Token::LBRACE: {
+ Expression* result = ParseJsonObject(CHECK_OK);
+ return result;
+ }
+ case Token::LBRACK: {
+ Expression* result = ParseJsonArray(CHECK_OK);
+ return result;
+ }
+ default:
+ *ok = false;
+ ReportUnexpectedToken(token);
+ return NULL;
+ }
+}
+
+
+// Parse a JSON object. Scanner must be right after '{' token.
+Expression* Parser::ParseJsonObject(bool* ok) {
+ Consume(Token::LBRACE);
+ ZoneListWrapper<ObjectLiteral::Property> properties =
+ factory()->NewList<ObjectLiteral::Property>(4);
+ int boilerplate_properties = 0;
+ if (peek() != Token::RBRACE) {
+ do {
+ Expect(Token::STRING, CHECK_OK);
+ Handle<String> key = factory()->LookupSymbol(scanner_.literal_string(),
+ scanner_.literal_length());
+ Expect(Token::COLON, CHECK_OK);
+ Expression* value = ParseJsonValue(CHECK_OK);
+ Literal* key_literal;
+ uint32_t index;
+ if (key->AsArrayIndex(&index)) {
+ key_literal = NewNumberLiteral(index);
+ } else {
+ key_literal = NEW(Literal(key));
+ }
+ ObjectLiteral::Property* property =
+ NEW(ObjectLiteral::Property(key_literal, value));
+ properties.Add(property);
+
+ if (IsBoilerplateProperty(property)) {
+ boilerplate_properties++;
+ }
+ } while (Check(Token::COMMA));
+ }
+ Expect(Token::RBRACE, CHECK_OK);
+
+ int literal_index = temp_scope_->NextMaterializedLiteralIndex();
+ if (is_pre_parsing_) return NULL;
+
+ Handle<FixedArray> constant_properties =
+ Factory::NewFixedArray(boilerplate_properties * 2, TENURED);
+ bool is_simple = true;
+ int depth = 1;
+ BuildObjectLiteralConstantProperties(properties.elements(),
+ constant_properties,
+ &is_simple,
+ &depth);
+ return new ObjectLiteral(constant_properties,
+ properties.elements(),
+ literal_index,
+ is_simple,
+ depth);
+}
+
+
+// Parse a JSON array. Scanner must be right after '[' token.
+Expression* Parser::ParseJsonArray(bool* ok) {
+ Consume(Token::LBRACK);
+
+ ZoneListWrapper<Expression> values = factory()->NewList<Expression>(4);
+ if (peek() != Token::RBRACK) {
+ do {
+ Expression* exp = ParseJsonValue(CHECK_OK);
+ values.Add(exp);
+ } while (Check(Token::COMMA));
+ }
+ Expect(Token::RBRACK, CHECK_OK);
+
+ // Update the scope information before the pre-parsing bailout.
+ int literal_index = temp_scope_->NextMaterializedLiteralIndex();
+
+ if (is_pre_parsing_) return NULL;
+
+ // Allocate a fixed array with all the literals.
+ Handle<FixedArray> literals =
+ Factory::NewFixedArray(values.length(), TENURED);
+
+ bool is_simple;
+ int depth;
+ BuildArrayLiteralBoilerplateLiterals(values.elements(),
+ literals,
+ &is_simple,
+ &depth);
+ return NEW(ArrayLiteral(literals, values.elements(),
+ literal_index, is_simple, depth));
+}
+
// ----------------------------------------------------------------------------
// Regular expressions
@@ -4761,7 +5044,8 @@ bool ParseRegExp(FlatStringReader* input,
FunctionLiteral* MakeAST(bool compile_in_global_context,
Handle<Script> script,
v8::Extension* extension,
- ScriptDataImpl* pre_data) {
+ ScriptDataImpl* pre_data,
+ bool is_json) {
bool allow_natives_syntax =
always_allow_natives_syntax ||
FLAG_allow_natives_syntax ||
@@ -4773,15 +5057,21 @@ FunctionLiteral* MakeAST(bool compile_in_global_context,
Vector<const char*> args = pre_data->BuildArgs();
parser.ReportMessageAt(loc, message, args);
DeleteArray(message);
- for (int i = 0; i < args.length(); i++)
+ for (int i = 0; i < args.length(); i++) {
DeleteArray(args[i]);
+ }
DeleteArray(args.start());
return NULL;
}
Handle<String> source = Handle<String>(String::cast(script->source()));
SafeStringInputBuffer input(source.location());
- FunctionLiteral* result = parser.ParseProgram(source,
- &input, compile_in_global_context);
+ FunctionLiteral* result;
+ if (is_json) {
+ ASSERT(compile_in_global_context);
+ result = parser.ParseJson(source, &input);
+ } else {
+ result = parser.ParseProgram(source, &input, compile_in_global_context);
+ }
return result;
}
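
The is_json flag is how the %CompileString change in json-delay.js earlier in this patch reaches the new parser entry point; a caller-side sketch (the runtime plumbing between the two is assumed, not shown in this diff):

    // is_json == true routes to Parser::ParseJson instead of ParseProgram.
    FunctionLiteral* lit = MakeAST(true,    // compile_in_global_context
                                   script, extension, pre_data,
                                   true);   // is_json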
diff --git a/src/parser.h b/src/parser.h
index a67284c2..0f808d72 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -133,7 +133,8 @@ class ScriptDataImpl : public ScriptData {
FunctionLiteral* MakeAST(bool compile_in_global_context,
Handle<Script> script,
v8::Extension* extension,
- ScriptDataImpl* pre_data);
+ ScriptDataImpl* pre_data,
+ bool is_json = false);
ScriptDataImpl* PreParse(Handle<String> source,
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index 9ef72702..ca570a64 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -594,12 +594,22 @@ class IndentedScope BASE_EMBEDDED {
ast_printer_->inc_indent();
}
- explicit IndentedScope(const char* txt, StaticType* type = NULL) {
+ explicit IndentedScope(const char* txt, AstNode* node = NULL) {
ast_printer_->PrintIndented(txt);
- if ((type != NULL) && (type->IsKnown())) {
- ast_printer_->Print(" (type = ");
- ast_printer_->Print(StaticType::Type2String(type));
- ast_printer_->Print(")");
+ if (node != NULL && node->AsExpression() != NULL) {
+ Expression* expr = node->AsExpression();
+ bool printed_first = false;
+ if ((expr->type() != NULL) && (expr->type()->IsKnown())) {
+ ast_printer_->Print(" (type = ");
+ ast_printer_->Print(StaticType::Type2String(expr->type()));
+ printed_first = true;
+ }
+ if (expr->num() != Expression::kNoLabel) {
+ ast_printer_->Print(printed_first ? ", num = " : " (num = ");
+ ast_printer_->Print("%d", expr->num());
+ printed_first = true;
+ }
+ if (printed_first) ast_printer_->Print(")");
}
ast_printer_->Print("\n");
ast_printer_->inc_indent();
@@ -657,19 +667,22 @@ void AstPrinter::PrintLiteralIndented(const char* info,
void AstPrinter::PrintLiteralWithModeIndented(const char* info,
Variable* var,
Handle<Object> value,
- StaticType* type) {
+ StaticType* type,
+ int num) {
if (var == NULL) {
PrintLiteralIndented(info, value, true);
} else {
EmbeddedVector<char, 256> buf;
+ int pos = OS::SNPrintF(buf, "%s (mode = %s", info,
+ Variable::Mode2String(var->mode()));
if (type->IsKnown()) {
- OS::SNPrintF(buf, "%s (mode = %s, type = %s)", info,
- Variable::Mode2String(var->mode()),
- StaticType::Type2String(type));
- } else {
- OS::SNPrintF(buf, "%s (mode = %s)", info,
- Variable::Mode2String(var->mode()));
+ pos += OS::SNPrintF(buf + pos, ", type = %s",
+ StaticType::Type2String(type));
+ }
+ if (num != Expression::kNoLabel) {
+ pos += OS::SNPrintF(buf + pos, ", num = %d", num);
}
+ OS::SNPrintF(buf + pos, ")");
PrintLiteralIndented(buf.start(), value, true);
}
}
@@ -692,7 +705,7 @@ void AstPrinter::PrintLabelsIndented(const char* info, ZoneStringList* labels) {
void AstPrinter::PrintIndentedVisit(const char* s, AstNode* node) {
- IndentedScope indent(s);
+ IndentedScope indent(s, node);
Visit(node);
}
@@ -726,7 +739,8 @@ void AstPrinter::PrintParameters(Scope* scope) {
for (int i = 0; i < scope->num_parameters(); i++) {
PrintLiteralWithModeIndented("VAR", scope->parameter(i),
scope->parameter(i)->name(),
- scope->parameter(i)->type());
+ scope->parameter(i)->type(),
+ Expression::kNoLabel);
}
}
}
@@ -771,7 +785,8 @@ void AstPrinter::VisitDeclaration(Declaration* node) {
PrintLiteralWithModeIndented(Variable::Mode2String(node->mode()),
node->proxy()->AsVariable(),
node->proxy()->name(),
- node->proxy()->AsVariable()->type());
+ node->proxy()->AsVariable()->type(),
+ Expression::kNoLabel);
} else {
// function declarations
PrintIndented("FUNCTION ");
@@ -1007,7 +1022,7 @@ void AstPrinter::VisitSlot(Slot* node) {
void AstPrinter::VisitVariableProxy(VariableProxy* node) {
PrintLiteralWithModeIndented("VAR PROXY", node->AsVariable(), node->name(),
- node->type());
+ node->type(), node->num());
Variable* var = node->var();
if (var != NULL && var->rewrite() != NULL) {
IndentedScope indent;
@@ -1017,7 +1032,7 @@ void AstPrinter::VisitVariableProxy(VariableProxy* node) {
void AstPrinter::VisitAssignment(Assignment* node) {
- IndentedScope indent(Token::Name(node->op()), node->type());
+ IndentedScope indent(Token::Name(node->op()), node);
Visit(node->target());
Visit(node->value());
}
@@ -1029,7 +1044,7 @@ void AstPrinter::VisitThrow(Throw* node) {
void AstPrinter::VisitProperty(Property* node) {
- IndentedScope indent("PROPERTY");
+ IndentedScope indent("PROPERTY", node);
Visit(node->obj());
Literal* literal = node->key()->AsLiteral();
if (literal != NULL && literal->handle()->IsSymbol()) {
@@ -1082,14 +1097,14 @@ void AstPrinter::VisitCountOperation(CountOperation* node) {
void AstPrinter::VisitBinaryOperation(BinaryOperation* node) {
- IndentedScope indent(Token::Name(node->op()), node->type());
+ IndentedScope indent(Token::Name(node->op()), node);
Visit(node->left());
Visit(node->right());
}
void AstPrinter::VisitCompareOperation(CompareOperation* node) {
- IndentedScope indent(Token::Name(node->op()), node->type());
+ IndentedScope indent(Token::Name(node->op()), node);
Visit(node->left());
Visit(node->right());
}
diff --git a/src/prettyprinter.h b/src/prettyprinter.h
index dfff49a4..8e958c77 100644
--- a/src/prettyprinter.h
+++ b/src/prettyprinter.h
@@ -102,7 +102,8 @@ class AstPrinter: public PrettyPrinter {
void PrintLiteralWithModeIndented(const char* info,
Variable* var,
Handle<Object> value,
- StaticType* type);
+ StaticType* type,
+ int num);
void PrintLabelsIndented(const char* info, ZoneStringList* labels);
void inc_indent() { indent_++; }
diff --git a/src/runtime.cc b/src/runtime.cc
index 51c1ba23..515343b7 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -107,25 +107,23 @@ static Object* DeepCopyBoilerplate(JSObject* boilerplate) {
// Deep copy local properties.
if (copy->HasFastProperties()) {
FixedArray* properties = copy->properties();
- WriteBarrierMode mode = properties->GetWriteBarrierMode();
for (int i = 0; i < properties->length(); i++) {
Object* value = properties->get(i);
if (value->IsJSObject()) {
- JSObject* jsObject = JSObject::cast(value);
- result = DeepCopyBoilerplate(jsObject);
+ JSObject* js_object = JSObject::cast(value);
+ result = DeepCopyBoilerplate(js_object);
if (result->IsFailure()) return result;
- properties->set(i, result, mode);
+ properties->set(i, result);
}
}
- mode = copy->GetWriteBarrierMode();
int nof = copy->map()->inobject_properties();
for (int i = 0; i < nof; i++) {
Object* value = copy->InObjectPropertyAt(i);
if (value->IsJSObject()) {
- JSObject* jsObject = JSObject::cast(value);
- result = DeepCopyBoilerplate(jsObject);
+ JSObject* js_object = JSObject::cast(value);
+ result = DeepCopyBoilerplate(js_object);
if (result->IsFailure()) return result;
- copy->InObjectPropertyAtPut(i, result, mode);
+ copy->InObjectPropertyAtPut(i, result);
}
}
} else {
@@ -135,20 +133,20 @@ static Object* DeepCopyBoilerplate(JSObject* boilerplate) {
copy->GetLocalPropertyNames(names, 0);
for (int i = 0; i < names->length(); i++) {
ASSERT(names->get(i)->IsString());
- String* keyString = String::cast(names->get(i));
+ String* key_string = String::cast(names->get(i));
PropertyAttributes attributes =
- copy->GetLocalPropertyAttribute(keyString);
+ copy->GetLocalPropertyAttribute(key_string);
// Only deep copy fields from the object literal expression.
// In particular, don't try to copy the length attribute of
// an array.
if (attributes != NONE) continue;
- Object* value = copy->GetProperty(keyString, &attributes);
+ Object* value = copy->GetProperty(key_string, &attributes);
ASSERT(!value->IsFailure());
if (value->IsJSObject()) {
- JSObject* jsObject = JSObject::cast(value);
- result = DeepCopyBoilerplate(jsObject);
+ JSObject* js_object = JSObject::cast(value);
+ result = DeepCopyBoilerplate(js_object);
if (result->IsFailure()) return result;
- result = copy->SetProperty(keyString, result, NONE);
+ result = copy->SetProperty(key_string, result, NONE);
if (result->IsFailure()) return result;
}
}
@@ -160,14 +158,13 @@ static Object* DeepCopyBoilerplate(JSObject* boilerplate) {
switch (copy->GetElementsKind()) {
case JSObject::FAST_ELEMENTS: {
FixedArray* elements = FixedArray::cast(copy->elements());
- WriteBarrierMode mode = elements->GetWriteBarrierMode();
for (int i = 0; i < elements->length(); i++) {
Object* value = elements->get(i);
if (value->IsJSObject()) {
- JSObject* jsObject = JSObject::cast(value);
- result = DeepCopyBoilerplate(jsObject);
+ JSObject* js_object = JSObject::cast(value);
+ result = DeepCopyBoilerplate(js_object);
if (result->IsFailure()) return result;
- elements->set(i, result, mode);
+ elements->set(i, result);
}
}
break;
@@ -180,8 +177,8 @@ static Object* DeepCopyBoilerplate(JSObject* boilerplate) {
if (element_dictionary->IsKey(k)) {
Object* value = element_dictionary->ValueAt(i);
if (value->IsJSObject()) {
- JSObject* jsObject = JSObject::cast(value);
- result = DeepCopyBoilerplate(jsObject);
+ JSObject* js_object = JSObject::cast(value);
+ result = DeepCopyBoilerplate(js_object);
if (result->IsFailure()) return result;
element_dictionary->ValueAtPut(i, result);
}
@@ -1405,16 +1402,18 @@ static Object* Runtime_SetCode(Arguments args) {
if (!code->IsNull()) {
RUNTIME_ASSERT(code->IsJSFunction());
Handle<JSFunction> fun = Handle<JSFunction>::cast(code);
- SetExpectedNofProperties(target, fun->shared()->expected_nof_properties());
- if (!fun->is_compiled() && !CompileLazy(fun, KEEP_EXCEPTION)) {
+ Handle<SharedFunctionInfo> shared(fun->shared());
+ SetExpectedNofProperties(target, shared->expected_nof_properties());
+
+ if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
return Failure::Exception();
}
// Set the code, formal parameter count, and the length of the target
// function.
target->set_code(fun->code());
- target->shared()->set_length(fun->shared()->length());
+ target->shared()->set_length(shared->length());
target->shared()->set_formal_parameter_count(
- fun->shared()->formal_parameter_count());
+ shared->formal_parameter_count());
// Set the source code of the target function to undefined.
// SetCode is only used for built-in constructors like String,
// Array, and Object, and some web code
@@ -1437,6 +1436,8 @@ static Object* Runtime_SetCode(Arguments args) {
literals->set(JSFunction::kLiteralGlobalContextIndex,
context->global_context());
}
+ // It's okay to skip the write barrier here because the literals
+ // are guaranteed to be in old space.
target->set_literals(*literals, SKIP_WRITE_BARRIER);
}
@@ -3250,6 +3251,12 @@ static Object* Runtime_GetLocalPropertyNames(Arguments args) {
// Skip the global proxy as it has no properties and always delegates to the
// real global object.
if (obj->IsJSGlobalProxy()) {
+ // Only collect names if access is permitted.
+ if (obj->IsAccessCheckNeeded() &&
+ !Top::MayNamedAccess(*obj, Heap::undefined_value(), v8::ACCESS_KEYS)) {
+ Top::ReportFailedAccessCheck(*obj, v8::ACCESS_KEYS);
+ return *Factory::NewJSArray(0);
+ }
obj = Handle<JSObject>(JSObject::cast(obj->GetPrototype()));
}
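A behavioral note, as read from the added code: when the access check on the global proxy fails, the runtime reports the failure and returns an empty JSArray instead of the property names, so enumeration against a foreign context degrades to an empty result. The same guard is repeated below for each object on the prototype chain.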
@@ -3261,6 +3268,14 @@ static Object* Runtime_GetLocalPropertyNames(Arguments args) {
int total_property_count = 0;
Handle<JSObject> jsproto = obj;
for (int i = 0; i < length; i++) {
+ // Only collect names if access is permitted.
+ if (jsproto->IsAccessCheckNeeded() &&
+ !Top::MayNamedAccess(*jsproto,
+ Heap::undefined_value(),
+ v8::ACCESS_KEYS)) {
+ Top::ReportFailedAccessCheck(*jsproto, v8::ACCESS_KEYS);
+ return *Factory::NewJSArray(0);
+ }
int n;
n = jsproto->NumberOfLocalProperties(static_cast<PropertyAttributes>(NONE));
local_property_count[i] = n;
@@ -4661,7 +4676,9 @@ static Object* Runtime_Math_round(Arguments args) {
CONVERT_DOUBLE_CHECKED(x, args[0]);
if (signbit(x) && x >= -0.5) return Heap::minus_zero_value();
- return Heap::NumberFromDouble(floor(x + 0.5));
+ double integer = ceil(x);
+ if (integer - x > 0.5) { integer -= 1.0; }
+ return Heap::NumberFromDouble(integer);
}
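Why the change (a standalone C++ sketch, not V8 code): for the largest double below 0.5, the old floor(x + 0.5) mis-rounds because the addition itself already rounds up to exactly 1.0:

  #include <cmath>
  #include <cstdio>

  int main() {
    double x = 0.49999999999999994;            // largest double < 0.5
    std::printf("%g\n", std::floor(x + 0.5));  // prints 1: wrong for round()
    double integer = std::ceil(x);             // the patched approach
    if (integer - x > 0.5) integer -= 1.0;
    std::printf("%g\n", integer);              // prints 0: correct
    return 0;
  }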
@@ -4715,7 +4732,9 @@ static Object* Runtime_NewArguments(Arguments args) {
if (obj->IsFailure()) return obj;
FixedArray* array = FixedArray::cast(obj);
ASSERT(array->length() == length);
- WriteBarrierMode mode = array->GetWriteBarrierMode();
+
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
for (int i = 0; i < length; i++) {
array->set(i, frame->GetParameter(i), mode);
}
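A note on the new pattern (rationale inferred from the API shape, not stated in the patch): GetWriteBarrierMode now requires an AssertNoAllocation receipt, so a cached SKIP_WRITE_BARRIER decision cannot outlive a GC. The cached mode is only trustworthy while the scope guarantees no allocation can move or promote the array.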
@@ -4740,10 +4759,13 @@ static Object* Runtime_NewArgumentsFast(Arguments args) {
// Allocate the fixed array.
Object* obj = Heap::AllocateRawFixedArray(length);
if (obj->IsFailure()) return obj;
+
+ AssertNoAllocation no_gc;
reinterpret_cast<Array*>(obj)->set_map(Heap::fixed_array_map());
FixedArray* array = FixedArray::cast(obj);
array->set_length(length);
- WriteBarrierMode mode = array->GetWriteBarrierMode();
+
+ WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
for (int i = 0; i < length; i++) {
array->set(i, *--parameters, mode);
}
@@ -4826,12 +4848,8 @@ static Object* Runtime_NewObject(Arguments args) {
}
// The function should be compiled for the optimization hints to be available.
- if (!function->shared()->is_compiled()) {
- CompileLazyShared(Handle<SharedFunctionInfo>(function->shared()),
- CLEAR_EXCEPTION,
- 0);
- LOG(FunctionCreateEvent(*function));
- }
+ Handle<SharedFunctionInfo> shared(function->shared());
+ EnsureCompiled(shared, CLEAR_EXCEPTION);
bool first_allocation = !function->has_initial_map();
Handle<JSObject> result = Factory::NewJSObject(function);
@@ -4870,7 +4888,7 @@ static Object* Runtime_LazyCompile(Arguments args) {
// this means that things called through constructors are never known to
// be in loops. We compile them as if they are in loops here just in case.
ASSERT(!function->is_compiled());
- if (!CompileLazyInLoop(function, KEEP_EXCEPTION)) {
+ if (!CompileLazyInLoop(function, Handle<Object>::null(), KEEP_EXCEPTION)) {
return Failure::Exception();
}
@@ -6032,7 +6050,7 @@ static Object* Runtime_MoveArrayContents(Arguments args) {
to->SetContent(FixedArray::cast(from->elements()));
to->set_length(from->length());
from->SetContent(Heap::empty_fixed_array());
- from->set_length(0);
+ from->set_length(Smi::FromInt(0));
return to;
}
@@ -6075,9 +6093,7 @@ static Object* Runtime_GetArrayKeys(Arguments args) {
} else {
Handle<FixedArray> single_interval = Factory::NewFixedArray(2);
// -1 means start of array.
- single_interval->set(0,
- Smi::FromInt(-1),
- SKIP_WRITE_BARRIER);
+ single_interval->set(0, Smi::FromInt(-1));
uint32_t actual_length = static_cast<uint32_t>(array->elements()->length());
uint32_t min_length = actual_length < length ? actual_length : length;
Handle<Object> length_object =
@@ -7278,7 +7294,7 @@ Object* Runtime::FindSharedFunctionInfoInScript(Handle<Script> script,
if (!done) {
// If the candidate is not compiled compile it to reveal any inner
// functions which might contain the requested source position.
- CompileLazyShared(target, KEEP_EXCEPTION, 0);
+ CompileLazyShared(target, KEEP_EXCEPTION);
}
}
@@ -7450,7 +7466,9 @@ static Handle<Object> GetArgumentsObject(JavaScriptFrame* frame,
const int length = frame->GetProvidedParametersCount();
Handle<JSObject> arguments = Factory::NewArgumentsObject(function, length);
Handle<FixedArray> array = Factory::NewFixedArray(length);
- WriteBarrierMode mode = array->GetWriteBarrierMode();
+
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
for (int i = 0; i < length; i++) {
array->set(i, frame->GetParameter(i), mode);
}
@@ -7864,7 +7882,8 @@ static Object* Runtime_DebugDisassembleFunction(Arguments args) {
ASSERT(args.length() == 1);
// Get the function and make sure it is compiled.
CONVERT_ARG_CHECKED(JSFunction, func, 0);
- if (!func->is_compiled() && !CompileLazy(func, KEEP_EXCEPTION)) {
+ Handle<SharedFunctionInfo> shared(func->shared());
+ if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
return Failure::Exception();
}
func->code()->PrintLn();
@@ -7879,10 +7898,11 @@ static Object* Runtime_DebugDisassembleConstructor(Arguments args) {
ASSERT(args.length() == 1);
// Get the function and make sure it is compiled.
CONVERT_ARG_CHECKED(JSFunction, func, 0);
- if (!func->is_compiled() && !CompileLazy(func, KEEP_EXCEPTION)) {
+ Handle<SharedFunctionInfo> shared(func->shared());
+ if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
return Failure::Exception();
}
- func->shared()->construct_stub()->PrintLn();
+ shared->construct_stub()->PrintLn();
#endif // DEBUG
return Heap::undefined_value();
}
@@ -8032,7 +8052,7 @@ static Object* Runtime_CollectStackTrace(Arguments args) {
if (cursor + 2 < elements->length()) {
elements->set(cursor++, recv);
elements->set(cursor++, fun);
- elements->set(cursor++, offset, SKIP_WRITE_BARRIER);
+ elements->set(cursor++, offset);
} else {
HandleScope scope;
Handle<Object> recv_handle(recv);
@@ -8045,8 +8065,7 @@ static Object* Runtime_CollectStackTrace(Arguments args) {
iter.Advance();
}
- result->set_length(Smi::FromInt(cursor), SKIP_WRITE_BARRIER);
-
+ result->set_length(Smi::FromInt(cursor));
return *result;
}
@@ -8127,12 +8146,12 @@ static Object* Runtime_IS_VAR(Arguments args) {
// Implementation of Runtime
#define F(name, nargs, ressize) \
- { #name, "RuntimeStub_" #name, FUNCTION_ADDR(Runtime_##name), nargs, \
+ { #name, FUNCTION_ADDR(Runtime_##name), nargs, \
static_cast<int>(Runtime::k##name), ressize },
static Runtime::Function Runtime_functions[] = {
RUNTIME_FUNCTION_LIST(F)
- { NULL, NULL, NULL, 0, -1, 0 }
+ { NULL, NULL, 0, -1, 0 }
};
#undef F
diff --git a/src/runtime.h b/src/runtime.h
index efef7db8..b2b8609e 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -373,9 +373,6 @@ class Runtime : public AllStatic {
// The JS name of the function.
const char* name;
- // The name of the stub that calls the runtime function.
- const char* stub_name;
-
// The C++ (native) entry point.
byte* entry;
diff --git a/src/scanner.cc b/src/scanner.cc
index 0d3b789f..cf7e49f8 100644..100755
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -323,11 +323,14 @@ void KeywordMatcher::Step(uc32 input) {
// ----------------------------------------------------------------------------
// Scanner
-Scanner::Scanner(bool pre) : stack_overflow_(false), is_pre_parsing_(pre) { }
+Scanner::Scanner(ParserMode pre)
+ : stack_overflow_(false), is_pre_parsing_(pre == PREPARSE) { }
-void Scanner::Init(Handle<String> source, unibrow::CharacterStream* stream,
- int position) {
+void Scanner::Init(Handle<String> source,
+ unibrow::CharacterStream* stream,
+ int position,
+ ParserLanguage language) {
// Initialize the source buffer.
if (!source.is_null() && StringShape(*source).IsExternalTwoByte()) {
two_byte_string_buffer_.Initialize(
@@ -339,6 +342,7 @@ void Scanner::Init(Handle<String> source, unibrow::CharacterStream* stream,
}
position_ = position;
+ is_parsing_json_ = (language == JSON);
// Set c0_ (one character ahead)
ASSERT(kCharacterLookaheadBufferSize == 1);
@@ -416,7 +420,17 @@ static inline bool IsByteOrderMark(uc32 c) {
}
-bool Scanner::SkipWhiteSpace() {
+bool Scanner::SkipJsonWhiteSpace() {
+ int start_position = source_pos();
+ // JSON WhiteSpace is tab, carriage-return, newline and space.
+ while (c0_ == ' ' || c0_ == '\n' || c0_ == '\r' || c0_ == '\t') {
+ Advance();
+ }
+ return source_pos() != start_position;
+}
+
+
+bool Scanner::SkipJavaScriptWhiteSpace() {
int start_position = source_pos();
while (true) {
@@ -512,7 +526,194 @@ Token::Value Scanner::ScanHtmlComment() {
}
-void Scanner::Scan() {
+
+void Scanner::ScanJson() {
+ next_.literal_buffer = NULL;
+ Token::Value token;
+ has_line_terminator_before_next_ = false;
+ do {
+ // Remember the position of the next token
+ next_.location.beg_pos = source_pos();
+ switch (c0_) {
+ case '\t':
+ case '\r':
+ case '\n':
+ case ' ':
+ Advance();
+ token = Token::WHITESPACE;
+ break;
+ case '{':
+ Advance();
+ token = Token::LBRACE;
+ break;
+ case '}':
+ Advance();
+ token = Token::RBRACE;
+ break;
+ case '[':
+ Advance();
+ token = Token::LBRACK;
+ break;
+ case ']':
+ Advance();
+ token = Token::RBRACK;
+ break;
+ case ':':
+ Advance();
+ token = Token::COLON;
+ break;
+ case ',':
+ Advance();
+ token = Token::COMMA;
+ break;
+ case '"':
+ token = ScanJsonString();
+ break;
+ case '-':
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ token = ScanJsonNumber();
+ break;
+ case 't':
+ token = ScanJsonIdentifier("true", Token::TRUE_LITERAL);
+ break;
+ case 'f':
+ token = ScanJsonIdentifier("false", Token::FALSE_LITERAL);
+ break;
+ case 'n':
+ token = ScanJsonIdentifier("null", Token::NULL_LITERAL);
+ break;
+ default:
+ if (c0_ < 0) {
+ Advance();
+ token = Token::EOS;
+ } else {
+ Advance();
+ token = Select(Token::ILLEGAL);
+ }
+ }
+ } while (token == Token::WHITESPACE);
+
+ next_.location.end_pos = source_pos();
+ next_.token = token;
+}
+
+
+Token::Value Scanner::ScanJsonString() {
+ ASSERT_EQ('"', c0_);
+ Advance();
+ StartLiteral();
+ while (c0_ != '"' && c0_ > 0) {
+ // Check for control character (0x00-0x1f) or unterminated string (<0).
+ if (c0_ < 0x20) return Token::ILLEGAL;
+ if (c0_ != '\\') {
+ AddCharAdvance();
+ } else {
+ Advance();
+ switch (c0_) {
+ case '"':
+ case '\\':
+ case '/':
+ AddChar(c0_);
+ break;
+ case 'b':
+ AddChar('\x08');
+ break;
+ case 'f':
+ AddChar('\x0c');
+ break;
+ case 'n':
+ AddChar('\x0a');
+ break;
+ case 'r':
+ AddChar('\x0d');
+ break;
+ case 't':
+ AddChar('\x09');
+ break;
+ case 'u': {
+ uc32 value = 0;
+ for (int i = 0; i < 4; i++) {
+ Advance();
+ int digit = HexValue(c0_);
+ if (digit < 0) return Token::ILLEGAL;
+ value = value * 16 + digit;
+ }
+ AddChar(value);
+ break;
+ }
+ default:
+ return Token::ILLEGAL;
+ }
+ Advance();
+ }
+ }
+ if (c0_ != '"') {
+ return Token::ILLEGAL;
+ }
+ TerminateLiteral();
+ Advance();
+ return Token::STRING;
+}
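A reading note (not part of the patch): the \uXXXX case decodes exactly four hex digits inline (value = value * 16 + digit) and emits one two-byte code unit through AddChar, so characters outside the BMP arrive as two consecutive \u escapes forming a surrogate pair; no pairing logic is needed at this level.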
+
+
+Token::Value Scanner::ScanJsonNumber() {
+ StartLiteral();
+ if (c0_ == '-') AddCharAdvance();
+ if (c0_ == '0') {
+ AddCharAdvance();
+ // Prefix zero is only allowed if it's the only digit before
+ // a decimal point or exponent.
+ if ('0' <= c0_ && c0_ <= '9') return Token::ILLEGAL;
+ } else {
+ if (c0_ < '1' || c0_ > '9') return Token::ILLEGAL;
+ do {
+ AddCharAdvance();
+ } while (c0_ >= '0' && c0_ <= '9');
+ }
+ if (c0_ == '.') {
+ AddCharAdvance();
+ if (c0_ < '0' || c0_ > '9') return Token::ILLEGAL;
+ do {
+ AddCharAdvance();
+ } while (c0_ >= '0' && c0_ <= '9');
+ }
+ if ((c0_ | 0x20) == 'e') {
+ AddCharAdvance();
+ if (c0_ == '-' || c0_ == '+') AddCharAdvance();
+ if (c0_ < '0' || c0_ > '9') return Token::ILLEGAL;
+ do {
+ AddCharAdvance();
+ } while (c0_ >= '0' && c0_ <= '9');
+ }
+ TerminateLiteral();
+ return Token::NUMBER;
+}
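Examples, for illustration only: this accepts 0, -0.1, 12e+3 and 1.5E-10, but rejects 01 (prefixed zero), 1. (a digit is required after the decimal point), and +1 or .5 (ScanJson only dispatches '-' and digits here). Hex forms such as 0x10 are not numbers either: the 0 scans as a NUMBER and the following 'x' then becomes an ILLEGAL token.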
+
+
+Token::Value Scanner::ScanJsonIdentifier(const char* text,
+ Token::Value token) {
+ StartLiteral();
+ while (*text != '\0') {
+ if (c0_ != *text) return Token::ILLEGAL;
+ Advance();
+ text++;
+ }
+ if (kIsIdentifierPart.get(c0_)) return Token::ILLEGAL;
+ TerminateLiteral();
+ return token;
+}
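For illustration: the trailing kIsIdentifierPart check makes the match exact, so an input like nullx walks the whole literal null yet is still rejected because the following 'x' continues the identifier.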
+
+
+void Scanner::ScanJavaScript() {
next_.literal_buffer = NULL;
Token::Value token;
has_line_terminator_before_next_ = false;
diff --git a/src/scanner.h b/src/scanner.h
index 9d7b34e7..f0035c0e 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -252,18 +252,22 @@ class KeywordMatcher {
};
+enum ParserMode { PARSE, PREPARSE };
+enum ParserLanguage { JAVASCRIPT, JSON };
+
+
class Scanner {
public:
-
typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
// Construction
- explicit Scanner(bool is_pre_parsing);
+ explicit Scanner(ParserMode parse_mode);
// Initialize the Scanner to scan source:
void Init(Handle<String> source,
unibrow::CharacterStream* stream,
- int position);
+ int position,
+ ParserLanguage language);
// Returns the next token.
Token::Value Next();
@@ -377,6 +381,7 @@ class Scanner {
TokenDesc next_; // desc for next token (one token look-ahead)
bool has_line_terminator_before_next_;
bool is_pre_parsing_;
+ bool is_parsing_json_;
// Literal buffer support
void StartLiteral();
@@ -391,14 +396,57 @@ class Scanner {
c0_ = ch;
}
- bool SkipWhiteSpace();
+ bool SkipWhiteSpace() {
+ if (is_parsing_json_) {
+ return SkipJsonWhiteSpace();
+ } else {
+ return SkipJavaScriptWhiteSpace();
+ }
+ }
+ bool SkipJavaScriptWhiteSpace();
+ bool SkipJsonWhiteSpace();
Token::Value SkipSingleLineComment();
Token::Value SkipMultiLineComment();
inline Token::Value Select(Token::Value tok);
inline Token::Value Select(uc32 next, Token::Value then, Token::Value else_);
- void Scan();
+ inline void Scan() {
+ if (is_parsing_json_) {
+ ScanJson();
+ } else {
+ ScanJavaScript();
+ }
+ }
+
+ // Scans a single JavaScript token.
+ void ScanJavaScript();
+
+ // Scan a single JSON token. The JSON lexical grammar is specified in the
+ // ECMAScript 5 standard, section 15.12.1.1.
+ // Recognizes all of the single-character tokens directly, or calls a function
+ // to scan a number, string or identifier literal.
+ // The only allowed whitespace characters between tokens are tab,
+ // carriage-return, newline and space.
+ void ScanJson();
+
+ // A JSON number (production JSONNumber) is a subset of the valid JavaScript
+ // decimal number literals.
+ // It includes an optional minus sign, must have at least one
+ // digit before and after a decimal point, may not have prefixed zeros (unless
+ // the integer part is zero), and may include an exponent part (e.g., "e-10").
+ // Hexadecimal and octal numbers are not allowed.
+ Token::Value ScanJsonNumber();
+ // A JSON string (production JSONString) is a subset of the valid JavaScript string
+ // literals. The string must only be double-quoted (not single-quoted), and
+ // the only allowed backslash-escapes are ", /, \, b, f, n, r, t and
+ // four-digit hex escapes (uXXXX). Any other use of backslashes is invalid.
+ Token::Value ScanJsonString();
+ // Used to recognize one of the literals "true", "false", or "null". These
+ // are the only valid JSON identifiers (productions JSONBooleanLiteral,
+ // JSONNullLiteral).
+ Token::Value ScanJsonIdentifier(const char* text, Token::Value token);
+
void ScanDecimalDigits();
Token::Value ScanNumber(bool seen_period);
Token::Value ScanIdentifier();
diff --git a/src/serialize.cc b/src/serialize.cc
index 6b858939..bc934fb5 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -702,7 +702,6 @@ void Deserializer::ReadChunk(Object** current,
break;
case OBJECT_SERIALIZATION + CODE_SPACE:
ReadObject(CODE_SPACE, Heap::code_space(), current++);
- LOG(LogCodeObject(current[-1]));
break;
case OBJECT_SERIALIZATION + CELL_SPACE:
ReadObject(CELL_SPACE, Heap::cell_space(), current++);
@@ -712,7 +711,6 @@ void Deserializer::ReadChunk(Object** current,
break;
case OBJECT_SERIALIZATION + kLargeCode:
ReadObject(kLargeCode, Heap::lo_space(), current++);
- LOG(LogCodeObject(current[-1]));
break;
case OBJECT_SERIALIZATION + kLargeFixedArray:
ReadObject(kLargeFixedArray, Heap::lo_space(), current++);
@@ -721,7 +719,6 @@ void Deserializer::ReadChunk(Object** current,
Object* new_code_object = NULL;
ReadObject(kLargeCode, Heap::lo_space(), &new_code_object);
Code* code_object = reinterpret_cast<Code*>(new_code_object);
- LOG(LogCodeObject(code_object));
// Setting a branch/call to another code object from code.
Address location_of_branch_data = reinterpret_cast<Address>(current);
Assembler::set_target_at(location_of_branch_data,
@@ -734,7 +731,6 @@ void Deserializer::ReadChunk(Object** current,
Object* new_code_object = NULL;
ReadObject(CODE_SPACE, Heap::code_space(), &new_code_object);
Code* code_object = reinterpret_cast<Code*>(new_code_object);
- LOG(LogCodeObject(code_object));
// Setting a branch/call to another code object from code.
Address location_of_branch_data = reinterpret_cast<Address>(current);
Assembler::set_target_at(location_of_branch_data,
diff --git a/src/spaces.cc b/src/spaces.cc
index 28509003..2c495d85 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -357,12 +357,18 @@ void* MemoryAllocator::AllocateRawMemory(const size_t requested,
}
int alloced = static_cast<int>(*allocated);
size_ += alloced;
+#ifdef DEBUG
+ ZapBlock(reinterpret_cast<Address>(mem), alloced);
+#endif
Counters::memory_allocated.Increment(alloced);
return mem;
}
void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
+#ifdef DEBUG
+ ZapBlock(reinterpret_cast<Address>(mem), length);
+#endif
if (CodeRange::contains(static_cast<Address>(mem))) {
CodeRange::FreeRawMemory(mem, length);
} else {
@@ -446,6 +452,9 @@ Page* MemoryAllocator::CommitPages(Address start, size_t size,
if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
return Page::FromAddress(NULL);
}
+#ifdef DEBUG
+ ZapBlock(start, size);
+#endif
Counters::memory_allocated.Increment(static_cast<int>(size));
// So long as we correctly overestimated the number of chunks we should not
@@ -467,10 +476,14 @@ bool MemoryAllocator::CommitBlock(Address start,
ASSERT(InInitialChunk(start + size - 1));
if (!initial_chunk_->Commit(start, size, executable)) return false;
+#ifdef DEBUG
+ ZapBlock(start, size);
+#endif
Counters::memory_allocated.Increment(static_cast<int>(size));
return true;
}
+
bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
ASSERT(start != NULL);
ASSERT(size > 0);
@@ -483,6 +496,14 @@ bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
return true;
}
+
+void MemoryAllocator::ZapBlock(Address start, size_t size) {
+ for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
+ Memory::Address_at(start + s) = kZapValue;
+ }
+}
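A brief rationale (assumed, consistent with the comment added to spaces.h below): zapped memory holds a distinctive non-NULL pointer-sized pattern, so debug builds that touch freed or not-yet-committed blocks fail on a recognizable value instead of silently reading stale data.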
+
+
Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
PagedSpace* owner) {
ASSERT(IsValidChunk(chunk_id));
@@ -1599,9 +1620,7 @@ void OldSpaceFreeList::RebuildSizeList() {
int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
#ifdef DEBUG
- for (int i = 0; i < size_in_bytes; i += kPointerSize) {
- Memory::Address_at(start + i) = kZapValue;
- }
+ MemoryAllocator::ZapBlock(start, size_in_bytes);
#endif
FreeListNode* node = FreeListNode::FromAddress(start);
node->set_size(size_in_bytes);
@@ -1733,9 +1752,7 @@ void FixedSizeFreeList::Reset() {
void FixedSizeFreeList::Free(Address start) {
#ifdef DEBUG
- for (int i = 0; i < object_size_; i += kPointerSize) {
- Memory::Address_at(start + i) = kZapValue;
- }
+ MemoryAllocator::ZapBlock(start, object_size_);
#endif
// We only use the freelists with mark-sweep.
ASSERT(!MarkCompactCollector::IsCompacting());
diff --git a/src/spaces.h b/src/spaces.h
index 37117f95..850a7236 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -438,13 +438,16 @@ class MemoryAllocator : public AllStatic {
// and false otherwise.
static bool CommitBlock(Address start, size_t size, Executability executable);
-
// Uncommit a contiguous block of memory [start..(start+size)[.
// start is not NULL, the size is greater than zero, and the
// block is contained in the initial chunk. Returns true if it succeeded
// and false otherwise.
static bool UncommitBlock(Address start, size_t size);
+ // Zaps a contiguous block of memory [start..(start+size)[, thus
+ // filling it up with a recognizable non-NULL bit pattern.
+ static void ZapBlock(Address start, size_t size);
+
// Attempts to allocate the requested (non-zero) number of pages from the
// OS. Fewer pages might be allocated than requested. If it fails to
// allocate memory for the OS or cannot allocate a single page, this
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 9ab83beb..81f89fd4 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -1058,6 +1058,19 @@ Object* StubCompiler::GetCodeWithFlags(Code::Flags flags, String* name) {
return GetCodeWithFlags(flags, reinterpret_cast<char*>(NULL));
}
+void StubCompiler::LookupPostInterceptor(JSObject* holder,
+ String* name,
+ LookupResult* lookup) {
+ holder->LocalLookupRealNamedProperty(name, lookup);
+ if (lookup->IsNotFound()) {
+ Object* proto = holder->GetPrototype();
+ if (proto != Heap::null_value()) {
+ proto->Lookup(name, lookup);
+ }
+ }
+}
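A reading note (not part of the patch): when an interceptor declines to handle a load, this helper retries the lookup on the holder's real named properties and, failing that, continues the search from the holder's prototype, so properties an interceptor does not provide remain reachable.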
+
+
Object* LoadStubCompiler::GetCode(PropertyType type, String* name) {
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, type);
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 2418c1f7..d97fe773 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -435,6 +435,10 @@ class StubCompiler BASE_EMBEDDED {
String* name,
Label* miss);
+ static void LookupPostInterceptor(JSObject* holder,
+ String* name,
+ LookupResult* lookup);
+
private:
HandleScope scope_;
MacroAssembler masm_;
diff --git a/src/version.cc b/src/version.cc
index ab2eab36..aea1a3a3 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 1
-#define BUILD_NUMBER 0
+#define BUILD_NUMBER 1
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION true
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 5d17edf8..3f2aef0e 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -745,6 +745,9 @@ class Assembler : public Malloced {
arithmetic_op_32(0x23, dst, src);
}
+ void andb(Register dst, Immediate src) {
+ immediate_arithmetic_op_8(0x4, dst, src);
+ }
void decq(Register dst);
void decq(const Operand& dst);
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 1a0138f9..685c9286 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -33,6 +33,7 @@
#include "debug.h"
#include "ic-inl.h"
#include "parser.h"
+#include "regexp-macro-assembler.h"
#include "register-allocator-inl.h"
#include "scopes.h"
@@ -207,19 +208,23 @@ class FloatingPointHelper : public AllStatic {
// Code pattern for loading floating point values. Input values must
// be either smi or heap number objects (fp values). Requirements:
- // operand_1 on TOS+1 , operand_2 on TOS+2; Returns operands as
+ // operand_1 in rdx, operand_2 in rax; Returns operands as
// floating point numbers in XMM registers.
static void LoadFloatOperands(MacroAssembler* masm,
XMMRegister dst1,
XMMRegister dst2);
+ // Similar to LoadFloatOperands, assumes that the operands are smis.
+ static void LoadFloatOperandsFromSmis(MacroAssembler* masm,
+ XMMRegister dst1,
+ XMMRegister dst2);
+
// Code pattern for loading floating point values onto the fp stack.
// Input values must be either smi or heap number objects (fp values).
// Requirements:
// Register version: operands in registers lhs and rhs.
// Stack version: operands on TOS+1 and TOS+2.
// Returns operands as floating point numbers on fp stack.
- static void LoadFloatOperands(MacroAssembler* masm);
static void LoadFloatOperands(MacroAssembler* masm,
Register lhs,
Register rhs);
@@ -241,13 +246,13 @@ class FloatingPointHelper : public AllStatic {
// -----------------------------------------------------------------------------
// CodeGenerator implementation.
-CodeGenerator::CodeGenerator(int buffer_size,
+CodeGenerator::CodeGenerator(MacroAssembler* masm,
Handle<Script> script,
bool is_eval)
: is_eval_(is_eval),
script_(script),
deferred_(8),
- masm_(new MacroAssembler(NULL, buffer_size)),
+ masm_(masm),
scope_(NULL),
frame_(NULL),
allocator_(NULL),
@@ -273,7 +278,9 @@ void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
-void CodeGenerator::GenCode(FunctionLiteral* function) {
+void CodeGenerator::Generate(FunctionLiteral* function,
+ Mode mode,
+ CompilationInfo* info) {
// Record the position for debugging purposes.
CodeForFunctionPosition(function);
ZoneList<Statement*>* body = function->body();
@@ -289,7 +296,7 @@ void CodeGenerator::GenCode(FunctionLiteral* function) {
set_in_spilled_code(false);
// Adjust for function-level loop nesting.
- loop_nesting_ += function->loop_nesting();
+ loop_nesting_ += info->loop_nesting();
JumpTarget::set_compiling_deferred_code(false);
@@ -313,96 +320,106 @@ void CodeGenerator::GenCode(FunctionLiteral* function) {
// rdi: called JS function
// rsi: callee's context
allocator_->Initialize();
- frame_->Enter();
-
- // Allocate space for locals and initialize them.
- frame_->AllocateStackSlots();
- // Initialize the function return target after the locals are set
- // up, because it needs the expected frame height from the frame.
- function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
- function_return_is_shadowed_ = false;
- // Allocate the local context if needed.
- int heap_slots = scope_->num_heap_slots();
- if (heap_slots > 0) {
- Comment cmnt(masm_, "[ allocate local context");
- // Allocate local context.
- // Get outer context and create a new context based on it.
- frame_->PushFunction();
- Result context;
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- context = frame_->CallStub(&stub, 1);
- } else {
- context = frame_->CallRuntime(Runtime::kNewContext, 1);
- }
+ if (mode == PRIMARY) {
+ frame_->Enter();
+
+ // Allocate space for locals and initialize them.
+ frame_->AllocateStackSlots();
+
+ // Allocate the local context if needed.
+ int heap_slots = scope_->num_heap_slots();
+ if (heap_slots > 0) {
+ Comment cmnt(masm_, "[ allocate local context");
+ // Allocate local context.
+ // Get outer context and create a new context based on it.
+ frame_->PushFunction();
+ Result context;
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ context = frame_->CallStub(&stub, 1);
+ } else {
+ context = frame_->CallRuntime(Runtime::kNewContext, 1);
+ }
- // Update context local.
- frame_->SaveContextRegister();
+ // Update context local.
+ frame_->SaveContextRegister();
- // Verify that the runtime call result and rsi agree.
- if (FLAG_debug_code) {
- __ cmpq(context.reg(), rsi);
- __ Assert(equal, "Runtime::NewContext should end up in rsi");
+ // Verify that the runtime call result and rsi agree.
+ if (FLAG_debug_code) {
+ __ cmpq(context.reg(), rsi);
+ __ Assert(equal, "Runtime::NewContext should end up in rsi");
+ }
}
- }
- // TODO(1241774): Improve this code:
- // 1) only needed if we have a context
- // 2) no need to recompute context ptr every single time
- // 3) don't copy parameter operand code from SlotOperand!
- {
- Comment cmnt2(masm_, "[ copy context parameters into .context");
-
- // Note that iteration order is relevant here! If we have the same
- // parameter twice (e.g., function (x, y, x)), and that parameter
- // needs to be copied into the context, it must be the last argument
- // passed to the parameter that needs to be copied. This is a rare
- // case so we don't check for it, instead we rely on the copying
- // order: such a parameter is copied repeatedly into the same
- // context location and thus the last value is what is seen inside
- // the function.
- for (int i = 0; i < scope_->num_parameters(); i++) {
- Variable* par = scope_->parameter(i);
- Slot* slot = par->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- // The use of SlotOperand below is safe in unspilled code
- // because the slot is guaranteed to be a context slot.
- //
- // There are no parameters in the global scope.
- ASSERT(!scope_->is_global_scope());
- frame_->PushParameterAt(i);
- Result value = frame_->Pop();
- value.ToRegister();
-
- // SlotOperand loads context.reg() with the context object
- // stored to, used below in RecordWrite.
- Result context = allocator_->Allocate();
- ASSERT(context.is_valid());
- __ movq(SlotOperand(slot, context.reg()), value.reg());
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- frame_->Spill(context.reg());
- frame_->Spill(value.reg());
- __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
+ // TODO(1241774): Improve this code:
+ // 1) only needed if we have a context
+ // 2) no need to recompute context ptr every single time
+ // 3) don't copy parameter operand code from SlotOperand!
+ {
+ Comment cmnt2(masm_, "[ copy context parameters into .context");
+
+ // Note that iteration order is relevant here! If we have the same
+ // parameter twice (e.g., function (x, y, x)), and that parameter
+ // needs to be copied into the context, it must be the last argument
+ // passed to the parameter that needs to be copied. This is a rare
+ // case so we don't check for it, instead we rely on the copying
+ // order: such a parameter is copied repeatedly into the same
+ // context location and thus the last value is what is seen inside
+ // the function.
+ for (int i = 0; i < scope_->num_parameters(); i++) {
+ Variable* par = scope_->parameter(i);
+ Slot* slot = par->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ // The use of SlotOperand below is safe in unspilled code
+ // because the slot is guaranteed to be a context slot.
+ //
+ // There are no parameters in the global scope.
+ ASSERT(!scope_->is_global_scope());
+ frame_->PushParameterAt(i);
+ Result value = frame_->Pop();
+ value.ToRegister();
+
+ // SlotOperand loads context.reg() with the context object
+ // stored to, used below in RecordWrite.
+ Result context = allocator_->Allocate();
+ ASSERT(context.is_valid());
+ __ movq(SlotOperand(slot, context.reg()), value.reg());
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ frame_->Spill(context.reg());
+ frame_->Spill(value.reg());
+ __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
+ }
}
}
- }
- // Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in
- // the context.
- if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
- StoreArgumentsObject(true);
- }
+ // Store the arguments object. This must happen after context
+ // initialization because the arguments object may be stored in
+ // the context.
+ if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
+ StoreArgumentsObject(true);
+ }
- // Initialize ThisFunction reference if present.
- if (scope_->is_function_scope() && scope_->function() != NULL) {
- frame_->Push(Factory::the_hole_value());
- StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+ // Initialize ThisFunction reference if present.
+ if (scope_->is_function_scope() && scope_->function() != NULL) {
+ frame_->Push(Factory::the_hole_value());
+ StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+ }
+ } else {
+ // When used as the secondary compiler for splitting, rbp, rsi,
+ // and rdi have been pushed on the stack. Adjust the virtual
+ // frame to match this state.
+ frame_->Adjust(3);
+ allocator_->Unuse(rdi);
}
+ // Initialize the function return target after the locals are set
+ // up, because it needs the expected frame height from the frame.
+ function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
+ function_return_is_shadowed_ = false;
+
// Generate code to 'execute' declarations and initialize functions
// (source elements). In case of an illegal redeclaration we need to
// handle that instead of processing the declarations.
@@ -467,7 +484,7 @@ void CodeGenerator::GenCode(FunctionLiteral* function) {
}
// Adjust for function-level loop nesting.
- loop_nesting_ -= function->loop_nesting();
+ loop_nesting_ -= info->loop_nesting();
// Code generation state must be reset.
ASSERT(state_ == NULL);
@@ -2207,7 +2224,9 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Spill everything, even constants, to the frame.
frame_->SpillAll();
- frame_->CallRuntime(Runtime::kDebugBreak, 0);
+
+ DebuggerStatementStub ces;
+ frame_->CallStub(&ces, 0);
// Ignore the return value.
#endif
}
@@ -2432,8 +2451,6 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
Load(property->value());
frame_->Push(key);
Result ignored = frame_->CallStoreIC();
- // Drop the duplicated receiver and ignore the result.
- frame_->Drop();
break;
}
// Fall through
@@ -3955,7 +3972,8 @@ void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
Load(args->at(1));
Load(args->at(2));
Load(args->at(3));
- Result result = frame_->CallRuntime(Runtime::kRegExpExec, 4);
+ RegExpExecStub stub;
+ Result result = frame_->CallStub(&stub, 4);
frame_->Push(&result);
}
@@ -5117,11 +5135,8 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
Result answer;
if (left_is_non_smi || right_is_non_smi) {
- // Go straight to the slow case, with no smi code
- frame_->Push(&left);
- frame_->Push(&right);
GenericBinaryOpStub stub(op, overwrite_mode, NO_SMI_CODE_IN_STUB);
- answer = frame_->CallStub(&stub, 2);
+ answer = stub.GenerateCall(masm_, frame_, &left, &right);
} else if (right_is_smi) {
answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
type, false, overwrite_mode);
@@ -5137,10 +5152,8 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
} else {
- frame_->Push(&left);
- frame_->Push(&right);
GenericBinaryOpStub stub(op, overwrite_mode, NO_GENERIC_BINARY_FLAGS);
- answer = frame_->CallStub(&stub, 2);
+ answer = stub.GenerateCall(masm_, frame_, &left, &right);
}
}
frame_->Push(&answer);
@@ -6009,6 +6022,7 @@ void Reference::SetValue(InitState init_state) {
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
cgen_->StoreToSlot(slot, init_state);
+ cgen_->UnloadReference(this);
break;
}
@@ -6017,6 +6031,7 @@ void Reference::SetValue(InitState init_state) {
cgen_->frame()->Push(GetName());
Result answer = cgen_->frame()->CallStoreIC();
cgen_->frame()->Push(&answer);
+ set_unloaded();
break;
}
@@ -6118,13 +6133,13 @@ void Reference::SetValue(InitState init_state) {
masm->nop();
cgen_->frame()->Push(&answer);
}
+ cgen_->UnloadReference(this);
break;
}
default:
UNREACHABLE();
}
- cgen_->UnloadReference(this);
}
@@ -6563,6 +6578,363 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
}
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+ // Jump straight to the runtime system if native RegExp is not selected at
+ // compile time, or if entering generated regexp code has been turned off by
+ // the runtime switch.
+#ifndef V8_NATIVE_REGEXP
+ __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
+#else // V8_NATIVE_REGEXP
+ if (!FLAG_regexp_entry_native) {
+ __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
+ return;
+ }
+
+ // Stack frame on entry.
+ // rsp[0]: return address
+ // rsp[8]: last_match_info (expected JSArray)
+ // rsp[16]: previous index
+ // rsp[24]: subject string
+ // rsp[32]: JSRegExp object
+
+ static const int kLastMatchInfoOffset = 1 * kPointerSize;
+ static const int kPreviousIndexOffset = 2 * kPointerSize;
+ static const int kSubjectOffset = 3 * kPointerSize;
+ static const int kJSRegExpOffset = 4 * kPointerSize;
+
+ Label runtime;
+
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address();
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size();
+ __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
+ __ movq(kScratchRegister, Operand(kScratchRegister, 0));
+ __ testq(kScratchRegister, kScratchRegister);
+ __ j(zero, &runtime);
+
+
+ // Check that the first argument is a JSRegExp object.
+ __ movq(rax, Operand(rsp, kJSRegExpOffset));
+ __ JumpIfSmi(rax, &runtime);
+ __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
+ __ j(not_equal, &runtime);
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
+ if (FLAG_debug_code) {
+ Condition is_smi = masm->CheckSmi(rcx);
+ __ Check(NegateCondition(is_smi),
+ "Unexpected type for RegExp data, FixedArray expected");
+ __ CmpObjectType(rcx, FIXED_ARRAY_TYPE, kScratchRegister);
+ __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
+ }
+
+ // rcx: RegExp data (FixedArray)
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ movq(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
+ __ SmiCompare(rbx, Smi::FromInt(JSRegExp::IRREGEXP));
+ __ j(not_equal, &runtime);
+
+ // rcx: RegExp data (FixedArray)
+ // Check that the number of captures fit in the static offsets vector buffer.
+ __ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1);
+ __ addq(rdx, Immediate(2)); // rdx was number_of_captures * 2.
+ // Check that the static offsets vector buffer is large enough.
+ __ cmpq(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
+ __ j(above, &runtime);
+
+ // rcx: RegExp data (FixedArray)
+ // rdx: Number of capture registers
+ // Check that the second argument is a string.
+ __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ JumpIfSmi(rax, &runtime);
+ Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
+ __ j(NegateCondition(is_string), &runtime);
+ // Get the length of the string to rbx.
+ __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
+
+ // rbx: Length of subject string
+ // rcx: RegExp data (FixedArray)
+ // rdx: Number of capture registers
+ // Check that the third argument is a non-negative smi no greater than the
+ // string length. A negative value will appear greater (unsigned comparison).
+ __ movq(rax, Operand(rsp, kPreviousIndexOffset));
+ __ SmiToInteger32(rax, rax);
+ __ cmpl(rax, rbx);
+ __ j(above, &runtime);
+
+ // rcx: RegExp data (FixedArray)
+ // rdx: Number of capture registers
+ // Check that the fourth object is a JSArray object.
+ __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
+ __ JumpIfSmi(rax, &runtime);
+ __ CmpObjectType(rax, JS_ARRAY_TYPE, kScratchRegister);
+ __ j(not_equal, &runtime);
+ // Check that the JSArray is in fast case.
+ __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
+ __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ Cmp(rax, Factory::fixed_array_map());
+ __ j(not_equal, &runtime);
+ // Check that the last match info has space for the capture registers and the
+ // additional information. Ensure no overflow in add.
+ ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
+ __ movl(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
+ __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
+ __ cmpl(rdx, rax);
+ __ j(greater, &runtime);
+
+ // rcx: RegExp data (FixedArray)
+ // Check the representation and encoding of the subject string.
+ Label seq_string, seq_two_byte_string, check_code;
+ const int kStringRepresentationEncodingMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ __ andb(rbx, Immediate(kStringRepresentationEncodingMask));
+ // First check for sequential string.
+ ASSERT_EQ(0, kStringTag);
+ ASSERT_EQ(0, kSeqStringTag);
+ __ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
+ __ j(zero, &seq_string);
+
+ // Check for flat cons string.
+ // A flat cons string is a cons string where the second part is the empty
+ // string. In that case the subject string is just the first part of the cons
+ // string. Also in this case the first part of the cons string is known to be
+ // a sequential string or an external string.
+ __ movl(rdx, rbx);
+ __ andb(rdx, Immediate(kStringRepresentationMask));
+ __ cmpb(rdx, Immediate(kConsStringTag));
+ __ j(not_equal, &runtime);
+ __ movq(rdx, FieldOperand(rax, ConsString::kSecondOffset));
+ __ Cmp(rdx, Factory::empty_string());
+ __ j(not_equal, &runtime);
+ __ movq(rax, FieldOperand(rax, ConsString::kFirstOffset));
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ ASSERT_EQ(0, kSeqStringTag);
+ __ testb(rbx, Immediate(kStringRepresentationMask));
+ __ j(not_zero, &runtime);
+ __ andb(rbx, Immediate(kStringRepresentationEncodingMask));
+
+ __ bind(&seq_string);
+ // rax: subject string (sequential, either ascii or two byte)
+ // rbx: subject string type & kStringRepresentationEncodingMask
+ // rcx: RegExp data (FixedArray)
+ // Check that the irregexp code has been generated for an ascii string. If
+ // it has, the field contains a code object; otherwise it contains the hole.
+ __ cmpb(rbx, Immediate(kStringTag | kSeqStringTag | kTwoByteStringTag));
+ __ j(equal, &seq_two_byte_string);
+ if (FLAG_debug_code) {
+ __ cmpb(rbx, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
+ __ Check(equal, "Expected sequential ascii string");
+ }
+ __ movq(r12, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
+ __ Set(rdi, 1); // Type is ascii.
+ __ jmp(&check_code);
+
+ __ bind(&seq_two_byte_string);
+ // rax: subject string
+ // rcx: RegExp data (FixedArray)
+ __ movq(r12, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
+ __ Set(rdi, 0); // Type is two byte.
+
+ __ bind(&check_code);
+ // Check that the irregexp code has been generated for the actual string
+ // encoding. If it has, the field contains a code object; otherwise it
+ // contains the hole.
+ __ CmpObjectType(r12, CODE_TYPE, kScratchRegister);
+ __ j(not_equal, &runtime);
+
+ // rax: subject string
+ // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
+ // r12: code
+ // Load the used arguments before starting to push arguments for the call to
+ // the native RegExp code, to avoid handling a changing stack height.
+ __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
+ __ SmiToInteger64(rbx, rbx); // Previous index from smi.
+
+ // rax: subject string
+ // rbx: previous index
+ // rdi: encoding of subject string (1 if ascii 0 if two_byte);
+ // r12: code
+ // All checks done. Now push arguments for native regexp code.
+ __ IncrementCounter(&Counters::regexp_entry_native, 1);
+
+ // rsi is used to pass a parameter on Linux, where it is caller-saved; preserve it.
+ __ push(rsi);
+
+ static const int kRegExpExecuteArguments = 7;
+ __ PrepareCallCFunction(kRegExpExecuteArguments);
+ int argument_slots_on_stack =
+ masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
+
+ // Argument 7: Indicate that this is a direct call from JavaScript.
+ __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
+ Immediate(1));
+
+ // Argument 6: Start (high end) of backtracking stack memory area.
+ __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
+ __ movq(r9, Operand(kScratchRegister, 0));
+ __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
+ __ addq(r9, Operand(kScratchRegister, 0));
+ // Argument 6 passed in r9 on Linux and on the stack on Windows.
+#ifdef _WIN64
+ __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize), r9);
+#endif
+
+ // Argument 5: static offsets vector buffer.
+ __ movq(r8, ExternalReference::address_of_static_offsets_vector());
+ // Argument 5 passed in r8 on Linux and on the stack on Windows.
+#ifdef _WIN64
+ __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r8);
+#endif
+
+ // First four arguments are passed in registers on both Linux and Windows.
+#ifdef _WIN64
+ Register arg4 = r9;
+ Register arg3 = r8;
+ Register arg2 = rdx;
+ Register arg1 = rcx;
+#else
+ Register arg4 = rcx;
+ Register arg3 = rdx;
+ Register arg2 = rsi;
+ Register arg1 = rdi;
+#endif
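ABI background (well known, not from the patch): the Win64 convention passes the first four integer arguments in rcx, rdx, r8 and r9 and reserves stack shadow space for them, while the System V AMD64 ABI used on Linux passes the first six in rdi, rsi, rdx, rcx, r8 and r9. The arg1..arg4 aliases above encode exactly that difference; arguments 5 and 6 therefore land in r8/r9 on Linux but on the stack on Windows, and argument 7 is on the stack for both.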
+
+ // Keep track of aliasing between argX defined above and the registers used.
+ // rax: subject string
+ // rbx: previous index
+ // rdi: encoding of subject string (1 if ascii 0 if two_byte);
+ // r12: code
+
+ // Argument 4: End of string data
+ // Argument 3: Start of string data
+ Label setup_two_byte, setup_rest;
+ __ testb(rdi, rdi);
+ __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
+ __ j(zero, &setup_two_byte);
+ __ lea(arg4, FieldOperand(rax, rdi, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(arg3, FieldOperand(rax, rbx, times_1, SeqAsciiString::kHeaderSize));
+ __ jmp(&setup_rest);
+ __ bind(&setup_two_byte);
+ __ lea(arg4, FieldOperand(rax, rdi, times_2, SeqTwoByteString::kHeaderSize));
+ __ lea(arg3, FieldOperand(rax, rbx, times_2, SeqTwoByteString::kHeaderSize));
+
+ __ bind(&setup_rest);
+ // Argument 2: Previous index.
+ __ movq(arg2, rbx);
+
+ // Argument 1: Subject string.
+ __ movq(arg1, rax);
+
+ // Locate the code entry and call it.
+ __ addq(r12, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ CallCFunction(r12, kRegExpExecuteArguments);
+
+ // Restore rsi; it is caller-saved, as it is used to pass a parameter.
+ __ pop(rsi);
+
+ // Check the result.
+ Label success;
+ __ cmpq(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
+ __ j(equal, &success);
+ Label failure;
+ __ cmpq(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
+ __ j(equal, &failure);
+ __ cmpq(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
+ // If not an exception, it can only be a retry. Handle that in the runtime system.
+ __ j(not_equal, &runtime);
+ // Result must now be exception. If there is no pending exception already, a
+ // stack overflow (on the backtrack stack) was detected in RegExp code, but
+ // the exception has not been created yet. Handle that in the runtime system.
+ // TODO(592): Rerun the RegExp to create the stack overflow exception.
+ ExternalReference pending_exception_address(Top::k_pending_exception_address);
+ __ movq(kScratchRegister, pending_exception_address);
+ __ Cmp(kScratchRegister, Factory::the_hole_value());
+ __ j(equal, &runtime);
+ __ bind(&failure);
+ // For failure and exception return null.
+ __ Move(rax, Factory::null_value());
+ __ ret(4 * kPointerSize);
+
+ // Load RegExp data.
+ __ bind(&success);
+ __ movq(rax, Operand(rsp, kJSRegExpOffset));
+ __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
+ __ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1);
+ __ addq(rdx, Immediate(2)); // rdx was number_of_captures * 2.
+
+ // rdx: Number of capture registers
+ // Load last_match_info which is still known to be a fast case JSArray.
+ __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
+ __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
+
+ // rbx: last_match_info backing store (FixedArray)
+ // rdx: number of capture registers
+ // Store the capture count.
+ __ Integer32ToSmi(kScratchRegister, rdx);
+ __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
+ kScratchRegister);
+ // Store last subject and last input.
+ __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
+ __ movq(rcx, rbx);
+ __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi);
+ __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
+ __ movq(rcx, rbx);
+ __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi);
+
+ // Get the static offsets vector filled by the native regexp code.
+ __ movq(rcx, ExternalReference::address_of_static_offsets_vector());
+
+ // rbx: last_match_info backing store (FixedArray)
+ // rcx: offsets vector
+ // rdx: number of capture registers
+ Label next_capture, done;
+ __ movq(rax, Operand(rsp, kPreviousIndexOffset));
+ // Capture register counter starts from number of capture registers and
+ // counts down until wrapping after zero.
+ __ bind(&next_capture);
+ __ subq(rdx, Immediate(1));
+ __ j(negative, &done);
+ // Read the value from the static offsets vector buffer and make it a smi.
+ __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
+ __ Integer32ToSmi(rdi, rdi, &runtime);
+ // Add previous index (from its stack slot) if value is not negative.
+ Label capture_negative;
+ // Negative flag set by the smi conversion above.
+ __ j(negative, &capture_negative);
+ __ SmiAdd(rdi, rdi, rax, &runtime); // Add previous index.
+ __ bind(&capture_negative);
+ // Store the smi value in the last match info.
+ __ movq(FieldOperand(rbx,
+ rdx,
+ times_pointer_size,
+ RegExpImpl::kFirstCaptureOffset),
+ rdi);
+ __ jmp(&next_capture);
+ __ bind(&done);
+
+ // Return last match info.
+ __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
+ __ ret(4 * kPointerSize);
+
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
+#endif // V8_NATIVE_REGEXP
+}
+
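As an aside on the capture arithmetic in the stub above: a minimal sketch in plain C++, assuming Irregexp's convention of one (start, end) offset pair per capture group plus one pair for the whole match. The helper name is hypothetical and not part of the patch.

    // Number of offsets the native RegExp code writes for a match.
    static int NumCaptureRegisters(int number_of_captures) {
      // One (start, end) pair per explicit group, plus one pair for the
      // implicit whole-match capture; e.g. 2 groups -> 6 registers.
      return (number_of_captures + 1) * 2;
    }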
+
void CompareStub::Generate(MacroAssembler* masm) {
Label call_builtin, done;
@@ -6972,23 +7344,6 @@ void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
}
-int CEntryStub::MinorKey() {
- ASSERT(result_size_ <= 2);
-#ifdef _WIN64
- // Simple results returned in rax (using default code).
- // Complex results must be written to address passed as first argument.
- // Use even numbers for minor keys, reserving the odd numbers for
- // CEntryDebugBreakStub.
- return (result_size_ < 2) ? 0 : result_size_ * 2;
-#else
- // Single results returned in rax (both AMD64 and Win64 calling conventions)
- // and a struct of two pointers in rax+rdx (AMD64 calling convention only)
- // by default.
- return 0;
-#endif
-}
-
-
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// Check that the stack contains the next handler, frame pointer, state and
// return address in that order.
@@ -7022,7 +7377,6 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
- ExitFrame::Mode mode,
bool do_gc,
bool always_allocate_scope) {
// rax: result parameter for PerformGC, if any.
@@ -7034,6 +7388,10 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// This pointer is reused in LeaveExitFrame(), so it is stored in a
// callee-saved register.
+ // Simple results returned in rax (both AMD64 and Win64 calling conventions).
+ // Complex results must be written to the address passed as first argument.
+ // AMD64 calling convention: a struct of two pointers in rax+rdx.
+
if (do_gc) {
// Pass failure code returned from last attempt as first argument to GC.
#ifdef _WIN64
@@ -7105,7 +7463,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ j(zero, &failure_returned);
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(mode, result_size_);
+ __ LeaveExitFrame(mode_, result_size_);
__ ret(0);
// Handling of failure.
@@ -7249,7 +7607,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
}
-void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
+void CEntryStub::Generate(MacroAssembler* masm) {
// rax: number of arguments including receiver
// rbx: pointer to C function (C callee-saved)
// rbp: frame pointer of calling JS frame (restored after C call)
@@ -7261,12 +7619,8 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
// this by performing a garbage collection and retrying the
// builtin once.
- ExitFrame::Mode mode = is_debug_break ?
- ExitFrame::MODE_DEBUG :
- ExitFrame::MODE_NORMAL;
-
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(mode, result_size_);
+ __ EnterExitFrame(mode_, result_size_);
// rax: Holds the context at this point, but should not be used.
// On entry to code generated by GenerateCore, it must hold
@@ -7289,7 +7643,6 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- mode,
false,
false);
@@ -7298,7 +7651,6 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- mode,
true,
false);
@@ -7309,7 +7661,6 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- mode,
true,
true);
@@ -7501,39 +7852,20 @@ void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
XMMRegister dst1,
XMMRegister dst2) {
- __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
+ __ movq(kScratchRegister, rdx);
LoadFloatOperand(masm, kScratchRegister, dst1);
- __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ __ movq(kScratchRegister, rax);
LoadFloatOperand(masm, kScratchRegister, dst2);
}
-void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) {
- Label load_smi_1, load_smi_2, done_load_1, done;
- __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
- __ JumpIfSmi(kScratchRegister, &load_smi_1);
- __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
- __ bind(&done_load_1);
-
- __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
- __ JumpIfSmi(kScratchRegister, &load_smi_2);
- __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_1);
- __ SmiToInteger32(kScratchRegister, kScratchRegister);
- __ push(kScratchRegister);
- __ fild_s(Operand(rsp, 0));
- __ pop(kScratchRegister);
- __ jmp(&done_load_1);
-
- __ bind(&load_smi_2);
- __ SmiToInteger32(kScratchRegister, kScratchRegister);
- __ push(kScratchRegister);
- __ fild_s(Operand(rsp, 0));
- __ pop(kScratchRegister);
-
- __ bind(&done);
+void FloatingPointHelper::LoadFloatOperandsFromSmis(MacroAssembler* masm,
+ XMMRegister dst1,
+ XMMRegister dst2) {
+ __ SmiToInteger32(kScratchRegister, rdx);
+ __ cvtlsi2sd(dst1, kScratchRegister);
+ __ SmiToInteger32(kScratchRegister, rax);
+ __ cvtlsi2sd(dst2, kScratchRegister);
}
@@ -7787,94 +8119,188 @@ void GenericBinaryOpStub::GenerateCall(
}
+Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm,
+ VirtualFrame* frame,
+ Result* left,
+ Result* right) {
+ if (ArgsInRegistersSupported()) {
+ SetArgsInRegisters();
+ return frame->CallStub(this, left, right);
+ } else {
+ frame->Push(left);
+ frame->Push(right);
+ return frame->CallStub(this, 2);
+ }
+}
+
+
void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
- // Perform fast-case smi code for the operation (rax <op> rbx) and
- // leave result in register rax.
+ // 1. Move arguments into rdx, rax except for DIV and MOD, which need the
+ // dividend in rax and rdx free for the division. Use rax, rbx for those.
+ Comment load_comment(masm, "-- Load arguments");
+ Register left = rdx;
+ Register right = rax;
+ if (op_ == Token::DIV || op_ == Token::MOD) {
+ left = rax;
+ right = rbx;
+ if (HasArgsInRegisters()) {
+ __ movq(rbx, rax);
+ __ movq(rax, rdx);
+ }
+ }
+ if (!HasArgsInRegisters()) {
+ __ movq(right, Operand(rsp, 1 * kPointerSize));
+ __ movq(left, Operand(rsp, 2 * kPointerSize));
+ }
- // Smi check both operands.
- __ JumpIfNotBothSmi(rax, rbx, slow);
+ // 2. Smi check both operands. Skip the check for OR as it is better combined
+ // with the actual operation.
+ Label not_smis;
+ if (op_ != Token::BIT_OR) {
+ Comment smi_check_comment(masm, "-- Smi check arguments");
+ __ JumpIfNotBothSmi(left, right, &not_smis);
+ }
+ // 3. Operands are both smis (except for OR), perform the operation leaving
+ // the result in rax and check the result if necessary.
+ Comment perform_smi(masm, "-- Perform smi operation");
+ Label use_fp_on_smis;
switch (op_) {
case Token::ADD: {
- __ SmiAdd(rax, rax, rbx, slow);
+ ASSERT(right.is(rax));
+ __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
break;
}
case Token::SUB: {
- __ SmiSub(rax, rax, rbx, slow);
+ __ SmiSub(left, left, right, &use_fp_on_smis);
+ __ movq(rax, left);
break;
}
case Token::MUL:
- __ SmiMul(rax, rax, rbx, slow);
+ ASSERT(right.is(rax));
+ __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
break;
case Token::DIV:
- __ SmiDiv(rax, rax, rbx, slow);
+ ASSERT(left.is(rax));
+ __ SmiDiv(left, left, right, &use_fp_on_smis);
break;
case Token::MOD:
- __ SmiMod(rax, rax, rbx, slow);
+ ASSERT(left.is(rax));
+ __ SmiMod(left, left, right, slow);
break;
case Token::BIT_OR:
- __ SmiOr(rax, rax, rbx);
+ ASSERT(right.is(rax));
+ __ movq(rcx, right); // Save the right operand.
+ __ SmiOr(right, right, left); // BIT_OR is commutative.
+ __ testb(right, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_smis);
break;
case Token::BIT_AND:
- __ SmiAnd(rax, rax, rbx);
+ ASSERT(right.is(rax));
+ __ SmiAnd(right, right, left); // BIT_AND is commutative.
break;
case Token::BIT_XOR:
- __ SmiXor(rax, rax, rbx);
+ ASSERT(right.is(rax));
+ __ SmiXor(right, right, left); // BIT_XOR is commutative.
break;
case Token::SHL:
case Token::SHR:
case Token::SAR:
- // Move the second operand into register rcx.
- __ movq(rcx, rbx);
- // Perform the operation.
switch (op_) {
case Token::SAR:
- __ SmiShiftArithmeticRight(rax, rax, rcx);
+ __ SmiShiftArithmeticRight(left, left, right);
break;
case Token::SHR:
- __ SmiShiftLogicalRight(rax, rax, rcx, slow);
+ __ SmiShiftLogicalRight(left, left, right, slow);
break;
case Token::SHL:
- __ SmiShiftLeft(rax, rax, rcx, slow);
+ __ SmiShiftLeft(left, left, right, slow);
break;
default:
UNREACHABLE();
}
+ __ movq(rax, left);
break;
default:
UNREACHABLE();
break;
}
+
+ // 4. Emit return of result in rax.
+ GenerateReturn(masm);
+
+ // 5. For some operations emit inline code to perform floating point
+ // operations on known smis (e.g., if the result of the operation
+ // overflowed the smi range).
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ __ bind(&use_fp_on_smis);
+ if (op_ == Token::DIV) {
+ __ movq(rdx, rax);
+ __ movq(rax, rbx);
+ }
+ // left is rdx, right is rax.
+ __ AllocateHeapNumber(rbx, rcx, slow);
+ FloatingPointHelper::LoadFloatOperandsFromSmis(masm, xmm4, xmm5);
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm4, xmm5); break;
+ case Token::SUB: __ subsd(xmm4, xmm5); break;
+ case Token::MUL: __ mulsd(xmm4, xmm5); break;
+ case Token::DIV: __ divsd(xmm4, xmm5); break;
+ default: UNREACHABLE();
+ }
+ __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm4);
+ __ movq(rax, rbx);
+ GenerateReturn(masm);
+ }
+ default:
+ break;
+ }
+
+ // 6. Non-smi operands, fall out to the non-smi code with the operands in
+ // rdx and rax.
+ Comment done_comment(masm, "-- Enter non-smi code");
+ __ bind(&not_smis);
+
+ switch (op_) {
+ case Token::DIV:
+ case Token::MOD:
+ // Operands are in rax, rbx at this point.
+ __ movq(rdx, rax);
+ __ movq(rax, rbx);
+ break;
+
+ case Token::BIT_OR:
+ // Right operand is saved in rcx and rax was destroyed by the smi
+ // operation.
+ __ movq(rax, rcx);
+ break;
+
+ default:
+ break;
+ }
}
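Conceptually, the smi fast path generated above works like the following sketch, assuming a low-bit smi tag and GCC/Clang overflow builtins; the real stub operates on tagged values in registers and falls back to use_fp_on_smis or the runtime on failure.

    static bool TrySmiAdd(intptr_t a, intptr_t b, intptr_t* result) {
      const intptr_t kTagMask = 1;                   // assumed tag layout
      if (((a | b) & kTagMask) != 0) return false;   // not both smis
      return !__builtin_add_overflow(a, b, result);  // overflow -> slow path
    }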
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label call_runtime;
if (HasSmiCodeInStub()) {
- // The fast case smi code wasn't inlined in the stub caller
- // code. Generate it here to speed up common operations.
- Label slow;
- __ movq(rbx, Operand(rsp, 1 * kPointerSize)); // get y
- __ movq(rax, Operand(rsp, 2 * kPointerSize)); // get x
- GenerateSmiCode(masm, &slow);
- GenerateReturn(masm);
-
- // Too bad. The fast case smi code didn't succeed.
- __ bind(&slow);
+ GenerateSmiCode(masm, &call_runtime);
+ } else if (op_ != Token::MOD) {
+ GenerateLoadArguments(masm);
}
-
- // Make sure the arguments are in rdx and rax.
- GenerateLoadArguments(masm);
-
// Floating point case.
switch (op_) {
case Token::ADD:
@@ -7885,12 +8311,34 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// rdx: x
FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
// Fast-case: Both operands are numbers.
+ // xmm4 and xmm5 are volatile XMM registers.
+ FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
+
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm4, xmm5); break;
+ case Token::SUB: __ subsd(xmm4, xmm5); break;
+ case Token::MUL: __ mulsd(xmm4, xmm5); break;
+ case Token::DIV: __ divsd(xmm4, xmm5); break;
+ default: UNREACHABLE();
+ }
// Allocate a heap number, if needed.
Label skip_allocation;
- switch (mode_) {
+ OverwriteMode mode = mode_;
+ if (HasArgsReversed()) {
+ if (mode == OVERWRITE_RIGHT) {
+ mode = OVERWRITE_LEFT;
+ } else if (mode == OVERWRITE_LEFT) {
+ mode = OVERWRITE_RIGHT;
+ }
+ }
+ switch (mode) {
case OVERWRITE_LEFT:
+ __ JumpIfNotSmi(rdx, &skip_allocation);
+ __ AllocateHeapNumber(rbx, rcx, &call_runtime);
+ __ movq(rdx, rbx);
+ __ bind(&skip_allocation);
__ movq(rax, rdx);
- // Fall through!
+ break;
case OVERWRITE_RIGHT:
// If the argument in rax is already an object, we skip the
// allocation of a heap number.
@@ -7905,16 +8353,6 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
break;
default: UNREACHABLE();
}
- // xmm4 and xmm5 are volatile XMM registers.
- FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm4, xmm5); break;
- case Token::SUB: __ subsd(xmm4, xmm5); break;
- case Token::MUL: __ mulsd(xmm4, xmm5); break;
- case Token::DIV: __ divsd(xmm4, xmm5); break;
- default: UNREACHABLE();
- }
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
GenerateReturn(masm);
}
@@ -7981,8 +8419,6 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
if (op_ == Token::SHR) {
__ bind(&non_smi_result);
}
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
break;
}
default: UNREACHABLE(); break;
@@ -7992,9 +8428,9 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// result. If arguments were passed in registers, now place them on the
// stack in the correct order below the return address.
__ bind(&call_runtime);
- if (HasArgumentsInRegisters()) {
+ if (HasArgsInRegisters()) {
__ pop(rcx);
- if (HasArgumentsReversed()) {
+ if (HasArgsReversed()) {
__ push(rax);
__ push(rdx);
} else {
@@ -8009,8 +8445,6 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label not_strings, both_strings, not_string1, string1;
Condition is_smi;
Result answer;
- __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // First argument.
- __ movq(rax, Operand(rsp, 1 * kPointerSize)); // Second argument.
is_smi = masm->CheckSmi(rdx);
__ j(is_smi, &not_string1);
__ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rdx);
@@ -8028,7 +8462,11 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// Only first argument is a string.
__ bind(&string1);
- __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+ __ InvokeBuiltin(
+ HasArgsReversed() ?
+ Builtins::STRING_ADD_RIGHT :
+ Builtins::STRING_ADD_LEFT,
+ JUMP_FUNCTION);
// First argument was not a string, test second.
__ bind(&not_string1);
@@ -8038,7 +8476,11 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ j(above_equal, &not_strings);
// Only second argument is a string.
- __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+ __ InvokeBuiltin(
+ HasArgsReversed() ?
+ Builtins::STRING_ADD_LEFT :
+ Builtins::STRING_ADD_RIGHT,
+ JUMP_FUNCTION);
__ bind(&not_strings);
// Neither argument is a string.
@@ -8050,7 +8492,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
break;
case Token::MUL:
__ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
+ break;
case Token::DIV:
__ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
break;
@@ -8083,7 +8525,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
// If arguments are not passed in registers, read them from the stack.
- if (!HasArgumentsInRegisters()) {
+ if (!HasArgsInRegisters()) {
__ movq(rax, Operand(rsp, 1 * kPointerSize));
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
}
@@ -8093,7 +8535,7 @@ void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
// If arguments are not passed in registers, remove them from the stack before
// returning.
- if (!HasArgumentsInRegisters()) {
+ if (!HasArgsInRegisters()) {
__ ret(2 * kPointerSize); // Remove both operands
} else {
__ ret(0);
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 72c84162..a758e739 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -32,6 +32,7 @@ namespace v8 {
namespace internal {
// Forward declarations
+class CompilationInfo;
class DeferredCode;
class RegisterAllocator;
class RegisterFile;
@@ -293,11 +294,21 @@ enum ArgumentsAllocationMode {
class CodeGenerator: public AstVisitor {
public:
+ // Compilation mode. Either the compiler is used as the primary
+ // compiler and needs to set up everything, or the compiler is used as
+ // the secondary compiler for split compilation and has to handle
+ // bailouts.
+ enum Mode {
+ PRIMARY,
+ SECONDARY
+ };
+
// Takes a function literal, generates code for it. This function should only
// be called by compiler.cc.
static Handle<Code> MakeCode(FunctionLiteral* fun,
Handle<Script> script,
- bool is_eval);
+ bool is_eval,
+ CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
static void MakeCodePrologue(FunctionLiteral* fun);
@@ -341,8 +352,7 @@ class CodeGenerator: public AstVisitor {
private:
// Construction/Destruction
- CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
- virtual ~CodeGenerator() { delete masm_; }
+ CodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval);
// Accessors
Scope* scope() const { return scope_; }
@@ -380,7 +390,7 @@ class CodeGenerator: public AstVisitor {
void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
// Main code generation function
- void GenCode(FunctionLiteral* fun);
+ void Generate(FunctionLiteral* fun, Mode mode, CompilationInfo* info);
// Generate the return sequence code. Should be called no more than
// once per compiled function, immediately after binding the return
@@ -629,6 +639,7 @@ class CodeGenerator: public AstVisitor {
friend class JumpTarget;
friend class Reference;
friend class Result;
+ friend class FastCodeGenerator;
friend class FullCodeGenerator;
friend class FullCodeGenSyntaxChecker;
@@ -667,6 +678,11 @@ class GenericBinaryOpStub: public CodeStub {
void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
+ Result GenerateCall(MacroAssembler* masm,
+ VirtualFrame* frame,
+ Result* left,
+ Result* right);
+
private:
Token::Value op_;
OverwriteMode mode_;
@@ -715,9 +731,8 @@ class GenericBinaryOpStub: public CodeStub {
void GenerateReturn(MacroAssembler* masm);
bool ArgsInRegistersSupported() {
- return ((op_ == Token::ADD) || (op_ == Token::SUB)
- || (op_ == Token::MUL) || (op_ == Token::DIV))
- && flags_ != NO_SMI_CODE_IN_STUB;
+ return (op_ == Token::ADD) || (op_ == Token::SUB)
+ || (op_ == Token::MUL) || (op_ == Token::DIV);
}
bool IsOperationCommutative() {
return (op_ == Token::ADD) || (op_ == Token::MUL);
@@ -726,8 +741,8 @@ class GenericBinaryOpStub: public CodeStub {
void SetArgsInRegisters() { args_in_registers_ = true; }
void SetArgsReversed() { args_reversed_ = true; }
bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
- bool HasArgumentsInRegisters() { return args_in_registers_; }
- bool HasArgumentsReversed() { return args_reversed_; }
+ bool HasArgsInRegisters() { return args_in_registers_; }
+ bool HasArgsReversed() { return args_reversed_; }
};
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index bc88d466..261b16c0 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -68,7 +68,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ xor_(rax, rax); // No arguments (argc == 0).
__ movq(rbx, ExternalReference::debug_break());
- CEntryDebugBreakStub ceb;
+ CEntryStub ceb(1, ExitFrame::MODE_DEBUG);
__ CallStub(&ceb);
// Restore the register values containing object pointers from the expression
@@ -158,12 +158,13 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- // REgister state for IC store call (from ic-x64.cc).
+ // Register state for IC store call (from ic-x64.cc).
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
+ // -- rdx : receiver
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, rax.bit() | rcx.bit(), false);
+ Generate_DebugBreakCallHelper(masm, rax.bit() | rcx.bit() | rdx.bit(), false);
}
diff --git a/src/x64/fast-codegen-x64.cc b/src/x64/fast-codegen-x64.cc
new file mode 100644
index 00000000..12b5653e
--- /dev/null
+++ b/src/x64/fast-codegen-x64.cc
@@ -0,0 +1,140 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "fast-codegen.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm())
+
+void FastCodeGenerator::EmitLoadReceiver(Register reg) {
+ // Offset 2 is due to the return address and the saved frame pointer.
+ int index = 2 + function()->scope()->num_parameters();
+ __ movq(reg, Operand(rbp, index * kPointerSize));
+}
+
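A worked example of the receiver offset used above, assuming kPointerSize is 8 and the usual JS frame layout (saved rbp at rbp[0], return address at rbp[8], then the parameters, then the receiver). The helper is hypothetical.

    static int ReceiverOffset(int num_parameters) {
      // Skip the saved frame pointer and the return address (2 slots).
      return (2 + num_parameters) * 8;  // e.g. 2 parameters -> rbp + 32
    }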
+
+void FastCodeGenerator::EmitReceiverMapCheck() {
+ Comment cmnt(masm(), ";; MapCheck(this)");
+ if (FLAG_print_ir) {
+ PrintF("MapCheck(this)\n");
+ }
+
+ EmitLoadReceiver(rdx);
+ __ JumpIfSmi(rdx, bailout());
+
+ ASSERT(has_receiver() && receiver()->IsHeapObject());
+ Handle<HeapObject> object = Handle<HeapObject>::cast(receiver());
+ Handle<Map> map(object->map());
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), map);
+ __ j(not_equal, bailout());
+}
+
+
+void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
+ // Compile global variable accesses as load IC calls. The only live
+ // registers are rsi (context) and possibly rdx (this). Both are also
+ // saved on the stack and rsi is preserved by the call.
+ __ push(CodeGenerator::GlobalObject());
+ __ Move(rcx, name);
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ if (has_this_properties()) {
+ // Restore this.
+ EmitLoadReceiver(rdx);
+ } else {
+ __ nop(); // Not a test rax instruction; tells the IC there is no inlined code at the call site.
+ }
+}
+
+
+void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
+ LookupResult lookup;
+ receiver()->Lookup(*name, &lookup);
+
+ ASSERT(lookup.holder() == *receiver());
+ ASSERT(lookup.type() == FIELD);
+ Handle<Map> map(Handle<HeapObject>::cast(receiver())->map());
+ int index = lookup.GetFieldIndex() - map->inobject_properties();
+ int offset = index * kPointerSize;
+
+ // Negative offsets are inobject properties.
+ if (offset < 0) {
+ offset += map->instance_size();
+ __ movq(rcx, rdx); // Copy receiver for write barrier.
+ } else {
+ offset += FixedArray::kHeaderSize;
+ __ movq(rcx, FieldOperand(rdx, JSObject::kPropertiesOffset));
+ }
+ // Perform the store.
+ __ movq(FieldOperand(rcx, offset), rax);
+ // Preserve value from write barrier in case it's needed.
+ __ movq(rbx, rax);
+ __ RecordWrite(rcx, offset, rbx, rdi);
+}
+
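The offset arithmetic in EmitThisPropertyStore, restated as a sketch: negative indices address fields inside the object itself, non-negative ones the properties backing store. Assumes kPointerSize is 8; the helper is hypothetical.

    static int FieldStoreOffset(int field_index, int inobject_properties,
                                int instance_size, int header_size) {
      int offset = (field_index - inobject_properties) * 8;
      return offset < 0 ? offset + instance_size  // in-object property
                        : offset + header_size;   // in the properties array
    }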
+
+void FastCodeGenerator::Generate(FunctionLiteral* fun, CompilationInfo* info) {
+ ASSERT(function_ == NULL);
+ ASSERT(info_ == NULL);
+ function_ = fun;
+ info_ = info;
+
+ // Save the caller's frame pointer and set up our own.
+ Comment prologue_cmnt(masm(), ";; Prologue");
+ __ push(rbp);
+ __ movq(rbp, rsp);
+ __ push(rsi); // Context.
+ __ push(rdi); // Closure.
+ // Note that we keep a live register reference to rsi (context) at this
+ // point.
+
+ // Receiver (this) is allocated to rdx if there are this properties.
+ if (has_this_properties()) EmitReceiverMapCheck();
+
+ VisitStatements(fun->body());
+
+ Comment return_cmnt(masm(), ";; Return(<undefined>)");
+ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+
+ Comment epilogue_cmnt(masm(), ";; Epilogue");
+ __ movq(rsp, rbp);
+ __ pop(rbp);
+ __ ret((fun->scope()->num_parameters() + 1) * kPointerSize);
+
+ __ bind(&bailout_);
+}
+
+
+#undef __
+
+
+} } // namespace v8::internal
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 37551092..f5bbfafe 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -51,83 +51,91 @@ namespace internal {
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-x64.h for its layout.
-void FullCodeGenerator::Generate(FunctionLiteral* fun) {
+void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
function_ = fun;
SetFunctionPosition(fun);
- __ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
- __ push(rsi); // Callee's context.
- __ push(rdi); // Callee's JS Function.
-
- { Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = fun->scope()->num_stack_slots();
- if (locals_count == 1) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- } else if (locals_count > 1) {
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < locals_count; i++) {
- __ push(rdx);
+ if (mode == PRIMARY) {
+ __ push(rbp); // Caller's frame pointer.
+ __ movq(rbp, rsp);
+ __ push(rsi); // Callee's context.
+ __ push(rdi); // Callee's JS Function.
+
+ { Comment cmnt(masm_, "[ Allocate locals");
+ int locals_count = fun->scope()->num_stack_slots();
+ if (locals_count == 1) {
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ } else if (locals_count > 1) {
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ for (int i = 0; i < locals_count; i++) {
+ __ push(rdx);
+ }
}
}
- }
- bool function_in_register = true;
-
- // Possibly allocate a local context.
- if (fun->scope()->num_heap_slots() > 0) {
- Comment cmnt(masm_, "[ Allocate local context");
- // Argument to NewContext is the function, which is still in rdi.
- __ push(rdi);
- __ CallRuntime(Runtime::kNewContext, 1);
- function_in_register = false;
- // Context is returned in both rax and rsi. It replaces the context
- // passed to us. It's saved in the stack and kept live in rsi.
- __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
-
- // Copy any necessary parameters into the context.
- int num_parameters = fun->scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Slot* slot = fun->scope()->parameter(i)->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ movq(rax, Operand(rbp, parameter_offset));
- // Store it in the context
- __ movq(Operand(rsi, Context::SlotOffset(slot->index())), rax);
+ bool function_in_register = true;
+
+ // Possibly allocate a local context.
+ if (fun->scope()->num_heap_slots() > 0) {
+ Comment cmnt(masm_, "[ Allocate local context");
+ // Argument to NewContext is the function, which is still in rdi.
+ __ push(rdi);
+ __ CallRuntime(Runtime::kNewContext, 1);
+ function_in_register = false;
+ // Context is returned in both rax and rsi. It replaces the context
+ // passed to us. It's saved in the stack and kept live in rsi.
+ __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+
+ // Copy any necessary parameters into the context.
+ int num_parameters = fun->scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = fun->scope()->parameter(i)->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ movq(rax, Operand(rbp, parameter_offset));
+ // Store it in the context.
+ int context_offset = Context::SlotOffset(slot->index());
+ __ movq(Operand(rsi, context_offset), rax);
+ // Update the write barrier. This clobbers all involved
+ // registers, so we have to use a third register to avoid
+ // clobbering rsi.
+ __ movq(rcx, rsi);
+ __ RecordWrite(rcx, context_offset, rax, rbx);
+ }
}
}
- }
- // Possibly allocate an arguments object.
- Variable* arguments = fun->scope()->arguments()->AsVariable();
- if (arguments != NULL) {
- // Arguments object must be allocated after the context object, in
- // case the "arguments" or ".arguments" variables are in the context.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (function_in_register) {
- __ push(rdi);
- } else {
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ // Possibly allocate an arguments object.
+ Variable* arguments = fun->scope()->arguments()->AsVariable();
+ if (arguments != NULL) {
+ // Arguments object must be allocated after the context object, in
+ // case the "arguments" or ".arguments" variables are in the context.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (function_in_register) {
+ __ push(rdi);
+ } else {
+ __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ // The receiver is just before the parameters on the caller's stack.
+ __ lea(rdx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
+ fun->num_parameters() * kPointerSize));
+ __ push(rdx);
+ __ Push(Smi::FromInt(fun->num_parameters()));
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+ // The stub will rewrite receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ CallStub(&stub);
+ // Store new arguments object in both "arguments" and ".arguments" slots.
+ __ movq(rcx, rax);
+ Move(arguments->slot(), rax, rbx, rdx);
+ Slot* dot_arguments_slot =
+ fun->scope()->arguments_shadow()->AsVariable()->slot();
+ Move(dot_arguments_slot, rcx, rbx, rdx);
}
- // The receiver is just before the parameters on the caller's stack.
- __ lea(rdx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
- fun->num_parameters() * kPointerSize));
- __ push(rdx);
- __ Push(Smi::FromInt(fun->num_parameters()));
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiver and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- __ CallStub(&stub);
- // Store new arguments object in both "arguments" and ".arguments" slots.
- __ movq(rcx, rax);
- Move(arguments->slot(), rax, rbx, rdx);
- Slot* dot_arguments_slot =
- fun->scope()->arguments_shadow()->AsVariable()->slot();
- Move(dot_arguments_slot, rcx, rbx, rdx);
}
{ Comment cmnt(masm_, "[ Declarations");
@@ -698,7 +706,8 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
__ movq(CodeGenerator::ContextOperand(rsi, slot->index()),
result_register());
int offset = Context::SlotOffset(slot->index());
- __ RecordWrite(rsi, offset, result_register(), rcx);
+ __ movq(rbx, rsi);
+ __ RecordWrite(rbx, offset, result_register(), rcx);
}
break;
@@ -920,10 +929,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (key->handle()->IsSymbol()) {
VisitForValue(value, kAccumulator);
__ Move(rcx, key->handle());
+ __ movq(rdx, Operand(rsp, 0));
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
__ nop();
- // StoreIC leaves the receiver on the stack.
break;
}
// Fall through.
@@ -1045,13 +1054,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
ASSERT(!var->is_this());
// Assignment to a global variable. Use inline caching for the
// assignment. Right-hand-side value is passed in rax, variable name in
- // rcx, and the global object on the stack.
+ // rcx, and the global object in rdx.
__ Move(rcx, var->name());
- __ push(CodeGenerator::GlobalObject());
+ __ movq(rdx, CodeGenerator::GlobalObject());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
- // Overwrite the global object on the stack with the result if needed.
- DropAndApply(1, context, rax);
+ Apply(context, rax);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
__ push(result_register()); // Value.
@@ -1111,6 +1119,11 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
__ Move(rcx, prop->key()->AsLiteral()->handle());
+ if (expr->ends_initialization_block()) {
+ __ movq(rdx, Operand(rsp, 0));
+ } else {
+ __ pop(rdx);
+ }
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
__ nop();
@@ -1121,9 +1134,10 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ push(Operand(rsp, kPointerSize)); // Receiver is under value.
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(rax);
+ DropAndApply(1, context_, rax);
+ } else {
+ Apply(context_, rax);
}
-
- DropAndApply(1, context_, rax);
}
@@ -1474,8 +1488,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Comment cmt(masm_, "[ UnaryOperation (ADD)");
VisitForValue(expr->expression(), kAccumulator);
Label no_conversion;
- Condition is_smi;
- is_smi = masm_->CheckSmi(result_register());
+ Condition is_smi = masm_->CheckSmi(result_register());
__ j(is_smi, &no_conversion);
__ push(result_register());
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
@@ -1484,6 +1497,44 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
+ case Token::SUB: {
+ Comment cmt(masm_, "[ UnaryOperation (SUB)");
+ bool overwrite =
+ (expr->expression()->AsBinaryOperation() != NULL &&
+ expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ GenericUnaryOpStub stub(Token::SUB, overwrite);
+ // GenericUnaryOpStub expects the argument to be in the
+ // accumulator register rax.
+ VisitForValue(expr->expression(), kAccumulator);
+ __ CallStub(&stub);
+ Apply(context_, rax);
+ break;
+ }
+
+ case Token::BIT_NOT: {
+ Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
+ bool overwrite =
+ (expr->expression()->AsBinaryOperation() != NULL &&
+ expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+ // GenericUnaryOpStub expects the argument to be in the
+ // accumulator register rax.
+ VisitForValue(expr->expression(), kAccumulator);
+ // Avoid calling the stub for Smis.
+ Label smi, done;
+ Condition is_smi = masm_->CheckSmi(result_register());
+ __ j(is_smi, &smi);
+ // Non-smi: call stub leaving result in accumulator register.
+ __ CallStub(&stub);
+ __ jmp(&done);
+ // Perform operation directly on Smis.
+ __ bind(&smi);
+ __ SmiNot(result_register(), result_register());
+ __ bind(&done);
+ Apply(context_, result_register());
+ break;
+ }
+
default:
UNREACHABLE();
}
@@ -1588,12 +1639,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
// Call stub for +1/-1.
- __ push(rax);
- __ Push(Smi::FromInt(1));
GenericBinaryOpStub stub(expr->binary_op(),
NO_OVERWRITE,
NO_GENERIC_BINARY_FLAGS);
- __ CallStub(&stub);
+ stub.GenerateCall(masm_, rax, Smi::FromInt(1));
__ bind(&done);
// Store the value returned in rax.
@@ -1614,18 +1663,18 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
case NAMED_PROPERTY: {
__ Move(rcx, prop->key()->AsLiteral()->handle());
+ __ pop(rdx);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
// This nop signals to the IC that there is no inlined code at the call
// site for it to patch.
__ nop();
if (expr->is_postfix()) {
- __ Drop(1); // Result is on the stack under the receiver.
if (context_ != Expression::kEffect) {
ApplyTOS(context_);
}
} else {
- DropAndApply(1, context_, rax);
+ Apply(context_, rax);
}
break;
}
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index e293247d..28bfd2ee 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -414,6 +414,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
+
GenerateGeneric(masm);
}
@@ -1080,7 +1081,6 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// rsp[(argc + 1) * 8] argument 0 = receiver
// rsp[(argc + 2) * 8] function name
// -----------------------------------
-
Label miss, global_object, non_global_object;
// Get the receiver of the function from the stack.
@@ -1178,7 +1178,6 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
-
Label miss;
__ movq(rax, Operand(rsp, kPointerSize));
@@ -1195,7 +1194,6 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
-
Label miss;
__ movq(rax, Operand(rsp, kPointerSize));
@@ -1243,7 +1241,6 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
-
Label miss, probe, global;
__ movq(rax, Operand(rsp, kPointerSize));
@@ -1291,7 +1288,6 @@ void LoadIC::GenerateStringLength(MacroAssembler* masm) {
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
-
Label miss;
__ movq(rax, Operand(rsp, kPointerSize));
@@ -1330,33 +1326,34 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
return true;
}
-void StoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
+
__ pop(rbx);
- __ push(Operand(rsp, 0)); // receiver
+ __ push(rdx); // receiver
__ push(rcx); // name
__ push(rax); // value
__ push(rbx); // return address
// Perform tail call to the entry.
- __ TailCallRuntime(f, 3, 1);
+ __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
}
void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : Map (target of map transition)
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
__ pop(rbx);
- __ push(Operand(rsp, 0)); // receiver
+ __ push(rdx); // receiver
__ push(rcx); // transition map
__ push(rax); // value
__ push(rbx); // return address
@@ -1370,19 +1367,18 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- __ movq(rdx, Operand(rsp, kPointerSize));
Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
NOT_IN_LOOP,
MONOMORPHIC);
StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg);
// Cache miss: Jump to runtime.
- Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
+ GenerateMiss(masm);
}
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index b06b8c8a..96b45e84 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -178,6 +178,11 @@ void MacroAssembler::RecordWrite(Register object,
int offset,
Register value,
Register smi_index) {
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are rsi.
+ ASSERT(!object.is(rsi) && !value.is(rsi) && !smi_index.is(rsi));
+
// First, check if a remembered set write is even needed. The tests below
// catch stores of Smis and stores into young gen (which does not have space
// for the remembered set bits).
@@ -186,6 +191,17 @@ void MacroAssembler::RecordWrite(Register object,
RecordWriteNonSmi(object, offset, value, smi_index);
bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors. This clobbering repeats the
+ // clobbering done inside RecordWriteNonSmi but it's necessary to
+ // avoid having the fast case for smis leave the registers
+ // unchanged.
+ if (FLAG_debug_code) {
+ movq(object, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(value, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(smi_index, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
+ }
}
@@ -194,6 +210,14 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
Register scratch,
Register smi_index) {
Label done;
+
+ if (FLAG_debug_code) {
+ Label okay;
+ JumpIfNotSmi(object, &okay);
+ Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
+ bind(&okay);
+ }
+
// Test that the object address is not in the new space. We cannot
// set remembered set bits in the new space.
movq(scratch, object);
@@ -243,6 +267,14 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
}
bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (FLAG_debug_code) {
+ movq(object, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(scratch, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(smi_index, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
+ }
}
@@ -344,10 +376,14 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
return;
}
- Runtime::FunctionId function_id =
- static_cast<Runtime::FunctionId>(f->stub_id);
- RuntimeStub stub(function_id, num_arguments);
- CallStub(&stub);
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ movq(rax, Immediate(num_arguments));
+ movq(rbx, ExternalReference(f));
+ CEntryStub ces(f->result_size);
+ CallStub(&ces);
}
@@ -1553,7 +1589,7 @@ Condition MacroAssembler::IsObjectStringType(Register heap_object,
Register map,
Register instance_type) {
movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
- movzxbq(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+ movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
ASSERT(kNotStringTag != 0);
testb(instance_type, Immediate(kIsNotStringMask));
return zero;
@@ -2473,6 +2509,51 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
}
+int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
+ // On Windows stack slots are reserved by the caller for all arguments
+ // including the ones passed in registers. On Linux the first 6 arguments are
+ // passed in registers and the caller does not reserve stack slots for them.
+ ASSERT(num_arguments >= 0);
+#ifdef _WIN64
+ static const int kArgumentsWithoutStackSlot = 0;
+#else
+ static const int kArgumentsWithoutStackSlot = 6;
+#endif
+ return num_arguments > kArgumentsWithoutStackSlot ?
+ num_arguments - kArgumentsWithoutStackSlot : 0;
+}
+
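A worked example of the slot calculation, assuming word-sized arguments; the values follow directly from the function above.

    //   Win64: ArgumentStackSlotsForCFunctionCall(5) == 5 (shadow space is
    //          reserved for every argument, even those passed in registers)
    //   SysV:  ArgumentStackSlotsForCFunctionCall(5) == 0 (all 5 fit in the
    //          6 integer argument registers)
    //          ArgumentStackSlotsForCFunctionCall(8) == 2 (8 - 6 spill)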
+void MacroAssembler::PrepareCallCFunction(int num_arguments) {
+ int frame_alignment = OS::ActivationFrameAlignment();
+ ASSERT(frame_alignment != 0);
+ ASSERT(num_arguments >= 0);
+ // Make stack end at alignment and allocate space for arguments and old rsp.
+ movq(kScratchRegister, rsp);
+ ASSERT(IsPowerOf2(frame_alignment));
+ int argument_slots_on_stack =
+ ArgumentStackSlotsForCFunctionCall(num_arguments);
+ subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize));
+ and_(rsp, Immediate(-frame_alignment));
+ movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister);
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_arguments) {
+ movq(rax, function);
+ CallCFunction(rax, num_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(Register function, int num_arguments) {
+ call(function);
+ ASSERT(OS::ActivationFrameAlignment() != 0);
+ ASSERT(num_arguments >= 0);
+ int argument_slots_on_stack =
+ ArgumentStackSlotsForCFunctionCall(num_arguments);
+ movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
+}
+
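A sketch of the intended call-site pairing, assuming a two-argument C function; some_function is a placeholder ExternalReference, not a real reference in the tree. PrepareCallCFunction aligns rsp and reserves the argument slots, and CallCFunction restores rsp from the slot where the old value was saved.

    int num_arguments = 2;
    __ PrepareCallCFunction(num_arguments);
    // ...store the two arguments in rdi and rsi (rcx and rdx on Windows)...
    __ CallCFunction(some_function, num_arguments);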
CodePatcher::CodePatcher(byte* address, int size)
: address_(address), size_(size), masm_(address, size + Assembler::kGap) {
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 8d4a8f2e..2913274d 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -162,7 +162,8 @@ class MacroAssembler: public Assembler {
// Conversions between tagged smi values and non-tagged integer values.
// Tag an integer value. The result must be known to be a valid smi value.
- // Only uses the low 32 bits of the src register.
+ // Only uses the low 32 bits of the src register. Sets the sign and zero
+ // flags based on the value of the resulting integer.
void Integer32ToSmi(Register dst, Register src);
// Tag an integer value if possible, or jump if the integer value cannot be
@@ -644,6 +645,26 @@ class MacroAssembler: public Assembler {
// Jump to a runtime routine.
void JumpToRuntime(const ExternalReference& ext, int result_size);
+ // Before calling a C-function from generated code, align arguments on stack.
+ // After aligning the frame, arguments must be stored in rsp[0], rsp[8],
+ // etc., not pushed. The argument count assumes all arguments are word sized.
+ // The number of slots reserved for arguments depends on the platform. On
+ // Windows, stack slots are reserved for the arguments passed in registers;
+ // on other platforms, stack slots are only reserved for the arguments
+ // actually passed on the stack.
+ void PrepareCallCFunction(int num_arguments);
+
+ // Calls a C function and cleans up the space for arguments allocated
+ // by PrepareCallCFunction. The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function, int num_arguments);
+ void CallCFunction(Register function, int num_arguments);
+
+ // Calculate the number of stack slots to reserve for arguments when calling a
+ // C function.
+ int ArgumentStackSlotsForCFunctionCall(int num_arguments);
// ---------------------------------------------------------------------------
// Utilities
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 6142ce3c..026301b2 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -329,14 +329,14 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
ASSERT(mode_ == UC16);
// Save important/volatile registers before calling C function.
#ifndef _WIN64
- // Callee save on Win64
+ // Caller-saved on Linux and callee-saved on Windows.
__ push(rsi);
__ push(rdi);
#endif
__ push(backtrack_stackpointer());
int num_arguments = 3;
- FrameAlign(num_arguments);
+ __ PrepareCallCFunction(num_arguments);
// Put arguments into parameter registers. Parameters are
// Address byte_offset1 - Address captured substring's start.
@@ -361,7 +361,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
#endif
ExternalReference compare =
ExternalReference::re_case_insensitive_compare_uc16();
- CallCFunction(compare, num_arguments);
+ __ CallCFunction(compare, num_arguments);
// Restore original values before reacting on result value.
__ Move(code_object_pointer(), masm_->CodeObject());
@@ -634,7 +634,6 @@ void RegExpMacroAssemblerX64::Fail() {
Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Finalize code - write the entry point code now we know how many
// registers we need.
-
// Entry code:
__ bind(&entry_label_);
// Start new stack frame.
@@ -671,6 +670,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ push(rbx); // Callee-save
#endif
+
__ push(Immediate(0)); // Make room for "input start - 1" constant.
__ push(Immediate(0)); // Make room for "at start" constant.
@@ -850,7 +850,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Call GrowStack(backtrack_stackpointer())
int num_arguments = 2;
- FrameAlign(num_arguments);
+ __ PrepareCallCFunction(num_arguments);
#ifdef _WIN64
// Microsoft passes parameters in rcx, rdx.
// First argument, backtrack stackpointer, is already in rcx.
@@ -861,7 +861,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ lea(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
#endif
ExternalReference grow_stack = ExternalReference::re_grow_stack();
- CallCFunction(grow_stack, num_arguments);
+ __ CallCFunction(grow_stack, num_arguments);
// If return NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
__ testq(rax, rax);
@@ -1030,7 +1030,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
// This function call preserves no register values. Caller should
// store anything volatile in a C call or overwritten by this function.
int num_arguments = 3;
- FrameAlign(num_arguments);
+ __ PrepareCallCFunction(num_arguments);
#ifdef _WIN64
// Second argument: Code* of self. (Do this before overwriting r8).
__ movq(rdx, code_object_pointer());
@@ -1050,7 +1050,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
#endif
ExternalReference stack_check =
ExternalReference::re_check_stack_guard_state();
- CallCFunction(stack_check, num_arguments);
+ __ CallCFunction(stack_check, num_arguments);
}
@@ -1072,6 +1072,12 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
// If not real stack overflow the stack guard was used to interrupt
// execution for another purpose.
+ // If this is a direct call from JavaScript, retry the RegExp forcing the
+ // call through the runtime system. Currently the direct call cannot handle
+ // a GC.
+ if (frame_entry<int>(re_frame, kDirectCall) == 1) {
+ return RETRY;
+ }
+
// Prepare for possible GC.
HandleScope handles;
Handle<Code> code_handle(re_code);
@@ -1266,45 +1272,6 @@ void RegExpMacroAssemblerX64::CheckStackLimit() {
}
-void RegExpMacroAssemblerX64::FrameAlign(int num_arguments) {
- // TODO(lrn): Since we no longer use the system stack arbitrarily (but we do
- // use it, e.g., for SafeCall), we know the number of elements on the stack
- // since the last frame alignment. We might be able to do this simpler then.
- int frameAlignment = OS::ActivationFrameAlignment();
- ASSERT(frameAlignment != 0);
- // Make stack end at alignment and make room for num_arguments pointers
- // (on Win64 only) and the original value of rsp.
- __ movq(kScratchRegister, rsp);
- ASSERT(IsPowerOf2(frameAlignment));
-#ifdef _WIN64
- // Allocate space for parameters and old rsp.
- __ subq(rsp, Immediate((num_arguments + 1) * kPointerSize));
- __ and_(rsp, Immediate(-frameAlignment));
- __ movq(Operand(rsp, num_arguments * kPointerSize), kScratchRegister);
-#else
- // Allocate space for old rsp.
- __ subq(rsp, Immediate(kPointerSize));
- __ and_(rsp, Immediate(-frameAlignment));
- __ movq(Operand(rsp, 0), kScratchRegister);
-#endif
-}
-
-
-void RegExpMacroAssemblerX64::CallCFunction(ExternalReference function,
- int num_arguments) {
- __ movq(rax, function);
- __ call(rax);
- ASSERT(OS::ActivationFrameAlignment() != 0);
-#ifdef _WIN64
- __ movq(rsp, Operand(rsp, num_arguments * kPointerSize));
-#else
- // All arguments passed in registers.
- ASSERT(num_arguments <= 6);
- __ pop(rsp);
-#endif
-}
-
-
void RegExpMacroAssemblerX64::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
if (mode_ == ASCII) {
diff --git a/src/x64/regexp-macro-assembler-x64.h b/src/x64/regexp-macro-assembler-x64.h
index c17f2b87..6d139635 100644
--- a/src/x64/regexp-macro-assembler-x64.h
+++ b/src/x64/regexp-macro-assembler-x64.h
@@ -247,21 +247,6 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// Increments the stack pointer (rcx) by a word size.
inline void Drop();
- // Before calling a C-function from generated code, align arguments on stack.
- // After aligning the frame, arguments must be stored in esp[0], esp[4],
- // etc., not pushed. The argument count assumes all arguments are word sized.
- // Some compilers/platforms require the stack to be aligned when calling
- // C++ code.
- // Needs a scratch register to do some arithmetic. This register will be
- // trashed.
- inline void FrameAlign(int num_arguments);
-
- // Calls a C function and cleans up the space for arguments allocated
- // by FrameAlign. The called function is not allowed to trigger a garbage
- // collection, since that might move the code and invalidate the return
- // address (unless this is somehow accounted for by the called function).
- inline void CallCFunction(ExternalReference function, int num_arguments);
-
MacroAssembler* masm_;
ZoneList<int> code_relative_fixup_positions_;
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 8d600a5c..693447b5 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -143,6 +143,7 @@ static void PushInterceptorArguments(MacroAssembler* masm,
__ push(holder);
__ push(name);
InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+  // The interceptor is expected to live in old space; its address is
+  // embedded in the generated code below.
+  ASSERT(!Heap::InNewSpace(interceptor));
__ movq(kScratchRegister, Handle<Object>(interceptor),
RelocInfo::EMBEDDED_OBJECT);
__ push(kScratchRegister);
@@ -370,15 +371,47 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
}
-static void LookupPostInterceptor(JSObject* holder,
- String* name,
- LookupResult* lookup) {
- holder->LocalLookupRealNamedProperty(name, lookup);
- if (lookup->IsNotFound()) {
- Object* proto = holder->GetPrototype();
- if (proto != Heap::null_value()) {
- proto->Lookup(name, lookup);
- }
+template <class Compiler>
+static void CompileLoadInterceptor(Compiler* compiler,
+ StubCompiler* stub_compiler,
+ MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss) {
+ ASSERT(holder->HasNamedInterceptor());
+ ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss);
+
+ // Check that the maps haven't changed.
+ Register reg =
+ stub_compiler->CheckPrototypes(object, receiver, holder,
+ scratch1, scratch2, name, miss);
+
+ if (lookup->IsValid() && lookup->IsCacheable()) {
+ compiler->CompileCacheable(masm,
+ stub_compiler,
+ receiver,
+ reg,
+ scratch1,
+ scratch2,
+ holder,
+ lookup,
+ name,
+ miss);
+ } else {
+ compiler->CompileRegular(masm,
+ receiver,
+ reg,
+ scratch2,
+ holder,
+ miss);
}
}
@@ -518,51 +551,6 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
};
-template <class Compiler>
-static void CompileLoadInterceptor(Compiler* compiler,
- StubCompiler* stub_compiler,
- MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check that the maps haven't changed.
- Register reg =
- stub_compiler->CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, name, miss);
-
- if (lookup->IsValid() && lookup->IsCacheable()) {
- compiler->CompileCacheable(masm,
- stub_compiler,
- receiver,
- reg,
- scratch1,
- scratch2,
- holder,
- lookup,
- name,
- miss);
- } else {
- compiler->CompileRegular(masm,
- receiver,
- reg,
- scratch2,
- holder,
- miss);
- }
-}
-
-
class CallInterceptorCompiler BASE_EMBEDDED {
public:
explicit CallInterceptorCompiler(const ParameterCount& arguments)
@@ -631,7 +619,6 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
- ASSERT(function->is_compiled());
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
__ InvokeCode(code, expected, arguments_,
@@ -1354,25 +1341,22 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
- // Get the object from the stack.
- __ movq(rbx, Operand(rsp, 1 * kPointerSize));
-
// Check that the object isn't a smi.
- __ JumpIfSmi(rbx, &miss);
+ __ JumpIfSmi(rdx, &miss);
// Check that the map of the object hasn't changed.
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
Handle<Map>(object->map()));
__ j(not_equal, &miss);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(rbx, rdx, &miss);
+ __ CheckAccessGlobalProxy(rdx, rbx, &miss);
}
// Stub never generated for non-global objects that require access
@@ -1380,7 +1364,7 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
__ pop(rbx); // remove the return address
- __ push(Operand(rsp, 0)); // receiver
+ __ push(rdx); // receiver
__ Push(Handle<AccessorInfo>(callback)); // callback info
__ push(rcx); // name
__ push(rax); // value
@@ -1393,7 +1377,6 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
// Handle store cache miss.
__ bind(&miss);
- __ Move(rcx, Handle<String>(name)); // restore name
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -1409,21 +1392,18 @@ Object* StoreStubCompiler::CompileStoreField(JSObject* object,
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
- // Get the object from the stack.
- __ movq(rbx, Operand(rsp, 1 * kPointerSize));
-
// Generate store field code. Trashes the name register.
GenerateStoreField(masm(),
Builtins::StoreIC_ExtendStorage,
object,
index,
transition,
- rbx, rcx, rdx,
+ rdx, rcx, rbx,
&miss);
// Handle store cache miss.
@@ -1442,25 +1422,22 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
- // Get the object from the stack.
- __ movq(rbx, Operand(rsp, 1 * kPointerSize));
-
// Check that the object isn't a smi.
- __ JumpIfSmi(rbx, &miss);
+ __ JumpIfSmi(rdx, &miss);
// Check that the map of the object hasn't changed.
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
Handle<Map>(receiver->map()));
__ j(not_equal, &miss);
// Perform global security token check if needed.
if (receiver->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(rbx, rdx, &miss);
+ __ CheckAccessGlobalProxy(rdx, rbx, &miss);
}
// Stub never generated for non-global objects that require access
@@ -1468,7 +1445,7 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
__ pop(rbx); // remove the return address
- __ push(Operand(rsp, 0)); // receiver
+ __ push(rdx); // receiver
__ push(rcx); // name
__ push(rax); // value
__ push(rbx); // restore return address
@@ -1480,7 +1457,6 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
// Handle store cache miss.
__ bind(&miss);
- __ Move(rcx, Handle<String>(name)); // restore name
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -1495,14 +1471,13 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
// Check that the map of the global has not changed.
- __ movq(rbx, Operand(rsp, kPointerSize));
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
Handle<Map>(object->map()));
__ j(not_equal, &miss);
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
index 6e84ed16..cb93d5d4 100644
--- a/src/x64/virtual-frame-x64.cc
+++ b/src/x64/virtual-frame-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -1046,31 +1046,45 @@ Result VirtualFrame::CallConstructor(int arg_count) {
Result VirtualFrame::CallStoreIC() {
// Name, value, and receiver are on top of the frame. The IC
- // expects name in rcx, value in rax, and receiver on the stack. It
- // does not drop the receiver.
+  // expects name in rcx, value in rax, and receiver in rdx.
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Result name = Pop();
Result value = Pop();
- PrepareForCall(1, 0); // One stack arg, not callee-dropped.
+ Result receiver = Pop();
+ PrepareForCall(0, 0);
- if (value.is_register() && value.reg().is(rcx)) {
- if (name.is_register() && name.reg().is(rax)) {
+  // Optimized for the case in which name is a constant value.
+ if (name.is_register() && (name.reg().is(rdx) || name.reg().is(rax))) {
+ if (!is_used(rcx)) {
+ name.ToRegister(rcx);
+ } else if (!is_used(rbx)) {
+ name.ToRegister(rbx);
+ } else {
+ ASSERT(!is_used(rdi)); // Only three results are live, so rdi is free.
+ name.ToRegister(rdi);
+ }
+ }
+  // Now name is not in rdx or rax, so we can fix them, then move name to rcx.
+ if (value.is_register() && value.reg().is(rdx)) {
+ if (receiver.is_register() && receiver.reg().is(rax)) {
// Wrong registers.
- __ xchg(rax, rcx);
+ __ xchg(rax, rdx);
} else {
- // Register rax is free for value, which frees rcx for name.
+      // Register rax is free for value, which frees rdx for receiver.
value.ToRegister(rax);
- name.ToRegister(rcx);
+ receiver.ToRegister(rdx);
}
} else {
- // Register rcx is free for name, which guarantees rax is free for
+    // Register rdx is free for receiver, which guarantees rax is free for
// value.
- name.ToRegister(rcx);
+ receiver.ToRegister(rdx);
value.ToRegister(rax);
}
-
+ // Receiver and value are in the right place, so rcx is free for name.
+ name.ToRegister(rcx);
name.Unuse();
value.Unuse();
+ receiver.Unuse();
return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
}
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index a00097b5..f71b3258 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -1151,7 +1151,7 @@ THREADED_TEST(UndefinedIsNotEnumerable) {
v8::Handle<Script> call_recursively_script;
-static const int kTargetRecursionDepth = 300; // near maximum
+static const int kTargetRecursionDepth = 200; // near maximum
static v8::Handle<Value> CallScriptRecursivelyCall(const v8::Arguments& args) {
@@ -3602,6 +3602,37 @@ TEST(ApiUncaughtException) {
v8::V8::RemoveMessageListeners(ApiUncaughtExceptionTestListener);
}
+static const char* script_resource_name = "ExceptionInNativeScript.js";
+static void ExceptionInNativeScriptTestListener(v8::Handle<v8::Message> message,
+ v8::Handle<Value>) {
+ v8::Handle<v8::Value> name_val = message->GetScriptResourceName();
+ CHECK(!name_val.IsEmpty() && name_val->IsString());
+ v8::String::AsciiValue name(message->GetScriptResourceName());
+ CHECK_EQ(script_resource_name, *name);
+ CHECK_EQ(3, message->GetLineNumber());
+ v8::String::AsciiValue source_line(message->GetSourceLine());
+ CHECK_EQ(" new o.foo();", *source_line);
+}
+
+TEST(ExceptionInNativeScript) {
+ v8::HandleScope scope;
+ LocalContext env;
+ v8::V8::AddMessageListener(ExceptionInNativeScriptTestListener);
+
+ Local<v8::FunctionTemplate> fun = v8::FunctionTemplate::New(TroubleCallback);
+ v8::Local<v8::Object> global = env->Global();
+ global->Set(v8_str("trouble"), fun->GetFunction());
+
+ Script::Compile(v8_str("function trouble() {\n"
+ " var o = {};\n"
+ " new o.foo();\n"
+ "};"), v8::String::New(script_resource_name))->Run();
+ Local<Value> trouble = global->Get(v8_str("trouble"));
+ CHECK(trouble->IsFunction());
+ Function::Cast(*trouble)->Call(global, 0, NULL);
+ v8::V8::RemoveMessageListeners(ExceptionInNativeScriptTestListener);
+}
+
TEST(CompilationErrorUsingTryCatchHandler) {
v8::HandleScope scope;
@@ -4093,6 +4124,65 @@ THREADED_TEST(AccessControl) {
}
+static bool GetOwnPropertyNamesNamedBlocker(Local<v8::Object> global,
+ Local<Value> name,
+ v8::AccessType type,
+ Local<Value> data) {
+ return false;
+}
+
+
+static bool GetOwnPropertyNamesIndexedBlocker(Local<v8::Object> global,
+ uint32_t key,
+ v8::AccessType type,
+ Local<Value> data) {
+ return false;
+}
+
+
+THREADED_TEST(AccessControlGetOwnPropertyNames) {
+ v8::HandleScope handle_scope;
+ v8::Handle<v8::ObjectTemplate> obj_template = v8::ObjectTemplate::New();
+
+ obj_template->Set(v8_str("x"), v8::Integer::New(42));
+ obj_template->SetAccessCheckCallbacks(GetOwnPropertyNamesNamedBlocker,
+ GetOwnPropertyNamesIndexedBlocker);
+
+ // Create an environment
+ v8::Persistent<Context> context0 = Context::New(NULL, obj_template);
+ context0->Enter();
+
+ v8::Handle<v8::Object> global0 = context0->Global();
+
+ v8::HandleScope scope1;
+
+ v8::Persistent<Context> context1 = Context::New();
+ context1->Enter();
+
+ v8::Handle<v8::Object> global1 = context1->Global();
+ global1->Set(v8_str("other"), global0);
+ global1->Set(v8_str("object"), obj_template->NewInstance());
+
+ v8::Handle<Value> value;
+
+ // Attempt to get the property names of the other global object and
+ // of an object that requires access checks. Accessing the other
+ // global object should be blocked by access checks on the global
+ // proxy object. Accessing the object that requires access checks
+ // is blocked by the access checks on the object itself.
+ value = CompileRun("Object.getOwnPropertyNames(other).length == 0");
+ CHECK(value->IsTrue());
+
+ value = CompileRun("Object.getOwnPropertyNames(object).length == 0");
+ CHECK(value->IsTrue());
+
+ context1->Exit();
+ context0->Exit();
+ context1.Dispose();
+ context0.Dispose();
+}
+
+
static v8::Handle<Value> ConstTenGetter(Local<String> name,
const AccessorInfo& info) {
return v8_num(10);
@@ -5675,6 +5765,35 @@ THREADED_TEST(InterceptorCallICInvalidatedConstantFunctionViaGlobal) {
}
+// Test the case when the actual function to call sits on the global object.
+THREADED_TEST(InterceptorCallICCachedFromGlobal) {
+ v8::HandleScope scope;
+ v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
+ templ_o->SetNamedPropertyHandler(NoBlockGetterX);
+
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ_o->NewInstance());
+
+ v8::Handle<Value> value = CompileRun(
+ "try {"
+ " o.__proto__ = this;"
+ " for (var i = 0; i < 10; i++) {"
+ " var v = o.parseFloat('239');"
+ " if (v != 239) throw v;"
+ // Now it should be ICed and keep a reference to parseFloat.
+ " }"
+ " var result = 0;"
+ " for (var i = 0; i < 10; i++) {"
+ " result += o.parseFloat('239');"
+ " }"
+ " result"
+ "} catch(e) {"
+ " e"
+ "};");
+ CHECK_EQ(239 * 10, value->Int32Value());
+}
+
+
static int interceptor_call_count = 0;
static v8::Handle<Value> InterceptorICRefErrorGetter(Local<String> name,
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index 583a9c2c..92e18e06 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -2047,6 +2047,33 @@ TEST(DebuggerStatement) {
}
+// Test setting a breakpoint on the debugger statement.
+TEST(DebuggerStatementBreakpoint) {
+ break_point_hit_count = 0;
+ v8::HandleScope scope;
+ DebugLocalContext env;
+ v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount,
+ v8::Undefined());
+ v8::Script::Compile(v8::String::New("function foo(){debugger;}"))->Run();
+ v8::Local<v8::Function> foo =
+ v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("foo")));
+
+  // The debugger statement triggers a breakpoint hit.
+ foo->Call(env->Global(), 0, NULL);
+ CHECK_EQ(1, break_point_hit_count);
+
+ int bp = SetBreakPoint(foo, 0);
+
+  // Setting a breakpoint on the same statement does not duplicate hits.
+ foo->Call(env->Global(), 0, NULL);
+ CHECK_EQ(2, break_point_hit_count);
+
+ ClearBreakPoint(bp);
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
// Test that the evaluation of expressions when a break point is hit generates
// the correct results.
TEST(DebugEvaluate) {
diff --git a/test/message/bugs/.svn/entries b/test/message/bugs/.svn/entries
deleted file mode 100644
index 4090291a..00000000
--- a/test/message/bugs/.svn/entries
+++ /dev/null
@@ -1,28 +0,0 @@
-8
-
-dir
-3722
-https://v8.googlecode.com/svn/branches/bleeding_edge/test/message/bugs
-https://v8.googlecode.com/svn
-
-
-
-2008-10-23T08:40:19.012798Z
-565
-sgjesse@chromium.org
-
-
-svn:special svn:externals svn:needs-lock
-
-
-
-
-
-
-
-
-
-
-
-ce2b1a6d-e550-0410-aec6-3dcde31c8c00
-
diff --git a/test/mjsunit/compiler/thisfunction.js b/test/mjsunit/compiler/thisfunction.js
index 2af846f3..098fc3a4 100644
--- a/test/mjsunit/compiler/thisfunction.js
+++ b/test/mjsunit/compiler/thisfunction.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --always_fast_compiler
+// Flags: --always-full-compiler
// Test reference to this-function.
diff --git a/test/mjsunit/debug-compile-event-newfunction.js b/test/mjsunit/debug-compile-event-newfunction.js
new file mode 100644
index 00000000..fb43a87f
--- /dev/null
+++ b/test/mjsunit/debug-compile-event-newfunction.js
@@ -0,0 +1,68 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var exception = null; // Exception in debug event listener.
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.AfterCompile) {
+ assertEquals(Debug.ScriptCompilationType.Eval,
+ event_data.script().compilationType(),
+ 'Wrong compilationType');
+ var evalFromScript = event_data.script().evalFromScript();
+ assertTrue(!!evalFromScript, ' evalFromScript ');
+ assertFalse(evalFromScript.isUndefined(), 'evalFromScript.isUndefined()');
+ assertTrue(/debug-compile-event-newfunction.js$/.test(
+ evalFromScript.name()),
+ 'Wrong eval from script name.');
+
+ var evalFromLocation = event_data.script().evalFromLocation();
+ assertTrue(!!evalFromLocation, 'evalFromLocation is undefined');
+ assertEquals(63, evalFromLocation.line);
+
+ // Check that the event can be serialized without exceptions.
+ var json = event_data.toJSONProtocol();
+ }
+ } catch (e) {
+ exception = e
+ }
+};
+
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+// Create a function from its body text. It will lead to an eval.
+new Function('arg1', 'return arg1 + 1;');
+
+assertNull(exception, "exception in listener");
+
+Debug.setListener(null);
diff --git a/test/mjsunit/debug-compile-event.js b/test/mjsunit/debug-compile-event.js
index 4804ac77..071183bf 100644
--- a/test/mjsunit/debug-compile-event.js
+++ b/test/mjsunit/debug-compile-event.js
@@ -107,7 +107,7 @@ compileSource('eval("a=2")');
source_count++; // Using eval causes additional compilation event.
compileSource('eval("eval(\'(function(){return a;})\')")');
source_count += 2; // Using eval causes additional compilation event.
-compileSource('JSON.parse("{a:1,b:2}")');
+compileSource('JSON.parse(\'{"a":1,"b":2}\')');
source_count++; // Using JSON.parse causes additional compilation event.
// Make sure that the debug event listener was invoked.
diff --git a/test/mjsunit/for.js b/test/mjsunit/for.js
new file mode 100644
index 00000000..0b715808
--- /dev/null
+++ b/test/mjsunit/for.js
@@ -0,0 +1,32 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test missing condition in for loop.
+for (var i = 0; ; i++) {
+ if (i > 100) break;
+}
+assertEquals(101, i);
diff --git a/test/mjsunit/json.js b/test/mjsunit/json.js
index 35e16340..56562e76 100644
--- a/test/mjsunit/json.js
+++ b/test/mjsunit/json.js
@@ -93,20 +93,46 @@ for (var p in this)
assertFalse(p == "JSON");
// Parse
-
assertEquals({}, JSON.parse("{}"));
+assertEquals({42:37}, JSON.parse('{"42":37}'));
assertEquals(null, JSON.parse("null"));
assertEquals(true, JSON.parse("true"));
assertEquals(false, JSON.parse("false"));
assertEquals("foo", JSON.parse('"foo"'));
assertEquals("f\no", JSON.parse('"f\\no"'));
+assertEquals("\b\f\n\r\t\"\u2028\/\\",
+ JSON.parse('"\\b\\f\\n\\r\\t\\"\\u2028\\/\\\\"'));
+assertEquals([1.1], JSON.parse("[1.1]"));
+assertEquals([1], JSON.parse("[1.0]"));
+
+assertEquals(0, JSON.parse("0"));
+assertEquals(1, JSON.parse("1"));
+assertEquals(0.1, JSON.parse("0.1"));
assertEquals(1.1, JSON.parse("1.1"));
-assertEquals(1, JSON.parse("1.0"));
-assertEquals(0.0000000003, JSON.parse("3e-10"));
+assertEquals(1.1, JSON.parse("1.100000"));
+assertEquals(1.111111, JSON.parse("1.111111"));
+assertEquals(-0, JSON.parse("-0"));
+assertEquals(-1, JSON.parse("-1"));
+assertEquals(-0.1, JSON.parse("-0.1"));
+assertEquals(-1.1, JSON.parse("-1.1"));
+assertEquals(-1.1, JSON.parse("-1.100000"));
+assertEquals(-1.111111, JSON.parse("-1.111111"));
+assertEquals(11, JSON.parse("1.1e1"));
+assertEquals(11, JSON.parse("1.1e+1"));
+assertEquals(0.11, JSON.parse("1.1e-1"));
+assertEquals(11, JSON.parse("1.1E1"));
+assertEquals(11, JSON.parse("1.1E+1"));
+assertEquals(0.11, JSON.parse("1.1E-1"));
+
assertEquals([], JSON.parse("[]"));
assertEquals([1], JSON.parse("[1]"));
assertEquals([1, "2", true, null], JSON.parse('[1, "2", true, null]'));
+assertEquals("", JSON.parse('""'));
+assertEquals(["", "", -0, ""], JSON.parse('[ "" , "" , -0, ""]'));
+assertEquals("", JSON.parse('""'));
+
+
function GetFilter(name) {
function Filter(key, value) {
return (key == name) ? undefined : value;
@@ -145,6 +171,64 @@ TestInvalid('function () { return 0; }');
TestInvalid("[1, 2");
TestInvalid('{"x": 3');
+// JavaScript number literals not valid in JSON.
+TestInvalid('[01]');
+TestInvalid('[.1]');
+TestInvalid('[1.]');
+TestInvalid('[1.e1]');
+TestInvalid('[-.1]');
+TestInvalid('[-1.]');
+
+// Plain invalid number literals.
+TestInvalid('-');
+TestInvalid('--1');
+TestInvalid('-1e');
+TestInvalid('1e--1]');
+TestInvalid('1e+-1');
+TestInvalid('1e-+1');
+TestInvalid('1e++1');
+
+// JavaScript string literals not valid in JSON.
+TestInvalid("'single quote'"); // Valid JavaScript
+TestInvalid('"\\a invalid escape"');
+TestInvalid('"\\v invalid escape"'); // Valid JavaScript
+TestInvalid('"\\\' invalid escape"'); // Valid JavaScript
+TestInvalid('"\\x42 invalid escape"'); // Valid JavaScript
+TestInvalid('"\\u202 invalid escape"');
+TestInvalid('"\\012 invalid escape"');
+TestInvalid('"Unterminated string');
+TestInvalid('"Unterminated string\\"');
+TestInvalid('"Unterminated string\\\\\\"');
+
+// Test bad JSON that would be good JavaScript (ES5).
+
+TestInvalid("{true:42}");
+TestInvalid("{false:42}");
+TestInvalid("{null:42}");
+TestInvalid("{'foo':42}");
+TestInvalid("{42:42}");
+TestInvalid("{0:42}");
+TestInvalid("{-1:42}");
+
+// Test for trailing garbage detection.
+
+TestInvalid('42 px');
+TestInvalid('42 .2');
+TestInvalid('42 2');
+TestInvalid('42 e1');
+TestInvalid('"42" ""');
+TestInvalid('"42" ""');
+TestInvalid('"" ""');
+TestInvalid('true ""');
+TestInvalid('false ""');
+TestInvalid('null ""');
+TestInvalid('null ""');
+TestInvalid('[] ""');
+TestInvalid('[true] ""');
+TestInvalid('{} ""');
+TestInvalid('{"x":true} ""');
+TestInvalid('"Garbage""After string"');
+
// Stringify
assertEquals("true", JSON.stringify(true));
@@ -196,12 +280,8 @@ assertEquals('{"y":6,"x":5}', JSON.stringify({x:5,y:6}, ['y', 'x']));
assertEquals(undefined, JSON.stringify(undefined));
assertEquals(undefined, JSON.stringify(function () { }));
-function checkIllegal(str) {
- assertThrows(function () { JSON.parse(str); }, SyntaxError);
-}
-
-checkIllegal('1); throw "foo"; (1');
+TestInvalid('1); throw "foo"; (1');
var x = 0;
eval("(1); x++; (1)");
-checkIllegal('1); x++; (1');
+TestInvalid('1); x++; (1');
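TestInvalid, used throughout the hunk above, is defined earlier in json.js, outside
the context shown; it replaces the removed checkIllegal helper and presumably has
the same shape. A minimal sketch under that assumption:

    // Sketch of TestInvalid, assuming it mirrors the checkIllegal helper
    // it replaces (the real definition is outside the diff context).
    function TestInvalid(str) {
      assertThrows(function () { JSON.parse(str); }, SyntaxError);
    }

    TestInvalid('[1.]');   // valid JavaScript array literal, invalid JSON
    TestInvalid('42 px');  // trailing garbage after a valid JSON value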
diff --git a/test/mjsunit/math-round.js b/test/mjsunit/math-round.js
new file mode 100644
index 00000000..d80a1036
--- /dev/null
+++ b/test/mjsunit/math-round.js
@@ -0,0 +1,52 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+assertEquals(0, Math.round(0));
+assertEquals(-0, Math.round(-0));
+assertEquals(Infinity, Math.round(Infinity));
+assertEquals(-Infinity, Math.round(-Infinity));
+assertNaN(Math.round(NaN));
+
+assertEquals(1, Math.round(0.5));
+assertEquals(1, Math.round(0.7));
+assertEquals(1, Math.round(1));
+assertEquals(1, Math.round(1.1));
+assertEquals(1, Math.round(1.49999));
+assertEquals(1/-0, 1/Math.round(-0.5)); // Test for -0 result.
+assertEquals(-1, Math.round(-0.5000000000000001));
+assertEquals(-1, Math.round(-0.7));
+assertEquals(-1, Math.round(-1));
+assertEquals(-1, Math.round(-1.1));
+assertEquals(-1, Math.round(-1.49999));
+assertEquals(-1, Math.round(-1.5));
+
+assertEquals(9007199254740990, Math.round(9007199254740990));
+assertEquals(9007199254740991, Math.round(9007199254740991));
+assertEquals(-9007199254740990, Math.round(-9007199254740990));
+assertEquals(-9007199254740991, Math.round(-9007199254740991));
+assertEquals(Number.MAX_VALUE, Math.round(Number.MAX_VALUE));
+assertEquals(-Number.MAX_VALUE, Math.round(-Number.MAX_VALUE));
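The 1/Math.round(-0.5) assertion above relies on the fact that -0 and 0 compare
equal under == and ===, so dividing into 1 is the usual way to tell them apart:

    // Math.round ties go toward +Infinity, and inputs in [-0.5, 0) yield -0.
    var r = Math.round(-0.5);
    assertTrue(r === 0);             // === cannot distinguish -0 from 0...
    assertEquals(-Infinity, 1 / r);  // ...but 1/-0 is -Infinity, 1/0 is Infinity.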
diff --git a/test/mjsunit/mirror-script.js b/test/mjsunit/mirror-script.js
index 3208f16c..8631028e 100644
--- a/test/mjsunit/mirror-script.js
+++ b/test/mjsunit/mirror-script.js
@@ -87,8 +87,8 @@ testScriptMirror(function(){}, 'mirror-script.js', 100, 2, 0);
testScriptMirror(Math.sin, 'native math.js', -1, 0, 0);
testScriptMirror(eval('(function(){})'), null, 1, 2, 1, '(function(){})', 87);
testScriptMirror(eval('(function(){\n })'), null, 2, 2, 1, '(function(){\n })', 88);
-testScriptMirror(%CompileString("({a:1,b:2})", true), null, 1, 2, 2, '({a:1,b:2})');
-testScriptMirror(%CompileString("({a:1,\n b:2})", true), null, 2, 2, 2, '({a:1,\n b:2})');
+testScriptMirror(%CompileString('{"a":1,"b":2}', true), null, 1, 2, 2, '{"a":1,"b":2}');
+testScriptMirror(%CompileString('{"a":1,\n "b":2}', true), null, 2, 2, 2, '{"a":1,\n "b":2}');
// Test taking slices of source.
var mirror = debug.MakeMirror(eval('(function(){\n 1;\n})')).script();
diff --git a/test/mjsunit/mjsunit.js b/test/mjsunit/mjsunit.js
index 8ced0119..07c4e7ef 100644
--- a/test/mjsunit/mjsunit.js
+++ b/test/mjsunit/mjsunit.js
@@ -75,6 +75,7 @@ function deepEquals(a, b) {
if (typeof a == "number" && typeof b == "number" && isNaN(a) && isNaN(b)) {
return true;
}
+ if (a == null || b == null) return false;
if (a.constructor === RegExp || b.constructor === RegExp) {
return (a.constructor === b.constructor) && (a.toString === b.toString);
}
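The added null check matters because a == null is true for both null and undefined,
so deepEquals now reports inequality instead of throwing a TypeError on the
a.constructor access just above. Illustrative calls (deepEquals is internal to
mjsunit.js):

    // Previously: null.constructor threw a TypeError.
    // With the guard: the comparison simply returns false.
    assertFalse(deepEquals(null, {}));
    assertFalse(deepEquals({}, undefined));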
diff --git a/test/mjsunit/regress/regress-crbug-3867.js b/test/mjsunit/regress/regress-crbug-3867.js
new file mode 100644
index 00000000..03001b6c
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-3867.js
@@ -0,0 +1,77 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function props(x) {
+ var result = [];
+ for (var p in x) result.push(p);
+ return result;
+}
+
+function A() {
+ this.a1 = 1234;
+ this.a2 = "D";
+ this.a3 = false;
+}
+
+function B() {
+ this.b3 = false;
+ this.b2 = "D";
+ this.b1 = 1234;
+}
+
+function C() {
+ this.c3 = false;
+ this.c1 = 1234;
+ this.c2 = "D";
+}
+
+assertArrayEquals(["a1", "a2", "a3"], props(new A()));
+assertArrayEquals(["b3", "b2", "b1"], props(new B()));
+assertArrayEquals(["c3", "c1", "c2"], props(new C()));
+assertArrayEquals(["s1", "s2", "s3"], props({s1: 0, s2: 0, s3: 0}));
+assertArrayEquals(["s3", "s2", "s1"], props({s3: 0, s2: 0, s1: 0}));
+assertArrayEquals(["s3", "s1", "s2"], props({s3: 0, s1: 0, s2: 0}));
+
+var a = new A()
+a.a0 = 0;
+a.a4 = 0;
+assertArrayEquals(["a1", "a2", "a3", "a0", "a4"], props(a));
+
+var b = new B()
+b.b4 = 0;
+b.b0 = 0;
+assertArrayEquals(["b3", "b2", "b1", "b4", "b0"], props(b));
+
+var o1 = {s1: 0, s2: 0, s3: 0}
+o1.s0 = 0;
+o1.s4 = 0;
+assertArrayEquals(["s1", "s2", "s3", "s0", "s4"], props(o1));
+
+var o2 = {s3: 0, s2: 0, s1: 0}
+o2.s4 = 0;
+o2.s0 = 0;
+assertArrayEquals(["s3", "s2", "s1", "s4", "s0"], props(o2));
diff --git a/test/mjsunit/tools/csvparser.js b/test/mjsunit/tools/csvparser.js
index db3a2eba..6ac49080 100644
--- a/test/mjsunit/tools/csvparser.js
+++ b/test/mjsunit/tools/csvparser.js
@@ -77,3 +77,7 @@ assertEquals(
assertEquals(
['code-creation','RegExp','0xf6c21c00','826','NccyrJroXvg\\/([^,]*)'],
parser.parseLine('code-creation,RegExp,0xf6c21c00,826,"NccyrJroXvg\\/([^,]*)"'));
+
+assertEquals(
+ ['code-creation','Function','0x42f0a0','163',''],
+ parser.parseLine('code-creation,Function,0x42f0a0,163,""'));
diff --git a/tools/csvparser.js b/tools/csvparser.js
index 9e58deae..6e101e20 100644
--- a/tools/csvparser.js
+++ b/tools/csvparser.js
@@ -39,17 +39,17 @@ devtools.profiler.CsvParser = function() {
/**
- * A regex for matching a trailing quote.
+ * A regex for matching a CSV field.
* @private
*/
-devtools.profiler.CsvParser.TRAILING_QUOTE_RE_ = /\"$/;
+devtools.profiler.CsvParser.CSV_FIELD_RE_ = /^"((?:[^"]|"")*)"|([^,]*)/;
/**
* A regex for matching a double quote.
* @private
*/
-devtools.profiler.CsvParser.DOUBLE_QUOTE_RE_ = /\"\"/g;
+devtools.profiler.CsvParser.DOUBLE_QUOTE_RE_ = /""/g;
/**
@@ -58,41 +58,26 @@ devtools.profiler.CsvParser.DOUBLE_QUOTE_RE_ = /\"\"/g;
* @param {string} line Input line.
*/
devtools.profiler.CsvParser.prototype.parseLine = function(line) {
- var insideQuotes = false;
+ var fieldRe = devtools.profiler.CsvParser.CSV_FIELD_RE_;
+ var doubleQuoteRe = devtools.profiler.CsvParser.DOUBLE_QUOTE_RE_;
+ var pos = 0;
+ var endPos = line.length;
var fields = [];
- var prevPos = 0;
- for (var i = 0, n = line.length; i < n; ++i) {
- switch (line.charAt(i)) {
- case ',':
- if (!insideQuotes) {
- fields.push(line.substring(prevPos, i));
- prevPos = i + 1;
- }
- break;
- case '"':
- if (!insideQuotes) {
- insideQuotes = true;
- // Skip the leading quote.
- prevPos++;
- } else {
- if (i + 1 < n && line.charAt(i + 1) != '"') {
- insideQuotes = false;
- } else {
- i++;
- }
- }
- break;
- }
- }
- if (n > 0) {
- fields.push(line.substring(prevPos));
- }
-
- for (i = 0; i < fields.length; ++i) {
- // Eliminate trailing quotes.
- fields[i] = fields[i].replace(devtools.profiler.CsvParser.TRAILING_QUOTE_RE_, '');
- // Convert quoted quotes into single ones.
- fields[i] = fields[i].replace(devtools.profiler.CsvParser.DOUBLE_QUOTE_RE_, '"');
+ if (endPos > 0) {
+ do {
+ var fieldMatch = fieldRe.exec(line.substr(pos));
+ if (typeof fieldMatch[1] === "string") {
+ var field = fieldMatch[1];
+ pos += field.length + 3; // Skip comma and quotes.
+ fields.push(field.replace(doubleQuoteRe, '"'));
+ } else {
+      // The second field pattern matches anything, so in the worst
+      // case the match is an empty string.
+ var field = fieldMatch[2];
+ pos += field.length + 1; // Skip comma.
+ fields.push(field);
+ }
+ } while (pos <= endPos);
}
return fields;
};
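The rewritten parseLine is driven by the alternation in CSV_FIELD_RE_: the anchored
first branch consumes one quoted field, un-escaping doubled quotes, and the
catch-all second branch takes everything up to the next comma. A self-contained
sketch of the same loop, for reference only:

    // Mirrors the patched parseLine above; not itself part of the patch.
    var FIELD_RE = /^"((?:[^"]|"")*)"|([^,]*)/;
    function parseFields(line) {
      var fields = [];
      var pos = 0;
      while (line.length > 0 && pos <= line.length) {
        var m = FIELD_RE.exec(line.substr(pos));
        if (typeof m[1] === 'string') {    // quoted field matched
          fields.push(m[1].replace(/""/g, '"'));
          pos += m[1].length + 3;          // skip both quotes and the comma
        } else {                           // unquoted (possibly empty) field
          fields.push(m[2]);
          pos += m[2].length + 1;          // skip the comma
        }
      }
      return fields;
    }

    // parseFields('code-creation,Function,0x42f0a0,163,""') returns
    // ['code-creation', 'Function', '0x42f0a0', '163', ''].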
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index acf51001..f2d1b98e 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -252,6 +252,8 @@
'../../src/counters.cc',
'../../src/counters.h',
'../../src/cpu.h',
+ '../../src/data-flow.cc',
+ '../../src/data-flow.h',
'../../src/dateparser.cc',
'../../src/dateparser.h',
'../../src/dateparser-inl.h',
@@ -410,6 +412,7 @@
'../../src/arm/cpu-arm.cc',
'../../src/arm/debug-arm.cc',
'../../src/arm/disasm-arm.cc',
+ '../../src/arm/fast-codegen-arm.cc',
'../../src/arm/frames-arm.cc',
'../../src/arm/frames-arm.h',
'../../src/arm/full-codegen-arm.cc',
@@ -448,6 +451,7 @@
'../../src/ia32/cpu-ia32.cc',
'../../src/ia32/debug-ia32.cc',
'../../src/ia32/disasm-ia32.cc',
+ '../../src/ia32/fast-codegen-ia32.cc',
'../../src/ia32/frames-ia32.cc',
'../../src/ia32/frames-ia32.h',
'../../src/ia32/full-codegen-ia32.cc',
@@ -477,6 +481,7 @@
'../../src/x64/cpu-x64.cc',
'../../src/x64/debug-x64.cc',
'../../src/x64/disasm-x64.cc',
+ '../../src/x64/fast-codegen-x64.cc',
'../../src/x64/frames-x64.cc',
'../../src/x64/frames-x64.h',
'../../src/x64/full-codegen-x64.cc',
diff --git a/tools/visual_studio/v8_base.vcproj b/tools/visual_studio/v8_base.vcproj
index f95f2279..e58e8ff3 100644
--- a/tools/visual_studio/v8_base.vcproj
+++ b/tools/visual_studio/v8_base.vcproj
@@ -337,6 +337,14 @@
>
</File>
<File
+ RelativePath="..\..\src\data-flow.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\data-flow.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\dateparser.cc"
>
</File>
@@ -388,6 +396,18 @@
RelativePath="..\..\src\factory.h"
>
</File>
+ <File
+ RelativePath="..\..\src\ia32\fast-codegen-ia32.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\fast-codegen.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\fast-codegen.h"
+ >
+ </File>
<File
RelativePath="..\..\src\flags.cc"
>
diff --git a/tools/visual_studio/v8_base_arm.vcproj b/tools/visual_studio/v8_base_arm.vcproj
index a8f17228..4b37b538 100644
--- a/tools/visual_studio/v8_base_arm.vcproj
+++ b/tools/visual_studio/v8_base_arm.vcproj
@@ -345,6 +345,14 @@
>
</File>
<File
+ RelativePath="..\..\src\data-flow.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\data-flow.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\dateparser.cc"
>
</File>
@@ -396,6 +404,18 @@
RelativePath="..\..\src\factory.h"
>
</File>
+ <File
+ RelativePath="..\..\src\arm\fast-codegen-arm.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\fast-codegen.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\fast-codegen.h"
+ >
+ </File>
<File
RelativePath="..\..\src\flags.cc"
>
diff --git a/tools/visual_studio/v8_base_x64.vcproj b/tools/visual_studio/v8_base_x64.vcproj
index 2c22280a..b6d5c7d8 100644
--- a/tools/visual_studio/v8_base_x64.vcproj
+++ b/tools/visual_studio/v8_base_x64.vcproj
@@ -337,6 +337,14 @@
>
</File>
<File
+ RelativePath="..\..\src\data-flow.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\data-flow.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\dateparser.cc"
>
</File>
@@ -389,6 +397,18 @@
>
</File>
<File
+ RelativePath="..\..\src\x64\fast-codegen-x64.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\fast-codegen.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\fast-codegen.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\flags.cc"
>
</File>
@@ -425,15 +445,15 @@
>
</File>
<File
- RelativePath="..\..\src\full-codegen.cc"
+ RelativePath="..\..\src\x64\full-codegen-x64.cc"
>
</File>
<File
- RelativePath="..\..\src\full-codegen.h"
+ RelativePath="..\..\src\full-codegen.cc"
>
</File>
<File
- RelativePath="..\..\src\x64\full-codegen-x64.cc"
+ RelativePath="..\..\src\full-codegen.h"
>
</File>
<File