// Copyright 2012 the V8 project authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef V8_OBJECTS_H_ #define V8_OBJECTS_H_ #include "src/allocation.h" #include "src/assert-scope.h" #include "src/bailout-reason.h" #include "src/base/bits.h" #include "src/builtins.h" #include "src/checks.h" #include "src/elements-kind.h" #include "src/field-index.h" #include "src/flags.h" #include "src/list.h" #include "src/property-details.h" #include "src/smart-pointers.h" #include "src/unicode-inl.h" #include "src/zone.h" #if V8_TARGET_ARCH_ARM #include "src/arm/constants-arm.h" // NOLINT #elif V8_TARGET_ARCH_ARM64 #include "src/arm64/constants-arm64.h" // NOLINT #elif V8_TARGET_ARCH_MIPS #include "src/mips/constants-mips.h" // NOLINT #elif V8_TARGET_ARCH_MIPS64 #include "src/mips64/constants-mips64.h" // NOLINT #endif // // Most object types in the V8 JavaScript are described in this file. // // Inheritance hierarchy: // - Object // - Smi (immediate small integer) // - HeapObject (superclass for everything allocated in the heap) // - JSReceiver (suitable for property access) // - JSObject // - JSArray // - JSArrayBuffer // - JSArrayBufferView // - JSTypedArray // - JSDataView // - JSCollection // - JSSet // - JSMap // - JSSetIterator // - JSMapIterator // - JSWeakCollection // - JSWeakMap // - JSWeakSet // - JSRegExp // - JSFunction // - JSGeneratorObject // - JSModule // - GlobalObject // - JSGlobalObject // - JSBuiltinsObject // - JSGlobalProxy // - JSValue // - JSDate // - JSMessageObject // - JSProxy // - JSFunctionProxy // - FixedArrayBase // - ByteArray // - FixedArray // - DescriptorArray // - HashTable // - Dictionary // - StringTable // - CompilationCacheTable // - CodeCacheHashTable // - MapCache // - OrderedHashTable // - OrderedHashSet // - OrderedHashMap // - Context // - TypeFeedbackVector // - JSFunctionResultCache // - ScopeInfo // - TransitionArray // - FixedDoubleArray // - ExternalArray // - ExternalUint8ClampedArray // - ExternalInt8Array // - ExternalUint8Array // - ExternalInt16Array // - ExternalUint16Array // - ExternalInt32Array // - ExternalUint32Array // - ExternalFloat32Array // - Name // - String // - SeqString // - SeqOneByteString // - SeqTwoByteString // - SlicedString // - ConsString // - ExternalString // - ExternalOneByteString // - ExternalTwoByteString // - InternalizedString // - SeqInternalizedString // - SeqOneByteInternalizedString // - SeqTwoByteInternalizedString // - ConsInternalizedString // - ExternalInternalizedString // - ExternalOneByteInternalizedString // - ExternalTwoByteInternalizedString // - Symbol // - HeapNumber // - Cell // - PropertyCell // - Code // - Map // - Oddball // - Foreign // - SharedFunctionInfo // - Struct // - Box // - DeclaredAccessorDescriptor // - AccessorInfo // - DeclaredAccessorInfo // - ExecutableAccessorInfo // - AccessorPair // - AccessCheckInfo // - InterceptorInfo // - CallHandlerInfo // - TemplateInfo // - FunctionTemplateInfo // - ObjectTemplateInfo // - Script // - SignatureInfo // - TypeSwitchInfo // - DebugInfo // - BreakPointInfo // - CodeCache // // Formats of Object*: // Smi: [31 bit signed int] 0 // HeapObject: [32 bit direct pointer] (4 byte aligned) | 01 namespace v8 { namespace internal { class OStream; enum KeyedAccessStoreMode { STANDARD_STORE, STORE_TRANSITION_SMI_TO_OBJECT, STORE_TRANSITION_SMI_TO_DOUBLE, STORE_TRANSITION_DOUBLE_TO_OBJECT, STORE_TRANSITION_HOLEY_SMI_TO_OBJECT, STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE, 
  STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT,
  STORE_AND_GROW_NO_TRANSITION,
  STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT,
  STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE,
  STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT,
  STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT,
  STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE,
  STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT,
  STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS,
  STORE_NO_TRANSITION_HANDLE_COW
};

enum ContextualMode {
  NOT_CONTEXTUAL,
  CONTEXTUAL
};

enum MutableMode {
  MUTABLE,
  IMMUTABLE
};

static const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION - STANDARD_STORE;
STATIC_ASSERT(STANDARD_STORE == 0);
STATIC_ASSERT(kGrowICDelta ==
              STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT -
              STORE_TRANSITION_SMI_TO_OBJECT);
STATIC_ASSERT(kGrowICDelta ==
              STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE -
              STORE_TRANSITION_SMI_TO_DOUBLE);
STATIC_ASSERT(kGrowICDelta ==
              STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT -
              STORE_TRANSITION_DOUBLE_TO_OBJECT);

static inline KeyedAccessStoreMode GetGrowStoreMode(
    KeyedAccessStoreMode store_mode) {
  if (store_mode < STORE_AND_GROW_NO_TRANSITION) {
    store_mode = static_cast<KeyedAccessStoreMode>(
        static_cast<int>(store_mode) + kGrowICDelta);
  }
  return store_mode;
}

static inline bool IsTransitionStoreMode(KeyedAccessStoreMode store_mode) {
  return store_mode > STANDARD_STORE &&
         store_mode <= STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT &&
         store_mode != STORE_AND_GROW_NO_TRANSITION;
}

static inline KeyedAccessStoreMode GetNonTransitioningStoreMode(
    KeyedAccessStoreMode store_mode) {
  if (store_mode >= STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
    return store_mode;
  }
  if (store_mode >= STORE_AND_GROW_NO_TRANSITION) {
    return STORE_AND_GROW_NO_TRANSITION;
  }
  return STANDARD_STORE;
}

static inline bool IsGrowStoreMode(KeyedAccessStoreMode store_mode) {
  return store_mode >= STORE_AND_GROW_NO_TRANSITION &&
         store_mode <= STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
}

// Setter that skips the write barrier if mode is SKIP_WRITE_BARRIER.
enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };

// Indicates whether a value can be loaded as a constant.
enum StoreMode {
  ALLOW_AS_CONSTANT,
  FORCE_FIELD
};

// PropertyNormalizationMode is used to specify whether to keep
// inobject properties when normalizing properties of a JSObject.
enum PropertyNormalizationMode {
  CLEAR_INOBJECT_PROPERTIES,
  KEEP_INOBJECT_PROPERTIES
};

// Indicates how aggressively the prototype should be optimized. FAST_PROTOTYPE
// will give the fastest result by tailoring the map to the prototype, but that
// will cause polymorphism with other objects. REGULAR_PROTOTYPE is to be used
// (at least for now) when dynamically modifying the prototype chain of an
// object using __proto__ or Object.setPrototypeOf.
enum PrototypeOptimizationMode { REGULAR_PROTOTYPE, FAST_PROTOTYPE };

// Indicates whether transitions can be added to a source map or not.
enum TransitionFlag {
  INSERT_TRANSITION,
  OMIT_TRANSITION
};

enum DebugExtraICState {
  DEBUG_BREAK,
  DEBUG_PREPARE_STEP_IN
};

// Indicates whether the transition is simple: the target map of the transition
// either extends the current map with a new property, or it modifies the
// property that was added last to the current map.
enum SimpleTransitionFlag {
  SIMPLE_TRANSITION,
  FULL_TRANSITION
};

// Indicates whether we are only interested in the descriptors of a particular
// map, or in all descriptors in the descriptor array.
enum DescriptorFlag { ALL_DESCRIPTORS, OWN_DESCRIPTORS }; // The GC maintains a bit of information, the MarkingParity, which toggles // from odd to even and back every time marking is completed. Incremental // marking can visit an object twice during a marking phase, so algorithms that // that piggy-back on marking can use the parity to ensure that they only // perform an operation on an object once per marking phase: they record the // MarkingParity when they visit an object, and only re-visit the object when it // is marked again and the MarkingParity changes. enum MarkingParity { NO_MARKING_PARITY, ODD_MARKING_PARITY, EVEN_MARKING_PARITY }; // ICs store extra state in a Code object. The default extra state is // kNoExtraICState. typedef int ExtraICState; static const ExtraICState kNoExtraICState = 0; // Instance size sentinel for objects of variable size. const int kVariableSizeSentinel = 0; // We may store the unsigned bit field as signed Smi value and do not // use the sign bit. const int kStubMajorKeyBits = 7; const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1; // All Maps have a field instance_type containing a InstanceType. // It describes the type of the instances. // // As an example, a JavaScript object is a heap object and its map // instance_type is JS_OBJECT_TYPE. // // The names of the string instance types are intended to systematically // mirror their encoding in the instance_type field of the map. The default // encoding is considered TWO_BYTE. It is not mentioned in the name. ONE_BYTE // encoding is mentioned explicitly in the name. Likewise, the default // representation is considered sequential. It is not mentioned in the // name. The other representations (e.g. CONS, EXTERNAL) are explicitly // mentioned. Finally, the string is either a STRING_TYPE (if it is a normal // string) or a INTERNALIZED_STRING_TYPE (if it is a internalized string). // // NOTE: The following things are some that depend on the string types having // instance_types that are less than those of all other types: // HeapObject::Size, HeapObject::IterateBody, the typeof operator, and // Object::IsString. // // NOTE: Everything following JS_VALUE_TYPE is considered a // JSObject for GC purposes. The first four entries here have typeof // 'object', whereas JS_FUNCTION_TYPE has typeof 'function'. #define INSTANCE_TYPE_LIST(V) \ V(STRING_TYPE) \ V(ONE_BYTE_STRING_TYPE) \ V(CONS_STRING_TYPE) \ V(CONS_ONE_BYTE_STRING_TYPE) \ V(SLICED_STRING_TYPE) \ V(SLICED_ONE_BYTE_STRING_TYPE) \ V(EXTERNAL_STRING_TYPE) \ V(EXTERNAL_ONE_BYTE_STRING_TYPE) \ V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \ V(SHORT_EXTERNAL_STRING_TYPE) \ V(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE) \ V(SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \ \ V(INTERNALIZED_STRING_TYPE) \ V(ONE_BYTE_INTERNALIZED_STRING_TYPE) \ V(EXTERNAL_INTERNALIZED_STRING_TYPE) \ V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \ V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \ V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE) \ V(SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \ V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \ \ V(SYMBOL_TYPE) \ \ V(MAP_TYPE) \ V(CODE_TYPE) \ V(ODDBALL_TYPE) \ V(CELL_TYPE) \ V(PROPERTY_CELL_TYPE) \ \ V(HEAP_NUMBER_TYPE) \ V(MUTABLE_HEAP_NUMBER_TYPE) \ V(FOREIGN_TYPE) \ V(BYTE_ARRAY_TYPE) \ V(FREE_SPACE_TYPE) \ /* Note: the order of these external array */ \ /* types is relied upon in */ \ /* Object::IsExternalArray(). 
*/ \ V(EXTERNAL_INT8_ARRAY_TYPE) \ V(EXTERNAL_UINT8_ARRAY_TYPE) \ V(EXTERNAL_INT16_ARRAY_TYPE) \ V(EXTERNAL_UINT16_ARRAY_TYPE) \ V(EXTERNAL_INT32_ARRAY_TYPE) \ V(EXTERNAL_UINT32_ARRAY_TYPE) \ V(EXTERNAL_FLOAT32_ARRAY_TYPE) \ V(EXTERNAL_FLOAT64_ARRAY_TYPE) \ V(EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE) \ \ V(FIXED_INT8_ARRAY_TYPE) \ V(FIXED_UINT8_ARRAY_TYPE) \ V(FIXED_INT16_ARRAY_TYPE) \ V(FIXED_UINT16_ARRAY_TYPE) \ V(FIXED_INT32_ARRAY_TYPE) \ V(FIXED_UINT32_ARRAY_TYPE) \ V(FIXED_FLOAT32_ARRAY_TYPE) \ V(FIXED_FLOAT64_ARRAY_TYPE) \ V(FIXED_UINT8_CLAMPED_ARRAY_TYPE) \ \ V(FILLER_TYPE) \ \ V(DECLARED_ACCESSOR_DESCRIPTOR_TYPE) \ V(DECLARED_ACCESSOR_INFO_TYPE) \ V(EXECUTABLE_ACCESSOR_INFO_TYPE) \ V(ACCESSOR_PAIR_TYPE) \ V(ACCESS_CHECK_INFO_TYPE) \ V(INTERCEPTOR_INFO_TYPE) \ V(CALL_HANDLER_INFO_TYPE) \ V(FUNCTION_TEMPLATE_INFO_TYPE) \ V(OBJECT_TEMPLATE_INFO_TYPE) \ V(SIGNATURE_INFO_TYPE) \ V(TYPE_SWITCH_INFO_TYPE) \ V(ALLOCATION_MEMENTO_TYPE) \ V(ALLOCATION_SITE_TYPE) \ V(SCRIPT_TYPE) \ V(CODE_CACHE_TYPE) \ V(POLYMORPHIC_CODE_CACHE_TYPE) \ V(TYPE_FEEDBACK_INFO_TYPE) \ V(ALIASED_ARGUMENTS_ENTRY_TYPE) \ V(BOX_TYPE) \ \ V(FIXED_ARRAY_TYPE) \ V(FIXED_DOUBLE_ARRAY_TYPE) \ V(CONSTANT_POOL_ARRAY_TYPE) \ V(SHARED_FUNCTION_INFO_TYPE) \ \ V(JS_MESSAGE_OBJECT_TYPE) \ \ V(JS_VALUE_TYPE) \ V(JS_DATE_TYPE) \ V(JS_OBJECT_TYPE) \ V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \ V(JS_GENERATOR_OBJECT_TYPE) \ V(JS_MODULE_TYPE) \ V(JS_GLOBAL_OBJECT_TYPE) \ V(JS_BUILTINS_OBJECT_TYPE) \ V(JS_GLOBAL_PROXY_TYPE) \ V(JS_ARRAY_TYPE) \ V(JS_ARRAY_BUFFER_TYPE) \ V(JS_TYPED_ARRAY_TYPE) \ V(JS_DATA_VIEW_TYPE) \ V(JS_PROXY_TYPE) \ V(JS_SET_TYPE) \ V(JS_MAP_TYPE) \ V(JS_SET_ITERATOR_TYPE) \ V(JS_MAP_ITERATOR_TYPE) \ V(JS_WEAK_MAP_TYPE) \ V(JS_WEAK_SET_TYPE) \ V(JS_REGEXP_TYPE) \ \ V(JS_FUNCTION_TYPE) \ V(JS_FUNCTION_PROXY_TYPE) \ V(DEBUG_INFO_TYPE) \ V(BREAK_POINT_INFO_TYPE) // Since string types are not consecutive, this macro is used to // iterate over them. 
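// A sketch of how such a list macro is typically consumed (illustration only,
// not part of this header; the CASE macro below is hypothetical): the caller
// defines the V parameter and expands the list, e.g.
//
//   #define CASE(type, size, name, camel_name) case type:
//     STRING_TYPE_LIST(CASE)
//   #undef CASE
//
// which emits one case label per string instance type.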
#define STRING_TYPE_LIST(V) \ V(STRING_TYPE, kVariableSizeSentinel, string, String) \ V(ONE_BYTE_STRING_TYPE, kVariableSizeSentinel, one_byte_string, \ OneByteString) \ V(CONS_STRING_TYPE, ConsString::kSize, cons_string, ConsString) \ V(CONS_ONE_BYTE_STRING_TYPE, ConsString::kSize, cons_one_byte_string, \ ConsOneByteString) \ V(SLICED_STRING_TYPE, SlicedString::kSize, sliced_string, SlicedString) \ V(SLICED_ONE_BYTE_STRING_TYPE, SlicedString::kSize, sliced_one_byte_string, \ SlicedOneByteString) \ V(EXTERNAL_STRING_TYPE, ExternalTwoByteString::kSize, external_string, \ ExternalString) \ V(EXTERNAL_ONE_BYTE_STRING_TYPE, ExternalOneByteString::kSize, \ external_one_byte_string, ExternalOneByteString) \ V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE, ExternalTwoByteString::kSize, \ external_string_with_one_byte_data, ExternalStringWithOneByteData) \ V(SHORT_EXTERNAL_STRING_TYPE, ExternalTwoByteString::kShortSize, \ short_external_string, ShortExternalString) \ V(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE, ExternalOneByteString::kShortSize, \ short_external_one_byte_string, ShortExternalOneByteString) \ V(SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE, \ ExternalTwoByteString::kShortSize, \ short_external_string_with_one_byte_data, \ ShortExternalStringWithOneByteData) \ \ V(INTERNALIZED_STRING_TYPE, kVariableSizeSentinel, internalized_string, \ InternalizedString) \ V(ONE_BYTE_INTERNALIZED_STRING_TYPE, kVariableSizeSentinel, \ one_byte_internalized_string, OneByteInternalizedString) \ V(EXTERNAL_INTERNALIZED_STRING_TYPE, ExternalTwoByteString::kSize, \ external_internalized_string, ExternalInternalizedString) \ V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE, ExternalOneByteString::kSize, \ external_one_byte_internalized_string, ExternalOneByteInternalizedString) \ V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE, \ ExternalTwoByteString::kSize, \ external_internalized_string_with_one_byte_data, \ ExternalInternalizedStringWithOneByteData) \ V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE, \ ExternalTwoByteString::kShortSize, short_external_internalized_string, \ ShortExternalInternalizedString) \ V(SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE, \ ExternalOneByteString::kShortSize, \ short_external_one_byte_internalized_string, \ ShortExternalOneByteInternalizedString) \ V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE, \ ExternalTwoByteString::kShortSize, \ short_external_internalized_string_with_one_byte_data, \ ShortExternalInternalizedStringWithOneByteData) // A struct is a simple object a set of object-valued fields. Including an // object type in this causes the compiler to generate most of the boilerplate // code for the class including allocation and garbage collection routines, // casts and predicates. All you need to define is the class, methods and // object verification routines. Easy, no? // // Note that for subtle reasons related to the ordering or numerical values of // type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST // manually. 
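// For example (illustration only): the V(BOX, Box, box) entry below is what
// expands, via DECLARE_STRUCT_PREDICATE further down in this file, into the
// Object::IsBox() type predicate; the corresponding cast helpers are presumed
// to come from DECLARE_CAST in the Box class itself.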
#define STRUCT_LIST(V) \ V(BOX, Box, box) \ V(DECLARED_ACCESSOR_DESCRIPTOR, \ DeclaredAccessorDescriptor, \ declared_accessor_descriptor) \ V(DECLARED_ACCESSOR_INFO, DeclaredAccessorInfo, declared_accessor_info) \ V(EXECUTABLE_ACCESSOR_INFO, ExecutableAccessorInfo, executable_accessor_info)\ V(ACCESSOR_PAIR, AccessorPair, accessor_pair) \ V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info) \ V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info) \ V(CALL_HANDLER_INFO, CallHandlerInfo, call_handler_info) \ V(FUNCTION_TEMPLATE_INFO, FunctionTemplateInfo, function_template_info) \ V(OBJECT_TEMPLATE_INFO, ObjectTemplateInfo, object_template_info) \ V(SIGNATURE_INFO, SignatureInfo, signature_info) \ V(TYPE_SWITCH_INFO, TypeSwitchInfo, type_switch_info) \ V(SCRIPT, Script, script) \ V(ALLOCATION_SITE, AllocationSite, allocation_site) \ V(ALLOCATION_MEMENTO, AllocationMemento, allocation_memento) \ V(CODE_CACHE, CodeCache, code_cache) \ V(POLYMORPHIC_CODE_CACHE, PolymorphicCodeCache, polymorphic_code_cache) \ V(TYPE_FEEDBACK_INFO, TypeFeedbackInfo, type_feedback_info) \ V(ALIASED_ARGUMENTS_ENTRY, AliasedArgumentsEntry, aliased_arguments_entry) \ V(DEBUG_INFO, DebugInfo, debug_info) \ V(BREAK_POINT_INFO, BreakPointInfo, break_point_info) // We use the full 8 bits of the instance_type field to encode heap object // instance types. The high-order bit (bit 7) is set if the object is not a // string, and cleared if it is a string. const uint32_t kIsNotStringMask = 0x80; const uint32_t kStringTag = 0x0; const uint32_t kNotStringTag = 0x80; // Bit 6 indicates that the object is an internalized string (if set) or not. // Bit 7 has to be clear as well. const uint32_t kIsNotInternalizedMask = 0x40; const uint32_t kNotInternalizedTag = 0x40; const uint32_t kInternalizedTag = 0x0; // If bit 7 is clear then bit 2 indicates whether the string consists of // two-byte characters or one-byte characters. const uint32_t kStringEncodingMask = 0x4; const uint32_t kTwoByteStringTag = 0x0; const uint32_t kOneByteStringTag = 0x4; // If bit 7 is clear, the low-order 2 bits indicate the representation // of the string. const uint32_t kStringRepresentationMask = 0x03; enum StringRepresentationTag { kSeqStringTag = 0x0, kConsStringTag = 0x1, kExternalStringTag = 0x2, kSlicedStringTag = 0x3 }; const uint32_t kIsIndirectStringMask = 0x1; const uint32_t kIsIndirectStringTag = 0x1; STATIC_ASSERT((kSeqStringTag & kIsIndirectStringMask) == 0); // NOLINT STATIC_ASSERT((kExternalStringTag & kIsIndirectStringMask) == 0); // NOLINT STATIC_ASSERT((kConsStringTag & kIsIndirectStringMask) == kIsIndirectStringTag); // NOLINT STATIC_ASSERT((kSlicedStringTag & kIsIndirectStringMask) == kIsIndirectStringTag); // NOLINT // Use this mask to distinguish between cons and slice only after making // sure that the string is one of the two (an indirect string). const uint32_t kSlicedNotConsMask = kSlicedStringTag & ~kConsStringTag; STATIC_ASSERT(IS_POWER_OF_TWO(kSlicedNotConsMask)); // If bit 7 is clear, then bit 3 indicates whether this two-byte // string actually contains one byte data. const uint32_t kOneByteDataHintMask = 0x08; const uint32_t kOneByteDataHintTag = 0x08; // If bit 7 is clear and string representation indicates an external string, // then bit 4 indicates whether the data pointer is cached. const uint32_t kShortExternalStringMask = 0x10; const uint32_t kShortExternalStringTag = 0x10; // A ConsString with an empty string as the right side is a candidate // for being shortcut by the garbage collector. 
We don't allocate any // non-flat internalized strings, so we do not shortcut them thereby // avoiding turning internalized strings into strings. The bit-masks // below contain the internalized bit as additional safety. // See heap.cc, mark-compact.cc and objects-visiting.cc. const uint32_t kShortcutTypeMask = kIsNotStringMask | kIsNotInternalizedMask | kStringRepresentationMask; const uint32_t kShortcutTypeTag = kConsStringTag | kNotInternalizedTag; static inline bool IsShortcutCandidate(int type) { return ((type & kShortcutTypeMask) == kShortcutTypeTag); } enum InstanceType { // String types. INTERNALIZED_STRING_TYPE = kTwoByteStringTag | kSeqStringTag | kInternalizedTag, ONE_BYTE_INTERNALIZED_STRING_TYPE = kOneByteStringTag | kSeqStringTag | kInternalizedTag, EXTERNAL_INTERNALIZED_STRING_TYPE = kTwoByteStringTag | kExternalStringTag | kInternalizedTag, EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE = kOneByteStringTag | kExternalStringTag | kInternalizedTag, EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE = EXTERNAL_INTERNALIZED_STRING_TYPE | kOneByteDataHintTag | kInternalizedTag, SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE = EXTERNAL_INTERNALIZED_STRING_TYPE | kShortExternalStringTag | kInternalizedTag, SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE = EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kShortExternalStringTag | kInternalizedTag, SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE = EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE | kShortExternalStringTag | kInternalizedTag, STRING_TYPE = INTERNALIZED_STRING_TYPE | kNotInternalizedTag, ONE_BYTE_STRING_TYPE = ONE_BYTE_INTERNALIZED_STRING_TYPE | kNotInternalizedTag, CONS_STRING_TYPE = kTwoByteStringTag | kConsStringTag | kNotInternalizedTag, CONS_ONE_BYTE_STRING_TYPE = kOneByteStringTag | kConsStringTag | kNotInternalizedTag, SLICED_STRING_TYPE = kTwoByteStringTag | kSlicedStringTag | kNotInternalizedTag, SLICED_ONE_BYTE_STRING_TYPE = kOneByteStringTag | kSlicedStringTag | kNotInternalizedTag, EXTERNAL_STRING_TYPE = EXTERNAL_INTERNALIZED_STRING_TYPE | kNotInternalizedTag, EXTERNAL_ONE_BYTE_STRING_TYPE = EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kNotInternalizedTag, EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE = EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE | kNotInternalizedTag, SHORT_EXTERNAL_STRING_TYPE = SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE | kNotInternalizedTag, SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE = SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kNotInternalizedTag, SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE = SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE | kNotInternalizedTag, // Non-string names SYMBOL_TYPE = kNotStringTag, // FIRST_NONSTRING_TYPE, LAST_NAME_TYPE // Objects allocated in their own spaces (never in new space). MAP_TYPE, CODE_TYPE, ODDBALL_TYPE, CELL_TYPE, PROPERTY_CELL_TYPE, // "Data", objects that cannot contain non-map-word pointers to heap // objects. 
HEAP_NUMBER_TYPE, MUTABLE_HEAP_NUMBER_TYPE, FOREIGN_TYPE, BYTE_ARRAY_TYPE, FREE_SPACE_TYPE, EXTERNAL_INT8_ARRAY_TYPE, // FIRST_EXTERNAL_ARRAY_TYPE EXTERNAL_UINT8_ARRAY_TYPE, EXTERNAL_INT16_ARRAY_TYPE, EXTERNAL_UINT16_ARRAY_TYPE, EXTERNAL_INT32_ARRAY_TYPE, EXTERNAL_UINT32_ARRAY_TYPE, EXTERNAL_FLOAT32_ARRAY_TYPE, EXTERNAL_FLOAT64_ARRAY_TYPE, EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE, // LAST_EXTERNAL_ARRAY_TYPE FIXED_INT8_ARRAY_TYPE, // FIRST_FIXED_TYPED_ARRAY_TYPE FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE, FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE, FIXED_FLOAT32_ARRAY_TYPE, FIXED_FLOAT64_ARRAY_TYPE, FIXED_UINT8_CLAMPED_ARRAY_TYPE, // LAST_FIXED_TYPED_ARRAY_TYPE FIXED_DOUBLE_ARRAY_TYPE, FILLER_TYPE, // LAST_DATA_TYPE // Structs. DECLARED_ACCESSOR_DESCRIPTOR_TYPE, DECLARED_ACCESSOR_INFO_TYPE, EXECUTABLE_ACCESSOR_INFO_TYPE, ACCESSOR_PAIR_TYPE, ACCESS_CHECK_INFO_TYPE, INTERCEPTOR_INFO_TYPE, CALL_HANDLER_INFO_TYPE, FUNCTION_TEMPLATE_INFO_TYPE, OBJECT_TEMPLATE_INFO_TYPE, SIGNATURE_INFO_TYPE, TYPE_SWITCH_INFO_TYPE, ALLOCATION_SITE_TYPE, ALLOCATION_MEMENTO_TYPE, SCRIPT_TYPE, CODE_CACHE_TYPE, POLYMORPHIC_CODE_CACHE_TYPE, TYPE_FEEDBACK_INFO_TYPE, ALIASED_ARGUMENTS_ENTRY_TYPE, BOX_TYPE, DEBUG_INFO_TYPE, BREAK_POINT_INFO_TYPE, FIXED_ARRAY_TYPE, CONSTANT_POOL_ARRAY_TYPE, SHARED_FUNCTION_INFO_TYPE, // All the following types are subtypes of JSReceiver, which corresponds to // objects in the JS sense. The first and the last type in this range are // the two forms of function. This organization enables using the same // compares for checking the JS_RECEIVER/SPEC_OBJECT range and the // NONCALLABLE_JS_OBJECT range. JS_FUNCTION_PROXY_TYPE, // FIRST_JS_RECEIVER_TYPE, FIRST_JS_PROXY_TYPE JS_PROXY_TYPE, // LAST_JS_PROXY_TYPE JS_VALUE_TYPE, // FIRST_JS_OBJECT_TYPE JS_MESSAGE_OBJECT_TYPE, JS_DATE_TYPE, JS_OBJECT_TYPE, JS_CONTEXT_EXTENSION_OBJECT_TYPE, JS_GENERATOR_OBJECT_TYPE, JS_MODULE_TYPE, JS_GLOBAL_OBJECT_TYPE, JS_BUILTINS_OBJECT_TYPE, JS_GLOBAL_PROXY_TYPE, JS_ARRAY_TYPE, JS_ARRAY_BUFFER_TYPE, JS_TYPED_ARRAY_TYPE, JS_DATA_VIEW_TYPE, JS_SET_TYPE, JS_MAP_TYPE, JS_SET_ITERATOR_TYPE, JS_MAP_ITERATOR_TYPE, JS_WEAK_MAP_TYPE, JS_WEAK_SET_TYPE, JS_REGEXP_TYPE, JS_FUNCTION_TYPE, // LAST_JS_OBJECT_TYPE, LAST_JS_RECEIVER_TYPE // Pseudo-types FIRST_TYPE = 0x0, LAST_TYPE = JS_FUNCTION_TYPE, FIRST_NAME_TYPE = FIRST_TYPE, LAST_NAME_TYPE = SYMBOL_TYPE, FIRST_UNIQUE_NAME_TYPE = INTERNALIZED_STRING_TYPE, LAST_UNIQUE_NAME_TYPE = SYMBOL_TYPE, FIRST_NONSTRING_TYPE = SYMBOL_TYPE, // Boundaries for testing for an external array. FIRST_EXTERNAL_ARRAY_TYPE = EXTERNAL_INT8_ARRAY_TYPE, LAST_EXTERNAL_ARRAY_TYPE = EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE, // Boundaries for testing for a fixed typed array. FIRST_FIXED_TYPED_ARRAY_TYPE = FIXED_INT8_ARRAY_TYPE, LAST_FIXED_TYPED_ARRAY_TYPE = FIXED_UINT8_CLAMPED_ARRAY_TYPE, // Boundary for promotion to old data space/old pointer space. LAST_DATA_TYPE = FILLER_TYPE, // Boundary for objects represented as JSReceiver (i.e. JSObject or JSProxy). // Note that there is no range for JSObject or JSProxy, since their subtypes // are not continuous in this enum! The enum ranges instead reflect the // external class names, where proxies are treated as either ordinary objects, // or functions. 
  FIRST_JS_RECEIVER_TYPE = JS_FUNCTION_PROXY_TYPE,
  LAST_JS_RECEIVER_TYPE = LAST_TYPE,
  // Boundaries for testing the types represented as JSObject
  FIRST_JS_OBJECT_TYPE = JS_VALUE_TYPE,
  LAST_JS_OBJECT_TYPE = LAST_TYPE,
  // Boundaries for testing the types represented as JSProxy
  FIRST_JS_PROXY_TYPE = JS_FUNCTION_PROXY_TYPE,
  LAST_JS_PROXY_TYPE = JS_PROXY_TYPE,
  // Boundaries for testing whether the type is a JavaScript object.
  FIRST_SPEC_OBJECT_TYPE = FIRST_JS_RECEIVER_TYPE,
  LAST_SPEC_OBJECT_TYPE = LAST_JS_RECEIVER_TYPE,
  // Boundaries for testing the types for which typeof is "object".
  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_PROXY_TYPE,
  LAST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_REGEXP_TYPE,
  // Note that the types for which typeof is "function" are not continuous.
  // Define this so that we can put assertions on discrete checks.
  NUM_OF_CALLABLE_SPEC_OBJECT_TYPES = 2
};

const int kExternalArrayTypeCount =
    LAST_EXTERNAL_ARRAY_TYPE - FIRST_EXTERNAL_ARRAY_TYPE + 1;

STATIC_ASSERT(JS_OBJECT_TYPE == Internals::kJSObjectType);
STATIC_ASSERT(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
STATIC_ASSERT(ODDBALL_TYPE == Internals::kOddballType);
STATIC_ASSERT(FOREIGN_TYPE == Internals::kForeignType);

#define FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(V) \
  V(FAST_ELEMENTS_SUB_TYPE)                   \
  V(DICTIONARY_ELEMENTS_SUB_TYPE)             \
  V(FAST_PROPERTIES_SUB_TYPE)                 \
  V(DICTIONARY_PROPERTIES_SUB_TYPE)           \
  V(MAP_CODE_CACHE_SUB_TYPE)                  \
  V(SCOPE_INFO_SUB_TYPE)                      \
  V(STRING_TABLE_SUB_TYPE)                    \
  V(DESCRIPTOR_ARRAY_SUB_TYPE)                \
  V(TRANSITION_ARRAY_SUB_TYPE)

enum FixedArraySubInstanceType {
#define DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE(name) name,
  FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE)
#undef DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE
  LAST_FIXED_ARRAY_SUB_TYPE = TRANSITION_ARRAY_SUB_TYPE
};

enum CompareResult {
  LESS      = -1,
  EQUAL     =  0,
  GREATER   =  1,
  NOT_EQUAL = GREATER
};

#define DECL_BOOLEAN_ACCESSORS(name)   \
  inline bool name() const;            \
  inline void set_##name(bool value);  \

#define DECL_ACCESSORS(name, type)                                      \
  inline type* name() const;                                            \
  inline void set_##name(type* value,                                   \
                         WriteBarrierMode mode = UPDATE_WRITE_BARRIER); \

#define DECLARE_CAST(type)                              \
  INLINE(static type* cast(Object* object));            \
  INLINE(static const type* cast(const Object* object));

class AccessorPair;
class AllocationSite;
class AllocationSiteCreationContext;
class AllocationSiteUsageContext;
class DictionaryElementsAccessor;
class ElementsAccessor;
class FixedArrayBase;
class GlobalObject;
class ObjectVisitor;
class LookupIterator;
class StringStream;
class TypeFeedbackVector;

// We cannot just say "class HeapType;" if it is created from a template... =8-?
template<class> class TypeImpl;
struct HeapTypeConfig;
typedef TypeImpl<HeapTypeConfig> HeapType;

// A template-ized version of the IsXXX functions.
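// For illustration only: Is<JSFunction>(obj) is intended to behave like
// obj->IsJSFunction(), which lets templated code perform the type check when
// the concrete type is only known as a template parameter.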
template inline bool Is(Object* obj); #ifdef VERIFY_HEAP #define DECLARE_VERIFIER(Name) void Name##Verify(); #else #define DECLARE_VERIFIER(Name) #endif #ifdef OBJECT_PRINT #define DECLARE_PRINTER(Name) void Name##Print(OStream& os); // NOLINT #else #define DECLARE_PRINTER(Name) #endif #define OBJECT_TYPE_LIST(V) \ V(Smi) \ V(HeapObject) \ V(Number) #define HEAP_OBJECT_TYPE_LIST(V) \ V(HeapNumber) \ V(MutableHeapNumber) \ V(Name) \ V(UniqueName) \ V(String) \ V(SeqString) \ V(ExternalString) \ V(ConsString) \ V(SlicedString) \ V(ExternalTwoByteString) \ V(ExternalOneByteString) \ V(SeqTwoByteString) \ V(SeqOneByteString) \ V(InternalizedString) \ V(Symbol) \ \ V(ExternalArray) \ V(ExternalInt8Array) \ V(ExternalUint8Array) \ V(ExternalInt16Array) \ V(ExternalUint16Array) \ V(ExternalInt32Array) \ V(ExternalUint32Array) \ V(ExternalFloat32Array) \ V(ExternalFloat64Array) \ V(ExternalUint8ClampedArray) \ V(FixedTypedArrayBase) \ V(FixedUint8Array) \ V(FixedInt8Array) \ V(FixedUint16Array) \ V(FixedInt16Array) \ V(FixedUint32Array) \ V(FixedInt32Array) \ V(FixedFloat32Array) \ V(FixedFloat64Array) \ V(FixedUint8ClampedArray) \ V(ByteArray) \ V(FreeSpace) \ V(JSReceiver) \ V(JSObject) \ V(JSContextExtensionObject) \ V(JSGeneratorObject) \ V(JSModule) \ V(Map) \ V(DescriptorArray) \ V(TransitionArray) \ V(TypeFeedbackVector) \ V(DeoptimizationInputData) \ V(DeoptimizationOutputData) \ V(DependentCode) \ V(FixedArray) \ V(FixedDoubleArray) \ V(ConstantPoolArray) \ V(Context) \ V(NativeContext) \ V(ScopeInfo) \ V(JSFunction) \ V(Code) \ V(Oddball) \ V(SharedFunctionInfo) \ V(JSValue) \ V(JSDate) \ V(JSMessageObject) \ V(StringWrapper) \ V(Foreign) \ V(Boolean) \ V(JSArray) \ V(JSArrayBuffer) \ V(JSArrayBufferView) \ V(JSTypedArray) \ V(JSDataView) \ V(JSProxy) \ V(JSFunctionProxy) \ V(JSSet) \ V(JSMap) \ V(JSSetIterator) \ V(JSMapIterator) \ V(JSWeakCollection) \ V(JSWeakMap) \ V(JSWeakSet) \ V(JSRegExp) \ V(HashTable) \ V(Dictionary) \ V(StringTable) \ V(JSFunctionResultCache) \ V(NormalizedMapCache) \ V(CompilationCacheTable) \ V(CodeCacheHashTable) \ V(PolymorphicCodeCacheHashTable) \ V(MapCache) \ V(Primitive) \ V(GlobalObject) \ V(JSGlobalObject) \ V(JSBuiltinsObject) \ V(JSGlobalProxy) \ V(UndetectableObject) \ V(AccessCheckNeeded) \ V(Cell) \ V(PropertyCell) \ V(ObjectHashTable) \ V(WeakHashTable) \ V(OrderedHashTable) // Object is the abstract superclass for all classes in the // object hierarchy. // Object does not use any virtual functions to avoid the // allocation of the C++ vtable. // Since both Smi and HeapObject are subclasses of Object no // data members can be present in Object. class Object { public: // Type testing. bool IsObject() const { return true; } #define IS_TYPE_FUNCTION_DECL(type_) INLINE(bool Is##type_() const); OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL) HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL) #undef IS_TYPE_FUNCTION_DECL // A non-keyed store is of the form a.x = foo or a["x"] = foo whereas // a keyed store is of the form a[expression] = foo. 
enum StoreFromKeyed { MAY_BE_STORE_FROM_KEYED, CERTAINLY_NOT_STORE_FROM_KEYED }; INLINE(bool IsFixedArrayBase() const); INLINE(bool IsExternal() const); INLINE(bool IsAccessorInfo() const); INLINE(bool IsStruct() const); #define DECLARE_STRUCT_PREDICATE(NAME, Name, name) \ INLINE(bool Is##Name() const); STRUCT_LIST(DECLARE_STRUCT_PREDICATE) #undef DECLARE_STRUCT_PREDICATE INLINE(bool IsSpecObject()) const; INLINE(bool IsSpecFunction()) const; INLINE(bool IsTemplateInfo()) const; INLINE(bool IsNameDictionary() const); INLINE(bool IsSeededNumberDictionary() const); INLINE(bool IsUnseededNumberDictionary() const); INLINE(bool IsOrderedHashSet() const); INLINE(bool IsOrderedHashMap() const); bool IsCallable() const; // Oddball testing. INLINE(bool IsUndefined() const); INLINE(bool IsNull() const); INLINE(bool IsTheHole() const); INLINE(bool IsException() const); INLINE(bool IsUninitialized() const); INLINE(bool IsTrue() const); INLINE(bool IsFalse() const); INLINE(bool IsArgumentsMarker() const); // Filler objects (fillers and free space objects). INLINE(bool IsFiller() const); // Extract the number. inline double Number(); INLINE(bool IsNaN() const); INLINE(bool IsMinusZero() const); bool ToInt32(int32_t* value); bool ToUint32(uint32_t* value); inline Representation OptimalRepresentation() { if (!FLAG_track_fields) return Representation::Tagged(); if (IsSmi()) { return Representation::Smi(); } else if (FLAG_track_double_fields && IsHeapNumber()) { return Representation::Double(); } else if (FLAG_track_computed_fields && IsUninitialized()) { return Representation::None(); } else if (FLAG_track_heap_object_fields) { DCHECK(IsHeapObject()); return Representation::HeapObject(); } else { return Representation::Tagged(); } } inline bool FitsRepresentation(Representation representation) { if (FLAG_track_fields && representation.IsNone()) { return false; } else if (FLAG_track_fields && representation.IsSmi()) { return IsSmi(); } else if (FLAG_track_double_fields && representation.IsDouble()) { return IsMutableHeapNumber() || IsNumber(); } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) { return IsHeapObject(); } return true; } Handle OptimalType(Isolate* isolate, Representation representation); inline static Handle NewStorageFor(Isolate* isolate, Handle object, Representation representation); inline static Handle WrapForRead(Isolate* isolate, Handle object, Representation representation); // Returns true if the object is of the correct type to be used as a // implementation of a JSObject's elements. inline bool HasValidElements(); inline bool HasSpecificClassOf(String* name); bool BooleanValue(); // ECMA-262 9.2. // Convert to a JSObject if needed. // native_context is used when creating wrapper object. static inline MaybeHandle ToObject(Isolate* isolate, Handle object); static MaybeHandle ToObject(Isolate* isolate, Handle object, Handle context); // Converts this to a Smi if possible. static MUST_USE_RESULT inline MaybeHandle ToSmi(Isolate* isolate, Handle object); MUST_USE_RESULT static MaybeHandle GetProperty(LookupIterator* it); // Implementation of [[Put]], ECMA-262 5th edition, section 8.12.5. 
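  // Illustrative call pattern only (not part of this header); assuming the
  // usual MaybeHandle conventions, the result must be checked before use:
  //
  //   Handle<Object> result;
  //   if (!Object::SetProperty(receiver, name, value, SLOPPY)
  //            .ToHandle(&result)) {
  //     return MaybeHandle<Object>();  // an exception is pending
  //   }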
MUST_USE_RESULT static MaybeHandle SetProperty( Handle object, Handle key, Handle value, StrictMode strict_mode, StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED); MUST_USE_RESULT static MaybeHandle SetProperty( LookupIterator* it, Handle value, StrictMode strict_mode, StoreFromKeyed store_mode); MUST_USE_RESULT static MaybeHandle WriteToReadOnlyProperty( LookupIterator* it, Handle value, StrictMode strict_mode); static Handle SetDataProperty(LookupIterator* it, Handle value); MUST_USE_RESULT static MaybeHandle AddDataProperty( LookupIterator* it, Handle value, PropertyAttributes attributes, StrictMode strict_mode, StoreFromKeyed store_mode); MUST_USE_RESULT static inline MaybeHandle GetPropertyOrElement( Handle object, Handle key); MUST_USE_RESULT static inline MaybeHandle GetProperty( Isolate* isolate, Handle object, const char* key); MUST_USE_RESULT static inline MaybeHandle GetProperty( Handle object, Handle key); MUST_USE_RESULT static MaybeHandle GetPropertyWithAccessor( Handle receiver, Handle name, Handle holder, Handle structure); MUST_USE_RESULT static MaybeHandle SetPropertyWithAccessor( Handle receiver, Handle name, Handle value, Handle holder, Handle structure, StrictMode strict_mode); MUST_USE_RESULT static MaybeHandle GetPropertyWithDefinedGetter( Handle receiver, Handle getter); MUST_USE_RESULT static MaybeHandle SetPropertyWithDefinedSetter( Handle receiver, Handle setter, Handle value); MUST_USE_RESULT static inline MaybeHandle GetElement( Isolate* isolate, Handle object, uint32_t index); MUST_USE_RESULT static MaybeHandle GetElementWithReceiver( Isolate* isolate, Handle object, Handle receiver, uint32_t index); // Returns the permanent hash code associated with this object. May return // undefined if not yet created. Object* GetHash(); // Returns the permanent hash code associated with this object depending on // the actual object type. May create and store a hash code if needed and none // exists. static Handle GetOrCreateHash(Isolate* isolate, Handle object); // Checks whether this object has the same value as the given one. This // function is implemented according to ES5, section 9.12 and can be used // to implement the Harmony "egal" function. bool SameValue(Object* other); // Checks whether this object has the same value as the given one. // +0 and -0 are treated equal. Everything else is the same as SameValue. // This function is implemented according to ES6, section 7.2.4 and is used // by ES6 Map and Set. bool SameValueZero(Object* other); // Tries to convert an object to an array index. Returns true and sets // the output parameter if it succeeds. inline bool ToArrayIndex(uint32_t* index); // Returns true if this is a JSValue containing a string and the index is // < the length of the string. Used to implement [] on strings. inline bool IsStringObjectWithCharacterAt(uint32_t index); DECLARE_VERIFIER(Object) #ifdef VERIFY_HEAP // Verify a pointer is a valid object pointer. static void VerifyPointer(Object* p); #endif inline void VerifyApiCallResultType(); // Prints this object without details. void ShortPrint(FILE* out = stdout); // Prints this object without details to a message accumulator. void ShortPrint(StringStream* accumulator); DECLARE_CAST(Object) // Layout description. static const int kHeaderSize = 0; // Object does not take up any space. #ifdef OBJECT_PRINT // For our gdb macros, we should perhaps change these in the future. void Print(); // Prints this object with details. 
  void Print(OStream& os);  // NOLINT
#endif

 private:
  friend class LookupIterator;
  friend class PrototypeIterator;

  // Return the map of the root of object's prototype chain.
  Map* GetRootMap(Isolate* isolate);

  DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
};

struct Brief {
  explicit Brief(const Object* const v) : value(v) {}
  const Object* value;
};

OStream& operator<<(OStream& os, const Brief& v);

// Smi represents integer Numbers that can be stored in 31 bits.
// Smis are immediate which means they are NOT allocated in the heap.
// The this pointer has the following format: [31 bit signed int] 0
// For long smis it has the following format:
//     [32 bit signed int] [31 bits zero padding] 0
// Smi stands for small integer.
class Smi: public Object {
 public:
  // Returns the integer value.
  inline int value() const;

  // Convert a value to a Smi object.
  static inline Smi* FromInt(int value);

  static inline Smi* FromIntptr(intptr_t value);

  // Returns whether value can be represented in a Smi.
  static inline bool IsValid(intptr_t value);

  DECLARE_CAST(Smi)

  // Dispatched behavior.
  void SmiPrint(OStream& os) const;  // NOLINT
  DECLARE_VERIFIER(Smi)

  static const int kMinValue =
      (static_cast<unsigned int>(-1)) << (kSmiValueSize - 1);
  static const int kMaxValue = -(kMinValue + 1);

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(Smi);
};

// Heap objects typically have a map pointer in their first word. However,
// during GC other data (e.g. mark bits, forwarding addresses) is sometimes
// encoded in the first word. The class MapWord is an abstraction of the
// value in a heap object's first word.
class MapWord BASE_EMBEDDED {
 public:
  // Normal state: the map word contains a map pointer.

  // Create a map word from a map pointer.
  static inline MapWord FromMap(const Map* map);

  // View this map word as a map pointer.
  inline Map* ToMap();

  // Scavenge collection: the map word of live objects in the from space
  // contains a forwarding address (a heap object pointer in the to space).

  // True if this map word is a forwarding address for a scavenge
  // collection. Only valid during a scavenge collection (specifically,
  // when all map words are heap object pointers, i.e. not during a full GC).
  inline bool IsForwardingAddress();

  // Create a map word from a forwarding address.
  static inline MapWord FromForwardingAddress(HeapObject* object);

  // View this map word as a forwarding address.
  inline HeapObject* ToForwardingAddress();

  static inline MapWord FromRawValue(uintptr_t value) {
    return MapWord(value);
  }

  inline uintptr_t ToRawValue() {
    return value_;
  }

 private:
  // HeapObject calls the private constructor and directly reads the value.
  friend class HeapObject;

  explicit MapWord(uintptr_t value) : value_(value) {}

  uintptr_t value_;
};

// HeapObject is the superclass for all classes describing heap allocated
// objects.
class HeapObject: public Object {
 public:
  // [map]: Contains a map which contains the object's reflective
  // information.
  inline Map* map() const;
  inline void set_map(Map* value);
  // The no-write-barrier version. This is OK if the object is white and in
  // new space, or if the value is an immortal immutable object, like the maps
  // of primitive (non-JS) objects like strings, heap numbers etc.
  inline void set_map_no_write_barrier(Map* value);

  // Get the map using acquire load.
inline Map* synchronized_map(); inline MapWord synchronized_map_word() const; // Set the map using release store inline void synchronized_set_map(Map* value); inline void synchronized_set_map_no_write_barrier(Map* value); inline void synchronized_set_map_word(MapWord map_word); // During garbage collection, the map word of a heap object does not // necessarily contain a map pointer. inline MapWord map_word() const; inline void set_map_word(MapWord map_word); // The Heap the object was allocated in. Used also to access Isolate. inline Heap* GetHeap() const; // Convenience method to get current isolate. inline Isolate* GetIsolate() const; // Converts an address to a HeapObject pointer. static inline HeapObject* FromAddress(Address address); // Returns the address of this HeapObject. inline Address address(); // Iterates over pointers contained in the object (including the Map) void Iterate(ObjectVisitor* v); // Iterates over all pointers contained in the object except the // first map pointer. The object type is given in the first // parameter. This function does not access the map pointer in the // object, and so is safe to call while the map pointer is modified. void IterateBody(InstanceType type, int object_size, ObjectVisitor* v); // Returns the heap object's size in bytes inline int Size(); // Returns true if this heap object may contain raw values, i.e., values that // look like pointers to heap objects. inline bool MayContainRawValues(); // Given a heap object's map pointer, returns the heap size in bytes // Useful when the map pointer field is used for other purposes. // GC internal. inline int SizeFromMap(Map* map); // Returns the field at offset in obj, as a read/write Object* reference. // Does no checking, and is safe to use during GC, while maps are invalid. // Does not invoke write barrier, so should only be assigned to // during marking GC. static inline Object** RawField(HeapObject* obj, int offset); // Adds the |code| object related to |name| to the code cache of this map. If // this map is a dictionary map that is shared, the map copied and installed // onto the object. static void UpdateMapCodeCache(Handle object, Handle name, Handle code); DECLARE_CAST(HeapObject) // Return the write barrier mode for this. Callers of this function // must be able to present a reference to an DisallowHeapAllocation // object as a sign that they are not going to use this function // from code that allocates and thus invalidates the returned write // barrier mode. inline WriteBarrierMode GetWriteBarrierMode( const DisallowHeapAllocation& promise); // Dispatched behavior. void HeapObjectShortPrint(OStream& os); // NOLINT #ifdef OBJECT_PRINT void PrintHeader(OStream& os, const char* id); // NOLINT #endif DECLARE_PRINTER(HeapObject) DECLARE_VERIFIER(HeapObject) #ifdef VERIFY_HEAP inline void VerifyObjectField(int offset); inline void VerifySmiField(int offset); // Verify a pointer is a valid HeapObject pointer that points to object // areas in the heap. static void VerifyHeapPointer(Object* p); #endif // Layout description. // First field in a heap object is map. 
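  // For illustration only: Object::kHeaderSize is 0, so kMapOffset below is 0
  // and HeapObject::kHeaderSize equals kPointerSize (4 bytes on 32-bit
  // targets, 8 on 64-bit). Subclasses lay out their own fields starting at
  // HeapObject::kHeaderSize, e.g. HeapNumber::kValueOffset below.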
  static const int kMapOffset = Object::kHeaderSize;
  static const int kHeaderSize = kMapOffset + kPointerSize;

  STATIC_ASSERT(kMapOffset == Internals::kHeapObjectMapOffset);

 protected:
  // helpers for calling an ObjectVisitor to iterate over pointers in the
  // half-open range [start, end) specified as integer offsets
  inline void IteratePointers(ObjectVisitor* v, int start, int end);
  // as above, for the single element at "offset"
  inline void IteratePointer(ObjectVisitor* v, int offset);
  // as above, for the next code link of a code object.
  inline void IterateNextCodeLink(ObjectVisitor* v, int offset);

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(HeapObject);
};

// This class describes a body of an object of a fixed size
// in which all pointer fields are located in the [start_offset, end_offset)
// interval.
template<int start_offset, int end_offset, int size>
class FixedBodyDescriptor {
 public:
  static const int kStartOffset = start_offset;
  static const int kEndOffset = end_offset;
  static const int kSize = size;

  static inline void IterateBody(HeapObject* obj, ObjectVisitor* v);

  template<typename StaticVisitor>
  static inline void IterateBody(HeapObject* obj) {
    StaticVisitor::VisitPointers(HeapObject::RawField(obj, start_offset),
                                 HeapObject::RawField(obj, end_offset));
  }
};

// This class describes a body of an object of a variable size
// in which all pointer fields are located in the [start_offset, object_size)
// interval.
template<int start_offset>
class FlexibleBodyDescriptor {
 public:
  static const int kStartOffset = start_offset;

  static inline void IterateBody(HeapObject* obj, int object_size,
                                 ObjectVisitor* v);

  template<typename StaticVisitor>
  static inline void IterateBody(HeapObject* obj, int object_size) {
    StaticVisitor::VisitPointers(HeapObject::RawField(obj, start_offset),
                                 HeapObject::RawField(obj, object_size));
  }
};

// The HeapNumber class describes heap allocated numbers that cannot be
// represented in a Smi (small integer)
class HeapNumber: public HeapObject {
 public:
  // [value]: number value.
  inline double value() const;
  inline void set_value(double value);

  DECLARE_CAST(HeapNumber)

  // Dispatched behavior.
  bool HeapNumberBooleanValue();

  void HeapNumberPrint(OStream& os);  // NOLINT
  DECLARE_VERIFIER(HeapNumber)

  inline int get_exponent();
  inline int get_sign();

  // Layout description.
  static const int kValueOffset = HeapObject::kHeaderSize;
  // IEEE doubles are two 32 bit words. The first is just mantissa, the second
  // is a mixture of sign, exponent and mantissa. The offsets of two 32 bit
  // words within double numbers are endian dependent and they are set
  // accordingly.
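  // For illustration only: the IEEE-754 double 1.0 has bit pattern
  // 0x3FF0000000000000. On a little-endian target its low word 0x00000000
  // (pure mantissa) lives at kMantissaOffset == kValueOffset and its high word
  // 0x3FF00000 (sign, exponent and the top 20 mantissa bits) at
  // kExponentOffset == kValueOffset + 4; big-endian targets swap the two.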
#if defined(V8_TARGET_LITTLE_ENDIAN) static const int kMantissaOffset = kValueOffset; static const int kExponentOffset = kValueOffset + 4; #elif defined(V8_TARGET_BIG_ENDIAN) static const int kMantissaOffset = kValueOffset + 4; static const int kExponentOffset = kValueOffset; #else #error Unknown byte ordering #endif static const int kSize = kValueOffset + kDoubleSize; static const uint32_t kSignMask = 0x80000000u; static const uint32_t kExponentMask = 0x7ff00000u; static const uint32_t kMantissaMask = 0xfffffu; static const int kMantissaBits = 52; static const int kExponentBits = 11; static const int kExponentBias = 1023; static const int kExponentShift = 20; static const int kInfinityOrNanExponent = (kExponentMask >> kExponentShift) - kExponentBias; static const int kMantissaBitsInTopWord = 20; static const int kNonMantissaBitsInTopWord = 12; private: DISALLOW_IMPLICIT_CONSTRUCTORS(HeapNumber); }; enum EnsureElementsMode { DONT_ALLOW_DOUBLE_ELEMENTS, ALLOW_COPIED_DOUBLE_ELEMENTS, ALLOW_CONVERTED_DOUBLE_ELEMENTS }; // Indicates whether a property should be set or (re)defined. Setting of a // property causes attributes to remain unchanged, writability to be checked // and callbacks to be called. Defining of a property causes attributes to // be updated and callbacks to be overridden. enum SetPropertyMode { SET_PROPERTY, DEFINE_PROPERTY }; // Indicator for one component of an AccessorPair. enum AccessorComponent { ACCESSOR_GETTER, ACCESSOR_SETTER }; // JSReceiver includes types on which properties can be defined, i.e., // JSObject and JSProxy. class JSReceiver: public HeapObject { public: enum DeleteMode { NORMAL_DELETION, STRICT_DELETION, FORCE_DELETION }; DECLARE_CAST(JSReceiver) MUST_USE_RESULT static MaybeHandle SetElement( Handle object, uint32_t index, Handle value, PropertyAttributes attributes, StrictMode strict_mode); // Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6. MUST_USE_RESULT static inline Maybe HasProperty( Handle object, Handle name); MUST_USE_RESULT static inline Maybe HasOwnProperty(Handle, Handle name); MUST_USE_RESULT static inline Maybe HasElement( Handle object, uint32_t index); MUST_USE_RESULT static inline Maybe HasOwnElement( Handle object, uint32_t index); // Implementation of [[Delete]], ECMA-262 5th edition, section 8.12.7. MUST_USE_RESULT static MaybeHandle DeleteProperty( Handle object, Handle name, DeleteMode mode = NORMAL_DELETION); MUST_USE_RESULT static MaybeHandle DeleteElement( Handle object, uint32_t index, DeleteMode mode = NORMAL_DELETION); // Tests for the fast common case for property enumeration. bool IsSimpleEnum(); // Returns the class name ([[Class]] property in the specification). String* class_name(); // Returns the constructor name (the name (possibly, inferred name) of the // function that was used to instantiate the object). String* constructor_name(); MUST_USE_RESULT static inline Maybe GetPropertyAttributes( Handle object, Handle name); MUST_USE_RESULT static Maybe GetPropertyAttributes( LookupIterator* it); MUST_USE_RESULT static Maybe GetOwnPropertyAttributes( Handle object, Handle name); MUST_USE_RESULT static inline Maybe GetElementAttribute( Handle object, uint32_t index); MUST_USE_RESULT static inline Maybe GetOwnElementAttribute(Handle object, uint32_t index); // Return the constructor function (may be Heap::null_value()). inline Object* GetConstructor(); // Retrieves a permanent object identity hash code. The undefined value might // be returned in case no hash was created yet. 
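  // Illustrative usage only (not part of this header): callers are expected to
  // handle the undefined case, e.g.
  //
  //   Object* hash = object->GetIdentityHash();
  //   if (hash->IsUndefined()) { /* no hash has been assigned yet */ }
  //
  // Callers that must have a hash use GetOrCreateIdentityHash() below instead.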
inline Object* GetIdentityHash(); // Retrieves a permanent object identity hash code. May create and store a // hash code if needed and none exists. inline static Handle GetOrCreateIdentityHash( Handle object); enum KeyCollectionType { OWN_ONLY, INCLUDE_PROTOS }; // Computes the enumerable keys for a JSObject. Used for implementing // "for (n in object) { }". MUST_USE_RESULT static MaybeHandle GetKeys( Handle object, KeyCollectionType type); private: DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver); }; // Forward declaration for JSObject::GetOrCreateHiddenPropertiesHashTable. class ObjectHashTable; // Forward declaration for JSObject::Copy. class AllocationSite; // The JSObject describes real heap allocated JavaScript objects with // properties. // Note that the map of JSObject changes during execution to enable inline // caching. class JSObject: public JSReceiver { public: // [properties]: Backing storage for properties. // properties is a FixedArray in the fast case and a Dictionary in the // slow case. DECL_ACCESSORS(properties, FixedArray) // Get and set fast properties. inline void initialize_properties(); inline bool HasFastProperties(); inline NameDictionary* property_dictionary(); // Gets slow properties. // [elements]: The elements (properties with names that are integers). // // Elements can be in two general modes: fast and slow. Each mode // corrensponds to a set of object representations of elements that // have something in common. // // In the fast mode elements is a FixedArray and so each element can // be quickly accessed. This fact is used in the generated code. The // elements array can have one of three maps in this mode: // fixed_array_map, sloppy_arguments_elements_map or // fixed_cow_array_map (for copy-on-write arrays). In the latter case // the elements array may be shared by a few objects and so before // writing to any element the array must be copied. Use // EnsureWritableFastElements in this case. // // In the slow mode the elements is either a NumberDictionary, an // ExternalArray, or a FixedArray parameter map for a (sloppy) // arguments object. DECL_ACCESSORS(elements, FixedArrayBase) inline void initialize_elements(); static void ResetElements(Handle object); static inline void SetMapAndElements(Handle object, Handle map, Handle elements); inline ElementsKind GetElementsKind(); inline ElementsAccessor* GetElementsAccessor(); // Returns true if an object has elements of FAST_SMI_ELEMENTS ElementsKind. inline bool HasFastSmiElements(); // Returns true if an object has elements of FAST_ELEMENTS ElementsKind. inline bool HasFastObjectElements(); // Returns true if an object has elements of FAST_ELEMENTS or // FAST_SMI_ONLY_ELEMENTS. inline bool HasFastSmiOrObjectElements(); // Returns true if an object has any of the fast elements kinds. inline bool HasFastElements(); // Returns true if an object has elements of FAST_DOUBLE_ELEMENTS // ElementsKind. inline bool HasFastDoubleElements(); // Returns true if an object has elements of FAST_HOLEY_*_ELEMENTS // ElementsKind. 
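  // For illustration only: in JavaScript terms, a holey kind typically arises
  // when an array is created or grown with gaps, e.g.
  //
  //   var a = [1, 2, 3];   // packed (FAST_SMI_ELEMENTS)
  //   a[5] = 6;            // indices 3 and 4 become holes -> a FAST_HOLEY_*
  //                        // kind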
inline bool HasFastHoleyElements(); inline bool HasSloppyArgumentsElements(); inline bool HasDictionaryElements(); inline bool HasExternalUint8ClampedElements(); inline bool HasExternalArrayElements(); inline bool HasExternalInt8Elements(); inline bool HasExternalUint8Elements(); inline bool HasExternalInt16Elements(); inline bool HasExternalUint16Elements(); inline bool HasExternalInt32Elements(); inline bool HasExternalUint32Elements(); inline bool HasExternalFloat32Elements(); inline bool HasExternalFloat64Elements(); inline bool HasFixedTypedArrayElements(); inline bool HasFixedUint8ClampedElements(); inline bool HasFixedArrayElements(); inline bool HasFixedInt8Elements(); inline bool HasFixedUint8Elements(); inline bool HasFixedInt16Elements(); inline bool HasFixedUint16Elements(); inline bool HasFixedInt32Elements(); inline bool HasFixedUint32Elements(); inline bool HasFixedFloat32Elements(); inline bool HasFixedFloat64Elements(); bool HasFastArgumentsElements(); bool HasDictionaryArgumentsElements(); inline SeededNumberDictionary* element_dictionary(); // Gets slow elements. // Requires: HasFastElements(). static Handle EnsureWritableFastElements( Handle object); // Collects elements starting at index 0. // Undefined values are placed after non-undefined values. // Returns the number of non-undefined values. static Handle PrepareElementsForSort(Handle object, uint32_t limit); // As PrepareElementsForSort, but only on objects where elements is // a dictionary, and it will stay a dictionary. Collates undefined and // unexisting elements below limit from position zero of the elements. static Handle PrepareSlowElementsForSort(Handle object, uint32_t limit); MUST_USE_RESULT static MaybeHandle SetPropertyWithInterceptor( LookupIterator* it, Handle value); // SetLocalPropertyIgnoreAttributes converts callbacks to fields. We need to // grant an exemption to ExecutableAccessor callbacks in some cases. enum ExecutableAccessorInfoHandling { DEFAULT_HANDLING, DONT_FORCE_FIELD }; MUST_USE_RESULT static MaybeHandle SetOwnPropertyIgnoreAttributes( Handle object, Handle key, Handle value, PropertyAttributes attributes, ExecutableAccessorInfoHandling handling = DEFAULT_HANDLING); static void AddProperty(Handle object, Handle key, Handle value, PropertyAttributes attributes); // Extend the receiver with a single fast property appeared first in the // passed map. This also extends the property backing store if necessary. static void AllocateStorageForMap(Handle object, Handle map); // Migrates the given object to a map whose field representations are the // lowest upper bound of all known representations for that field. static void MigrateInstance(Handle instance); // Migrates the given object only if the target map is already available, // or returns false if such a map is not yet available. static bool TryMigrateInstance(Handle instance); // Sets the property value in a normalized object given (key, value, details). // Handles the special representation of JS global objects. static void SetNormalizedProperty(Handle object, Handle key, Handle value, PropertyDetails details); static void OptimizeAsPrototype(Handle object, PrototypeOptimizationMode mode); static void ReoptimizeIfPrototype(Handle object); // Retrieve interceptors. InterceptorInfo* GetNamedInterceptor(); InterceptorInfo* GetIndexedInterceptor(); // Used from JSReceiver. 
MUST_USE_RESULT static Maybe GetPropertyAttributesWithInterceptor(Handle holder, Handle receiver, Handle name); MUST_USE_RESULT static Maybe GetPropertyAttributesWithFailedAccessCheck(LookupIterator* it); MUST_USE_RESULT static Maybe GetElementAttributeWithReceiver(Handle object, Handle receiver, uint32_t index, bool check_prototype); // Retrieves an AccessorPair property from the given object. Might return // undefined if the property doesn't exist or is of a different kind. MUST_USE_RESULT static MaybeHandle GetAccessor( Handle object, Handle name, AccessorComponent component); // Defines an AccessorPair property on the given object. // TODO(mstarzinger): Rename to SetAccessor(). static MaybeHandle DefineAccessor(Handle object, Handle name, Handle getter, Handle setter, PropertyAttributes attributes); // Defines an AccessorInfo property on the given object. MUST_USE_RESULT static MaybeHandle SetAccessor( Handle object, Handle info); MUST_USE_RESULT static MaybeHandle GetPropertyWithInterceptor( Handle object, Handle receiver, Handle name); // Returns true if this is an instance of an api function and has // been modified since it was created. May give false positives. bool IsDirty(); // Accessors for hidden properties object. // // Hidden properties are not own properties of the object itself. // Instead they are stored in an auxiliary structure kept as an own // property with a special name Heap::hidden_string(). But if the // receiver is a JSGlobalProxy then the auxiliary object is a property // of its prototype, and if it's a detached proxy, then you can't have // hidden properties. // Sets a hidden property on this object. Returns this object if successful, // undefined if called on a detached proxy. static Handle SetHiddenProperty(Handle object, Handle key, Handle value); // Gets the value of a hidden property with the given key. Returns the hole // if the property doesn't exist (or if called on a detached proxy), // otherwise returns the value set for the key. Object* GetHiddenProperty(Handle key); // Deletes a hidden property. Deleting a non-existing property is // considered successful. static void DeleteHiddenProperty(Handle object, Handle key); // Returns true if the object has a property with the hidden string as name. static bool HasHiddenProperties(Handle object); static void SetIdentityHash(Handle object, Handle hash); static inline void ValidateElements(Handle object); // Makes sure that this object can contain HeapObject as elements. static inline void EnsureCanContainHeapObjectElements(Handle obj); // Makes sure that this object can contain the specified elements. static inline void EnsureCanContainElements( Handle object, Object** elements, uint32_t count, EnsureElementsMode mode); static inline void EnsureCanContainElements( Handle object, Handle elements, uint32_t length, EnsureElementsMode mode); static void EnsureCanContainElements( Handle object, Arguments* arguments, uint32_t first_arg, uint32_t arg_count, EnsureElementsMode mode); // Would we convert a fast elements array to dictionary mode given // an access at key? bool WouldConvertToSlowElements(Handle key); // Do we want to keep the elements in fast case when increasing the // capacity? bool ShouldConvertToSlowElements(int new_capacity); // Returns true if the backing storage for the slow-case elements of // this object takes up nearly as much space as a fast-case backing // storage would. In that case the JSObject should have fast // elements. 
bool ShouldConvertToFastElements(); // Returns true if the elements of JSObject contains only values that can be // represented in a FixedDoubleArray and has at least one value that can only // be represented as a double and not a Smi. bool ShouldConvertToFastDoubleElements(bool* has_smi_only_elements); // Computes the new capacity when expanding the elements of a JSObject. static int NewElementsCapacity(int old_capacity) { // (old_capacity + 50%) + 16 return old_capacity + (old_capacity >> 1) + 16; } // These methods do not perform access checks! MUST_USE_RESULT static MaybeHandle GetOwnElementAccessorPair( Handle object, uint32_t index); MUST_USE_RESULT static MaybeHandle SetFastElement( Handle object, uint32_t index, Handle value, StrictMode strict_mode, bool check_prototype); MUST_USE_RESULT static MaybeHandle SetOwnElement( Handle object, uint32_t index, Handle value, StrictMode strict_mode); // Empty handle is returned if the element cannot be set to the given value. MUST_USE_RESULT static MaybeHandle SetElement( Handle object, uint32_t index, Handle value, PropertyAttributes attributes, StrictMode strict_mode, bool check_prototype = true, SetPropertyMode set_mode = SET_PROPERTY); // Returns the index'th element. // The undefined object if index is out of bounds. MUST_USE_RESULT static MaybeHandle GetElementWithInterceptor( Handle object, Handle receiver, uint32_t index); enum SetFastElementsCapacitySmiMode { kAllowSmiElements, kForceSmiElements, kDontAllowSmiElements }; // Replace the elements' backing store with fast elements of the given // capacity. Update the length for JSArrays. Returns the new backing // store. static Handle SetFastElementsCapacityAndLength( Handle object, int capacity, int length, SetFastElementsCapacitySmiMode smi_mode); static void SetFastDoubleElementsCapacityAndLength( Handle object, int capacity, int length); // Lookup interceptors are used for handling properties controlled by host // objects. inline bool HasNamedInterceptor(); inline bool HasIndexedInterceptor(); // Computes the enumerable keys from interceptors. Used for debug mirrors and // by JSReceiver::GetKeys. MUST_USE_RESULT static MaybeHandle GetKeysForNamedInterceptor( Handle object, Handle receiver); MUST_USE_RESULT static MaybeHandle GetKeysForIndexedInterceptor( Handle object, Handle receiver); // Support functions for v8 api (needed for correct interceptor behavior). MUST_USE_RESULT static Maybe HasRealNamedProperty( Handle object, Handle key); MUST_USE_RESULT static Maybe HasRealElementProperty( Handle object, uint32_t index); MUST_USE_RESULT static Maybe HasRealNamedCallbackProperty( Handle object, Handle key); // Get the header size for a JSObject. Used to compute the index of // internal fields as well as the number of internal fields. inline int GetHeaderSize(); inline int GetInternalFieldCount(); inline int GetInternalFieldOffset(int index); inline Object* GetInternalField(int index); inline void SetInternalField(int index, Object* value); inline void SetInternalField(int index, Smi* value); // Returns the number of properties on this object filtering out properties // with the specified attributes (ignoring interceptors). int NumberOfOwnProperties(PropertyAttributes filter = NONE); // Fill in details for properties into storage starting at the specified // index. 
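  // A worked illustration of NewElementsCapacity above (numbers only, not
  // additional API): starting from an empty backing store and growing
  // repeatedly gives the capacity sequence
  //   0 -> 16 -> 40 -> 76 -> 130 -> 211 -> ...
  // i.e. a constant 16-element headroom for small arrays on top of roughly
  // 1.5x geometric growth for large ones.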
void GetOwnPropertyNames( FixedArray* storage, int index, PropertyAttributes filter = NONE); // Returns the number of properties on this object filtering out properties // with the specified attributes (ignoring interceptors). int NumberOfOwnElements(PropertyAttributes filter); // Returns the number of enumerable elements (ignoring interceptors). int NumberOfEnumElements(); // Returns the number of elements on this object filtering out elements // with the specified attributes (ignoring interceptors). int GetOwnElementKeys(FixedArray* storage, PropertyAttributes filter); // Count and fill in the enumerable elements into storage. // (storage->length() == NumberOfEnumElements()). // If storage is NULL, will count the elements without adding // them to any storage. // Returns the number of enumerable elements. int GetEnumElementKeys(FixedArray* storage); // Returns a new map with all transitions dropped from the object's current // map and the ElementsKind set. static Handle GetElementsTransitionMap(Handle object, ElementsKind to_kind); static void TransitionElementsKind(Handle object, ElementsKind to_kind); static void MigrateToMap(Handle object, Handle new_map); // Convert the object to use the canonical dictionary // representation. If the object is expected to have additional properties // added this number can be indicated to have the backing store allocated to // an initial capacity for holding these properties. static void NormalizeProperties(Handle object, PropertyNormalizationMode mode, int expected_additional_properties); // Convert and update the elements backing store to be a // SeededNumberDictionary dictionary. Returns the backing after conversion. static Handle NormalizeElements( Handle object); // Transform slow named properties to fast variants. static void MigrateSlowToFast(Handle object, int unused_property_fields); // Access fast-case object properties at index. static Handle FastPropertyAt(Handle object, Representation representation, FieldIndex index); inline Object* RawFastPropertyAt(FieldIndex index); inline void FastPropertyAtPut(FieldIndex index, Object* value); void WriteToField(int descriptor, Object* value); // Access to in object properties. inline int GetInObjectPropertyOffset(int index); inline Object* InObjectPropertyAt(int index); inline Object* InObjectPropertyAtPut(int index, Object* value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER); // Set the object's prototype (only JSReceiver and null are allowed values). MUST_USE_RESULT static MaybeHandle SetPrototype( Handle object, Handle value, bool from_javascript); // Initializes the body after properties slot, properties slot is // initialized by set_properties. Fill the pre-allocated fields with // pre_allocated_value and the rest with filler_value. // Note: this call does not update write barrier, the caller is responsible // to ensure that |filler_value| can be collected without WB here. inline void InitializeBody(Map* map, Object* pre_allocated_value, Object* filler_value); // Check whether this object references another object bool ReferencesObject(Object* obj); // Disalow further properties to be added to the object. MUST_USE_RESULT static MaybeHandle PreventExtensions( Handle object); // ES5 Object.freeze MUST_USE_RESULT static MaybeHandle Freeze(Handle object); // Called the first time an object is observed with ES7 Object.observe. static void SetObserved(Handle object); // Copy object. 
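  // A minimal sketch of the dictionary/fast round trip provided by
  // NormalizeProperties and MigrateSlowToFast above. Handle types are
  // abbreviated and the call sites are illustrative only:
  //
  //   // Switch the object to dictionary-mode (normalized) properties,
  //   // keeping in-object properties and reserving no extra capacity.
  //   JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, 0);
  //   ...
  //   // Later, rebuild fast descriptor-based properties, requesting no
  //   // unused fields in the new backing store.
  //   JSObject::MigrateSlowToFast(object, 0);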
enum DeepCopyHints { kNoHints = 0, kObjectIsShallow = 1 }; static Handle Copy(Handle object); MUST_USE_RESULT static MaybeHandle DeepCopy( Handle object, AllocationSiteUsageContext* site_context, DeepCopyHints hints = kNoHints); MUST_USE_RESULT static MaybeHandle DeepWalk( Handle object, AllocationSiteCreationContext* site_context); static Handle GetDataProperty(Handle object, Handle key); static Handle GetDataProperty(LookupIterator* it); DECLARE_CAST(JSObject) // Dispatched behavior. void JSObjectShortPrint(StringStream* accumulator); DECLARE_PRINTER(JSObject) DECLARE_VERIFIER(JSObject) #ifdef OBJECT_PRINT void PrintProperties(OStream& os); // NOLINT void PrintElements(OStream& os); // NOLINT void PrintTransitions(OStream& os); // NOLINT #endif static void PrintElementsTransition( FILE* file, Handle object, ElementsKind from_kind, Handle from_elements, ElementsKind to_kind, Handle to_elements); void PrintInstanceMigration(FILE* file, Map* original_map, Map* new_map); #ifdef DEBUG // Structure for collecting spill information about JSObjects. class SpillInformation { public: void Clear(); void Print(); int number_of_objects_; int number_of_objects_with_fast_properties_; int number_of_objects_with_fast_elements_; int number_of_fast_used_fields_; int number_of_fast_unused_fields_; int number_of_slow_used_properties_; int number_of_slow_unused_properties_; int number_of_fast_used_elements_; int number_of_fast_unused_elements_; int number_of_slow_used_elements_; int number_of_slow_unused_elements_; }; void IncrementSpillStatistics(SpillInformation* info); #endif #ifdef VERIFY_HEAP // If a GC was caused while constructing this object, the elements pointer // may point to a one pointer filler map. The object won't be rooted, but // our heap verification code could stumble across it. bool ElementsAreSafeToExamine(); #endif Object* SlowReverseLookup(Object* value); // Maximal number of elements (numbered 0 .. kMaxElementCount - 1). // Also maximal value of JSArray's length property. static const uint32_t kMaxElementCount = 0xffffffffu; // Constants for heuristics controlling conversion of fast elements // to slow elements. // Maximal gap that can be introduced by adding an element beyond // the current elements length. static const uint32_t kMaxGap = 1024; // Maximal length of fast elements array that won't be checked for // being dense enough on expansion. static const int kMaxUncheckedFastElementsLength = 5000; // Same as above but for old arrays. This limit is more strict. We // don't want to be wasteful with long lived objects. static const int kMaxUncheckedOldFastElementsLength = 500; // Note that Page::kMaxRegularHeapObjectSize puts a limit on // permissible values (see the DCHECK in heap.cc). static const int kInitialMaxFastElementArray = 100000; // This constant applies only to the initial map of "$Object" aka // "global.Object" and not to arbitrary other JSObject maps. static const int kInitialGlobalObjectUnusedPropertiesCount = 4; static const int kMaxInstanceSize = 255 * kPointerSize; // When extending the backing storage for property values, we increase // its size by more than the 1 entry necessary, so sequentially adding fields // to the same object requires fewer allocations and copies. static const int kFieldsAdded = 3; // Layout description. 
static const int kPropertiesOffset = HeapObject::kHeaderSize; static const int kElementsOffset = kPropertiesOffset + kPointerSize; static const int kHeaderSize = kElementsOffset + kPointerSize; STATIC_ASSERT(kHeaderSize == Internals::kJSObjectHeaderSize); class BodyDescriptor : public FlexibleBodyDescriptor { public: static inline int SizeOf(Map* map, HeapObject* object); }; Context* GetCreationContext(); // Enqueue change record for Object.observe. May cause GC. static void EnqueueChangeRecord(Handle object, const char* type, Handle name, Handle old_value); static void MigrateToNewProperty(Handle object, Handle transition, Handle value); private: friend class DictionaryElementsAccessor; friend class JSReceiver; friend class Object; static void MigrateFastToFast(Handle object, Handle new_map); static void MigrateFastToSlow(Handle object, Handle new_map, int expected_additional_properties); static void GeneralizeFieldRepresentation(Handle object, int modify_index, Representation new_representation, Handle new_field_type); static void UpdateAllocationSite(Handle object, ElementsKind to_kind); // Used from Object::GetProperty(). MUST_USE_RESULT static MaybeHandle GetPropertyWithFailedAccessCheck( LookupIterator* it); MUST_USE_RESULT static MaybeHandle GetElementWithCallback( Handle object, Handle receiver, Handle structure, uint32_t index, Handle holder); MUST_USE_RESULT static Maybe GetElementAttributeWithInterceptor(Handle object, Handle receiver, uint32_t index, bool continue_search); MUST_USE_RESULT static Maybe GetElementAttributeWithoutInterceptor(Handle object, Handle receiver, uint32_t index, bool continue_search); MUST_USE_RESULT static MaybeHandle SetElementWithCallback( Handle object, Handle structure, uint32_t index, Handle value, Handle holder, StrictMode strict_mode); MUST_USE_RESULT static MaybeHandle SetElementWithInterceptor( Handle object, uint32_t index, Handle value, PropertyAttributes attributes, StrictMode strict_mode, bool check_prototype, SetPropertyMode set_mode); MUST_USE_RESULT static MaybeHandle SetElementWithoutInterceptor( Handle object, uint32_t index, Handle value, PropertyAttributes attributes, StrictMode strict_mode, bool check_prototype, SetPropertyMode set_mode); MUST_USE_RESULT static MaybeHandle SetElementWithCallbackSetterInPrototypes( Handle object, uint32_t index, Handle value, bool* found, StrictMode strict_mode); MUST_USE_RESULT static MaybeHandle SetDictionaryElement( Handle object, uint32_t index, Handle value, PropertyAttributes attributes, StrictMode strict_mode, bool check_prototype, SetPropertyMode set_mode = SET_PROPERTY); MUST_USE_RESULT static MaybeHandle SetFastDoubleElement( Handle object, uint32_t index, Handle value, StrictMode strict_mode, bool check_prototype = true); MUST_USE_RESULT static MaybeHandle SetPropertyWithFailedAccessCheck( LookupIterator* it, Handle value, StrictMode strict_mode); // Add a property to a slow-case object. static void AddSlowProperty(Handle object, Handle name, Handle value, PropertyAttributes attributes); MUST_USE_RESULT static MaybeHandle DeleteProperty( Handle object, Handle name, DeleteMode mode); MUST_USE_RESULT static MaybeHandle DeletePropertyWithInterceptor( Handle holder, Handle receiver, Handle name); // Deletes the named property in a normalized object. 
static Handle DeleteNormalizedProperty(Handle object, Handle name, DeleteMode mode); MUST_USE_RESULT static MaybeHandle DeleteElement( Handle object, uint32_t index, DeleteMode mode); MUST_USE_RESULT static MaybeHandle DeleteElementWithInterceptor( Handle object, uint32_t index); bool ReferencesObjectFromElements(FixedArray* elements, ElementsKind kind, Object* object); // Returns true if most of the elements backing storage is used. bool HasDenseElements(); // Gets the current elements capacity and the number of used elements. void GetElementsCapacityAndUsage(int* capacity, int* used); static bool CanSetCallback(Handle object, Handle name); static void SetElementCallback(Handle object, uint32_t index, Handle structure, PropertyAttributes attributes); static void SetPropertyCallback(Handle object, Handle name, Handle structure, PropertyAttributes attributes); static void DefineElementAccessor(Handle object, uint32_t index, Handle getter, Handle setter, PropertyAttributes attributes); // Return the hash table backing store or the inline stored identity hash, // whatever is found. MUST_USE_RESULT Object* GetHiddenPropertiesHashTable(); // Return the hash table backing store for hidden properties. If there is no // backing store, allocate one. static Handle GetOrCreateHiddenPropertiesHashtable( Handle object); // Set the hidden property backing store to either a hash table or // the inline-stored identity hash. static Handle SetHiddenPropertiesHashTable( Handle object, Handle value); MUST_USE_RESULT Object* GetIdentityHash(); static Handle GetOrCreateIdentityHash(Handle object); DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject); }; // Common superclass for FixedArrays that allow implementations to share // common accessors and some code paths. class FixedArrayBase: public HeapObject { public: // [length]: length of the array. inline int length() const; inline void set_length(int value); // Get and set the length using acquire loads and release stores. inline int synchronized_length() const; inline void synchronized_set_length(int value); DECLARE_CAST(FixedArrayBase) // Layout description. // Length is smi tagged when it is stored. static const int kLengthOffset = HeapObject::kHeaderSize; static const int kHeaderSize = kLengthOffset + kPointerSize; }; class FixedDoubleArray; class IncrementalMarking; // FixedArray describes fixed-sized arrays with element type Object*. class FixedArray: public FixedArrayBase { public: // Setter and getter for elements. inline Object* get(int index); static inline Handle get(Handle array, int index); // Setter that uses write barrier. inline void set(int index, Object* value); inline bool is_the_hole(int index); // Setter that doesn't need write barrier. inline void set(int index, Smi* value); // Setter with explicit barrier mode. inline void set(int index, Object* value, WriteBarrierMode mode); // Setters for frequently used oddballs located in old space. inline void set_undefined(int index); inline void set_null(int index); inline void set_the_hole(int index); inline Object** GetFirstElementAddress(); inline bool ContainsOnlySmisOrHoles(); // Gives access to raw memory which stores the array's data. inline Object** data_start(); inline void FillWithHoles(int from, int to); // Shrink length and insert filler objects. void Shrink(int length); // Copy operation. static Handle CopySize(Handle array, int new_length, PretenureFlag pretenure = NOT_TENURED); // Add the elements of a JSArray to this FixedArray. 
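  // A worked illustration of the FixedArrayBase layout above, assuming a
  // 32-bit build where kPointerSize == 4 (the numbers are derived, not
  // additional constants):
  //   offset 0..3   map pointer (HeapObject header)
  //   offset 4..7   length, stored as a Smi (kLengthOffset)
  //   offset 8..    element storage laid out by the subclasses (kHeaderSize)
  // For FixedArray this means the element at index i lives at byte offset
  // kHeaderSize + i * kPointerSize, which is what SizeFor/OffsetOfElementAt
  // below compute.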
MUST_USE_RESULT static MaybeHandle AddKeysFromArrayLike( Handle content, Handle array); // Computes the union of keys and return the result. // Used for implementing "for (n in object) { }" MUST_USE_RESULT static MaybeHandle UnionOfKeys( Handle first, Handle second); // Copy a sub array from the receiver to dest. void CopyTo(int pos, FixedArray* dest, int dest_pos, int len); // Garbage collection support. static int SizeFor(int length) { return kHeaderSize + length * kPointerSize; } // Code Generation support. static int OffsetOfElementAt(int index) { return SizeFor(index); } // Garbage collection support. Object** RawFieldOfElementAt(int index) { return HeapObject::RawField(this, OffsetOfElementAt(index)); } DECLARE_CAST(FixedArray) // Maximal allowed size, in bytes, of a single FixedArray. // Prevents overflowing size computations, as well as extreme memory // consumption. static const int kMaxSize = 128 * MB * kPointerSize; // Maximally allowed length of a FixedArray. static const int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize; // Dispatched behavior. DECLARE_PRINTER(FixedArray) DECLARE_VERIFIER(FixedArray) #ifdef DEBUG // Checks if two FixedArrays have identical contents. bool IsEqualTo(FixedArray* other); #endif // Swap two elements in a pair of arrays. If this array and the // numbers array are the same object, the elements are only swapped // once. void SwapPairs(FixedArray* numbers, int i, int j); // Sort prefix of this array and the numbers array as pairs wrt. the // numbers. If the numbers array and the this array are the same // object, the prefix of this array is sorted. void SortPairs(FixedArray* numbers, uint32_t len); class BodyDescriptor : public FlexibleBodyDescriptor { public: static inline int SizeOf(Map* map, HeapObject* object) { return SizeFor(reinterpret_cast(object)->length()); } }; protected: // Set operation on FixedArray without using write barriers. Can // only be used for storing old space objects or smis. static inline void NoWriteBarrierSet(FixedArray* array, int index, Object* value); // Set operation on FixedArray without incremental write barrier. Can // only be used if the object is guaranteed to be white (whiteness witness // is present). static inline void NoIncrementalWriteBarrierSet(FixedArray* array, int index, Object* value); private: STATIC_ASSERT(kHeaderSize == Internals::kFixedArrayHeaderSize); DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray); }; // FixedDoubleArray describes fixed-sized arrays with element type double. class FixedDoubleArray: public FixedArrayBase { public: // Setter and getter for elements. inline double get_scalar(int index); inline int64_t get_representation(int index); static inline Handle get(Handle array, int index); inline void set(int index, double value); inline void set_the_hole(int index); // Checking for the hole. inline bool is_the_hole(int index); // Garbage collection support. inline static int SizeFor(int length) { return kHeaderSize + length * kDoubleSize; } // Gives access to raw memory which stores the array's data. inline double* data_start(); inline void FillWithHoles(int from, int to); // Code Generation support. static int OffsetOfElementAt(int index) { return SizeFor(index); } inline static bool is_the_hole_nan(double value); inline static double hole_nan_as_double(); inline static double canonical_not_the_hole_nan_as_double(); DECLARE_CAST(FixedDoubleArray) // Maximal allowed size, in bytes, of a single FixedDoubleArray. 
// Prevents overflowing size computations, as well as extreme memory // consumption. static const int kMaxSize = 512 * MB; // Maximally allowed length of a FixedArray. static const int kMaxLength = (kMaxSize - kHeaderSize) / kDoubleSize; // Dispatched behavior. DECLARE_PRINTER(FixedDoubleArray) DECLARE_VERIFIER(FixedDoubleArray) private: DISALLOW_IMPLICIT_CONSTRUCTORS(FixedDoubleArray); }; // ConstantPoolArray describes a fixed-sized array containing constant pool // entries. // // A ConstantPoolArray can be structured in two different ways depending upon // whether it is extended or small. The is_extended_layout() method can be used // to discover which layout the constant pool has. // // The format of a small constant pool is: // [kSmallLayout1Offset] : Small section layout bitmap 1 // [kSmallLayout2Offset] : Small section layout bitmap 2 // [first_index(INT64, SMALL_SECTION)] : 64 bit entries // ... : ... // [first_index(CODE_PTR, SMALL_SECTION)] : code pointer entries // ... : ... // [first_index(HEAP_PTR, SMALL_SECTION)] : heap pointer entries // ... : ... // [first_index(INT32, SMALL_SECTION)] : 32 bit entries // ... : ... // // If the constant pool has an extended layout, the extended section constant // pool also contains an extended section, which has the following format at // location get_extended_section_header_offset(): // [kExtendedInt64CountOffset] : count of extended 64 bit entries // [kExtendedCodePtrCountOffset] : count of extended code pointers // [kExtendedHeapPtrCountOffset] : count of extended heap pointers // [kExtendedInt32CountOffset] : count of extended 32 bit entries // [first_index(INT64, EXTENDED_SECTION)] : 64 bit entries // ... : ... // [first_index(CODE_PTR, EXTENDED_SECTION)]: code pointer entries // ... : ... // [first_index(HEAP_PTR, EXTENDED_SECTION)]: heap pointer entries // ... : ... // [first_index(INT32, EXTENDED_SECTION)] : 32 bit entries // ... : ... // class ConstantPoolArray: public HeapObject { public: enum WeakObjectState { NO_WEAK_OBJECTS, WEAK_OBJECTS_IN_OPTIMIZED_CODE, WEAK_OBJECTS_IN_IC }; enum Type { INT64 = 0, CODE_PTR, HEAP_PTR, INT32, // Number of types stored by the ConstantPoolArrays. 
NUMBER_OF_TYPES, FIRST_TYPE = INT64, LAST_TYPE = INT32 }; enum LayoutSection { SMALL_SECTION = 0, EXTENDED_SECTION, NUMBER_OF_LAYOUT_SECTIONS }; class NumberOfEntries BASE_EMBEDDED { public: inline NumberOfEntries() { for (int i = 0; i < NUMBER_OF_TYPES; i++) { element_counts_[i] = 0; } } inline NumberOfEntries(int int64_count, int code_ptr_count, int heap_ptr_count, int int32_count) { element_counts_[INT64] = int64_count; element_counts_[CODE_PTR] = code_ptr_count; element_counts_[HEAP_PTR] = heap_ptr_count; element_counts_[INT32] = int32_count; } inline NumberOfEntries(ConstantPoolArray* array, LayoutSection section) { element_counts_[INT64] = array->number_of_entries(INT64, section); element_counts_[CODE_PTR] = array->number_of_entries(CODE_PTR, section); element_counts_[HEAP_PTR] = array->number_of_entries(HEAP_PTR, section); element_counts_[INT32] = array->number_of_entries(INT32, section); } inline void increment(Type type); inline int equals(const NumberOfEntries& other) const; inline bool is_empty() const; inline int count_of(Type type) const; inline int base_of(Type type) const; inline int total_count() const; inline int are_in_range(int min, int max) const; private: int element_counts_[NUMBER_OF_TYPES]; }; class Iterator BASE_EMBEDDED { public: inline Iterator(ConstantPoolArray* array, Type type) : array_(array), type_(type), final_section_(array->final_section()), current_section_(SMALL_SECTION), next_index_(array->first_index(type, SMALL_SECTION)) { update_section(); } inline Iterator(ConstantPoolArray* array, Type type, LayoutSection section) : array_(array), type_(type), final_section_(section), current_section_(section), next_index_(array->first_index(type, section)) { update_section(); } inline int next_index(); inline bool is_finished(); private: inline void update_section(); ConstantPoolArray* array_; const Type type_; const LayoutSection final_section_; LayoutSection current_section_; int next_index_; }; // Getters for the first index, the last index and the count of entries of // a given type for a given layout section. inline int first_index(Type type, LayoutSection layout_section); inline int last_index(Type type, LayoutSection layout_section); inline int number_of_entries(Type type, LayoutSection layout_section); // Returns the type of the entry at the given index. inline Type get_type(int index); inline bool offset_is_type(int offset, Type type); // Setter and getter for pool elements. inline Address get_code_ptr_entry(int index); inline Object* get_heap_ptr_entry(int index); inline int64_t get_int64_entry(int index); inline int32_t get_int32_entry(int index); inline double get_int64_entry_as_double(int index); inline void set(int index, Address value); inline void set(int index, Object* value); inline void set(int index, int64_t value); inline void set(int index, double value); inline void set(int index, int32_t value); // Setters which take a raw offset rather than an index (for code generation). inline void set_at_offset(int offset, int32_t value); inline void set_at_offset(int offset, int64_t value); inline void set_at_offset(int offset, double value); inline void set_at_offset(int offset, Address value); inline void set_at_offset(int offset, Object* value); // Setter and getter for weak objects state inline void set_weak_object_state(WeakObjectState state); inline WeakObjectState get_weak_object_state(); // Returns true if the constant pool has an extended layout, false if it has // only the small layout. 
inline bool is_extended_layout(); // Returns the last LayoutSection in this constant pool array. inline LayoutSection final_section(); // Set up initial state for a small layout constant pool array. inline void Init(const NumberOfEntries& small); // Set up initial state for an extended layout constant pool array. inline void InitExtended(const NumberOfEntries& small, const NumberOfEntries& extended); // Clears the pointer entries with GC safe values. void ClearPtrEntries(Isolate* isolate); // returns the total number of entries in the constant pool array. inline int length(); // Garbage collection support. inline int size(); inline static int MaxInt64Offset(int number_of_int64) { return kFirstEntryOffset + (number_of_int64 * kInt64Size); } inline static int SizeFor(const NumberOfEntries& small) { int size = kFirstEntryOffset + (small.count_of(INT64) * kInt64Size) + (small.count_of(CODE_PTR) * kPointerSize) + (small.count_of(HEAP_PTR) * kPointerSize) + (small.count_of(INT32) * kInt32Size); return RoundUp(size, kPointerSize); } inline static int SizeForExtended(const NumberOfEntries& small, const NumberOfEntries& extended) { int size = SizeFor(small); size = RoundUp(size, kInt64Size); // Align extended header to 64 bits. size += kExtendedFirstOffset + (extended.count_of(INT64) * kInt64Size) + (extended.count_of(CODE_PTR) * kPointerSize) + (extended.count_of(HEAP_PTR) * kPointerSize) + (extended.count_of(INT32) * kInt32Size); return RoundUp(size, kPointerSize); } inline static int entry_size(Type type) { switch (type) { case INT32: return kInt32Size; case INT64: return kInt64Size; case CODE_PTR: case HEAP_PTR: return kPointerSize; default: UNREACHABLE(); return 0; } } // Code Generation support. inline int OffsetOfElementAt(int index) { int offset; LayoutSection section; if (is_extended_layout() && index >= first_extended_section_index()) { section = EXTENDED_SECTION; offset = get_extended_section_header_offset() + kExtendedFirstOffset; } else { section = SMALL_SECTION; offset = kFirstEntryOffset; } // Add offsets for the preceding type sections. DCHECK(index <= last_index(LAST_TYPE, section)); for (Type type = FIRST_TYPE; index > last_index(type, section); type = next_type(type)) { offset += entry_size(type) * number_of_entries(type, section); } // Add offset for the index in it's type. Type type = get_type(index); offset += entry_size(type) * (index - first_index(type, section)); return offset; } DECLARE_CAST(ConstantPoolArray) // Garbage collection support. Object** RawFieldOfElementAt(int index) { return HeapObject::RawField(this, OffsetOfElementAt(index)); } // Small Layout description. static const int kSmallLayout1Offset = HeapObject::kHeaderSize; static const int kSmallLayout2Offset = kSmallLayout1Offset + kInt32Size; static const int kHeaderSize = kSmallLayout2Offset + kInt32Size; static const int kFirstEntryOffset = ROUND_UP(kHeaderSize, kInt64Size); static const int kSmallLayoutCountBits = 10; static const int kMaxSmallEntriesPerType = (1 << kSmallLayoutCountBits) - 1; // Fields in kSmallLayout1Offset. class Int64CountField: public BitField {}; class CodePtrCountField: public BitField {}; class HeapPtrCountField: public BitField {}; class IsExtendedField: public BitField {}; // Fields in kSmallLayout2Offset. class Int32CountField: public BitField {}; class TotalCountField: public BitField {}; class WeakObjectStateField: public BitField {}; // Extended layout description, which starts at // get_extended_section_header_offset(). 
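  // A worked example of SizeFor above (illustrative only, assuming a 32-bit
  // build where kPointerSize == kInt32Size == 4 and kInt64Size == 8, so the
  // small-layout header is 12 bytes and kFirstEntryOffset is 16): a small
  // section holding 1 INT64, 0 CODE_PTR, 2 HEAP_PTR and 3 INT32 entries needs
  //   16 + 1 * 8 + 2 * 4 + 3 * 4 = 44 bytes,
  // already pointer-aligned, so the final RoundUp leaves it at 44.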
  static const int kExtendedInt64CountOffset = 0;
  static const int kExtendedCodePtrCountOffset =
      kExtendedInt64CountOffset + kPointerSize;
  static const int kExtendedHeapPtrCountOffset =
      kExtendedCodePtrCountOffset + kPointerSize;
  static const int kExtendedInt32CountOffset =
      kExtendedHeapPtrCountOffset + kPointerSize;
  static const int kExtendedFirstOffset =
      kExtendedInt32CountOffset + kPointerSize;

  // Dispatched behavior.
  void ConstantPoolIterateBody(ObjectVisitor* v);

  DECLARE_PRINTER(ConstantPoolArray)
  DECLARE_VERIFIER(ConstantPoolArray)

 private:
  inline int first_extended_section_index();
  inline int get_extended_section_header_offset();

  inline static Type next_type(Type type) {
    DCHECK(type >= FIRST_TYPE && type < NUMBER_OF_TYPES);
    int type_int = static_cast<int>(type);
    return static_cast<Type>(++type_int);
  }

  DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolArray);
};


// DescriptorArrays are fixed arrays used to hold instance descriptors.
// The format of these objects is:
//   [0]: Number of descriptors
//   [1]: Either Smi(0) if uninitialized, or a pointer to a small fixed array:
//          [0]: pointer to fixed array with enum cache
//          [1]: either Smi(0) or pointer to fixed array with indices
//   [2]: first key
//   [2 + number of descriptors * kDescriptorSize]: start of slack
class DescriptorArray: public FixedArray {
 public:
  // Returns true for both shared empty_descriptor_array and for smis, which
  // the map uses to encode additional bit fields when the descriptor array is
  // not yet used.
  inline bool IsEmpty();

  // Returns the number of descriptors in the array.
  int number_of_descriptors() {
    DCHECK(length() >= kFirstIndex || IsEmpty());
    int len = length();
    return len == 0 ? 0 : Smi::cast(get(kDescriptorLengthIndex))->value();
  }

  int number_of_descriptors_storage() {
    int len = length();
    return len == 0 ? 0 : (len - kFirstIndex) / kDescriptorSize;
  }

  int NumberOfSlackDescriptors() {
    return number_of_descriptors_storage() - number_of_descriptors();
  }

  inline void SetNumberOfDescriptors(int number_of_descriptors);
  inline int number_of_entries() { return number_of_descriptors(); }

  bool HasEnumCache() {
    return !IsEmpty() && !get(kEnumCacheIndex)->IsSmi();
  }

  void CopyEnumCacheFrom(DescriptorArray* array) {
    set(kEnumCacheIndex, array->get(kEnumCacheIndex));
  }

  FixedArray* GetEnumCache() {
    DCHECK(HasEnumCache());
    FixedArray* bridge = FixedArray::cast(get(kEnumCacheIndex));
    return FixedArray::cast(bridge->get(kEnumCacheBridgeCacheIndex));
  }

  bool HasEnumIndicesCache() {
    if (IsEmpty()) return false;
    Object* object = get(kEnumCacheIndex);
    if (object->IsSmi()) return false;
    FixedArray* bridge = FixedArray::cast(object);
    return !bridge->get(kEnumCacheBridgeIndicesCacheIndex)->IsSmi();
  }

  FixedArray* GetEnumIndicesCache() {
    DCHECK(HasEnumIndicesCache());
    FixedArray* bridge = FixedArray::cast(get(kEnumCacheIndex));
    return FixedArray::cast(bridge->get(kEnumCacheBridgeIndicesCacheIndex));
  }

  Object** GetEnumCacheSlot() {
    DCHECK(HasEnumCache());
    return HeapObject::RawField(reinterpret_cast<HeapObject*>(this),
                                kEnumCacheOffset);
  }

  void ClearEnumCache();

  // Initialize or change the enum cache,
  // using the supplied storage for the small "bridge".
  void SetEnumCache(FixedArray* bridge_storage,
                    FixedArray* new_cache,
                    Object* new_index_cache);

  bool CanHoldValue(int descriptor, Object* value);

  // Accessors for fetching instance descriptor at descriptor number.
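  // A worked illustration of the indexing used by these accessors (derived
  // from kFirstIndex and kDescriptorSize below; illustrative only): with
  // kFirstIndex == 2 and kDescriptorSize == 3, descriptor n occupies three
  // consecutive slots,
  //   key     at array index 2 + n * 3
  //   details at array index 2 + n * 3 + 1
  //   value   at array index 2 + n * 3 + 2
  // so descriptor 0 uses indices 2..4, descriptor 1 uses indices 5..7, and an
  // array holding d descriptors needs 2 + 3 * d slots plus any slack.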
inline Name* GetKey(int descriptor_number); inline Object** GetKeySlot(int descriptor_number); inline Object* GetValue(int descriptor_number); inline void SetValue(int descriptor_number, Object* value); inline Object** GetValueSlot(int descriptor_number); static inline int GetValueOffset(int descriptor_number); inline Object** GetDescriptorStartSlot(int descriptor_number); inline Object** GetDescriptorEndSlot(int descriptor_number); inline PropertyDetails GetDetails(int descriptor_number); inline PropertyType GetType(int descriptor_number); inline int GetFieldIndex(int descriptor_number); inline HeapType* GetFieldType(int descriptor_number); inline Object* GetConstant(int descriptor_number); inline Object* GetCallbacksObject(int descriptor_number); inline AccessorDescriptor* GetCallbacks(int descriptor_number); inline Name* GetSortedKey(int descriptor_number); inline int GetSortedKeyIndex(int descriptor_number); inline void SetSortedKey(int pointer, int descriptor_number); inline void SetRepresentation(int descriptor_number, Representation representation); // Accessor for complete descriptor. inline void Get(int descriptor_number, Descriptor* desc); inline void Set(int descriptor_number, Descriptor* desc); void Replace(int descriptor_number, Descriptor* descriptor); // Append automatically sets the enumeration index. This should only be used // to add descriptors in bulk at the end, followed by sorting the descriptor // array. inline void Append(Descriptor* desc); static Handle CopyUpTo(Handle desc, int enumeration_index, int slack = 0); static Handle CopyUpToAddAttributes( Handle desc, int enumeration_index, PropertyAttributes attributes, int slack = 0); // Sort the instance descriptors by the hash codes of their keys. void Sort(); // Search the instance descriptors for given name. INLINE(int Search(Name* name, int number_of_own_descriptors)); // As the above, but uses DescriptorLookupCache and updates it when // necessary. INLINE(int SearchWithCache(Name* name, Map* map)); // Allocates a DescriptorArray, but returns the singleton // empty descriptor array object if number_of_descriptors is 0. static Handle Allocate(Isolate* isolate, int number_of_descriptors, int slack = 0); DECLARE_CAST(DescriptorArray) // Constant for denoting key was not found. static const int kNotFound = -1; static const int kDescriptorLengthIndex = 0; static const int kEnumCacheIndex = 1; static const int kFirstIndex = 2; // The length of the "bridge" to the enum cache. static const int kEnumCacheBridgeLength = 2; static const int kEnumCacheBridgeCacheIndex = 0; static const int kEnumCacheBridgeIndicesCacheIndex = 1; // Layout description. static const int kDescriptorLengthOffset = FixedArray::kHeaderSize; static const int kEnumCacheOffset = kDescriptorLengthOffset + kPointerSize; static const int kFirstOffset = kEnumCacheOffset + kPointerSize; // Layout description for the bridge array. static const int kEnumCacheBridgeCacheOffset = FixedArray::kHeaderSize; // Layout of descriptor. static const int kDescriptorKey = 0; static const int kDescriptorDetails = 1; static const int kDescriptorValue = 2; static const int kDescriptorSize = 3; #ifdef OBJECT_PRINT // Print all the descriptors. void PrintDescriptors(OStream& os); // NOLINT #endif #ifdef DEBUG // Is the descriptor array sorted and without duplicates? bool IsSortedNoDuplicates(int valid_descriptors = -1); // Is the descriptor array consistent with the back pointers in targets? 
bool IsConsistentWithBackPointers(Map* current_map); // Are two DescriptorArrays equal? bool IsEqualTo(DescriptorArray* other); #endif // Returns the fixed array length required to hold number_of_descriptors // descriptors. static int LengthFor(int number_of_descriptors) { return ToKeyIndex(number_of_descriptors); } private: // WhitenessWitness is used to prove that a descriptor array is white // (unmarked), so incremental write barriers can be skipped because the // marking invariant cannot be broken and slots pointing into evacuation // candidates will be discovered when the object is scanned. A witness is // always stack-allocated right after creating an array. By allocating a // witness, incremental marking is globally disabled. The witness is then // passed along wherever needed to statically prove that the array is known to // be white. class WhitenessWitness { public: inline explicit WhitenessWitness(DescriptorArray* array); inline ~WhitenessWitness(); private: IncrementalMarking* marking_; }; // An entry in a DescriptorArray, represented as an (array, index) pair. class Entry { public: inline explicit Entry(DescriptorArray* descs, int index) : descs_(descs), index_(index) { } inline PropertyType type() { return descs_->GetType(index_); } inline Object* GetCallbackObject() { return descs_->GetValue(index_); } private: DescriptorArray* descs_; int index_; }; // Conversion from descriptor number to array indices. static int ToKeyIndex(int descriptor_number) { return kFirstIndex + (descriptor_number * kDescriptorSize) + kDescriptorKey; } static int ToDetailsIndex(int descriptor_number) { return kFirstIndex + (descriptor_number * kDescriptorSize) + kDescriptorDetails; } static int ToValueIndex(int descriptor_number) { return kFirstIndex + (descriptor_number * kDescriptorSize) + kDescriptorValue; } // Transfer a complete descriptor from the src descriptor array to this // descriptor array. void CopyFrom(int index, DescriptorArray* src, const WhitenessWitness&); inline void Set(int descriptor_number, Descriptor* desc, const WhitenessWitness&); // Swap first and second descriptor. inline void SwapSortedKeys(int first, int second); DISALLOW_IMPLICIT_CONSTRUCTORS(DescriptorArray); }; enum SearchMode { ALL_ENTRIES, VALID_ENTRIES }; template inline int LinearSearch(T* array, Name* name, int len, int valid_entries); template inline int Search(T* array, Name* name, int valid_entries = 0); // HashTable is a subclass of FixedArray that implements a hash table // that uses open addressing and quadratic probing. // // In order for the quadratic probing to work, elements that have not // yet been used and elements that have been deleted are // distinguished. Probing continues when deleted elements are // encountered and stops when unused elements are encountered. // // - Elements with key == undefined have not been used yet. // - Elements with key == the_hole have been deleted. // // The hash table class is parameterized with a Shape and a Key. // Shape must be a class with the following interface: // class ExampleShape { // public: // // Tells whether key matches other. // static bool IsMatch(Key key, Object* other); // // Returns the hash value for key. // static uint32_t Hash(Key key); // // Returns the hash value for object. // static uint32_t HashForObject(Key key, Object* object); // // Convert key to an object. // static inline Handle AsHandle(Isolate* isolate, Key key); // // The prefix size indicates number of elements in the beginning // // of the backing storage. 
//   static const int kPrefixSize = ..;
//   // The Element size indicates number of elements per entry.
//   static const int kEntrySize = ..;
// };
// The prefix size indicates an amount of memory in the
// beginning of the backing storage that can be used for non-element
// information by subclasses.

template <typename Key>
class BaseShape {
 public:
  static const bool UsesSeed = false;
  static uint32_t Hash(Key key) { return 0; }
  static uint32_t SeededHash(Key key, uint32_t seed) {
    DCHECK(UsesSeed);
    return Hash(key);
  }
  static uint32_t HashForObject(Key key, Object* object) { return 0; }
  static uint32_t SeededHashForObject(Key key, uint32_t seed, Object* object) {
    DCHECK(UsesSeed);
    return HashForObject(key, object);
  }
};


template <typename Derived, typename Shape, typename Key>
class HashTable : public FixedArray {
 public:
  // Wrapper methods
  inline uint32_t Hash(Key key) {
    if (Shape::UsesSeed) {
      return Shape::SeededHash(key, GetHeap()->HashSeed());
    } else {
      return Shape::Hash(key);
    }
  }

  inline uint32_t HashForObject(Key key, Object* object) {
    if (Shape::UsesSeed) {
      return Shape::SeededHashForObject(key, GetHeap()->HashSeed(), object);
    } else {
      return Shape::HashForObject(key, object);
    }
  }

  // Returns the number of elements in the hash table.
  int NumberOfElements() {
    return Smi::cast(get(kNumberOfElementsIndex))->value();
  }

  // Returns the number of deleted elements in the hash table.
  int NumberOfDeletedElements() {
    return Smi::cast(get(kNumberOfDeletedElementsIndex))->value();
  }

  // Returns the capacity of the hash table.
  int Capacity() {
    return Smi::cast(get(kCapacityIndex))->value();
  }

  // ElementAdded should be called whenever an element is added to a
  // hash table.
  void ElementAdded() { SetNumberOfElements(NumberOfElements() + 1); }

  // ElementRemoved should be called whenever an element is removed from
  // a hash table.
  void ElementRemoved() {
    SetNumberOfElements(NumberOfElements() - 1);
    SetNumberOfDeletedElements(NumberOfDeletedElements() + 1);
  }
  void ElementsRemoved(int n) {
    SetNumberOfElements(NumberOfElements() - n);
    SetNumberOfDeletedElements(NumberOfDeletedElements() + n);
  }

  // Returns a new HashTable object.
  MUST_USE_RESULT static Handle<Derived> New(
      Isolate* isolate,
      int at_least_space_for,
      MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY,
      PretenureFlag pretenure = NOT_TENURED);

  // Computes the required capacity for a table holding the given
  // number of elements. May be more than HashTable::kMaxCapacity.
  static int ComputeCapacity(int at_least_space_for);

  // Returns the key at entry.
  Object* KeyAt(int entry) { return get(EntryToIndex(entry)); }

  // Tells whether k is a real key.  The hole and undefined are not allowed
  // as keys and can be used to indicate missing or deleted elements.
  bool IsKey(Object* k) {
    return !k->IsTheHole() && !k->IsUndefined();
  }

  // Garbage collection support.
  void IteratePrefix(ObjectVisitor* visitor);
  void IterateElements(ObjectVisitor* visitor);

  DECLARE_CAST(HashTable)

  // Compute the probe offset (quadratic probing).
  INLINE(static uint32_t GetProbeOffset(uint32_t n)) {
    return (n + n * n) >> 1;
  }

  static const int kNumberOfElementsIndex = 0;
  static const int kNumberOfDeletedElementsIndex = 1;
  static const int kCapacityIndex = 2;
  static const int kPrefixStartIndex = 3;
  static const int kElementsStartIndex =
      kPrefixStartIndex + Shape::kPrefixSize;
  static const int kEntrySize = Shape::kEntrySize;
  static const int kElementsStartOffset =
      kHeaderSize + kElementsStartIndex * kPointerSize;
  static const int kCapacityOffset =
      kHeaderSize + kCapacityIndex * kPointerSize;

  // Constant used for denoting an absent entry.
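  // A worked illustration of the quadratic probing above (illustrative only):
  // GetProbeOffset(n) = (n + n*n) / 2 gives offsets 0, 1, 3, 6, 10, ... for
  // n = 0, 1, 2, 3, 4, so in a table of capacity 8 a key hashing to 5 probes
  // entries
  //   (5 + 0) & 7 = 5,  (5 + 1) & 7 = 6,  (5 + 3) & 7 = 0,
  //   (5 + 6) & 7 = 3,  (5 + 10) & 7 = 7, ...
  // Probing walks over deleted (the_hole) entries and stops at an undefined
  // (never used) entry, as described in the class comment.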
static const int kNotFound = -1; // Maximal capacity of HashTable. Based on maximal length of underlying // FixedArray. Staying below kMaxCapacity also ensures that EntryToIndex // cannot overflow. static const int kMaxCapacity = (FixedArray::kMaxLength - kElementsStartOffset) / kEntrySize; // Find entry for key otherwise return kNotFound. inline int FindEntry(Key key); int FindEntry(Isolate* isolate, Key key); // Rehashes the table in-place. void Rehash(Key key); protected: friend class ObjectHashTable; // Find the entry at which to insert element with the given key that // has the given hash value. uint32_t FindInsertionEntry(uint32_t hash); // Returns the index for an entry (of the key) static inline int EntryToIndex(int entry) { return (entry * kEntrySize) + kElementsStartIndex; } // Update the number of elements in the hash table. void SetNumberOfElements(int nof) { set(kNumberOfElementsIndex, Smi::FromInt(nof)); } // Update the number of deleted elements in the hash table. void SetNumberOfDeletedElements(int nod) { set(kNumberOfDeletedElementsIndex, Smi::FromInt(nod)); } // Sets the capacity of the hash table. void SetCapacity(int capacity) { // To scale a computed hash code to fit within the hash table, we // use bit-wise AND with a mask, so the capacity must be positive // and non-zero. DCHECK(capacity > 0); DCHECK(capacity <= kMaxCapacity); set(kCapacityIndex, Smi::FromInt(capacity)); } // Returns probe entry. static uint32_t GetProbe(uint32_t hash, uint32_t number, uint32_t size) { DCHECK(base::bits::IsPowerOfTwo32(size)); return (hash + GetProbeOffset(number)) & (size - 1); } inline static uint32_t FirstProbe(uint32_t hash, uint32_t size) { return hash & (size - 1); } inline static uint32_t NextProbe( uint32_t last, uint32_t number, uint32_t size) { return (last + number) & (size - 1); } // Attempt to shrink hash table after removal of key. MUST_USE_RESULT static Handle Shrink(Handle table, Key key); // Ensure enough space for n additional elements. MUST_USE_RESULT static Handle EnsureCapacity( Handle table, int n, Key key, PretenureFlag pretenure = NOT_TENURED); private: // Returns _expected_ if one of entries given by the first _probe_ probes is // equal to _expected_. Otherwise, returns the entry given by the probe // number _probe_. uint32_t EntryForProbe(Key key, Object* k, int probe, uint32_t expected); void Swap(uint32_t entry1, uint32_t entry2, WriteBarrierMode mode); // Rehashes this hash-table into the new table. void Rehash(Handle new_table, Key key); }; // HashTableKey is an abstract superclass for virtual key behavior. class HashTableKey { public: // Returns whether the other object matches this key. virtual bool IsMatch(Object* other) = 0; // Returns the hash value for this key. virtual uint32_t Hash() = 0; // Returns the hash value for object. virtual uint32_t HashForObject(Object* key) = 0; // Returns the key object for storing into the hash table. MUST_USE_RESULT virtual Handle AsHandle(Isolate* isolate) = 0; // Required. virtual ~HashTableKey() {} }; class StringTableShape : public BaseShape { public: static inline bool IsMatch(HashTableKey* key, Object* value) { return key->IsMatch(value); } static inline uint32_t Hash(HashTableKey* key) { return key->Hash(); } static inline uint32_t HashForObject(HashTableKey* key, Object* object) { return key->HashForObject(object); } static inline Handle AsHandle(Isolate* isolate, HashTableKey* key); static const int kPrefixSize = 0; static const int kEntrySize = 1; }; class SeqOneByteString; // StringTable. 
// // No special elements in the prefix and the element size is 1 // because only the string itself (the key) needs to be stored. class StringTable: public HashTable { public: // Find string in the string table. If it is not there yet, it is // added. The return value is the string found. static Handle LookupString(Isolate* isolate, Handle key); static Handle LookupKey(Isolate* isolate, HashTableKey* key); // Tries to internalize given string and returns string handle on success // or an empty handle otherwise. MUST_USE_RESULT static MaybeHandle InternalizeStringIfExists( Isolate* isolate, Handle string); // Looks up a string that is equal to the given string and returns // string handle if it is found, or an empty handle otherwise. MUST_USE_RESULT static MaybeHandle LookupStringIfExists( Isolate* isolate, Handle str); MUST_USE_RESULT static MaybeHandle LookupTwoCharsStringIfExists( Isolate* isolate, uint16_t c1, uint16_t c2); DECLARE_CAST(StringTable) private: template friend class JsonParser; DISALLOW_IMPLICIT_CONSTRUCTORS(StringTable); }; class MapCacheShape : public BaseShape { public: static inline bool IsMatch(HashTableKey* key, Object* value) { return key->IsMatch(value); } static inline uint32_t Hash(HashTableKey* key) { return key->Hash(); } static inline uint32_t HashForObject(HashTableKey* key, Object* object) { return key->HashForObject(object); } static inline Handle AsHandle(Isolate* isolate, HashTableKey* key); static const int kPrefixSize = 0; static const int kEntrySize = 2; }; // MapCache. // // Maps keys that are a fixed array of unique names to a map. // Used for canonicalize maps for object literals. class MapCache: public HashTable { public: // Find cached value for a name key, otherwise return null. Object* Lookup(FixedArray* key); static Handle Put( Handle map_cache, Handle key, Handle value); DECLARE_CAST(MapCache) private: DISALLOW_IMPLICIT_CONSTRUCTORS(MapCache); }; template class Dictionary: public HashTable { protected: typedef HashTable DerivedHashTable; public: // Returns the value at entry. Object* ValueAt(int entry) { return this->get(DerivedHashTable::EntryToIndex(entry) + 1); } // Set the value for entry. void ValueAtPut(int entry, Object* value) { this->set(DerivedHashTable::EntryToIndex(entry) + 1, value); } // Returns the property details for the property at entry. PropertyDetails DetailsAt(int entry) { DCHECK(entry >= 0); // Not found is -1, which is not caught by get(). return PropertyDetails( Smi::cast(this->get(DerivedHashTable::EntryToIndex(entry) + 2))); } // Set the details for entry. void DetailsAtPut(int entry, PropertyDetails value) { this->set(DerivedHashTable::EntryToIndex(entry) + 2, value.AsSmi()); } // Sorting support void CopyValuesTo(FixedArray* elements); // Delete a property from the dictionary. static Handle DeleteProperty( Handle dictionary, int entry, JSObject::DeleteMode mode); // Attempt to shrink the dictionary after deletion of key. MUST_USE_RESULT static inline Handle Shrink( Handle dictionary, Key key) { return DerivedHashTable::Shrink(dictionary, key); } // Returns the number of elements in the dictionary filtering out properties // with the specified attributes. int NumberOfElementsFilterAttributes(PropertyAttributes filter); // Returns the number of enumerable elements in the dictionary. int NumberOfEnumElements(); enum SortMode { UNSORTED, SORTED }; // Copies keys to preallocated fixed array. 
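  // A worked illustration of the entry layout used by ValueAt/DetailsAt above
  // (illustrative only): for a Shape with kEntrySize == 3, entry n occupies
  // three consecutive FixedArray slots starting at EntryToIndex(n):
  //   [EntryToIndex(n) + 0]  key
  //   [EntryToIndex(n) + 1]  value    (ValueAt / ValueAtPut)
  //   [EntryToIndex(n) + 2]  details  (DetailsAt / DetailsAtPut, as a Smi)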
void CopyKeysTo(FixedArray* storage, PropertyAttributes filter, SortMode sort_mode); // Fill in details for properties into storage. void CopyKeysTo(FixedArray* storage, int index, PropertyAttributes filter, SortMode sort_mode); // Accessors for next enumeration index. void SetNextEnumerationIndex(int index) { DCHECK(index != 0); this->set(kNextEnumerationIndexIndex, Smi::FromInt(index)); } int NextEnumerationIndex() { return Smi::cast(this->get(kNextEnumerationIndexIndex))->value(); } // Creates a new dictionary. MUST_USE_RESULT static Handle New( Isolate* isolate, int at_least_space_for, PretenureFlag pretenure = NOT_TENURED); // Ensure enough space for n additional elements. static Handle EnsureCapacity(Handle obj, int n, Key key); #ifdef OBJECT_PRINT void Print(OStream& os); // NOLINT #endif // Returns the key (slow). Object* SlowReverseLookup(Object* value); // Sets the entry to (key, value) pair. inline void SetEntry(int entry, Handle key, Handle value); inline void SetEntry(int entry, Handle key, Handle value, PropertyDetails details); MUST_USE_RESULT static Handle Add( Handle dictionary, Key key, Handle value, PropertyDetails details); protected: // Generic at put operation. MUST_USE_RESULT static Handle AtPut( Handle dictionary, Key key, Handle value); // Add entry to dictionary. static void AddEntry( Handle dictionary, Key key, Handle value, PropertyDetails details, uint32_t hash); // Generate new enumeration indices to avoid enumeration index overflow. static void GenerateNewEnumerationIndices(Handle dictionary); static const int kMaxNumberKeyIndex = DerivedHashTable::kPrefixStartIndex; static const int kNextEnumerationIndexIndex = kMaxNumberKeyIndex + 1; }; class NameDictionaryShape : public BaseShape > { public: static inline bool IsMatch(Handle key, Object* other); static inline uint32_t Hash(Handle key); static inline uint32_t HashForObject(Handle key, Object* object); static inline Handle AsHandle(Isolate* isolate, Handle key); static const int kPrefixSize = 2; static const int kEntrySize = 3; static const bool kIsEnumerable = true; }; class NameDictionary: public Dictionary > { typedef Dictionary< NameDictionary, NameDictionaryShape, Handle > DerivedDictionary; public: DECLARE_CAST(NameDictionary) // Copies enumerable keys to preallocated fixed array. void CopyEnumKeysTo(FixedArray* storage); inline static void DoGenerateNewEnumerationIndices( Handle dictionary); // Find entry for key, otherwise return kNotFound. Optimized version of // HashTable::FindEntry. int FindEntry(Handle key); }; class NumberDictionaryShape : public BaseShape { public: static inline bool IsMatch(uint32_t key, Object* other); static inline Handle AsHandle(Isolate* isolate, uint32_t key); static const int kEntrySize = 3; static const bool kIsEnumerable = false; }; class SeededNumberDictionaryShape : public NumberDictionaryShape { public: static const bool UsesSeed = true; static const int kPrefixSize = 2; static inline uint32_t SeededHash(uint32_t key, uint32_t seed); static inline uint32_t SeededHashForObject(uint32_t key, uint32_t seed, Object* object); }; class UnseededNumberDictionaryShape : public NumberDictionaryShape { public: static const int kPrefixSize = 0; static inline uint32_t Hash(uint32_t key); static inline uint32_t HashForObject(uint32_t key, Object* object); }; class SeededNumberDictionary : public Dictionary { public: DECLARE_CAST(SeededNumberDictionary) // Type specific at put (default NONE attributes is used when adding). 
MUST_USE_RESULT static Handle AtNumberPut( Handle dictionary, uint32_t key, Handle value); MUST_USE_RESULT static Handle AddNumberEntry( Handle dictionary, uint32_t key, Handle value, PropertyDetails details); // Set an existing entry or add a new one if needed. // Return the updated dictionary. MUST_USE_RESULT static Handle Set( Handle dictionary, uint32_t key, Handle value, PropertyDetails details); void UpdateMaxNumberKey(uint32_t key); // If slow elements are required we will never go back to fast-case // for the elements kept in this dictionary. We require slow // elements if an element has been added at an index larger than // kRequiresSlowElementsLimit or set_requires_slow_elements() has been called // when defining a getter or setter with a number key. inline bool requires_slow_elements(); inline void set_requires_slow_elements(); // Get the value of the max number key that has been added to this // dictionary. max_number_key can only be called if // requires_slow_elements returns false. inline uint32_t max_number_key(); // Bit masks. static const int kRequiresSlowElementsMask = 1; static const int kRequiresSlowElementsTagSize = 1; static const uint32_t kRequiresSlowElementsLimit = (1 << 29) - 1; }; class UnseededNumberDictionary : public Dictionary { public: DECLARE_CAST(UnseededNumberDictionary) // Type specific at put (default NONE attributes is used when adding). MUST_USE_RESULT static Handle AtNumberPut( Handle dictionary, uint32_t key, Handle value); MUST_USE_RESULT static Handle AddNumberEntry( Handle dictionary, uint32_t key, Handle value); // Set an existing entry or add a new one if needed. // Return the updated dictionary. MUST_USE_RESULT static Handle Set( Handle dictionary, uint32_t key, Handle value); }; class ObjectHashTableShape : public BaseShape > { public: static inline bool IsMatch(Handle key, Object* other); static inline uint32_t Hash(Handle key); static inline uint32_t HashForObject(Handle key, Object* object); static inline Handle AsHandle(Isolate* isolate, Handle key); static const int kPrefixSize = 0; static const int kEntrySize = 2; }; // ObjectHashTable maps keys that are arbitrary objects to object values by // using the identity hash of the key for hashing purposes. class ObjectHashTable: public HashTable > { typedef HashTable< ObjectHashTable, ObjectHashTableShape, Handle > DerivedHashTable; public: DECLARE_CAST(ObjectHashTable) // Attempt to shrink hash table after removal of key. MUST_USE_RESULT static inline Handle Shrink( Handle table, Handle key); // Looks up the value associated with the given key. The hole value is // returned in case the key is not present. Object* Lookup(Handle key); // Adds (or overwrites) the value associated with the given key. static Handle Put(Handle table, Handle key, Handle value); // Returns an ObjectHashTable (possibly |table|) where |key| has been removed. static Handle Remove(Handle table, Handle key, bool* was_present); private: friend class MarkCompactCollector; void AddEntry(int entry, Object* key, Object* value); void RemoveEntry(int entry); // Returns the index to the value of an entry. static inline int EntryToValueIndex(int entry) { return EntryToIndex(entry) + 1; } }; // OrderedHashTable is a HashTable with Object keys that preserves // insertion order. There are Map and Set interfaces (OrderedHashMap // and OrderedHashTable, below). It is meant to be used by JSMap/JSSet. 
// // Only Object* keys are supported, with Object::SameValueZero() used as the // equality operator and Object::GetHash() for the hash function. // // Based on the "Deterministic Hash Table" as described by Jason Orendorff at // https://wiki.mozilla.org/User:Jorend/Deterministic_hash_tables // Originally attributed to Tyler Close. // // Memory layout: // [0]: bucket count // [1]: element count // [2]: deleted element count // [3..(3 + NumberOfBuckets() - 1)]: "hash table", where each item is an // offset into the data table (see below) where the // first item in this bucket is stored. // [3 + NumberOfBuckets()..length]: "data table", an array of length // Capacity() * kEntrySize, where the first entrysize // items are handled by the derived class and the // item at kChainOffset is another entry into the // data table indicating the next entry in this hash // bucket. // // When we transition the table to a new version we obsolete it and reuse parts // of the memory to store information how to transition an iterator to the new // table: // // Memory layout for obsolete table: // [0]: bucket count // [1]: Next newer table // [2]: Number of removed holes or -1 when the table was cleared. // [3..(3 + NumberOfRemovedHoles() - 1)]: The indexes of the removed holes. // [3 + NumberOfRemovedHoles()..length]: Not used // template class OrderedHashTable: public FixedArray { public: // Returns an OrderedHashTable with a capacity of at least |capacity|. static Handle Allocate( Isolate* isolate, int capacity, PretenureFlag pretenure = NOT_TENURED); // Returns an OrderedHashTable (possibly |table|) with enough space // to add at least one new element. static Handle EnsureGrowable(Handle table); // Returns an OrderedHashTable (possibly |table|) that's shrunken // if possible. static Handle Shrink(Handle table); // Returns a new empty OrderedHashTable and records the clearing so that // exisiting iterators can be updated. static Handle Clear(Handle table); // Returns an OrderedHashTable (possibly |table|) where |key| has been // removed. static Handle Remove(Handle table, Handle key, bool* was_present); // Returns kNotFound if the key isn't present. int FindEntry(Handle key, int hash); // Like the above, but doesn't require the caller to provide a hash. int FindEntry(Handle key); int NumberOfElements() { return Smi::cast(get(kNumberOfElementsIndex))->value(); } int NumberOfDeletedElements() { return Smi::cast(get(kNumberOfDeletedElementsIndex))->value(); } int UsedCapacity() { return NumberOfElements() + NumberOfDeletedElements(); } int NumberOfBuckets() { return Smi::cast(get(kNumberOfBucketsIndex))->value(); } // Returns the index into the data table where the new entry // should be placed. The table is assumed to have enough space // for a new entry. int AddEntry(int hash); // Removes the entry, and puts the_hole in entrysize pointers // (leaving the hash table chain intact). void RemoveEntry(int entry); // Returns an index into |this| for the given entry. int EntryToIndex(int entry) { return kHashTableStartIndex + NumberOfBuckets() + (entry * kEntrySize); } Object* KeyAt(int entry) { return get(EntryToIndex(entry)); } bool IsObsolete() { return !get(kNextTableIndex)->IsSmi(); } // The next newer table. This is only valid if the table is obsolete. Derived* NextTable() { return Derived::cast(get(kNextTableIndex)); } // When the table is obsolete we store the indexes of the removed holes. 
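// Illustrative sketch (not part of the original header): the lookup implied by
// the memory layout documented above, written against a flat int array instead
// of the FixedArray backing store, with plain int keys and == standing in for
// Object::SameValueZero(). It models the set case (one payload slot per entry
// plus one chain slot); all names here are illustrative only.
namespace example_ordered_hash_table {

static const int kHashTableStart = 3;      // slots 0..2 hold the three counts
static const int kEntrySizeWithChain = 2;  // one key slot + one chain slot
static const int kNotFound = -1;

// table: [bucket_count, element_count, deleted_count,
//         bucket_heads[bucket_count], data_table...]
inline int FindEntry(const int* table, int key, int hash) {
  int buckets = table[0];
  int bucket = hash & (buckets - 1);            // bucket count is a power of two
  int entry = table[kHashTableStart + bucket];  // head of this bucket's chain
  int data_start = kHashTableStart + buckets;
  while (entry != kNotFound) {
    int index = data_start + entry * kEntrySizeWithChain;
    if (table[index] == key) return entry;
    entry = table[index + 1];                   // follow the chain link
  }
  return kNotFound;
}

}  // namespace example_ordered_hash_table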
int RemovedIndexAt(int index) { return Smi::cast(get(kRemovedHolesIndex + index))->value(); } static const int kNotFound = -1; static const int kMinCapacity = 4; private: static Handle Rehash(Handle table, int new_capacity); void SetNumberOfBuckets(int num) { set(kNumberOfBucketsIndex, Smi::FromInt(num)); } void SetNumberOfElements(int num) { set(kNumberOfElementsIndex, Smi::FromInt(num)); } void SetNumberOfDeletedElements(int num) { set(kNumberOfDeletedElementsIndex, Smi::FromInt(num)); } int Capacity() { return NumberOfBuckets() * kLoadFactor; } // Returns the next entry for the given entry. int ChainAt(int entry) { return Smi::cast(get(EntryToIndex(entry) + kChainOffset))->value(); } int HashToBucket(int hash) { return hash & (NumberOfBuckets() - 1); } int HashToEntry(int hash) { int bucket = HashToBucket(hash); return Smi::cast(get(kHashTableStartIndex + bucket))->value(); } void SetNextTable(Derived* next_table) { set(kNextTableIndex, next_table); } void SetRemovedIndexAt(int index, int removed_index) { return set(kRemovedHolesIndex + index, Smi::FromInt(removed_index)); } static const int kNumberOfBucketsIndex = 0; static const int kNumberOfElementsIndex = kNumberOfBucketsIndex + 1; static const int kNumberOfDeletedElementsIndex = kNumberOfElementsIndex + 1; static const int kHashTableStartIndex = kNumberOfDeletedElementsIndex + 1; static const int kNextTableIndex = kNumberOfElementsIndex; static const int kRemovedHolesIndex = kHashTableStartIndex; static const int kEntrySize = entrysize + 1; static const int kChainOffset = entrysize; static const int kLoadFactor = 2; static const int kMaxCapacity = (FixedArray::kMaxLength - kHashTableStartIndex) / (1 + (kEntrySize * kLoadFactor)); }; class JSSetIterator; class OrderedHashSet: public OrderedHashTable< OrderedHashSet, JSSetIterator, 1> { public: DECLARE_CAST(OrderedHashSet) bool Contains(Handle key); static Handle Add( Handle table, Handle key); }; class JSMapIterator; class OrderedHashMap:public OrderedHashTable< OrderedHashMap, JSMapIterator, 2> { public: DECLARE_CAST(OrderedHashMap) Object* Lookup(Handle key); static Handle Put( Handle table, Handle key, Handle value); Object* ValueAt(int entry) { return get(EntryToIndex(entry) + kValueOffset); } private: static const int kValueOffset = 1; }; template class WeakHashTableShape : public BaseShape > { public: static inline bool IsMatch(Handle key, Object* other); static inline uint32_t Hash(Handle key); static inline uint32_t HashForObject(Handle key, Object* object); static inline Handle AsHandle(Isolate* isolate, Handle key); static const int kPrefixSize = 0; static const int kEntrySize = entrysize; }; // WeakHashTable maps keys that are arbitrary objects to object values. // It is used for the global weak hash table that maps objects // embedded in optimized code to dependent code lists. class WeakHashTable: public HashTable, Handle > { typedef HashTable< WeakHashTable, WeakHashTableShape<2>, Handle > DerivedHashTable; public: DECLARE_CAST(WeakHashTable) // Looks up the value associated with the given key. The hole value is // returned in case the key is not present. Object* Lookup(Handle key); // Adds (or overwrites) the value associated with the given key. Mapping a // key to the hole value causes removal of the whole entry. MUST_USE_RESULT static Handle Put(Handle table, Handle key, Handle value); // This function is called when heap verification is turned on. 
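// Continuing the illustrative sketch above: insertion places the new entry at
// the end of the data table (preserving insertion order) and prepends it to
// its bucket's chain, in the spirit of AddEntry()/HashToBucket(). Growth,
// deletion and the obsolete-table transition are handled by the real class;
// this sketch assumes the caller already checked for absence and capacity.
namespace example_ordered_hash_table {

// |used_capacity| is elements + deleted elements, i.e. the next free
// data-table slot. Returns the data-table entry index used for the new key.
inline int AddEntry(int* table, int key, int hash, int used_capacity) {
  int buckets = table[0];
  int bucket = hash & (buckets - 1);
  int new_entry = used_capacity;
  int data_start = kHashTableStart + buckets;
  int index = data_start + new_entry * kEntrySizeWithChain;
  table[index] = key;                                  // payload
  table[index + 1] = table[kHashTableStart + bucket];  // chain to old bucket head
  table[kHashTableStart + bucket] = new_entry;         // new head of the bucket
  table[1] = table[1] + 1;                             // bump element count
  return new_entry;
}

}  // namespace example_ordered_hash_table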
void Zap(Object* value) { int capacity = Capacity(); for (int i = 0; i < capacity; i++) { set(EntryToIndex(i), value); set(EntryToValueIndex(i), value); } } private: friend class MarkCompactCollector; void AddEntry(int entry, Handle key, Handle value); // Returns the index to the value of an entry. static inline int EntryToValueIndex(int entry) { return EntryToIndex(entry) + 1; } }; // JSFunctionResultCache caches results of some JSFunction invocation. // It is a fixed array with fixed structure: // [0]: factory function // [1]: finger index // [2]: current cache size // [3]: dummy field. // The rest of array are key/value pairs. class JSFunctionResultCache: public FixedArray { public: static const int kFactoryIndex = 0; static const int kFingerIndex = kFactoryIndex + 1; static const int kCacheSizeIndex = kFingerIndex + 1; static const int kDummyIndex = kCacheSizeIndex + 1; static const int kEntriesIndex = kDummyIndex + 1; static const int kEntrySize = 2; // key + value static const int kFactoryOffset = kHeaderSize; static const int kFingerOffset = kFactoryOffset + kPointerSize; static const int kCacheSizeOffset = kFingerOffset + kPointerSize; inline void MakeZeroSize(); inline void Clear(); inline int size(); inline void set_size(int size); inline int finger_index(); inline void set_finger_index(int finger_index); DECLARE_CAST(JSFunctionResultCache) DECLARE_VERIFIER(JSFunctionResultCache) }; // ScopeInfo represents information about different scopes of a source // program and the allocation of the scope's variables. Scope information // is stored in a compressed form in ScopeInfo objects and is used // at runtime (stack dumps, deoptimization, etc.). // This object provides quick access to scope info details for runtime // routines. class ScopeInfo : public FixedArray { public: DECLARE_CAST(ScopeInfo) // Return the type of this scope. ScopeType scope_type(); // Does this scope call eval? bool CallsEval(); // Return the strict mode of this scope. StrictMode strict_mode(); // Does this scope make a sloppy eval call? bool CallsSloppyEval() { return CallsEval() && strict_mode() == SLOPPY; } // Return the total number of locals allocated on the stack and in the // context. This includes the parameters that are allocated in the context. int LocalCount(); // Return the number of stack slots for code. This number consists of two // parts: // 1. One stack slot per stack allocated local. // 2. One stack slot for the function name if it is stack allocated. int StackSlotCount(); // Return the number of context slots for code if a context is allocated. This // number consists of three parts: // 1. Size of fixed header for every context: Context::MIN_CONTEXT_SLOTS // 2. One context slot per context allocated local. // 3. One context slot for the function name if it is context allocated. // Parameters allocated in the context count as context allocated locals. If // no contexts are allocated for this scope ContextLength returns 0. int ContextLength(); // Is this scope the scope of a named function expression? bool HasFunctionName(); // Return if this has context allocated locals. bool HasHeapAllocatedLocals(); // Return if contexts are allocated for this scope. bool HasContext(); // Return if this is a function scope with "use asm". bool IsAsmModule() { return AsmModuleField::decode(Flags()); } // Return if this is a nested function within an asm module scope. bool IsAsmFunction() { return AsmFunctionField::decode(Flags()); } // Return the function_name if present. 
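// Illustrative sketch (not part of the original header): the two counting
// rules spelled out above for StackSlotCount() and ContextLength(), written
// as standalone functions. Context::MIN_CONTEXT_SLOTS is passed in because
// its value is defined elsewhere, and whether a context is allocated at all
// is really decided by scope analysis (HasContext()); it is approximated here
// by whether anything is context allocated. Names are illustrative only.
namespace example_scope_info_counts {

struct Scope {
  int stack_locals;               // locals allocated on the stack
  int context_locals;             // locals (incl. parameters) allocated in the context
  bool function_name_on_stack;    // named function expression, stack allocated
  bool function_name_in_context;  // named function expression, context allocated
};

inline int StackSlotCount(const Scope& s) {
  return s.stack_locals + (s.function_name_on_stack ? 1 : 0);
}

inline int ContextLength(const Scope& s, int min_context_slots) {
  bool needs_context = s.context_locals > 0 || s.function_name_in_context;
  if (!needs_context) return 0;  // "If no contexts are allocated ... returns 0."
  return min_context_slots + s.context_locals +
         (s.function_name_in_context ? 1 : 0);
}

}  // namespace example_scope_info_counts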
String* FunctionName(); // Return the name of the given parameter. String* ParameterName(int var); // Return the name of the given local. String* LocalName(int var); // Return the name of the given stack local. String* StackLocalName(int var); // Return the name of the given context local. String* ContextLocalName(int var); // Return the mode of the given context local. VariableMode ContextLocalMode(int var); // Return the initialization flag of the given context local. InitializationFlag ContextLocalInitFlag(int var); // Return the initialization flag of the given context local. MaybeAssignedFlag ContextLocalMaybeAssignedFlag(int var); // Return true if this local was introduced by the compiler, and should not be // exposed to the user in a debugger. bool LocalIsSynthetic(int var); // Lookup support for serialized scope info. Returns the // the stack slot index for a given slot name if the slot is // present; otherwise returns a value < 0. The name must be an internalized // string. int StackSlotIndex(String* name); // Lookup support for serialized scope info. Returns the // context slot index for a given slot name if the slot is present; otherwise // returns a value < 0. The name must be an internalized string. // If the slot is present and mode != NULL, sets *mode to the corresponding // mode for that variable. static int ContextSlotIndex(Handle scope_info, Handle name, VariableMode* mode, InitializationFlag* init_flag, MaybeAssignedFlag* maybe_assigned_flag); // Lookup support for serialized scope info. Returns the // parameter index for a given parameter name if the parameter is present; // otherwise returns a value < 0. The name must be an internalized string. int ParameterIndex(String* name); // Lookup support for serialized scope info. Returns the function context // slot index if the function name is present and context-allocated (named // function expressions, only), otherwise returns a value < 0. The name // must be an internalized string. int FunctionContextSlotIndex(String* name, VariableMode* mode); // Copies all the context locals into an object used to materialize a scope. static bool CopyContextLocalsToScopeObject(Handle scope_info, Handle context, Handle scope_object); static Handle Create(Scope* scope, Zone* zone); // Serializes empty scope info. static ScopeInfo* Empty(Isolate* isolate); #ifdef DEBUG void Print(); #endif // The layout of the static part of a ScopeInfo is as follows. Each entry is // numeric and occupies one array slot. // 1. A set of properties of the scope // 2. The number of parameters. This only applies to function scopes. For // non-function scopes this is 0. // 3. The number of non-parameter variables allocated on the stack. // 4. The number of non-parameter and parameter variables allocated in the // context. #define FOR_EACH_NUMERIC_FIELD(V) \ V(Flags) \ V(ParameterCount) \ V(StackLocalCount) \ V(ContextLocalCount) #define FIELD_ACCESSORS(name) \ void Set##name(int value) { \ set(k##name, Smi::FromInt(value)); \ } \ int name() { \ if (length() > 0) { \ return Smi::cast(get(k##name))->value(); \ } else { \ return 0; \ } \ } FOR_EACH_NUMERIC_FIELD(FIELD_ACCESSORS) #undef FIELD_ACCESSORS private: enum { #define DECL_INDEX(name) k##name, FOR_EACH_NUMERIC_FIELD(DECL_INDEX) #undef DECL_INDEX #undef FOR_EACH_NUMERIC_FIELD kVariablePartIndex }; // The layout of the variable part of a ScopeInfo is as follows: // 1. ParameterEntries: // This part stores the names of the parameters for function scopes. 
One // slot is used per parameter, so in total this part occupies // ParameterCount() slots in the array. For other scopes than function // scopes ParameterCount() is 0. // 2. StackLocalEntries: // Contains the names of local variables that are allocated on the stack, // in increasing order of the stack slot index. One slot is used per stack // local, so in total this part occupies StackLocalCount() slots in the // array. // 3. ContextLocalNameEntries: // Contains the names of local variables and parameters that are allocated // in the context. They are stored in increasing order of the context slot // index starting with Context::MIN_CONTEXT_SLOTS. One slot is used per // context local, so in total this part occupies ContextLocalCount() slots // in the array. // 4. ContextLocalInfoEntries: // Contains the variable modes and initialization flags corresponding to // the context locals in ContextLocalNameEntries. One slot is used per // context local, so in total this part occupies ContextLocalCount() // slots in the array. // 5. FunctionNameEntryIndex: // If the scope belongs to a named function expression this part contains // information about the function variable. It always occupies two array // slots: a. The name of the function variable. // b. The context or stack slot index for the variable. int ParameterEntriesIndex(); int StackLocalEntriesIndex(); int ContextLocalNameEntriesIndex(); int ContextLocalInfoEntriesIndex(); int FunctionNameEntryIndex(); // Location of the function variable for named function expressions. enum FunctionVariableInfo { NONE, // No function name present. STACK, // Function CONTEXT, UNUSED }; // Properties of scopes. class ScopeTypeField: public BitField {}; class CallsEvalField: public BitField {}; class StrictModeField: public BitField {}; class FunctionVariableField: public BitField {}; class FunctionVariableMode: public BitField {}; class AsmModuleField : public BitField {}; class AsmFunctionField : public BitField {}; // BitFields representing the encoded information for context locals in the // ContextLocalInfoEntries part. class ContextLocalMode: public BitField {}; class ContextLocalInitFlag: public BitField {}; class ContextLocalMaybeAssignedFlag : public BitField {}; }; // The cache for maps used by normalized (dictionary mode) objects. // Such maps do not have property descriptors, so a typical program // needs very limited number of distinct normalized maps. class NormalizedMapCache: public FixedArray { public: static Handle New(Isolate* isolate); MUST_USE_RESULT MaybeHandle Get(Handle fast_map, PropertyNormalizationMode mode); void Set(Handle fast_map, Handle normalized_map); void Clear(); DECLARE_CAST(NormalizedMapCache) static inline bool IsNormalizedMapCache(const Object* obj); DECLARE_VERIFIER(NormalizedMapCache) private: static const int kEntries = 64; static inline int GetIndex(Handle map); // The following declarations hide base class methods. Object* get(int index); void set(int index, Object* value); }; // ByteArray represents fixed sized byte arrays. Used for the relocation info // that is attached to code objects. class ByteArray: public FixedArrayBase { public: inline int Size() { return RoundUp(length() + kHeaderSize, kPointerSize); } // Setter and getter. inline byte get(int index); inline void set(int index, byte value); // Treat contents as an int array. inline int get_int(int index); static int SizeFor(int length) { return OBJECT_POINTER_ALIGN(kHeaderSize + length); } // We use byte arrays for free blocks in the heap. 
Given a desired size in // bytes that is a multiple of the word size and big enough to hold a byte // array, this function returns the number of elements a byte array should // have. static int LengthFor(int size_in_bytes) { DCHECK(IsAligned(size_in_bytes, kPointerSize)); DCHECK(size_in_bytes >= kHeaderSize); return size_in_bytes - kHeaderSize; } // Returns data start address. inline Address GetDataStartAddress(); // Returns a pointer to the ByteArray object for a given data start address. static inline ByteArray* FromDataStartAddress(Address address); DECLARE_CAST(ByteArray) // Dispatched behavior. inline int ByteArraySize() { return SizeFor(this->length()); } DECLARE_PRINTER(ByteArray) DECLARE_VERIFIER(ByteArray) // Layout description. static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize); // Maximal memory consumption for a single ByteArray. static const int kMaxSize = 512 * MB; // Maximal length of a single ByteArray. static const int kMaxLength = kMaxSize - kHeaderSize; private: DISALLOW_IMPLICIT_CONSTRUCTORS(ByteArray); }; // FreeSpace represents fixed sized areas of the heap that are not currently in // use. Used by the heap and GC. class FreeSpace: public HeapObject { public: // [size]: size of the free space including the header. inline int size() const; inline void set_size(int value); inline int nobarrier_size() const; inline void nobarrier_set_size(int value); inline int Size() { return size(); } DECLARE_CAST(FreeSpace) // Dispatched behavior. DECLARE_PRINTER(FreeSpace) DECLARE_VERIFIER(FreeSpace) // Layout description. // Size is smi tagged when it is stored. static const int kSizeOffset = HeapObject::kHeaderSize; static const int kHeaderSize = kSizeOffset + kPointerSize; static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize); private: DISALLOW_IMPLICIT_CONSTRUCTORS(FreeSpace); }; // V has parameters (Type, type, TYPE, C type, element_size) #define TYPED_ARRAYS(V) \ V(Uint8, uint8, UINT8, uint8_t, 1) \ V(Int8, int8, INT8, int8_t, 1) \ V(Uint16, uint16, UINT16, uint16_t, 2) \ V(Int16, int16, INT16, int16_t, 2) \ V(Uint32, uint32, UINT32, uint32_t, 4) \ V(Int32, int32, INT32, int32_t, 4) \ V(Float32, float32, FLOAT32, float, 4) \ V(Float64, float64, FLOAT64, double, 8) \ V(Uint8Clamped, uint8_clamped, UINT8_CLAMPED, uint8_t, 1) // An ExternalArray represents a fixed-size array of primitive values // which live outside the JavaScript heap. Its subclasses are used to // implement the CanvasArray types being defined in the WebGL // specification. As of this writing the first public draft is not yet // available, but Khronos members can access the draft at: // https://cvs.khronos.org/svn/repos/3dweb/trunk/doc/spec/WebGL-spec.html // // The semantics of these arrays differ from CanvasPixelArray. // Out-of-range values passed to the setter are converted via a C // cast, not clamping. Out-of-range indices cause exceptions to be // raised rather than being silently ignored. class ExternalArray: public FixedArrayBase { public: inline bool is_the_hole(int index) { return false; } // [external_pointer]: The pointer to the external memory area backing this // external array. DECL_ACCESSORS(external_pointer, void) // Pointer to the data store. DECLARE_CAST(ExternalArray) // Maximal acceptable length for an external array. static const int kMaxLength = 0x3fffffff; // ExternalArray headers are not quadword aligned. 
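// Illustrative sketch (not part of the original header): the SizeFor()/
// LengthFor() relationship described above, with hypothetical header and
// pointer sizes standing in for the real layout constants. For any
// pointer-aligned block that is large enough, SizeFor(LengthFor(bytes)) ==
// bytes, which is what lets free heap blocks be reused as byte arrays.
namespace example_byte_array_sizing {

static const int kPointerSize = 8;  // assumption for the example (64-bit)
static const int kHeaderSize = 16;  // assumption: map pointer + length field

inline int Align(int value) {       // round up to a pointer-size multiple
  return (value + kPointerSize - 1) & ~(kPointerSize - 1);
}

inline int SizeFor(int length) { return Align(kHeaderSize + length); }

inline int LengthFor(int size_in_bytes) {
  // The caller guarantees size_in_bytes is pointer aligned and at least
  // kHeaderSize, as the DCHECKs above require.
  return size_in_bytes - kHeaderSize;
}

}  // namespace example_byte_array_sizing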
static const int kExternalPointerOffset = POINTER_SIZE_ALIGN(FixedArrayBase::kLengthOffset + kPointerSize); static const int kHeaderSize = kExternalPointerOffset + kPointerSize; static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize); private: DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalArray); }; // A ExternalUint8ClampedArray represents a fixed-size byte array with special // semantics used for implementing the CanvasPixelArray object. Please see the // specification at: // http://www.whatwg.org/specs/web-apps/current-work/ // multipage/the-canvas-element.html#canvaspixelarray // In particular, write access clamps the value written to 0 or 255 if the // value written is outside this range. class ExternalUint8ClampedArray: public ExternalArray { public: inline uint8_t* external_uint8_clamped_pointer(); // Setter and getter. inline uint8_t get_scalar(int index); static inline Handle get(Handle array, int index); inline void set(int index, uint8_t value); // This accessor applies the correct conversion from Smi, HeapNumber // and undefined and clamps the converted value between 0 and 255. static Handle SetValue(Handle array, uint32_t index, Handle value); DECLARE_CAST(ExternalUint8ClampedArray) // Dispatched behavior. DECLARE_PRINTER(ExternalUint8ClampedArray) DECLARE_VERIFIER(ExternalUint8ClampedArray) private: DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUint8ClampedArray); }; class ExternalInt8Array: public ExternalArray { public: // Setter and getter. inline int8_t get_scalar(int index); static inline Handle get(Handle array, int index); inline void set(int index, int8_t value); // This accessor applies the correct conversion from Smi, HeapNumber // and undefined. static Handle SetValue(Handle array, uint32_t index, Handle value); DECLARE_CAST(ExternalInt8Array) // Dispatched behavior. DECLARE_PRINTER(ExternalInt8Array) DECLARE_VERIFIER(ExternalInt8Array) private: DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalInt8Array); }; class ExternalUint8Array: public ExternalArray { public: // Setter and getter. inline uint8_t get_scalar(int index); static inline Handle get(Handle array, int index); inline void set(int index, uint8_t value); // This accessor applies the correct conversion from Smi, HeapNumber // and undefined. static Handle SetValue(Handle array, uint32_t index, Handle value); DECLARE_CAST(ExternalUint8Array) // Dispatched behavior. DECLARE_PRINTER(ExternalUint8Array) DECLARE_VERIFIER(ExternalUint8Array) private: DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUint8Array); }; class ExternalInt16Array: public ExternalArray { public: // Setter and getter. inline int16_t get_scalar(int index); static inline Handle get(Handle array, int index); inline void set(int index, int16_t value); // This accessor applies the correct conversion from Smi, HeapNumber // and undefined. static Handle SetValue(Handle array, uint32_t index, Handle value); DECLARE_CAST(ExternalInt16Array) // Dispatched behavior. DECLARE_PRINTER(ExternalInt16Array) DECLARE_VERIFIER(ExternalInt16Array) private: DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalInt16Array); }; class ExternalUint16Array: public ExternalArray { public: // Setter and getter. inline uint16_t get_scalar(int index); static inline Handle get(Handle array, int index); inline void set(int index, uint16_t value); // This accessor applies the correct conversion from Smi, HeapNumber // and undefined. static Handle SetValue(Handle array, uint32_t index, Handle value); DECLARE_CAST(ExternalUint16Array) // Dispatched behavior. 
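// Illustrative sketch (not part of the original header): the two store
// conversions contrasted above. Ordinary external arrays convert out-of-range
// values with a plain C cast, while the clamped uint8 array pins the value
// into [0, 255]. Only the integer case is shown; the real SetValue() also
// handles HeapNumbers and undefined, and rounds fractional values when
// clamping. Names are illustrative only.
namespace example_external_array_stores {

inline signed char StoreInt8Truncating(int value) {
  // C-cast semantics: keep the low 8 bits, no range check.
  return static_cast<signed char>(value);
}

inline unsigned char StoreUint8Clamped(int value) {
  if (value < 0) return 0;
  if (value > 255) return 255;
  return static_cast<unsigned char>(value);
}

}  // namespace example_external_array_stores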
DECLARE_PRINTER(ExternalUint16Array) DECLARE_VERIFIER(ExternalUint16Array) private: DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUint16Array); }; class ExternalInt32Array: public ExternalArray { public: // Setter and getter. inline int32_t get_scalar(int index); static inline Handle get(Handle array, int index); inline void set(int index, int32_t value); // This accessor applies the correct conversion from Smi, HeapNumber // and undefined. static Handle SetValue(Handle array, uint32_t index, Handle value); DECLARE_CAST(ExternalInt32Array) // Dispatched behavior. DECLARE_PRINTER(ExternalInt32Array) DECLARE_VERIFIER(ExternalInt32Array) private: DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalInt32Array); }; class ExternalUint32Array: public ExternalArray { public: // Setter and getter. inline uint32_t get_scalar(int index); static inline Handle get(Handle array, int index); inline void set(int index, uint32_t value); // This accessor applies the correct conversion from Smi, HeapNumber // and undefined. static Handle SetValue(Handle array, uint32_t index, Handle value); DECLARE_CAST(ExternalUint32Array) // Dispatched behavior. DECLARE_PRINTER(ExternalUint32Array) DECLARE_VERIFIER(ExternalUint32Array) private: DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUint32Array); }; class ExternalFloat32Array: public ExternalArray { public: // Setter and getter. inline float get_scalar(int index); static inline Handle get(Handle array, int index); inline void set(int index, float value); // This accessor applies the correct conversion from Smi, HeapNumber // and undefined. static Handle SetValue(Handle array, uint32_t index, Handle value); DECLARE_CAST(ExternalFloat32Array) // Dispatched behavior. DECLARE_PRINTER(ExternalFloat32Array) DECLARE_VERIFIER(ExternalFloat32Array) private: DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalFloat32Array); }; class ExternalFloat64Array: public ExternalArray { public: // Setter and getter. inline double get_scalar(int index); static inline Handle get(Handle array, int index); inline void set(int index, double value); // This accessor applies the correct conversion from Smi, HeapNumber // and undefined. static Handle SetValue(Handle array, uint32_t index, Handle value); DECLARE_CAST(ExternalFloat64Array) // Dispatched behavior. DECLARE_PRINTER(ExternalFloat64Array) DECLARE_VERIFIER(ExternalFloat64Array) private: DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalFloat64Array); }; class FixedTypedArrayBase: public FixedArrayBase { public: DECLARE_CAST(FixedTypedArrayBase) static const int kDataOffset = kHeaderSize; inline int size(); inline int TypedArraySize(InstanceType type); // Use with care: returns raw pointer into heap. inline void* DataPtr(); inline int DataSize(); private: inline int DataSize(InstanceType type); DISALLOW_IMPLICIT_CONSTRUCTORS(FixedTypedArrayBase); }; template class FixedTypedArray: public FixedTypedArrayBase { public: typedef typename Traits::ElementType ElementType; static const InstanceType kInstanceType = Traits::kInstanceType; DECLARE_CAST(FixedTypedArray) static inline int ElementOffset(int index) { return kDataOffset + index * sizeof(ElementType); } static inline int SizeFor(int length) { return ElementOffset(length); } inline ElementType get_scalar(int index); static inline Handle get(Handle array, int index); inline void set(int index, ElementType value); static inline ElementType from_int(int value); static inline ElementType from_double(double value); // This accessor applies the correct conversion from Smi, HeapNumber // and undefined. 
static Handle SetValue(Handle > array, uint32_t index, Handle value); DECLARE_PRINTER(FixedTypedArray) DECLARE_VERIFIER(FixedTypedArray) private: DISALLOW_IMPLICIT_CONSTRUCTORS(FixedTypedArray); }; #define FIXED_TYPED_ARRAY_TRAITS(Type, type, TYPE, elementType, size) \ class Type##ArrayTraits { \ public: /* NOLINT */ \ typedef elementType ElementType; \ static const InstanceType kInstanceType = FIXED_##TYPE##_ARRAY_TYPE; \ static const char* Designator() { return #type " array"; } \ static inline Handle ToHandle(Isolate* isolate, \ elementType scalar); \ static inline elementType defaultValue(); \ }; \ \ typedef FixedTypedArray Fixed##Type##Array; TYPED_ARRAYS(FIXED_TYPED_ARRAY_TRAITS) #undef FIXED_TYPED_ARRAY_TRAITS // DeoptimizationInputData is a fixed array used to hold the deoptimization // data for code generated by the Hydrogen/Lithium compiler. It also // contains information about functions that were inlined. If N different // functions were inlined then first N elements of the literal array will // contain these functions. // // It can be empty. class DeoptimizationInputData: public FixedArray { public: // Layout description. Indices in the array. static const int kTranslationByteArrayIndex = 0; static const int kInlinedFunctionCountIndex = 1; static const int kLiteralArrayIndex = 2; static const int kOsrAstIdIndex = 3; static const int kOsrPcOffsetIndex = 4; static const int kOptimizationIdIndex = 5; static const int kSharedFunctionInfoIndex = 6; static const int kFirstDeoptEntryIndex = 7; // Offsets of deopt entry elements relative to the start of the entry. static const int kAstIdRawOffset = 0; static const int kTranslationIndexOffset = 1; static const int kArgumentsStackHeightOffset = 2; static const int kPcOffset = 3; static const int kDeoptEntrySize = 4; // Simple element accessors. #define DEFINE_ELEMENT_ACCESSORS(name, type) \ type* name() { \ return type::cast(get(k##name##Index)); \ } \ void Set##name(type* value) { \ set(k##name##Index, value); \ } DEFINE_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray) DEFINE_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi) DEFINE_ELEMENT_ACCESSORS(LiteralArray, FixedArray) DEFINE_ELEMENT_ACCESSORS(OsrAstId, Smi) DEFINE_ELEMENT_ACCESSORS(OsrPcOffset, Smi) DEFINE_ELEMENT_ACCESSORS(OptimizationId, Smi) DEFINE_ELEMENT_ACCESSORS(SharedFunctionInfo, Object) #undef DEFINE_ELEMENT_ACCESSORS // Accessors for elements of the ith deoptimization entry. #define DEFINE_ENTRY_ACCESSORS(name, type) \ type* name(int i) { \ return type::cast(get(IndexForEntry(i) + k##name##Offset)); \ } \ void Set##name(int i, type* value) { \ set(IndexForEntry(i) + k##name##Offset, value); \ } DEFINE_ENTRY_ACCESSORS(AstIdRaw, Smi) DEFINE_ENTRY_ACCESSORS(TranslationIndex, Smi) DEFINE_ENTRY_ACCESSORS(ArgumentsStackHeight, Smi) DEFINE_ENTRY_ACCESSORS(Pc, Smi) #undef DEFINE_DEOPT_ENTRY_ACCESSORS BailoutId AstId(int i) { return BailoutId(AstIdRaw(i)->value()); } void SetAstId(int i, BailoutId value) { SetAstIdRaw(i, Smi::FromInt(value.ToInt())); } int DeoptCount() { return (length() - kFirstDeoptEntryIndex) / kDeoptEntrySize; } // Allocates a DeoptimizationInputData. 
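// Illustrative sketch (not part of the original header): the index arithmetic
// implied by the DeoptimizationInputData layout constants above, a fixed
// 7-slot header followed by 4-slot deopt entries. The helper names mirror the
// private IndexForEntry()/LengthFor() members; the code here is illustrative
// only.
namespace example_deopt_input_data {

static const int kFirstEntryIndex = 7;  // slots 0..6 form the fixed header
static const int kEntrySize = 4;        // ast id, translation index, stack height, pc

inline int IndexForEntry(int i) { return kFirstEntryIndex + i * kEntrySize; }
inline int ArrayLengthFor(int entry_count) { return IndexForEntry(entry_count); }
inline int DeoptCount(int array_length) {
  return (array_length - kFirstEntryIndex) / kEntrySize;
}
// Slot holding the pc of entry i (offset 3 within the entry).
inline int PcSlot(int i) { return IndexForEntry(i) + 3; }

}  // namespace example_deopt_input_data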
static Handle New(Isolate* isolate, int deopt_entry_count, PretenureFlag pretenure); DECLARE_CAST(DeoptimizationInputData) #ifdef ENABLE_DISASSEMBLER void DeoptimizationInputDataPrint(OStream& os); // NOLINT #endif private: static int IndexForEntry(int i) { return kFirstDeoptEntryIndex + (i * kDeoptEntrySize); } static int LengthFor(int entry_count) { return IndexForEntry(entry_count); } }; // DeoptimizationOutputData is a fixed array used to hold the deoptimization // data for code generated by the full compiler. // The format of the these objects is // [i * 2]: Ast ID for ith deoptimization. // [i * 2 + 1]: PC and state of ith deoptimization class DeoptimizationOutputData: public FixedArray { public: int DeoptPoints() { return length() / 2; } BailoutId AstId(int index) { return BailoutId(Smi::cast(get(index * 2))->value()); } void SetAstId(int index, BailoutId id) { set(index * 2, Smi::FromInt(id.ToInt())); } Smi* PcAndState(int index) { return Smi::cast(get(1 + index * 2)); } void SetPcAndState(int index, Smi* offset) { set(1 + index * 2, offset); } static int LengthOfFixedArray(int deopt_points) { return deopt_points * 2; } // Allocates a DeoptimizationOutputData. static Handle New(Isolate* isolate, int number_of_deopt_points, PretenureFlag pretenure); DECLARE_CAST(DeoptimizationOutputData) #if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER) void DeoptimizationOutputDataPrint(OStream& os); // NOLINT #endif }; // Forward declaration. class Cell; class PropertyCell; class SafepointEntry; class TypeFeedbackInfo; // Code describes objects with on-the-fly generated machine code. class Code: public HeapObject { public: // Opaque data type for encapsulating code flags like kind, inline // cache state, and arguments count. typedef uint32_t Flags; #define NON_IC_KIND_LIST(V) \ V(FUNCTION) \ V(OPTIMIZED_FUNCTION) \ V(STUB) \ V(HANDLER) \ V(BUILTIN) \ V(REGEXP) #define IC_KIND_LIST(V) \ V(LOAD_IC) \ V(KEYED_LOAD_IC) \ V(CALL_IC) \ V(STORE_IC) \ V(KEYED_STORE_IC) \ V(BINARY_OP_IC) \ V(COMPARE_IC) \ V(COMPARE_NIL_IC) \ V(TO_BOOLEAN_IC) #define CODE_KIND_LIST(V) \ NON_IC_KIND_LIST(V) \ IC_KIND_LIST(V) enum Kind { #define DEFINE_CODE_KIND_ENUM(name) name, CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM) #undef DEFINE_CODE_KIND_ENUM NUMBER_OF_KINDS }; // No more than 16 kinds. The value is currently encoded in four bits in // Flags. STATIC_ASSERT(NUMBER_OF_KINDS <= 16); static const char* Kind2String(Kind kind); // Types of stubs. enum StubType { NORMAL, FAST }; static const int kPrologueOffsetNotSet = -1; #ifdef ENABLE_DISASSEMBLER // Printing static const char* ICState2String(InlineCacheState state); static const char* StubType2String(StubType type); static void PrintExtraICState(OStream& os, // NOLINT Kind kind, ExtraICState extra); void Disassemble(const char* name, OStream& os); // NOLINT #endif // ENABLE_DISASSEMBLER // [instruction_size]: Size of the native instructions inline int instruction_size() const; inline void set_instruction_size(int value); // [relocation_info]: Code relocation information DECL_ACCESSORS(relocation_info, ByteArray) void InvalidateRelocation(); void InvalidateEmbeddedObjects(); // [handler_table]: Fixed array containing offsets of exception handlers. DECL_ACCESSORS(handler_table, FixedArray) // [deoptimization_data]: Array containing data for deopt. DECL_ACCESSORS(deoptimization_data, FixedArray) // [raw_type_feedback_info]: This field stores various things, depending on // the kind of the code object. // FUNCTION => type feedback information. 
// STUB and ICs => major/minor key as Smi. DECL_ACCESSORS(raw_type_feedback_info, Object) inline Object* type_feedback_info(); inline void set_type_feedback_info( Object* value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER); inline uint32_t stub_key(); inline void set_stub_key(uint32_t key); // [next_code_link]: Link for lists of optimized or deoptimized code. // Note that storage for this field is overlapped with typefeedback_info. DECL_ACCESSORS(next_code_link, Object) // [gc_metadata]: Field used to hold GC related metadata. The contents of this // field does not have to be traced during garbage collection since // it is only used by the garbage collector itself. DECL_ACCESSORS(gc_metadata, Object) // [ic_age]: Inline caching age: the value of the Heap::global_ic_age // at the moment when this object was created. inline void set_ic_age(int count); inline int ic_age() const; // [prologue_offset]: Offset of the function prologue, used for aging // FUNCTIONs and OPTIMIZED_FUNCTIONs. inline int prologue_offset() const; inline void set_prologue_offset(int offset); // Unchecked accessors to be used during GC. inline ByteArray* unchecked_relocation_info(); inline int relocation_size(); // [flags]: Various code flags. inline Flags flags(); inline void set_flags(Flags flags); // [flags]: Access to specific code flags. inline Kind kind(); inline InlineCacheState ic_state(); // Only valid for IC stubs. inline ExtraICState extra_ic_state(); // Only valid for IC stubs. inline StubType type(); // Only valid for monomorphic IC stubs. // Testers for IC stub kinds. inline bool is_inline_cache_stub(); inline bool is_debug_stub(); inline bool is_handler() { return kind() == HANDLER; } inline bool is_load_stub() { return kind() == LOAD_IC; } inline bool is_keyed_load_stub() { return kind() == KEYED_LOAD_IC; } inline bool is_store_stub() { return kind() == STORE_IC; } inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; } inline bool is_call_stub() { return kind() == CALL_IC; } inline bool is_binary_op_stub() { return kind() == BINARY_OP_IC; } inline bool is_compare_ic_stub() { return kind() == COMPARE_IC; } inline bool is_compare_nil_ic_stub() { return kind() == COMPARE_NIL_IC; } inline bool is_to_boolean_ic_stub() { return kind() == TO_BOOLEAN_IC; } inline bool is_keyed_stub(); inline bool is_optimized_code() { return kind() == OPTIMIZED_FUNCTION; } inline bool is_weak_stub(); inline void mark_as_weak_stub(); inline bool is_invalidated_weak_stub(); inline void mark_as_invalidated_weak_stub(); inline bool CanBeWeakStub() { Kind k = kind(); return (k == LOAD_IC || k == STORE_IC || k == KEYED_LOAD_IC || k == KEYED_STORE_IC || k == COMPARE_NIL_IC) && ic_state() == MONOMORPHIC; } inline bool IsCodeStubOrIC(); inline void set_raw_kind_specific_flags1(int value); inline void set_raw_kind_specific_flags2(int value); // [is_crankshafted]: For kind STUB or ICs, tells whether or not a code // object was generated by either the hydrogen or the TurboFan optimizing // compiler (but it may not be an optimized function). inline bool is_crankshafted(); inline bool is_hydrogen_stub(); // Crankshafted, but not a function. inline void set_is_crankshafted(bool value); // [is_turbofanned]: For kind STUB or OPTIMIZED_FUNCTION, tells whether the // code object was generated by the TurboFan optimizing compiler. inline bool is_turbofanned(); inline void set_is_turbofanned(bool value); // [optimizable]: For FUNCTION kind, tells if it is optimizable. 
inline bool optimizable(); inline void set_optimizable(bool value); // [has_deoptimization_support]: For FUNCTION kind, tells if it has // deoptimization support. inline bool has_deoptimization_support(); inline void set_has_deoptimization_support(bool value); // [has_debug_break_slots]: For FUNCTION kind, tells if it has // been compiled with debug break slots. inline bool has_debug_break_slots(); inline void set_has_debug_break_slots(bool value); // [compiled_with_optimizing]: For FUNCTION kind, tells if it has // been compiled with IsOptimizing set to true. inline bool is_compiled_optimizable(); inline void set_compiled_optimizable(bool value); // [allow_osr_at_loop_nesting_level]: For FUNCTION kind, tells for // how long the function has been marked for OSR and therefore which // level of loop nesting we are willing to do on-stack replacement // for. inline void set_allow_osr_at_loop_nesting_level(int level); inline int allow_osr_at_loop_nesting_level(); // [profiler_ticks]: For FUNCTION kind, tells for how many profiler ticks // the code object was seen on the stack with no IC patching going on. inline int profiler_ticks(); inline void set_profiler_ticks(int ticks); // [builtin_index]: For BUILTIN kind, tells which builtin index it has. inline int builtin_index(); inline void set_builtin_index(int id); // [stack_slots]: For kind OPTIMIZED_FUNCTION, the number of stack slots // reserved in the code prologue. inline unsigned stack_slots(); inline void set_stack_slots(unsigned slots); // [safepoint_table_start]: For kind OPTIMIZED_FUNCTION, the offset in // the instruction stream where the safepoint table starts. inline unsigned safepoint_table_offset(); inline void set_safepoint_table_offset(unsigned offset); // [back_edge_table_start]: For kind FUNCTION, the offset in the // instruction stream where the back edge table starts. inline unsigned back_edge_table_offset(); inline void set_back_edge_table_offset(unsigned offset); inline bool back_edges_patched_for_osr(); // [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in. inline byte to_boolean_state(); // [has_function_cache]: For kind STUB tells whether there is a function // cache is passed to the stub. inline bool has_function_cache(); inline void set_has_function_cache(bool flag); // [marked_for_deoptimization]: For kind OPTIMIZED_FUNCTION tells whether // the code is going to be deoptimized because of dead embedded maps. inline bool marked_for_deoptimization(); inline void set_marked_for_deoptimization(bool flag); // [constant_pool]: The constant pool for this function. inline ConstantPoolArray* constant_pool(); inline void set_constant_pool(Object* constant_pool); // Get the safepoint entry for the given pc. SafepointEntry GetSafepointEntry(Address pc); // Find an object in a stub with a specified map Object* FindNthObject(int n, Map* match_map); // Find the first allocation site in an IC stub. AllocationSite* FindFirstAllocationSite(); // Find the first map in an IC stub. Map* FindFirstMap(); void FindAllMaps(MapHandleList* maps); // Find the first handler in an IC stub. Code* FindFirstHandler(); // Find |length| handlers and put them into |code_list|. Returns false if not // enough handlers can be found. bool FindHandlers(CodeHandleList* code_list, int length = -1); // Find the handler for |map|. MaybeHandle FindHandlerForMap(Map* map); // Find the first name in an IC stub. 
Name* FindFirstName(); class FindAndReplacePattern; // For each (map-to-find, object-to-replace) pair in the pattern, this // function replaces the corresponding placeholder in the code with the // object-to-replace. The function assumes that pairs in the pattern come in // the same order as the placeholders in the code. void FindAndReplace(const FindAndReplacePattern& pattern); // The entire code object including its header is copied verbatim to the // snapshot so that it can be written in one, fast, memcpy during // deserialization. The deserializer will overwrite some pointers, rather // like a runtime linker, but the random allocation addresses used in the // mksnapshot process would still be present in the unlinked snapshot data, // which would make snapshot production non-reproducible. This method wipes // out the to-be-overwritten header data for reproducible snapshots. inline void WipeOutHeader(); // Flags operations. static inline Flags ComputeFlags( Kind kind, InlineCacheState ic_state = UNINITIALIZED, ExtraICState extra_ic_state = kNoExtraICState, StubType type = NORMAL, CacheHolderFlag holder = kCacheOnReceiver); static inline Flags ComputeMonomorphicFlags( Kind kind, ExtraICState extra_ic_state = kNoExtraICState, CacheHolderFlag holder = kCacheOnReceiver, StubType type = NORMAL); static inline Flags ComputeHandlerFlags( Kind handler_kind, StubType type = NORMAL, CacheHolderFlag holder = kCacheOnReceiver); static inline InlineCacheState ExtractICStateFromFlags(Flags flags); static inline StubType ExtractTypeFromFlags(Flags flags); static inline CacheHolderFlag ExtractCacheHolderFromFlags(Flags flags); static inline Kind ExtractKindFromFlags(Flags flags); static inline ExtraICState ExtractExtraICStateFromFlags(Flags flags); static inline Flags RemoveTypeFromFlags(Flags flags); static inline Flags RemoveTypeAndHolderFromFlags(Flags flags); // Convert a target address into a code object. static inline Code* GetCodeFromTargetAddress(Address address); // Convert an entry address into an object. static inline Object* GetObjectFromEntryAddress(Address location_of_address); // Returns the address of the first instruction. inline byte* instruction_start(); // Returns the address right after the last instruction. inline byte* instruction_end(); // Returns the size of the instructions, padding, and relocation information. inline int body_size(); // Returns the address of the first relocation info (read backwards!). inline byte* relocation_start(); // Code entry point. inline byte* entry(); // Returns true if pc is inside this object's instructions. inline bool contains(byte* pc); // Relocate the code by delta bytes. Called to signal that this code // object has been moved by delta bytes. void Relocate(intptr_t delta); // Migrate code described by desc. void CopyFrom(const CodeDesc& desc); // Returns the object size for a given body (used for allocation). static int SizeFor(int body_size) { DCHECK_SIZE_TAG_ALIGNED(body_size); return RoundUp(kHeaderSize + body_size, kCodeAlignment); } // Calculate the size of the code object to report for log events. This takes // the layout of the code object into account. int ExecutableSize() { // Check that the assumptions about the layout of the code object holds. DCHECK_EQ(static_cast(instruction_start() - address()), Code::kHeaderSize); return instruction_size() + Code::kHeaderSize; } // Locating source position. int SourcePosition(Address pc); int SourceStatementPosition(Address pc); DECLARE_CAST(Code) // Dispatched behavior. 
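// Illustrative sketch (not part of the original header): the
// ComputeFlags()/Extract*FromFlags() pattern. Each property occupies a fixed
// bit range of the 32-bit flags word and is encoded and decoded with
// shift/mask pairs, as the BitField classes in the layout section below do.
// The bit positions chosen here are hypothetical; only the kind field's
// four-bit width is stated above.
namespace example_code_flags {

// Hypothetical layout: kind in bits 0..3, ic state in bits 4..6,
// stub type in bit 7, cache holder in bit 8.
inline unsigned EncodeBits(unsigned value, int shift, int size) {
  return (value & ((1u << size) - 1)) << shift;
}
inline unsigned DecodeBits(unsigned flags, int shift, int size) {
  return (flags >> shift) & ((1u << size) - 1);
}

inline unsigned ComputeFlags(unsigned kind, unsigned ic_state, unsigned type,
                             unsigned holder) {
  return EncodeBits(kind, 0, 4) | EncodeBits(ic_state, 4, 3) |
         EncodeBits(type, 7, 1) | EncodeBits(holder, 8, 1);
}
inline unsigned ExtractKindFromFlags(unsigned flags) {
  return DecodeBits(flags, 0, 4);
}

}  // namespace example_code_flags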
int CodeSize() { return SizeFor(body_size()); } inline void CodeIterateBody(ObjectVisitor* v); template inline void CodeIterateBody(Heap* heap); DECLARE_PRINTER(Code) DECLARE_VERIFIER(Code) void ClearInlineCaches(); void ClearInlineCaches(Kind kind); BailoutId TranslatePcOffsetToAstId(uint32_t pc_offset); uint32_t TranslateAstIdToPcOffset(BailoutId ast_id); #define DECLARE_CODE_AGE_ENUM(X) k##X##CodeAge, enum Age { kNotExecutedCodeAge = -2, kExecutedOnceCodeAge = -1, kNoAgeCodeAge = 0, CODE_AGE_LIST(DECLARE_CODE_AGE_ENUM) kAfterLastCodeAge, kFirstCodeAge = kNotExecutedCodeAge, kLastCodeAge = kAfterLastCodeAge - 1, kCodeAgeCount = kAfterLastCodeAge - kNotExecutedCodeAge - 1, kIsOldCodeAge = kSexagenarianCodeAge, kPreAgedCodeAge = kIsOldCodeAge - 1 }; #undef DECLARE_CODE_AGE_ENUM // Code aging. Indicates how many full GCs this code has survived without // being entered through the prologue. Used to determine when it is // relatively safe to flush this code object and replace it with the lazy // compilation stub. static void MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate); static void MarkCodeAsExecuted(byte* sequence, Isolate* isolate); void MakeOlder(MarkingParity); static bool IsYoungSequence(Isolate* isolate, byte* sequence); bool IsOld(); Age GetAge(); // Gets the raw code age, including psuedo code-age values such as // kNotExecutedCodeAge and kExecutedOnceCodeAge. Age GetRawAge(); static inline Code* GetPreAgedCodeAgeStub(Isolate* isolate) { return GetCodeAgeStub(isolate, kNotExecutedCodeAge, NO_MARKING_PARITY); } void PrintDeoptLocation(FILE* out, int bailout_id); bool CanDeoptAt(Address pc); #ifdef VERIFY_HEAP void VerifyEmbeddedObjectsDependency(); #endif inline bool CanContainWeakObjects() { return is_optimized_code() || is_weak_stub(); } inline bool IsWeakObject(Object* object) { return (is_optimized_code() && !is_turbofanned() && IsWeakObjectInOptimizedCode(object)) || (is_weak_stub() && IsWeakObjectInIC(object)); } static inline bool IsWeakObjectInOptimizedCode(Object* object); static inline bool IsWeakObjectInIC(Object* object); // Max loop nesting marker used to postpose OSR. We don't take loop // nesting that is deeper than 5 levels into account. static const int kMaxLoopNestingMarker = 6; // Layout description. static const int kInstructionSizeOffset = HeapObject::kHeaderSize; static const int kRelocationInfoOffset = kInstructionSizeOffset + kIntSize; static const int kHandlerTableOffset = kRelocationInfoOffset + kPointerSize; static const int kDeoptimizationDataOffset = kHandlerTableOffset + kPointerSize; // For FUNCTION kind, we store the type feedback info here. static const int kTypeFeedbackInfoOffset = kDeoptimizationDataOffset + kPointerSize; static const int kNextCodeLinkOffset = kTypeFeedbackInfoOffset + kPointerSize; static const int kGCMetadataOffset = kNextCodeLinkOffset + kPointerSize; static const int kICAgeOffset = kGCMetadataOffset + kPointerSize; static const int kFlagsOffset = kICAgeOffset + kIntSize; static const int kKindSpecificFlags1Offset = kFlagsOffset + kIntSize; static const int kKindSpecificFlags2Offset = kKindSpecificFlags1Offset + kIntSize; // Note: We might be able to squeeze this into the flags above. static const int kPrologueOffset = kKindSpecificFlags2Offset + kIntSize; static const int kConstantPoolOffset = kPrologueOffset + kPointerSize; static const int kHeaderPaddingStart = kConstantPoolOffset + kIntSize; // Add padding to align the instruction start following right after // the Code object header. 
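// Illustrative sketch (not part of the original header): the alignment
// arithmetic used for the Code object layout. The header is padded up to the
// code alignment so the first instruction starts at an aligned address, and
// the total object size rounds header plus body up to the same alignment, as
// SizeFor() above does. kCodeAlignment is taken to be 32 bytes here purely
// for illustration.
namespace example_code_layout {

static const int kCodeAlignment = 32;  // assumption for the example
static const int kCodeAlignmentMask = kCodeAlignment - 1;

inline int AlignedHeaderSize(int unpadded_header_size) {
  return (unpadded_header_size + kCodeAlignmentMask) & ~kCodeAlignmentMask;
}

inline int SizeFor(int header_size, int body_size) {
  // Mirrors RoundUp(kHeaderSize + body_size, kCodeAlignment).
  return (header_size + body_size + kCodeAlignmentMask) & ~kCodeAlignmentMask;
}

}  // namespace example_code_layout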
static const int kHeaderSize = (kHeaderPaddingStart + kCodeAlignmentMask) & ~kCodeAlignmentMask; // Byte offsets within kKindSpecificFlags1Offset. static const int kOptimizableOffset = kKindSpecificFlags1Offset; static const int kFullCodeFlags = kOptimizableOffset + 1; class FullCodeFlagsHasDeoptimizationSupportField: public BitField {}; // NOLINT class FullCodeFlagsHasDebugBreakSlotsField: public BitField {}; class FullCodeFlagsIsCompiledOptimizable: public BitField {}; static const int kProfilerTicksOffset = kFullCodeFlags + 1; // Flags layout. BitField. class ICStateField : public BitField {}; class TypeField : public BitField {}; class CacheHolderField : public BitField {}; class KindField : public BitField {}; class ExtraICStateField: public BitField {}; // NOLINT // KindSpecificFlags1 layout (STUB and OPTIMIZED_FUNCTION) static const int kStackSlotsFirstBit = 0; static const int kStackSlotsBitCount = 24; static const int kHasFunctionCacheBit = kStackSlotsFirstBit + kStackSlotsBitCount; static const int kMarkedForDeoptimizationBit = kHasFunctionCacheBit + 1; static const int kWeakStubBit = kMarkedForDeoptimizationBit + 1; static const int kInvalidatedWeakStubBit = kWeakStubBit + 1; static const int kIsTurbofannedBit = kInvalidatedWeakStubBit + 1; STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32); STATIC_ASSERT(kIsTurbofannedBit + 1 <= 32); class StackSlotsField: public BitField {}; // NOLINT class HasFunctionCacheField : public BitField { }; // NOLINT class MarkedForDeoptimizationField : public BitField {}; // NOLINT class WeakStubField : public BitField {}; // NOLINT class InvalidatedWeakStubField : public BitField {}; // NOLINT class IsTurbofannedField : public BitField { }; // NOLINT // KindSpecificFlags2 layout (ALL) static const int kIsCrankshaftedBit = 0; class IsCrankshaftedField: public BitField {}; // NOLINT // KindSpecificFlags2 layout (STUB and OPTIMIZED_FUNCTION) static const int kSafepointTableOffsetFirstBit = kIsCrankshaftedBit + 1; static const int kSafepointTableOffsetBitCount = 24; STATIC_ASSERT(kSafepointTableOffsetFirstBit + kSafepointTableOffsetBitCount <= 32); STATIC_ASSERT(1 + kSafepointTableOffsetBitCount <= 32); class SafepointTableOffsetField: public BitField {}; // NOLINT // KindSpecificFlags2 layout (FUNCTION) class BackEdgeTableOffsetField: public BitField {}; // NOLINT class AllowOSRAtLoopNestingLevelField: public BitField {}; // NOLINT STATIC_ASSERT(AllowOSRAtLoopNestingLevelField::kMax >= kMaxLoopNestingMarker); static const int kArgumentsBits = 16; static const int kMaxArguments = (1 << kArgumentsBits) - 1; // This constant should be encodable in an ARM instruction. static const int kFlagsNotUsedInLookup = TypeField::kMask | CacheHolderField::kMask; private: friend class RelocIterator; friend class Deoptimizer; // For FindCodeAgeSequence. void ClearInlineCaches(Kind* kind); // Code aging byte* FindCodeAgeSequence(); static void GetCodeAgeAndParity(Code* code, Age* age, MarkingParity* parity); static void GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age, MarkingParity* parity); static Code* GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity); // Code aging -- platform-specific static void PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Age age, MarkingParity parity); DISALLOW_IMPLICIT_CONSTRUCTORS(Code); }; class CompilationInfo; // This class describes the layout of dependent codes array of a map. The // array is partitioned into several groups of dependent codes. 
Each group // contains codes with the same dependency on the map. The array has the // following layout for n dependency groups: // // +----+----+-----+----+---------+----------+-----+---------+-----------+ // | C1 | C2 | ... | Cn | group 1 | group 2 | ... | group n | undefined | // +----+----+-----+----+---------+----------+-----+---------+-----------+ // // The first n elements are Smis, each of them specifies the number of codes // in the corresponding group. The subsequent elements contain grouped code // objects. The suffix of the array can be filled with the undefined value if // the number of codes is less than the length of the array. The order of the // code objects within a group is not preserved. // // All code indexes used in the class are counted starting from the first // code object of the first group. In other words, code index 0 corresponds // to array index n = kCodesStartIndex. class DependentCode: public FixedArray { public: enum DependencyGroup { // Group of IC stubs that weakly embed this map and depend on being // invalidated when the map is garbage collected. Dependent IC stubs form // a linked list. This group stores only the head of the list. This means // that the number_of_entries(kWeakICGroup) is 0 or 1. kWeakICGroup, // Group of code that weakly embed this map and depend on being // deoptimized when the map is garbage collected. kWeakCodeGroup, // Group of code that embed a transition to this map, and depend on being // deoptimized when the transition is replaced by a new version. kTransitionGroup, // Group of code that omit run-time prototype checks for prototypes // described by this map. The group is deoptimized whenever an object // described by this map changes shape (and transitions to a new map), // possibly invalidating the assumptions embedded in the code. kPrototypeCheckGroup, // Group of code that depends on elements not being added to objects with // this map. kElementsCantBeAddedGroup, // Group of code that depends on global property values in property cells // not being changed. kPropertyCellChangedGroup, // Group of code that omit run-time type checks for the field(s) introduced // by this map. kFieldTypeGroup, // Group of code that omit run-time type checks for initial maps of // constructors. kInitialMapChangedGroup, // Group of code that depends on tenuring information in AllocationSites // not being changed. kAllocationSiteTenuringChangedGroup, // Group of code that depends on element transition information in // AllocationSites not being changed. kAllocationSiteTransitionChangedGroup }; static const int kGroupCount = kAllocationSiteTransitionChangedGroup + 1; // Array for holding the index of the first code object of each group. // The last element stores the total number of code objects. 
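// Illustrative sketch (not part of the original header): the computation the
// GroupStartIndexes helper below performs. The first kGroupCount array slots
// hold per-group counts, so the start index of each group is the running sum
// of the preceding counts, and code index 0 maps to array index
// kCodesStartIndex. Names are illustrative only.
namespace example_dependent_code {

static const int kGroupCount = 10;  // matches the ten groups enumerated above

// counts[g] is the number of code objects in group g. start[g] receives the
// code index of the first object of group g; start[kGroupCount] ends up being
// the total number of code objects, as in GroupStartIndexes.
inline void ComputeGroupStartIndexes(const int counts[kGroupCount],
                                     int start[kGroupCount + 1]) {
  start[0] = 0;
  for (int g = 0; g < kGroupCount; g++) {
    start[g + 1] = start[g] + counts[g];
  }
}

// Code index -> array index: the grouped code objects begin right after the
// per-group counts.
inline int CodeIndexToArrayIndex(int code_index) {
  return kGroupCount /* == kCodesStartIndex */ + code_index;
}

}  // namespace example_dependent_code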
class GroupStartIndexes { public: explicit GroupStartIndexes(DependentCode* entries); void Recompute(DependentCode* entries); int at(int i) { return start_indexes_[i]; } int number_of_entries() { return start_indexes_[kGroupCount]; } private: int start_indexes_[kGroupCount + 1]; }; bool Contains(DependencyGroup group, Code* code); static Handle Insert(Handle entries, DependencyGroup group, Handle object); void UpdateToFinishedCode(DependencyGroup group, CompilationInfo* info, Code* code); void RemoveCompilationInfo(DependentCode::DependencyGroup group, CompilationInfo* info); void DeoptimizeDependentCodeGroup(Isolate* isolate, DependentCode::DependencyGroup group); bool MarkCodeForDeoptimization(Isolate* isolate, DependentCode::DependencyGroup group); void AddToDependentICList(Handle stub); // The following low-level accessors should only be used by this class // and the mark compact collector. inline int number_of_entries(DependencyGroup group); inline void set_number_of_entries(DependencyGroup group, int value); inline bool is_code_at(int i); inline Code* code_at(int i); inline CompilationInfo* compilation_info_at(int i); inline void set_object_at(int i, Object* object); inline Object** slot_at(int i); inline Object* object_at(int i); inline void clear_at(int i); inline void copy(int from, int to); DECLARE_CAST(DependentCode) static DependentCode* ForObject(Handle object, DependencyGroup group); static const char* DependencyGroupName(DependencyGroup group); static void SetMarkedForDeoptimization(Code* code, DependencyGroup group); private: // Make a room at the end of the given group by moving out the first // code objects of the subsequent groups. inline void ExtendGroup(DependencyGroup group); static const int kCodesStartIndex = kGroupCount; }; // All heap objects have a Map that describes their structure. // A Map contains information about: // - Size information about the object // - How to iterate over an object (for garbage collection) class Map: public HeapObject { public: // Instance size. // Size in bytes or kVariableSizeSentinel if instances do not have // a fixed size. inline int instance_size(); inline void set_instance_size(int value); // Count of properties allocated in the object. inline int inobject_properties(); inline void set_inobject_properties(int value); // Count of property fields pre-allocated in the object when first allocated. inline int pre_allocated_property_fields(); inline void set_pre_allocated_property_fields(int value); // Instance type. inline InstanceType instance_type(); inline void set_instance_type(InstanceType value); // Tells how many unused property fields are available in the // instance (only used for JSObject in fast mode). inline int unused_property_fields(); inline void set_unused_property_fields(int value); // Bit field. inline byte bit_field(); inline void set_bit_field(byte value); // Bit field 2. inline byte bit_field2(); inline void set_bit_field2(byte value); // Bit field 3. 

// All heap objects have a Map that describes their structure.
//  A Map contains information about:
//  - Size information about the object
//  - How to iterate over an object (for garbage collection)
class Map: public HeapObject {
 public:
  // Instance size.
  // Size in bytes or kVariableSizeSentinel if instances do not have
  // a fixed size.
  inline int instance_size();
  inline void set_instance_size(int value);

  // Count of properties allocated in the object.
  inline int inobject_properties();
  inline void set_inobject_properties(int value);

  // Count of property fields pre-allocated in the object when first allocated.
  inline int pre_allocated_property_fields();
  inline void set_pre_allocated_property_fields(int value);

  // Instance type.
  inline InstanceType instance_type();
  inline void set_instance_type(InstanceType value);

  // Tells how many unused property fields are available in the
  // instance (only used for JSObject in fast mode).
  inline int unused_property_fields();
  inline void set_unused_property_fields(int value);

  // Bit field.
  inline byte bit_field();
  inline void set_bit_field(byte value);

  // Bit field 2.
  inline byte bit_field2();
  inline void set_bit_field2(byte value);

  // Bit field 3.
  inline uint32_t bit_field3();
  inline void set_bit_field3(uint32_t bits);

  class EnumLengthBits : public BitField<int,
      0, kDescriptorIndexBitCount> {};  // NOLINT
  class NumberOfOwnDescriptorsBits : public BitField<int,
      kDescriptorIndexBitCount, kDescriptorIndexBitCount> {};  // NOLINT
  STATIC_ASSERT(kDescriptorIndexBitCount + kDescriptorIndexBitCount == 20);
  class DictionaryMap : public BitField<bool, 20, 1> {};
  class OwnsDescriptors : public BitField<bool, 21, 1> {};
  class HasInstanceCallHandler : public BitField<bool, 22, 1> {};
  class Deprecated : public BitField<bool, 23, 1> {};
  class IsFrozen : public BitField<bool, 24, 1> {};
  class IsUnstable : public BitField<bool, 25, 1> {};
  class IsMigrationTarget : public BitField<bool, 26, 1> {};
  class DoneInobjectSlackTracking : public BitField<bool, 27, 1> {};
  // Bit 28 is free.

  // Keep this bit field at the very end for better code in
  // Builtins::kJSConstructStubGeneric stub.
  class ConstructionCount : public BitField<int, 29, 3> {};
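
  // Taken together, the declarations above partition the 32 bits of bit
  // field 3 (this only summarizes the BitField ranges declared above, it adds
  // no new invariant): the enum length and the number of own descriptors use
  // kDescriptorIndexBitCount bits each (20 bits in total), the boolean flags
  // take one bit apiece after that, bit 28 stays free, and the construction
  // count occupies the topmost bits.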

  // Tells whether the object in the prototype property will be used
  // for instances created from this function. If the prototype
  // property is set to a value that is not a JSObject, the prototype
  // property will not be used to create instances of the function.
  // See ECMA-262, 13.2.2.
  inline void set_non_instance_prototype(bool value);
  inline bool has_non_instance_prototype();

  // Tells whether function has special prototype property. If not, prototype
  // property will not be created when accessed (will return undefined),
  // and construction from this function will not be allowed.
  inline void set_function_with_prototype(bool value);
  inline bool function_with_prototype();

  // Tells whether the instance with this map should be ignored by the
  // Object.getPrototypeOf() function and the __proto__ accessor.
  inline void set_is_hidden_prototype() {
    set_bit_field(bit_field() | (1 << kIsHiddenPrototype));
  }

  inline bool is_hidden_prototype() {
    return ((1 << kIsHiddenPrototype) & bit_field()) != 0;
  }

  // Records and queries whether the instance has a named interceptor.
  inline void set_has_named_interceptor() {
    set_bit_field(bit_field() | (1 << kHasNamedInterceptor));
  }

  inline bool has_named_interceptor() {
    return ((1 << kHasNamedInterceptor) & bit_field()) != 0;
  }

  // Records and queries whether the instance has an indexed interceptor.
  inline void set_has_indexed_interceptor() {
    set_bit_field(bit_field() | (1 << kHasIndexedInterceptor));
  }

  inline bool has_indexed_interceptor() {
    return ((1 << kHasIndexedInterceptor) & bit_field()) != 0;
  }

  // Tells whether the instance is undetectable.
  // An undetectable object is a special class of JSObject: 'typeof' operator
  // returns undefined, ToBoolean returns false. Otherwise it behaves like
  // a normal JS object. It is useful for implementing undetectable
  // document.all in Firefox & Safari.
  // See https://bugzilla.mozilla.org/show_bug.cgi?id=248549.
  inline void set_is_undetectable() {
    set_bit_field(bit_field() | (1 << kIsUndetectable));
  }

  inline bool is_undetectable() {
    return ((1 << kIsUndetectable) & bit_field()) != 0;
  }

  // Tells whether the instance is observed.
  inline void set_is_observed() {
    set_bit_field(bit_field() | (1 << kIsObserved));
  }

  inline bool is_observed() {
    return ((1 << kIsObserved) & bit_field()) != 0;
  }

  inline void set_is_extensible(bool value);
  inline bool is_extensible();
  inline void set_is_prototype_map(bool value);
  inline bool is_prototype_map();

  inline void set_elements_kind(ElementsKind elements_kind) {
    DCHECK(elements_kind < kElementsKindCount);
    DCHECK(kElementsKindCount <= (1 << Map::ElementsKindBits::kSize));
    set_bit_field2(Map::ElementsKindBits::update(bit_field2(), elements_kind));
    DCHECK(this->elements_kind() == elements_kind);
  }

  inline ElementsKind elements_kind() {
    return Map::ElementsKindBits::decode(bit_field2());
  }

  // Tells whether the instance has fast elements that are only Smis.
  inline bool has_fast_smi_elements() {
    return IsFastSmiElementsKind(elements_kind());
  }

  // Tells whether the instance has fast object elements.
  inline bool has_fast_object_elements() {
    return IsFastObjectElementsKind(elements_kind());
  }

  inline bool has_fast_smi_or_object_elements() {
    return IsFastSmiOrObjectElementsKind(elements_kind());
  }

  inline bool has_fast_double_elements() {
    return IsFastDoubleElementsKind(elements_kind());
  }

  inline bool has_fast_elements() {
    return IsFastElementsKind(elements_kind());
  }

  inline bool has_sloppy_arguments_elements() {
    return elements_kind() == SLOPPY_ARGUMENTS_ELEMENTS;
  }

  inline bool has_external_array_elements() {
    return IsExternalArrayElementsKind(elements_kind());
  }

  inline bool has_fixed_typed_array_elements() {
    return IsFixedTypedArrayElementsKind(elements_kind());
  }

  inline bool has_dictionary_elements() {
    return IsDictionaryElementsKind(elements_kind());
  }

  inline bool has_slow_elements_kind() {
    return elements_kind() == DICTIONARY_ELEMENTS ||
           elements_kind() == SLOPPY_ARGUMENTS_ELEMENTS;
  }

  static bool IsValidElementsTransition(ElementsKind from_kind,
                                        ElementsKind to_kind);

  // Returns true if the current map doesn't have DICTIONARY_ELEMENTS but a
  // map with DICTIONARY_ELEMENTS was found in the prototype chain.
  bool DictionaryElementsInPrototypeChainOnly();

  inline bool HasTransitionArray() const;
  inline bool HasElementsTransition();
  inline Map* elements_transition_map();

  inline Map* GetTransition(int transition_index);
  inline int SearchTransition(Name* name);
  inline FixedArrayBase* GetInitialElements();

  DECL_ACCESSORS(transitions, TransitionArray)

  static inline Handle<String> ExpectedTransitionKey(Handle<Map> map);
  static inline Handle<Map> ExpectedTransitionTarget(Handle<Map> map);

  // Try to follow an existing transition to a field with attributes NONE. The
  // return value indicates whether the transition was successful.
  static inline Handle<Map> FindTransitionToField(Handle<Map> map,
                                                  Handle<Name> key);

  Map* FindRootMap();
  Map* FindFieldOwner(int descriptor);

  inline int GetInObjectPropertyOffset(int index);

  int NumberOfFields();

  // TODO(ishell): candidate with JSObject::MigrateToMap().
  bool InstancesNeedRewriting(Map* target, int target_number_of_fields,
                              int target_inobject, int target_unused,
                              int* old_number_of_fields);
  // TODO(ishell): moveit!
  static Handle<Map> GeneralizeAllFieldRepresentations(Handle<Map> map);

  MUST_USE_RESULT static Handle<HeapType> GeneralizeFieldType(
      Handle<HeapType> type1, Handle<HeapType> type2, Isolate* isolate);
  static void GeneralizeFieldType(Handle<Map> map, int modify_index,
                                  Handle<HeapType> new_field_type);
  static Handle<Map> GeneralizeRepresentation(
      Handle<Map> map, int modify_index, Representation new_representation,
      Handle<HeapType> new_field_type, StoreMode store_mode);
  static Handle<Map> CopyGeneralizeAllRepresentations(
      Handle<Map> map, int modify_index, StoreMode store_mode,
      PropertyAttributes attributes, const char* reason);
  static Handle<Map> CopyGeneralizeAllRepresentations(
      Handle<Map> map, int modify_index, StoreMode store_mode,
      const char* reason);

  static Handle<Map> PrepareForDataProperty(Handle<Map> old_map,
                                            int descriptor_number,
                                            Handle<Object> value);

  static Handle<Map> Normalize(Handle<Map> map,
                               PropertyNormalizationMode mode);

  // Returns the constructor name (the name (possibly, inferred name) of the
  // function that was used to instantiate the object).
  String* constructor_name();

  // Tells whether the map is used for JSObjects in dictionary mode (i.e.
  // normalized objects, i.e. objects for which HasFastProperties returns
  // false). A map can never be used for both dictionary mode and fast mode
  // JSObjects. False by default and for HeapObjects that are not JSObjects.
  inline void set_dictionary_map(bool value);
  inline bool is_dictionary_map();

  // Tells whether the instance needs security checks when accessing its
  // properties.
  inline void set_is_access_check_needed(bool access_check_needed);
  inline bool is_access_check_needed();

  // Returns true if map has a non-empty stub code cache.
  inline bool has_code_cache();

  // [prototype]: implicit prototype object.
  DECL_ACCESSORS(prototype, Object)

  // [constructor]: points back to the function responsible for this map.
  DECL_ACCESSORS(constructor, Object)

  // [instance descriptors]: describes the object.
  DECL_ACCESSORS(instance_descriptors, DescriptorArray)
  inline void InitializeDescriptors(DescriptorArray* descriptors);

  // [stub cache]: contains stubs compiled for this map.
  DECL_ACCESSORS(code_cache, Object)

  // [dependent code]: list of optimized codes that weakly embed this map.
  DECL_ACCESSORS(dependent_code, DependentCode)

  // [back pointer]: points back to the parent map from which a transition
  // leads to this map. The field overlaps with prototype transitions and the
  // back pointer will be moved into the prototype transitions array if
  // required.
  inline Object* GetBackPointer();
  inline void SetBackPointer(Object* value,
                             WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
  inline void init_back_pointer(Object* undefined);

  // [prototype transitions]: cache of prototype transitions.
  // Prototype transition is a transition that happens
  // when we change object's prototype to a new one.
  // Cache format:
  //    0: finger - index of the first free cell in the cache
  //    1: back pointer that overlaps with prototype transitions field.
  //    2 + 2 * i: prototype
  //    3 + 2 * i: target map
  inline FixedArray* GetPrototypeTransitions();
  inline bool HasPrototypeTransitions();

  static const int kProtoTransitionHeaderSize = 1;
  static const int kProtoTransitionNumberOfEntriesOffset = 0;
  static const int kProtoTransitionElementsPerEntry = 2;
  static const int kProtoTransitionPrototypeOffset = 0;
  static const int kProtoTransitionMapOffset = 1;

  inline int NumberOfProtoTransitions() {
    FixedArray* cache = GetPrototypeTransitions();
    if (cache->length() == 0) return 0;
    return
        Smi::cast(cache->get(kProtoTransitionNumberOfEntriesOffset))->value();
  }

  inline void SetNumberOfProtoTransitions(int value) {
    FixedArray* cache = GetPrototypeTransitions();
    DCHECK(cache->length() != 0);
    cache->set(kProtoTransitionNumberOfEntriesOffset, Smi::FromInt(value));
  }
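
  // Reading the constants above (an orientation note, not an additional
  // invariant): entry |i| of the cache starts at index
  // kProtoTransitionHeaderSize + i * kProtoTransitionElementsPerEntry, with
  // the prototype stored at + kProtoTransitionPrototypeOffset and the target
  // map at + kProtoTransitionMapOffset within the entry.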
  // Lookup in the map's instance descriptors and fill out the result
  // with the given holder if the name is found. The holder may be
  // NULL when this function is used from the compiler.
  inline void LookupDescriptor(JSObject* holder,
                               Name* name,
                               LookupResult* result);

  inline void LookupTransition(JSObject* holder,
                               Name* name,
                               LookupResult* result);

  inline PropertyDetails GetLastDescriptorDetails();

  // The size of transition arrays is limited so they do not end up in large
  // object space. Otherwise ClearNonLiveTransitions would leak memory while
  // applying in-place right trimming.
  inline bool CanHaveMoreTransitions();

  int LastAdded() {
    int number_of_own_descriptors = NumberOfOwnDescriptors();
    DCHECK(number_of_own_descriptors > 0);
    return number_of_own_descriptors - 1;
  }

  int NumberOfOwnDescriptors() {
    return NumberOfOwnDescriptorsBits::decode(bit_field3());
  }

  void SetNumberOfOwnDescriptors(int number) {
    DCHECK(number <= instance_descriptors()->number_of_descriptors());
    set_bit_field3(NumberOfOwnDescriptorsBits::update(bit_field3(), number));
  }

  inline Cell* RetrieveDescriptorsPointer();

  int EnumLength() { return EnumLengthBits::decode(bit_field3()); }

  void SetEnumLength(int length) {
    if (length != kInvalidEnumCacheSentinel) {
      DCHECK(length >= 0);
      DCHECK(length == 0 || instance_descriptors()->HasEnumCache());
      DCHECK(length <= NumberOfOwnDescriptors());
    }
    set_bit_field3(EnumLengthBits::update(bit_field3(), length));
  }

  inline bool owns_descriptors();
  inline void set_owns_descriptors(bool owns_descriptors);
  inline bool has_instance_call_handler();
  inline void set_has_instance_call_handler();
  inline void freeze();
  inline bool is_frozen();
  inline void mark_unstable();
  inline bool is_stable();
  inline void set_migration_target(bool value);
  inline bool is_migration_target();
  inline void set_done_inobject_slack_tracking(bool value);
  inline bool done_inobject_slack_tracking();
  inline void set_construction_count(int value);
  inline int construction_count();
  inline void deprecate();
  inline bool is_deprecated();
  inline bool CanBeDeprecated();

  // Returns a non-deprecated version of the input. If the input was not
  // deprecated, it is directly returned. Otherwise, the non-deprecated
  // version is found by re-transitioning from the root of the transition
  // tree using the descriptor array of the map. Returns NULL if no updated
  // map is found. This method also applies any pending migrations along the
  // prototype chain.
  static MaybeHandle<Map> TryUpdate(Handle<Map> map) WARN_UNUSED_RESULT;
  // Same as above, but does not touch the prototype chain.
  static MaybeHandle<Map> TryUpdateInternal(Handle<Map> map)
      WARN_UNUSED_RESULT;

  // Returns a non-deprecated version of the input. This method may deprecate
  // existing maps along the way if encodings conflict. Not for use while
  // gathering type feedback. Use TryUpdate in those cases instead.
  static Handle<Map> Update(Handle<Map> map);

  static Handle<Map> CopyDropDescriptors(Handle<Map> map);
  static Handle<Map> CopyInsertDescriptor(Handle<Map> map,
                                          Descriptor* descriptor,
                                          TransitionFlag flag);

  MUST_USE_RESULT static MaybeHandle<Map> CopyWithField(
      Handle<Map> map, Handle<Name> name, Handle<HeapType> type,
      PropertyAttributes attributes, Representation representation,
      TransitionFlag flag);

  MUST_USE_RESULT static MaybeHandle<Map> CopyWithConstant(
      Handle<Map> map, Handle<Name> name, Handle<Object> constant,
      PropertyAttributes attributes, TransitionFlag flag);

  // Returns a new map with all transitions dropped from the given map and
  // the ElementsKind set.
  static Handle<Map> TransitionElementsTo(Handle<Map> map,
                                          ElementsKind to_kind);

  static Handle<Map> AsElementsKind(Handle<Map> map, ElementsKind kind);

  static Handle<Map> CopyAsElementsKind(Handle<Map> map,
                                        ElementsKind kind,
                                        TransitionFlag flag);

  static Handle<Map> CopyForObserved(Handle<Map> map);

  static Handle<Map> CopyForFreeze(Handle<Map> map);

  // Maximal number of fast properties. Used to restrict the number of map
  // transitions to avoid an explosion in the number of maps for objects used
  // as dictionaries.
  inline bool TooManyFastProperties(StoreFromKeyed store_mode);
  static Handle<Map> TransitionToDataProperty(Handle<Map> map,
                                              Handle<Name> name,
                                              Handle<Object> value,
                                              PropertyAttributes attributes,
                                              StoreFromKeyed store_mode);
  static Handle<Map> TransitionToAccessorProperty(
      Handle<Map> map, Handle<Name> name, AccessorComponent component,
      Handle<Object> accessor, PropertyAttributes attributes);
  static Handle<Map> ReconfigureDataProperty(Handle<Map> map, int descriptor,
                                             PropertyAttributes attributes);

  inline void AppendDescriptor(Descriptor* desc);

  // Returns a copy of the map, with all transitions dropped from the
  // instance descriptors.
  static Handle<Map> Copy(Handle<Map> map);
  static Handle<Map> Create(Isolate* isolate, int inobject_properties);

  // Returns the next free property index (only valid for FAST MODE).
  int NextFreePropertyIndex();

  // Returns the number of properties described in instance_descriptors
  // filtering out properties with the specified attributes.
  int NumberOfDescribedProperties(DescriptorFlag which = OWN_DESCRIPTORS,
                                  PropertyAttributes filter = NONE);

  // Returns the number of slots allocated for the initial properties
  // backing storage for instances of this map.
  int InitialPropertiesLength() {
    return pre_allocated_property_fields() + unused_property_fields() -
           inobject_properties();
  }

  DECLARE_CAST(Map)

  // Code cache operations.

  // Clears the code cache.
  inline void ClearCodeCache(Heap* heap);

  // Update code cache.
  static void UpdateCodeCache(Handle<Map> map,
                              Handle<Name> name,
                              Handle<Code> code);

  // Extend the descriptor array of the map with the list of descriptors.
  // In case of duplicates, the latest descriptor is used.
  static void AppendCallbackDescriptors(Handle<Map> map,
                                        Handle<Object> descriptors);

  static void EnsureDescriptorSlack(Handle<Map> map, int slack);

  // Returns the found code or undefined if absent.
  Object* FindInCodeCache(Name* name, Code::Flags flags);

  // Returns the non-negative index of the code object if it is in the
  // cache and -1 otherwise.
  int IndexInCodeCache(Object* name, Code* code);

  // Removes a code object from the code cache at the given index.
  void RemoveFromCodeCache(Name* name, Code* code, int index);

  // Set all map transitions from this map to dead maps to null. Also clear
  // back pointers in transition targets so that we do not process this map
  // again while following back pointers.
  void ClearNonLiveTransitions(Heap* heap);

  // Computes a hash value for this map, to be used in HashTables and such.
  int Hash();

  // Returns the map that this map transitions to if its elements_kind
  // is changed to |elements_kind|, or NULL if no such map is cached yet.
  Map* LookupElementsTransitionMap(ElementsKind elements_kind);

  // Returns the transitioned map for this map with the most generic
  // elements_kind that's found in |candidates|, or null handle if no match is
  // found at all.
  Handle<Map> FindTransitionedMap(MapHandleList* candidates);

  bool CanTransition() {
    // Only JSObject and subtypes have map transitions and back pointers.
    STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
    return instance_type() >= FIRST_JS_OBJECT_TYPE;
  }

  bool IsJSObjectMap() {
    return instance_type() >= FIRST_JS_OBJECT_TYPE;
  }
  bool IsJSProxyMap() {
    InstanceType type = instance_type();
    return FIRST_JS_PROXY_TYPE <= type && type <= LAST_JS_PROXY_TYPE;
  }
  bool IsJSGlobalProxyMap() {
    return instance_type() == JS_GLOBAL_PROXY_TYPE;
  }
  bool IsJSGlobalObjectMap() {
    return instance_type() == JS_GLOBAL_OBJECT_TYPE;
  }
  bool IsGlobalObjectMap() {
    const InstanceType type = instance_type();
    return type == JS_GLOBAL_OBJECT_TYPE || type == JS_BUILTINS_OBJECT_TYPE;
  }

  inline bool CanOmitMapChecks();

  static void AddDependentCompilationInfo(Handle<Map> map,
                                          DependentCode::DependencyGroup group,
                                          CompilationInfo* info);

  static void AddDependentCode(Handle<Map> map,
                               DependentCode::DependencyGroup group,
                               Handle<Code> code);
  static void AddDependentIC(Handle<Map> map, Handle<Code> stub);

  bool IsMapInArrayPrototypeChain();

  // Dispatched behavior.
  DECLARE_PRINTER(Map)
  DECLARE_VERIFIER(Map)

#ifdef VERIFY_HEAP
  void DictionaryMapVerify();
  void VerifyOmittedMapChecks();
#endif

  inline int visitor_id();
  inline void set_visitor_id(int visitor_id);

  typedef void (*TraverseCallback)(Map* map, void* data);

  void TraverseTransitionTree(TraverseCallback callback, void* data);

  // When you set the prototype of an object using the __proto__ accessor you
  // need a new map for the object (the prototype is stored in the map). In
  // order not to multiply maps unnecessarily we store these as transitions in
  // the original map. That way we can transition to the same map if the same
  // prototype is set, rather than creating a new map every time. The
  // transitions are in the form of a map where the keys are prototype objects
  // and the values are the maps they are transitioned to.
  static const int kMaxCachedPrototypeTransitions = 256;
  static Handle<Map> TransitionToPrototype(Handle<Map> map,
                                           Handle<Object> prototype);

  static const int kMaxPreAllocatedPropertyFields = 255;

  // Layout description.
  static const int kInstanceSizesOffset = HeapObject::kHeaderSize;
  static const int kInstanceAttributesOffset = kInstanceSizesOffset + kIntSize;
  static const int kBitField3Offset = kInstanceAttributesOffset + kIntSize;
  static const int kPrototypeOffset = kBitField3Offset + kPointerSize;
  static const int kConstructorOffset = kPrototypeOffset + kPointerSize;
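
  // For orientation only (the numbers follow from the constants above and
  // assume a 32-bit target where HeapObject::kHeaderSize, kIntSize and
  // kPointerSize are all 4): instance sizes start at byte offset 4, instance
  // attributes at 8, bit field 3 at 12, the prototype at 16 and the
  // constructor at 20. On 64-bit targets the pointer-sized fields shift
  // accordingly.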
  // Storage for the transition array is overloaded to directly contain a back
  // pointer if unused. When the map has transitions, the back pointer is
  // transferred to the transition array and accessed through an extra
  // indirection.
  static const int kTransitionsOrBackPointerOffset =
      kConstructorOffset + kPointerSize;
  static const int kDescriptorsOffset =
      kTransitionsOrBackPointerOffset + kPointerSize;
  static const int kCodeCacheOffset = kDescriptorsOffset + kPointerSize;
  static const int kDependentCodeOffset = kCodeCacheOffset + kPointerSize;
  static const int kSize = kDependentCodeOffset + kPointerSize;

  // Layout of pointer fields. Heap iteration code relies on them
  // being continuously allocated.
  static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
  static const int kPointerFieldsEndOffset = kSize;

  // Byte offsets within kInstanceSizesOffset.
  static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
  static const int kInObjectPropertiesByte = 1;
  static const int kInObjectPropertiesOffset =
      kInstanceSizesOffset + kInObjectPropertiesByte;
  static const int kPreAllocatedPropertyFieldsByte = 2;
  static const int kPreAllocatedPropertyFieldsOffset =
      kInstanceSizesOffset + kPreAllocatedPropertyFieldsByte;
  static const int kVisitorIdByte = 3;
  static const int kVisitorIdOffset = kInstanceSizesOffset + kVisitorIdByte;

  // Byte offsets within kInstanceAttributesOffset attributes.
#if V8_TARGET_LITTLE_ENDIAN
  // Order instance type and bit field together such that they can be loaded
  // together as a 16-bit word with instance type in the lower 8 bits
  // regardless of endianness. Also provide endian-independent offset to that
  // 16-bit word.
  static const int kInstanceTypeOffset = kInstanceAttributesOffset + 0;
  static const int kBitFieldOffset = kInstanceAttributesOffset + 1;
#else
  static const int kBitFieldOffset = kInstanceAttributesOffset + 0;
  static const int kInstanceTypeOffset = kInstanceAttributesOffset + 1;
#endif
  static const int kInstanceTypeAndBitFieldOffset =
      kInstanceAttributesOffset + 0;
  static const int kBitField2Offset = kInstanceAttributesOffset + 2;
  static const int kUnusedPropertyFieldsOffset = kInstanceAttributesOffset + 3;

  STATIC_ASSERT(kInstanceTypeAndBitFieldOffset ==
                Internals::kMapInstanceTypeAndBitFieldOffset);

  // Bit positions for bit field.
  static const int kHasNonInstancePrototype = 0;
  static const int kIsHiddenPrototype = 1;
  static const int kHasNamedInterceptor = 2;
  static const int kHasIndexedInterceptor = 3;
  static const int kIsUndetectable = 4;
  static const int kIsObserved = 5;
  static const int kIsAccessCheckNeeded = 6;
  class FunctionWithPrototype : public BitField<bool, 7, 1> {};

  // Bit positions for bit field 2
  static const int kIsExtensible = 0;
  static const int kStringWrapperSafeForDefaultValueOf = 1;
  class IsPrototypeMapBits : public BitField<bool, 2, 1> {};
  class ElementsKindBits : public BitField<ElementsKind, 3, 5> {};

  // Derived values from bit field 2
  static const int8_t kMaximumBitField2FastElementValue = static_cast<int8_t>(
      (FAST_ELEMENTS + 1) << Map::ElementsKindBits::kShift) - 1;
  static const int8_t kMaximumBitField2FastSmiElementValue =
      static_cast<int8_t>((FAST_SMI_ELEMENTS + 1)
                          << Map::ElementsKindBits::kShift) - 1;
  static const int8_t kMaximumBitField2FastHoleyElementValue =
      static_cast<int8_t>((FAST_HOLEY_ELEMENTS + 1)
                          << Map::ElementsKindBits::kShift) - 1;
  static const int8_t kMaximumBitField2FastHoleySmiElementValue =
      static_cast<int8_t>((FAST_HOLEY_SMI_ELEMENTS + 1)
                          << Map::ElementsKindBits::kShift) - 1;

  typedef FixedBodyDescriptor<kPointerFieldsBeginOffset,
                              kPointerFieldsEndOffset,
                              kSize> BodyDescriptor;
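
  // Worked example (the concrete numbers depend on elements-kind.h and on
  // ElementsKindBits as declared above, so treat them as illustrative): with
  // ElementsKindBits::kShift == 3 and FAST_HOLEY_ELEMENTS == 3,
  // kMaximumBitField2FastHoleyElementValue is ((3 + 1) << 3) - 1 == 31, the
  // largest bit_field2 value whose ElementsKindBits field is still at most
  // FAST_HOLEY_ELEMENTS.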
  // Compares this map to another to see if they describe equivalent objects.
  // If |mode| is set to CLEAR_INOBJECT_PROPERTIES, |other| is treated as if
  // it had exactly zero inobject properties.
  // The "shared" flags of both this map and |other| are ignored.
  bool EquivalentToForNormalization(Map* other, PropertyNormalizationMode mode);

 private:
  static void ConnectElementsTransition(Handle<Map> parent, Handle<Map> child);
  static void ConnectTransition(Handle<Map> parent, Handle<Map> child,
                                Handle<Name> name, SimpleTransitionFlag flag);

  bool EquivalentToForTransition(Map* other);
  static Handle<Map> RawCopy(Handle<Map> map, int instance_size);
  static Handle<Map> ShareDescriptor(Handle<Map> map,
                                     Handle<DescriptorArray> descriptors,
                                     Descriptor* descriptor);
  static Handle<Map> CopyInstallDescriptors(
      Handle<Map> map, int new_descriptor,
      Handle<DescriptorArray> descriptors);
  static Handle<Map> CopyAddDescriptor(Handle<Map> map, Descriptor* descriptor,
                                       TransitionFlag flag);
  static Handle<Map> CopyReplaceDescriptors(
      Handle<Map> map, Handle<DescriptorArray> descriptors,
      TransitionFlag flag, MaybeHandle<Name> maybe_name,
      SimpleTransitionFlag simple_flag = FULL_TRANSITION);
  static Handle<Map> CopyReplaceDescriptor(Handle<Map> map,
                                           Handle<DescriptorArray> descriptors,
                                           Descriptor* descriptor, int index,
                                           TransitionFlag flag);

  static Handle<Map> CopyNormalized(Handle<Map> map,
                                    PropertyNormalizationMode mode);

  // Fires when the layout of an object with a leaf map changes.
  // This includes adding transitions to the leaf map or changing
  // the descriptor array.
  inline void NotifyLeafMapLayoutChange();

  static Handle<Map> TransitionElementsToSlow(Handle<Map> object,
                                              ElementsKind to_kind);

  // Zaps the contents of backing data structures. Note that the
  // heap verifier (i.e. VerifyMarkingVisitor) relies on zapping of objects
  // holding weak references when incremental marking is used, because it also
  // iterates over objects that are otherwise unreachable.
  // In general we only want to call these functions in release mode when
  // heap verification is turned on.
  void ZapPrototypeTransitions();
  void ZapTransitions();

  void DeprecateTransitionTree();
  void DeprecateTarget(Name* key, DescriptorArray* new_descriptors);

  Map* FindLastMatchMap(int verbatim, int length,
                        DescriptorArray* descriptors);

  void UpdateFieldType(int descriptor_number, Handle<Name> name,
                       Handle<HeapType> new_type);

  void PrintGeneralization(FILE* file,
                           const char* reason,
                           int modify_index,
                           int split,
                           int descriptors,
                           bool constant_to_field,
                           Representation old_representation,
                           Representation new_representation,
                           HeapType* old_field_type,
                           HeapType* new_field_type);

  static inline void SetPrototypeTransitions(
      Handle<Map> map, Handle<FixedArray> prototype_transitions);

  static Handle<Map> GetPrototypeTransition(Handle<Map> map,
                                            Handle<Object> prototype);
  static Handle<Map> PutPrototypeTransition(Handle<Map> map,
                                            Handle<Object> prototype,
                                            Handle<Map> target_map);

  static const int kFastPropertiesSoftLimit = 12;
  static const int kMaxFastProperties = 128;

  DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
};


// An abstract superclass, a marker class really, for simple structure classes.
// It doesn't carry much functionality but allows struct classes to be
// identified in the type system.
class Struct: public HeapObject {
 public:
  inline void InitializeBody(int object_size);
  DECLARE_CAST(Struct)
};


// A simple one-element struct, useful where smis need to be boxed.
class Box : public Struct {
 public:
  // [value]: the boxed contents.
  DECL_ACCESSORS(value, Object)

  DECLARE_CAST(Box)

  // Dispatched behavior.
  DECLARE_PRINTER(Box)
  DECLARE_VERIFIER(Box)

  static const int kValueOffset = HeapObject::kHeaderSize;
  static const int kSize = kValueOffset + kPointerSize;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(Box);
};


// Script describes a script which has been added to the VM.
class Script: public Struct {
 public:
  // Script types.
  enum Type {
    TYPE_NATIVE = 0,
    TYPE_EXTENSION = 1,
    TYPE_NORMAL = 2
  };

  // Script compilation types.
  enum CompilationType {
    COMPILATION_TYPE_HOST = 0,
    COMPILATION_TYPE_EVAL = 1
  };

  // Script compilation state.
  enum CompilationState {
    COMPILATION_STATE_INITIAL = 0,
    COMPILATION_STATE_COMPILED = 1
  };

  // [source]: the script source.
  DECL_ACCESSORS(source, Object)

  // [name]: the script name.
  DECL_ACCESSORS(name, Object)

  // [id]: the script id.
  DECL_ACCESSORS(id, Smi)

  // [line_offset]: script line offset in resource from where it was extracted.
  DECL_ACCESSORS(line_offset, Smi)

  // [column_offset]: script column offset in resource from where it was
  // extracted.
  DECL_ACCESSORS(column_offset, Smi)

  // [context_data]: context data for the context this script was compiled in.
  DECL_ACCESSORS(context_data, Object)

  // [wrapper]: the wrapper cache.
  DECL_ACCESSORS(wrapper, Foreign)

  // [type]: the script type.
  DECL_ACCESSORS(type, Smi)

  // [line_ends]: FixedArray of line ends positions.
  DECL_ACCESSORS(line_ends, Object)

  // [eval_from_shared]: for eval scripts the shared function info for the
  // function from which eval was called.
  DECL_ACCESSORS(eval_from_shared, Object)

  // [eval_from_instructions_offset]: the instruction offset, in the code of
  // the function from which eval was called, at which eval was called.
  DECL_ACCESSORS(eval_from_instructions_offset, Smi)

  // [flags]: Holds an exciting bitfield.
  DECL_ACCESSORS(flags, Smi)

  // [source_url]: sourceURL from magic comment.
  DECL_ACCESSORS(source_url, Object)

  // [source_mapping_url]: sourceMappingURL from magic comment.
  DECL_ACCESSORS(source_mapping_url, Object)

  // [compilation_type]: how the script was compiled. Encoded in the
  // 'flags' field.
  inline CompilationType compilation_type();
  inline void set_compilation_type(CompilationType type);

  // [compilation_state]: determines whether the script has already been
  // compiled. Encoded in the 'flags' field.
  inline CompilationState compilation_state();
  inline void set_compilation_state(CompilationState state);

  // [is_shared_cross_origin]: An opaque boolean set by the embedder via
  // ScriptOrigin, and used by the embedder to make decisions about the
  // script's level of privilege. V8 just passes this through. Encoded in
  // the 'flags' field.
  DECL_BOOLEAN_ACCESSORS(is_shared_cross_origin)

  DECLARE_CAST(Script)

  // If script source is an external string, check that the underlying
  // resource is accessible. Otherwise, always return true.
  inline bool HasValidSource();

  // Convert code position into column number.
  static int GetColumnNumber(Handle