Chromium Code Reviews
| Index: src/heap/heap.h |
| diff --git a/src/heap/heap.h b/src/heap/heap.h |
| index 15b047c62e553b83b7bed625b427724b4d383249..d8a00c99c8da0f2e498e620d0df54b3322628ad9 100644 |
| --- a/src/heap/heap.h |
| +++ b/src/heap/heap.h |
| @@ -577,6 +577,241 @@ enum ArrayStorageAllocationMode { |
| class Heap { |
| public: |
| + // Declare all the root indices. This defines the root list order. |
| + enum RootListIndex { |
| +#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex, |
| + STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION) |
| +#undef ROOT_INDEX_DECLARATION |
| + |
| +#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex, |
| + INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION) |
| +#undef STRING_INDEX_DECLARATION |
| + |
| +#define SYMBOL_INDEX_DECLARATION(name) k##name##RootIndex, |
| + PRIVATE_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION) |
| +#undef SYMBOL_INDEX_DECLARATION |
| + |
| +#define SYMBOL_INDEX_DECLARATION(name, varname, description) k##name##RootIndex, |
| + PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION) |
| +#undef SYMBOL_INDEX_DECLARATION |
| + |
| +// Utility type maps |
| +#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex, |
| + STRUCT_LIST(DECLARE_STRUCT_MAP) |
| +#undef DECLARE_STRUCT_MAP |
| + kStringTableRootIndex, |
| + |
| +#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex, |
| + SMI_ROOT_LIST(ROOT_INDEX_DECLARATION) |
| +#undef ROOT_INDEX_DECLARATION |
| + kRootListLength, |
| + kStrongRootListLength = kStringTableRootIndex, |
| + kSmiRootsStart = kStringTableRootIndex + 1 |
| + }; |
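The enum above is generated with V8's X-macro lists: each *_LIST macro invokes a
per-entry macro once per root, so the order of the list literally defines the
numeric indices. A minimal, self-contained sketch of the same pattern; the
DEMO_ROOT_LIST macro and its two entries are hypothetical stand-ins for
STRONG_ROOT_LIST:

  #define DEMO_ROOT_LIST(V)                     \
    V(Oddball, undefined_value, UndefinedValue) \
    V(Oddball, null_value, NullValue)

  enum DemoRootListIndex {
  #define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    DEMO_ROOT_LIST(ROOT_INDEX_DECLARATION)
  #undef ROOT_INDEX_DECLARATION
    // Expands to: kUndefinedValueRootIndex, kNullValueRootIndex,
    kDemoRootListLength
  };

  static_assert(kUndefinedValueRootIndex == 0 && kNullValueRootIndex == 1,
                "list order defines the root indices");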
| + |
| + // Indicates whether live bytes adjustment is triggered |
| + // - from within the GC code before sweeping started (SEQUENTIAL_TO_SWEEPER), |
| + // - from within the GC code after sweeping started (CONCURRENT_TO_SWEEPER), |
| + // - or from mutator code (CONCURRENT_TO_SWEEPER). |
| + enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER }; |
| + |
| + enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT }; |
| + |
| + enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT }; |
| + |
| + // ObjectStats are kept in two arrays, counts and sizes. Related stats are |
| + // stored in a contiguous linear buffer. Stats groups are stored one after |
| + // another. |
| + enum { |
| + FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1, |
| + FIRST_FIXED_ARRAY_SUB_TYPE = |
| + FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS, |
| + FIRST_CODE_AGE_SUB_TYPE = |
| + FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1, |
| + OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1 |
| + }; |
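Given that layout, the stat slot for a particular code kind, fixed-array
sub-type or code age sits at a fixed offset within the counts and sizes arrays.
A hedged sketch of the indexing this implies; the helper names are hypothetical
and assume the enum above is in scope (RecordObjectStats further down indexes
object_counts_ the same way for plain instance types):

  // Hypothetical helpers illustrating the linear buffer layout.
  inline size_t CodeKindStatIndex(int code_kind) {
    // 0 <= code_kind < Code::NUMBER_OF_KINDS
    return FIRST_CODE_KIND_SUB_TYPE + code_kind;
  }
  inline size_t CodeAgeStatIndex(int code_age) {
    // 0 <= code_age < Code::kCodeAgeCount + 1
    return FIRST_CODE_AGE_SUB_TYPE + code_age;
  }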
| + |
| + // Taking this lock prevents the GC from entering a phase that relocates |
| + // object references. |
| + class RelocationLock { |
| + public: |
| + explicit RelocationLock(Heap* heap) : heap_(heap) { |
| + heap_->relocation_mutex_.Lock(); |
| + } |
| + |
| + ~RelocationLock() { heap_->relocation_mutex_.Unlock(); } |
| + |
| + private: |
| + Heap* heap_; |
| + }; |
| + |
| + // An optional version of the above lock that can be used for some critical |
| + // sections on the mutator thread; only safe since the GC currently does not |
| + // do concurrent compaction. |
| + class OptionalRelocationLock { |
| + public: |
| + OptionalRelocationLock(Heap* heap, bool concurrent) |
| + : heap_(heap), concurrent_(concurrent) { |
| + if (concurrent_) heap_->relocation_mutex_.Lock(); |
| + } |
| + |
| + ~OptionalRelocationLock() { |
| + if (concurrent_) heap_->relocation_mutex_.Unlock(); |
| + } |
| + |
| + private: |
| + Heap* heap_; |
| + bool concurrent_; |
| + }; |
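Both classes are RAII guards over relocation_mutex_: the constructor acquires
the lock and the destructor releases it, so a stack-allocated instance pins
object addresses for exactly one scope. A hedged usage sketch (the surrounding
function is hypothetical):

  void TouchRawAddresses(Heap* heap, HeapObject* obj) {
    Heap::RelocationLock lock(heap);  // GC cannot relocate objects here
    // ... safe to cache and compare raw object addresses in this scope ...
  }  // lock destructor releases relocation_mutex_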
| + |
| + // Support for partial snapshots. After calling this we have a linear |
| + // space to write objects in each space. |
| + struct Chunk { |
| + uint32_t size; |
| + Address start; |
| + Address end; |
| + }; |
| + typedef List<Chunk> Reservation; |
| + |
| + static const intptr_t kMinimumOldGenerationAllocationLimit = |
| + 8 * (Page::kPageSize > MB ? Page::kPageSize : MB); |
| + |
| + static const int kInitalOldGenerationLimitFactor = 2; |
| + |
| +#if V8_OS_ANDROID |
| + // Don't apply pointer multiplier on Android since it has no swap space and |
| + // should instead adapt its heap size based on available physical memory. |
| + static const int kPointerMultiplier = 1; |
| +#else |
| + static const int kPointerMultiplier = i::kPointerSize / 4; |
| +#endif |
| + |
| + // The new space size has to be a power of 2. Sizes are in MB. |
| + static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier; |
| + static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier; |
| + static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier; |
| + static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier; |
| + |
| + // The old space size has to be a multiple of Page::kPageSize. |
| + // Sizes are in MB. |
| + static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier; |
| + static const int kMaxOldSpaceSizeMediumMemoryDevice = |
| + 256 * kPointerMultiplier; |
| + static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier; |
| + static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier; |
| + |
| + // The executable size has to be a multiple of Page::kPageSize. |
| + // Sizes are in MB. |
| + static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier; |
| + static const int kMaxExecutableSizeMediumMemoryDevice = |
| + 192 * kPointerMultiplier; |
| + static const int kMaxExecutableSizeHighMemoryDevice = |
| + 256 * kPointerMultiplier; |
| + static const int kMaxExecutableSizeHugeMemoryDevice = |
| + 256 * kPointerMultiplier; |
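Because the comments above give sizes in MB and kPointerMultiplier scales them
by word size, the effective limits double on 64-bit targets relative to 32-bit
(and stay unscaled on Android). A small worked example, assuming a 64-bit build
where i::kPointerSize == 8:

  #include <cstdio>

  int main() {
    const int kPointerSize = 8;                       // assumption: 64-bit build
    const int kPointerMultiplier = kPointerSize / 4;  // == 2
    std::printf("max semi-space, high-memory device: %d MB\n",
                8 * kPointerMultiplier);              // 16 MB
    std::printf("max old space, huge-memory device:  %d MB\n",
                700 * kPointerMultiplier);            // 1400 MB
    return 0;
  }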
| + |
| + static const int kTraceRingBufferSize = 512; |
| + static const int kStacktraceBufferSize = 512; |
| + |
| + // The growing-factor values are defined in heap.cc; in-class |
| + // initialization of a static const double is not portable C++. |
| + static const double kMinHeapGrowingFactor; |
| + static const double kMaxHeapGrowingFactor; |
| + static const double kMaxHeapGrowingFactorMemoryConstrained; |
| + static const double kMaxHeapGrowingFactorIdle; |
| + static const double kTargetMutatorUtilization; |
| + |
| + // Sloppy mode arguments object size. |
| + static const int kSloppyArgumentsObjectSize = |
| + JSObject::kHeaderSize + 2 * kPointerSize; |
| + |
| + // The strict mode arguments object has no callee, so it is smaller. |
| + static const int kStrictArgumentsObjectSize = |
| + JSObject::kHeaderSize + 1 * kPointerSize; |
| + |
| + // Indices for direct access into argument objects. |
| + static const int kArgumentsLengthIndex = 0; |
| + |
| + // callee is only valid in sloppy mode. |
| + static const int kArgumentsCalleeIndex = 1; |
| + |
| + static const int kNoGCFlags = 0; |
| + static const int kReduceMemoryFootprintMask = 1; |
| + static const int kAbortIncrementalMarkingMask = 2; |
| + static const int kFinalizeIncrementalMarkingMask = 4; |
| + |
| + // Making the heap iterable requires us to abort incremental marking. |
| + static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask; |
| + |
| + // The roots that have an index less than this are always in old space. |
| + static const int kOldSpaceRoots = 0x20; |
| + |
| + STATIC_ASSERT(kUndefinedValueRootIndex == |
| + Internals::kUndefinedValueRootIndex); |
| + STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex); |
| + STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex); |
| + STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex); |
| + STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex); |
| + |
| + // Calculates the maximum amount of filler that could be required by the |
| + // given alignment. |
| + static int GetMaximumFillToAlign(AllocationAlignment alignment); |
| + // Calculates the actual amount of filler required for a given address at the |
| + // given alignment. |
| + static int GetFillToAlign(Address address, AllocationAlignment alignment); |
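A minimal, self-contained sketch of what fill-to-align computes (not V8's
implementation, which also handles the kDoubleUnaligned case): the filler is
the distance from the address to the next boundary of the requested
power-of-two alignment.

  #include <cstdint>

  // Sketch: bytes of filler needed so that `address` becomes aligned.
  int GetFillToAlignSketch(std::uintptr_t address, std::uintptr_t alignment) {
    return static_cast<int>((alignment - (address & (alignment - 1))) &
                            (alignment - 1));
  }

  // e.g. GetFillToAlignSketch(0x1004, 8) == 4; for word-aligned addresses on a
  // 4-byte-word target, the maximum result for 8-byte alignment is 8 - 4 = 4.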
| + |
| + template <typename T> |
| + static inline bool IsOneByte(T t, int chars); |
| + |
| + // Callback function passed to Heap::Iterate etc. Copies an object if |
| + // necessary; the object might be promoted to an old space. The caller must |
| + // ensure the precondition that the object is (a) a heap object and (b) in |
| + // the heap's from space. |
| + static inline void ScavengePointer(HeapObject** p); |
| + static inline void ScavengeObject(HeapObject** p, HeapObject* object); |
| + |
| + // Slow part of scavenge object. |
| + static void ScavengeObjectSlow(HeapObject** p, HeapObject* object); |
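In other words, ScavengePointer is the per-slot entry point of the copying
collector: it loads the slot, copies or promotes the referent on first contact,
and rewrites the slot to point at the new location. A hedged sketch of a call
site (the visitor function is hypothetical):

  // Hypothetical pointer visitor run over roots and remembered slots during a
  // scavenge.
  void VisitSlot(Heap* heap, HeapObject** slot) {
    if (heap->InFromSpace(*slot)) {
      Heap::ScavengePointer(slot);  // *slot now points at the copy
    }
  }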
| + |
| + static void FatalProcessOutOfMemory(const char* location, |
| + bool take_snapshot = false); |
| + |
| + static bool RootIsImmortalImmovable(int root_index); |
| + |
| + // Checks whether the space is valid. |
| + static bool IsValidAllocationSpace(AllocationSpace space); |
| + |
| + // An object may have an AllocationSite associated with it through a trailing |
| + // AllocationMemento. Its feedback should be updated when objects are found |
| + // in the heap. |
| + static inline void UpdateAllocationSiteFeedback(HeapObject* object, |
| + ScratchpadSlotMode mode); |
| + |
| + // Generated code can embed direct references to non-writable roots if |
| + // they are in new space. |
| + static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index); |
| + |
| + // Zapping is needed for verify heap, and always done in debug builds. |
| + static inline bool ShouldZapGarbage() { |
| +#ifdef DEBUG |
| + return true; |
| +#else |
| +#ifdef VERIFY_HEAP |
| + return FLAG_verify_heap; |
| +#else |
| + return false; |
| +#endif |
| +#endif |
| + } |
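Zapping overwrites freed or uninitialized memory with a recognizable pattern so
that stale pointers fail fast under heap verification. A self-contained sketch
of the idea; the pattern and the helper are illustrative, not V8's actual
names:

  #include <cstddef>
  #include <cstdint>

  // Illustrative only: fill a block with a recognizable "zap" pattern.
  void ZapBlockSketch(void* start, std::size_t size_in_bytes) {
    const std::uint32_t kZapPattern = 0xdeadbeef;  // hypothetical pattern
    std::uint32_t* p = static_cast<std::uint32_t*>(start);
    for (std::size_t i = 0; i < size_in_bytes / sizeof(std::uint32_t); i++) {
      p[i] = kZapPattern;
    }
  }
  // Call sites would guard on Heap::ShouldZapGarbage().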
| + |
| + static double HeapGrowingFactor(double gc_speed, double mutator_speed); |
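This pairs with the growing-factor constants declared earlier in the class (V8
defines them in heap.cc; 1.1 minimum, 4.0 maximum, 0.97 target mutator
utilization). A hedged sketch of the heuristic those values imply: choose the
largest factor that keeps the mutator at the target utilization given observed
GC and mutator speeds, then clamp. The exact formula below is an assumption,
not the shipped code:

  double HeapGrowingFactorSketch(double gc_speed, double mutator_speed) {
    const double kMin = 1.1, kMax = 4.0, kTargetMu = 0.97;
    if (gc_speed == 0 || mutator_speed == 0) return kMax;  // no data yet
    const double speed_ratio = gc_speed / mutator_speed;
    // Factor at which GC work stays within (1 - kTargetMu) of total time;
    // degenerate cases fall back to the maximum factor.
    const double a = speed_ratio * (1 - kTargetMu);
    const double b = speed_ratio * (1 - kTargetMu) - kTargetMu;
    double factor = (b > 0) ? a / b : kMax;
    if (factor > kMax) factor = kMax;
    if (factor < kMin) factor = kMin;
    return factor;
  }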
| + |
| + // Copy block of memory from src to dst. Size of block should be aligned |
| + // by pointer size. |
| + static inline void CopyBlock(Address dst, Address src, int byte_size); |
| + |
| + // Optimized version of memmove for blocks with pointer size aligned sizes and |
| + // pointer size aligned addresses. |
| + static inline void MoveBlock(Address dst, Address src, int byte_size); |
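A minimal sketch of what the alignment contract buys: with pointer-size-aligned
addresses and sizes, the copy proceeds one word at a time with no byte-level
fix-up (the real inline definitions live in heap-inl.h):

  #include <cstdint>

  // Sketch only; assumes dst, src and byte_size are pointer-size aligned.
  void CopyBlockSketch(std::uintptr_t* dst, const std::uintptr_t* src,
                       int byte_size) {
    const int words = byte_size / static_cast<int>(sizeof(std::uintptr_t));
    for (int i = 0; i < words; i++) dst[i] = src[i];
  }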
| + |
| // Configure heap size in MB before setup. Return false if the heap has been |
| // set up already. |
| bool ConfigureHeap(int max_semi_space_size, int max_old_space_size, |
| @@ -668,6 +903,7 @@ class Heap { |
| OldSpace* code_space() { return code_space_; } |
| MapSpace* map_space() { return map_space_; } |
| LargeObjectSpace* lo_space() { return lo_space_; } |
| + |
| PagedSpace* paged_space(int idx) { |
| switch (idx) { |
| case OLD_SPACE: |
| @@ -682,6 +918,7 @@ class Heap { |
| } |
| return NULL; |
| } |
| + |
| Space* space(int idx) { |
| switch (idx) { |
| case NEW_SPACE: |
| @@ -721,30 +958,6 @@ class Heap { |
| return (CommittedOldGenerationMemory() + size) < MaxOldGenerationSize(); |
| } |
| - // Returns a deep copy of the JavaScript object. |
| - // Properties and elements are copied too. |
| - // Optionally takes an AllocationSite to be appended in an AllocationMemento. |
| - MUST_USE_RESULT AllocationResult |
| - CopyJSObject(JSObject* source, AllocationSite* site = NULL); |
| - |
| - // Calculates the maximum amount of filler that could be required by the |
| - // given alignment. |
| - static int GetMaximumFillToAlign(AllocationAlignment alignment); |
| - // Calculates the actual amount of filler required for a given address at the |
| - // given alignment. |
| - static int GetFillToAlign(Address address, AllocationAlignment alignment); |
| - |
| - // Creates a filler object and returns a heap object immediately after it. |
| - MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object, |
| - int filler_size); |
| - // Creates a filler object if needed for alignment and returns a heap object |
| - // immediately after it. If any space is left after the returned object, |
| - // another filler object is created so the over allocated memory is iterable. |
| - MUST_USE_RESULT HeapObject* AlignWithFiller(HeapObject* object, |
| - int object_size, |
| - int allocation_size, |
| - AllocationAlignment alignment); |
| - |
| // Clear the Instanceof cache (used when a prototype changes). |
| inline void ClearInstanceofCache(); |
| @@ -754,24 +967,10 @@ class Heap { |
| // FreeSpace objects have a null map after deserialization. Update the map. |
| void RepairFreeListsAfterDeserialization(); |
| - template <typename T> |
| - static inline bool IsOneByte(T t, int chars); |
| - |
| // Move len elements within a given array from src_index index to dst_index |
| // index. |
| void MoveElements(FixedArray* array, int dst_index, int src_index, int len); |
| - // Sloppy mode arguments object size. |
| - static const int kSloppyArgumentsObjectSize = |
| - JSObject::kHeaderSize + 2 * kPointerSize; |
| - // The strict mode arguments object has no callee, so it is smaller. |
| - static const int kStrictArgumentsObjectSize = |
| - JSObject::kHeaderSize + 1 * kPointerSize; |
| - // Indices for direct access into argument objects. |
| - static const int kArgumentsLengthIndex = 0; |
| - // callee is only valid in sloppy mode. |
| - static const int kArgumentsCalleeIndex = 1; |
| - |
| // Finalizes an external string by deleting the associated external |
| // data and clearing the resource pointer. |
| inline void FinalizeExternalString(String* string); |
| @@ -782,12 +981,6 @@ class Heap { |
| bool CanMoveObjectStart(HeapObject* object); |
| - // Indicates whether live bytes adjustment is triggered |
| - // - from within the GC code before sweeping started (SEQUENTIAL_TO_SWEEPER), |
| - // - from within the GC code after sweeping started (CONCURRENT_TO_SWEEPER), |
| - // - or from mutator code (CONCURRENT_TO_SWEEPER). |
| - enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER }; |
| - |
| // Maintain consistency of live bytes during incremental marking. |
| void AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode); |
| @@ -809,14 +1002,6 @@ class Heap { |
| AllocationSpace space, const char* gc_reason = NULL, |
| const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); |
| - static const int kNoGCFlags = 0; |
| - static const int kReduceMemoryFootprintMask = 1; |
| - static const int kAbortIncrementalMarkingMask = 2; |
| - static const int kFinalizeIncrementalMarkingMask = 4; |
| - |
| - // Making the heap iterable requires us to abort incremental marking. |
| - static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask; |
| - |
| // Invoked when GC was requested via the stack guard. |
| void HandleGCRequest(); |
| @@ -981,9 +1166,6 @@ class Heap { |
| bool InSpace(Address addr, AllocationSpace space); |
| bool InSpace(HeapObject* value, AllocationSpace space); |
| - // Checks whether the space is valid. |
| - static bool IsValidAllocationSpace(AllocationSpace space); |
| - |
| // Checks whether the given object is allowed to be migrated from it's |
| // current space into the given destination space. Used for debugging. |
| inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest); |
| @@ -1017,36 +1199,8 @@ class Heap { |
| return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]); |
| } |
| - static bool RootIsImmortalImmovable(int root_index); |
| void CheckHandleCount(); |
| -#ifdef VERIFY_HEAP |
| - // Verify the heap is in its normal state before or after a GC. |
| - void Verify(); |
| -#endif |
| - |
| -#ifdef DEBUG |
| - void Print(); |
| - void PrintHandles(); |
| - |
| - // Report heap statistics. |
| - void ReportHeapStatistics(const char* title); |
| - void ReportCodeStatistics(const char* title); |
| -#endif |
| - |
| - // Zapping is needed for verify heap, and always done in debug builds. |
| - static inline bool ShouldZapGarbage() { |
| -#ifdef DEBUG |
| - return true; |
| -#else |
| -#ifdef VERIFY_HEAP |
| - return FLAG_verify_heap; |
| -#else |
| - return false; |
| -#endif |
| -#endif |
| - } |
| - |
| // Number of "runtime allocations" done so far. |
| uint32_t allocations_count() { return allocations_count_; } |
| @@ -1060,6 +1214,7 @@ class Heap { |
| size_t object_count_last_gc(size_t index) { |
| return index < OBJECT_STATS_COUNT ? object_counts_last_time_[index] : 0; |
| } |
| + |
| size_t object_size_last_gc(size_t index) { |
| return index < OBJECT_STATS_COUNT ? object_sizes_last_time_[index] : 0; |
| } |
| @@ -1070,51 +1225,34 @@ class Heap { |
| // Write barrier support for address[start : start + len[ = o. |
| INLINE(void RecordWrites(Address address, int start, int len)); |
| - enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT }; |
| inline HeapState gc_state() { return gc_state_; } |
| inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; } |
| +#ifdef VERIFY_HEAP |
Hannes Payer (out of office), 2015/08/24 10:38:38:
Can we move these ifdefs to the end of the methods?

Michael Lippautz, 2015/08/24 11:14:27:
Done.
| + // Verify the heap is in its normal state before or after a GC. |
| + void Verify(); |
| +#endif |
| + |
| #ifdef DEBUG |
| void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; } |
| void TracePathToObjectFrom(Object* target, Object* root); |
| void TracePathToObject(Object* target); |
| void TracePathToGlobal(); |
| -#endif |
| - |
| - // Callback function passed to Heap::Iterate etc. Copies an object if |
| - // necessary; the object might be promoted to an old space. The caller must |
| - // ensure the precondition that the object is (a) a heap object and (b) in |
| - // the heap's from space. |
| - static inline void ScavengePointer(HeapObject** p); |
| - static inline void ScavengeObject(HeapObject** p, HeapObject* object); |
| - // Slow part of scavenge object. |
| - static void ScavengeObjectSlow(HeapObject** p, HeapObject* object); |
| + void Print(); |
| + void PrintHandles(); |
| - enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT }; |
| + // Report heap statistics. |
| + void ReportHeapStatistics(const char* title); |
| + void ReportCodeStatistics(const char* title); |
| +#endif |
| // If an object has an AllocationMemento trailing it, return it; otherwise |
| // return NULL. |
| inline AllocationMemento* FindAllocationMemento(HeapObject* object); |
| - // An object may have an AllocationSite associated with it through a trailing |
| - // AllocationMemento. Its feedback should be updated when objects are found |
| - // in the heap. |
| - static inline void UpdateAllocationSiteFeedback(HeapObject* object, |
| - ScratchpadSlotMode mode); |
| - |
| - // Support for partial snapshots. After calling this we have a linear |
| - // space to write objects in each space. |
| - struct Chunk { |
| - uint32_t size; |
| - Address start; |
| - Address end; |
| - }; |
| - |
| - typedef List<Chunk> Reservation; |
| - |
| // Returns false if not able to reserve. |
| bool ReserveSpace(Reservation* reservations); |
| @@ -1142,54 +1280,6 @@ class Heap { |
| return max_old_generation_size_ - PromotedTotalSize(); |
| } |
| - static const intptr_t kMinimumOldGenerationAllocationLimit = |
| - 8 * (Page::kPageSize > MB ? Page::kPageSize : MB); |
| - |
| - static const int kInitalOldGenerationLimitFactor = 2; |
| - |
| -#if V8_OS_ANDROID |
| - // Don't apply pointer multiplier on Android since it has no swap space and |
| - // should instead adapt its heap size based on available physical memory. |
| - static const int kPointerMultiplier = 1; |
| -#else |
| - static const int kPointerMultiplier = i::kPointerSize / 4; |
| -#endif |
| - |
| - // The new space size has to be a power of 2. Sizes are in MB. |
| - static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier; |
| - static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier; |
| - static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier; |
| - static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier; |
| - |
| - // The old space size has to be a multiple of Page::kPageSize. |
| - // Sizes are in MB. |
| - static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier; |
| - static const int kMaxOldSpaceSizeMediumMemoryDevice = |
| - 256 * kPointerMultiplier; |
| - static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier; |
| - static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier; |
| - |
| - // The executable size has to be a multiple of Page::kPageSize. |
| - // Sizes are in MB. |
| - static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier; |
| - static const int kMaxExecutableSizeMediumMemoryDevice = |
| - 192 * kPointerMultiplier; |
| - static const int kMaxExecutableSizeHighMemoryDevice = |
| - 256 * kPointerMultiplier; |
| - static const int kMaxExecutableSizeHugeMemoryDevice = |
| - 256 * kPointerMultiplier; |
| - |
| - static const int kTraceRingBufferSize = 512; |
| - static const int kStacktraceBufferSize = 512; |
| - |
| - static const double kMinHeapGrowingFactor; |
| - static const double kMaxHeapGrowingFactor; |
| - static const double kMaxHeapGrowingFactorMemoryConstrained; |
| - static const double kMaxHeapGrowingFactorIdle; |
| - static const double kTargetMutatorUtilization; |
| - |
| - static double HeapGrowingFactor(double gc_speed, double mutator_speed); |
| - |
| // Calculates the allocation limit based on a given growing factor and a |
| // given old generation size. |
| intptr_t CalculateOldGenerationAllocationLimit(double factor, |
| @@ -1218,50 +1308,8 @@ class Heap { |
| double MonotonicallyIncreasingTimeInMs(); |
| - // Declare all the root indices. This defines the root list order. |
| - enum RootListIndex { |
| -#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex, |
| - STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION) |
| -#undef ROOT_INDEX_DECLARATION |
| - |
| -#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex, |
| - INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION) |
| -#undef STRING_INDEX_DECLARATION |
| - |
| -#define SYMBOL_INDEX_DECLARATION(name) k##name##RootIndex, |
| - PRIVATE_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION) |
| -#undef SYMBOL_INDEX_DECLARATION |
| - |
| -#define SYMBOL_INDEX_DECLARATION(name, varname, description) k##name##RootIndex, |
| - PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION) |
| -#undef SYMBOL_INDEX_DECLARATION |
| - |
| -// Utility type maps |
| -#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex, |
| - STRUCT_LIST(DECLARE_STRUCT_MAP) |
| -#undef DECLARE_STRUCT_MAP |
| - kStringTableRootIndex, |
| - |
| -#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex, |
| - SMI_ROOT_LIST(ROOT_INDEX_DECLARATION) |
| -#undef ROOT_INDEX_DECLARATION |
| - kRootListLength, |
| - kStrongRootListLength = kStringTableRootIndex, |
| - kSmiRootsStart = kStringTableRootIndex + 1 |
| - }; |
| - |
| Object* root(RootListIndex index) { return roots_[index]; } |
| - STATIC_ASSERT(kUndefinedValueRootIndex == |
| - Internals::kUndefinedValueRootIndex); |
| - STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex); |
| - STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex); |
| - STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex); |
| - STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex); |
| - |
| - // Generated code can embed direct references to non-writable roots if |
| - // they are in new space. |
| - static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index); |
| // Generated code can treat direct references to this root as constant. |
| bool RootCanBeTreatedAsConstant(RootListIndex root_index); |
| @@ -1273,14 +1321,6 @@ class Heap { |
| void RecordStats(HeapStats* stats, bool take_snapshot = false); |
| - // Copy block of memory from src to dst. Size of block should be aligned |
| - // by pointer size. |
| - static inline void CopyBlock(Address dst, Address src, int byte_size); |
| - |
| - // Optimized version of memmove for blocks with pointer size aligned sizes and |
| - // pointer size aligned addresses. |
| - static inline void MoveBlock(Address dst, Address src, int byte_size); |
| - |
| // Check new space expansion criteria and expand semispaces if it was hit. |
| void CheckNewSpaceExpansionCriteria(); |
| @@ -1433,9 +1473,6 @@ class Heap { |
| // around a GC). |
| inline void CompletelyClearInstanceofCache(); |
| - // The roots that have an index less than this are always in old space. |
| - static const int kOldSpaceRoots = 0x20; |
| - |
| inline uint32_t HashSeed(); |
| inline Smi* NextScriptId(); |
| @@ -1468,18 +1505,6 @@ class Heap { |
| return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0; |
| } |
| - // ObjectStats are kept in two arrays, counts and sizes. Related stats are |
| - // stored in a contiguous linear buffer. Stats groups are stored one after |
| - // another. |
| - enum { |
| - FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1, |
| - FIRST_FIXED_ARRAY_SUB_TYPE = |
| - FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS, |
| - FIRST_CODE_AGE_SUB_TYPE = |
| - FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1, |
| - OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1 |
| - }; |
| - |
| void RecordObjectStats(InstanceType type, size_t size) { |
| DCHECK(type <= LAST_TYPE); |
| object_counts_[type]++; |
| @@ -1515,39 +1540,6 @@ class Heap { |
| void RegisterStrongRoots(Object** start, Object** end); |
| void UnregisterStrongRoots(Object** start); |
| - // Taking this lock prevents the GC from entering a phase that relocates |
| - // object references. |
| - class RelocationLock { |
| - public: |
| - explicit RelocationLock(Heap* heap) : heap_(heap) { |
| - heap_->relocation_mutex_.Lock(); |
| - } |
| - |
| - ~RelocationLock() { heap_->relocation_mutex_.Unlock(); } |
| - |
| - private: |
| - Heap* heap_; |
| - }; |
| - |
| - // An optional version of the above lock that can be used for some critical |
| - // sections on the mutator thread; only safe since the GC currently does not |
| - // do concurrent compaction. |
| - class OptionalRelocationLock { |
| - public: |
| - OptionalRelocationLock(Heap* heap, bool concurrent) |
| - : heap_(heap), concurrent_(concurrent) { |
| - if (concurrent_) heap_->relocation_mutex_.Lock(); |
| - } |
| - |
| - ~OptionalRelocationLock() { |
| - if (concurrent_) heap_->relocation_mutex_.Unlock(); |
| - } |
| - |
| - private: |
| - Heap* heap_; |
| - bool concurrent_; |
| - }; |
| - |
| void AddWeakObjectToCodeDependency(Handle<HeapObject> obj, |
| Handle<DependentCode> dep); |
| @@ -1555,9 +1547,6 @@ class Heap { |
| void AddRetainedMap(Handle<Map> map); |
| - static void FatalProcessOutOfMemory(const char* location, |
| - bool take_snapshot = false); |
| - |
| // This event is triggered after successful allocation of a new object made |
| // by runtime. Allocations of target space for object evacuation do not |
| // trigger the event. In order to track ALL allocations one must turn off |
| @@ -1599,19 +1588,98 @@ class Heap { |
| bool ShouldOptimizeForMemoryUsage() { return optimize_for_memory_usage_; } |
| - private: |
| - static const int kInitialStringTableSize = 2048; |
| - static const int kInitialEvalCacheSize = 64; |
| - static const int kInitialNumberStringCacheSize = 256; |
| + // Returns a deep copy of the JavaScript object. |
| + // Properties and elements are copied too. |
| + // Optionally takes an AllocationSite to be appended in an AllocationMemento. |
| + MUST_USE_RESULT AllocationResult CopyJSObject(JSObject* source, |
| + AllocationSite* site = NULL); |
| - Heap(); |
| + // Creates a filler object and returns a heap object immediately after it. |
| + MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object, |
| + int filler_size); |
| + // Creates a filler object if needed for alignment and returns a heap object |
| + // immediately after it. If any space is left after the returned object, |
| + // another filler object is created so the over allocated memory is iterable. |
| + MUST_USE_RESULT HeapObject* AlignWithFiller(HeapObject* object, |
| + int object_size, |
| + int allocation_size, |
| + AllocationAlignment alignment); |
| - int current_gc_flags() { return current_gc_flags_; } |
| - void set_current_gc_flags(int flags) { |
| - current_gc_flags_ = flags; |
| - DCHECK(!ShouldFinalizeIncrementalMarking() || |
| - !ShouldAbortIncrementalMarking()); |
| - } |
| + private: |
| + struct StrongRootsList; |
| + |
| + struct StringTypeTable { |
| + InstanceType type; |
| + int size; |
| + RootListIndex index; |
| + }; |
| + |
| + struct ConstantStringTable { |
| + const char* contents; |
| + RootListIndex index; |
| + }; |
| + |
| + struct StructTable { |
| + InstanceType type; |
| + int size; |
| + RootListIndex index; |
| + }; |
| + |
| + struct GCCallbackPair { |
| + GCCallbackPair(v8::Isolate::GCCallback callback, GCType gc_type, |
| + bool pass_isolate) |
| + : callback(callback), gc_type(gc_type), pass_isolate(pass_isolate) {} |
| + |
| + bool operator==(const GCCallbackPair& other) const { |
| + return other.callback == callback; |
| + } |
| + |
| + v8::Isolate::GCCallback callback; |
| + GCType gc_type; |
| + bool pass_isolate; |
| + }; |
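These pairs back the embedder-facing callback lists (gc_prologue_callbacks_ and
gc_epilogue_callbacks_ below). A hedged sketch of how an embedder ends up in
such a list through the public v8::Isolate API; the callback body is
hypothetical:

  #include "v8.h"

  // Matches the v8::Isolate::GCCallback signature stored in GCCallbackPair.
  void OnGCPrologue(v8::Isolate* isolate, v8::GCType type,
                    v8::GCCallbackFlags flags) {
    // e.g. start a trace span for this collection
  }

  // Registration, filtered to full GCs:
  //   isolate->AddGCPrologueCallback(OnGCPrologue, v8::kGCTypeMarkSweepCompact);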
| + |
| + static const int kInitialStringTableSize = 2048; |
| + static const int kInitialEvalCacheSize = 64; |
| + static const int kInitialNumberStringCacheSize = 256; |
| + |
| + static const int kRememberedUnmappedPages = 128; |
| + |
| + static const StringTypeTable string_type_table[]; |
| + static const ConstantStringTable constant_string_table[]; |
| + static const StructTable struct_table[]; |
| + |
| + static const int kYoungSurvivalRateHighThreshold = 90; |
| + static const int kYoungSurvivalRateAllowedDeviation = 15; |
| + static const int kOldSurvivalRateLowThreshold = 10; |
| + |
| + static const int kMaxMarkCompactsInIdleRound = 7; |
| + static const int kIdleScavengeThreshold = 5; |
| + |
| + static const int kAllocationSiteScratchpadSize = 256; |
| + |
| + Heap(); |
| + |
| + static String* UpdateNewSpaceReferenceInExternalStringTableEntry( |
| + Heap* heap, Object** pointer); |
| + |
| + static void ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page, |
| + StoreBufferEvent event); |
| + |
| + // Selects the proper allocation space depending on the given object |
| + // size and pretenuring decision. |
| + static AllocationSpace SelectSpace(int object_size, PretenureFlag pretenure) { |
| + if (object_size > Page::kMaxRegularHeapObjectSize) return LO_SPACE; |
| + return (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE; |
| + } |
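So a small NOT_TENURED allocation lands in NEW_SPACE, a small TENURED one in
OLD_SPACE, and anything above Page::kMaxRegularHeapObjectSize in LO_SPACE
regardless of the pretenuring decision:

  // SelectSpace(32, NOT_TENURED)  == NEW_SPACE
  // SelectSpace(32, TENURED)      == OLD_SPACE
  // SelectSpace(Page::kMaxRegularHeapObjectSize + 1, NOT_TENURED) == LO_SPACE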
| + |
| + int current_gc_flags() { return current_gc_flags_; } |
| + |
| + void set_current_gc_flags(int flags) { |
| + current_gc_flags_ = flags; |
| + DCHECK(!ShouldFinalizeIncrementalMarking() || |
| + !ShouldAbortIncrementalMarking()); |
| + } |
| inline bool ShouldReduceMemory() const { |
| return current_gc_flags_ & kReduceMemoryFootprintMask; |
| @@ -1625,265 +1693,234 @@ class Heap { |
| return current_gc_flags_ & kFinalizeIncrementalMarkingMask; |
| } |
| - // Allocates a JS Map in the heap. |
| - MUST_USE_RESULT AllocationResult |
| - AllocateMap(InstanceType instance_type, int instance_size, |
| - ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND); |
| - |
| - // Allocates and initializes a new JavaScript object based on a |
| - // constructor. |
| - // If allocation_site is non-null, then a memento is emitted after the object |
| - // that points to the site. |
| - MUST_USE_RESULT AllocationResult |
| - AllocateJSObject(JSFunction* constructor, |
| - PretenureFlag pretenure = NOT_TENURED, |
| - AllocationSite* allocation_site = NULL); |
| - |
| - // Allocates and initializes a new JavaScript object based on a map. |
| - // Passing an allocation site means that a memento will be created that |
| - // points to the site. |
| - MUST_USE_RESULT AllocationResult |
| - AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED, |
| - AllocationSite* allocation_site = NULL); |
| - |
| - // Allocates a HeapNumber from value. |
| - MUST_USE_RESULT AllocationResult |
| - AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE, |
| - PretenureFlag pretenure = NOT_TENURED); |
| +#define ROOT_ACCESSOR(type, name, camel_name) \ |
| + inline void set_##name(type* value); |
| + ROOT_LIST(ROOT_ACCESSOR) |
| +#undef ROOT_ACCESSOR |
| -// Allocates SIMD values from the given lane values. |
| -#define SIMD_ALLOCATE_DECLARATION(TYPE, Type, type, lane_count, lane_type) \ |
| - AllocationResult Allocate##Type(lane_type lanes[lane_count], \ |
| - PretenureFlag pretenure = NOT_TENURED); |
| - SIMD128_TYPES(SIMD_ALLOCATE_DECLARATION) |
| -#undef SIMD_ALLOCATE_DECLARATION |
| + // Code that should be run before and after each GC. Includes some |
| + // reporting/verification activities when compiled with DEBUG set. |
| + void GarbageCollectionPrologue(); |
| + void GarbageCollectionEpilogue(); |
| - // Allocates a byte array of the specified length |
| - MUST_USE_RESULT AllocationResult |
| - AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED); |
| + void PreprocessStackTraces(); |
| - // Allocates a bytecode array with given contents. |
| - MUST_USE_RESULT AllocationResult |
| - AllocateBytecodeArray(int length, const byte* raw_bytecodes, |
| - int frame_size); |
| + // Pretenuring decisions are made based on feedback collected during new |
| + // space evacuation. Note that between feedback collection and calling this |
| + // method, objects in old space must not move. |
| + // Right now we only process pretenuring feedback in high promotion mode. |
| + bool ProcessPretenuringFeedback(); |
| - // Copy the code and scope info part of the code object, but insert |
| - // the provided data as the relocation information. |
| - MUST_USE_RESULT AllocationResult |
| - CopyCode(Code* code, Vector<byte> reloc_info); |
| + // Checks whether a global GC is necessary |
| + GarbageCollector SelectGarbageCollector(AllocationSpace space, |
| + const char** reason); |
| - MUST_USE_RESULT AllocationResult CopyCode(Code* code); |
| + // Make sure there is a filler value behind the top of the new space |
| + // so that the GC does not confuse some unintialized/stale memory |
| + // with the allocation memento of the object at the top |
| + void EnsureFillerObjectAtTop(); |
| - // Allocates a fixed array initialized with undefined values |
| - MUST_USE_RESULT AllocationResult |
| - AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED); |
| + // Ensure that we have swept all spaces in such a way that we can iterate |
| + // over all objects. May cause a GC. |
| + void MakeHeapIterable(); |
| - // The amount of external memory registered through the API kept alive |
| - // by global handles |
| - int64_t amount_of_external_allocated_memory_; |
| + // Performs garbage collection operation. |
| + // Returns whether there is a chance that another major GC could |
| + // collect more garbage. |
| + bool CollectGarbage( |
| + GarbageCollector collector, const char* gc_reason, |
| + const char* collector_reason, |
| + const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); |
| - // Caches the amount of external memory registered at the last global gc. |
| - int64_t amount_of_external_allocated_memory_at_last_global_gc_; |
| + // Performs garbage collection |
| + // Returns whether there is a chance another major GC could |
| + // collect more garbage. |
| + bool PerformGarbageCollection( |
| + GarbageCollector collector, |
| + const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); |
| - // This can be calculated directly from a pointer to the heap; however, it is |
| - // more expedient to get at the isolate directly from within Heap methods. |
| - Isolate* isolate_; |
| + inline void UpdateOldSpaceLimits(); |
| - Object* roots_[kRootListLength]; |
| + // Initializes a JSObject based on its map. |
| + void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties, |
| + Map* map); |
| + void InitializeAllocationMemento(AllocationMemento* memento, |
| + AllocationSite* allocation_site); |
| - size_t code_range_size_; |
| - int reserved_semispace_size_; |
| - int max_semi_space_size_; |
| - int initial_semispace_size_; |
| - int target_semispace_size_; |
| - intptr_t max_old_generation_size_; |
| - intptr_t initial_old_generation_size_; |
| - bool old_generation_size_configured_; |
| - intptr_t max_executable_size_; |
| - intptr_t maximum_committed_; |
| + bool CreateInitialMaps(); |
| + void CreateInitialObjects(); |
| - // For keeping track of how much data has survived |
| - // scavenge since last new space expansion. |
| - int survived_since_last_expansion_; |
| + // These two Create*EntryStub functions are here and forced to not be inlined |
| + // because of a gcc-4.4 bug that assigns wrong vtable entries. |
| + NO_INLINE(void CreateJSEntryStub()); |
| + NO_INLINE(void CreateJSConstructEntryStub()); |
| - // ... and since the last scavenge. |
| - int survived_last_scavenge_; |
| + void CreateFixedStubs(); |
| - int always_allocate_scope_depth_; |
| + HeapObject* DoubleAlignForDeserialization(HeapObject* object, int size); |
| - // For keeping track of context disposals. |
| - int contexts_disposed_; |
| + // Performs a minor collection in new generation. |
| + void Scavenge(); |
| - int global_ic_age_; |
| + // Commits from space if it is uncommitted. |
| + void EnsureFromSpaceIsCommitted(); |
| - int scan_on_scavenge_pages_; |
| + // Uncommit unused semi space. |
| + bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); } |
| - NewSpace new_space_; |
| - OldSpace* old_space_; |
| - OldSpace* code_space_; |
| - MapSpace* map_space_; |
| - LargeObjectSpace* lo_space_; |
| - HeapState gc_state_; |
| - int gc_post_processing_depth_; |
| - Address new_space_top_after_last_gc_; |
| + // Fill in bogus values in from space |
| + void ZapFromSpace(); |
| - // Returns the amount of external memory registered since last global gc. |
| - int64_t PromotedExternalMemorySize(); |
| + Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front); |
| - // How many "runtime allocations" happened. |
| - uint32_t allocations_count_; |
| + // Performs a major collection in the whole heap. |
| + void MarkCompact(); |
| - // Running hash over allocations performed. |
| - uint32_t raw_allocations_hash_; |
| + // Code to be run before and after mark-compact. |
| + void MarkCompactPrologue(); |
| + void MarkCompactEpilogue(); |
| - // Countdown counter, dumps allocation hash when 0. |
| - uint32_t dump_allocations_hash_countdown_; |
| + void ProcessNativeContexts(WeakObjectRetainer* retainer); |
| + void ProcessAllocationSites(WeakObjectRetainer* retainer); |
| - // How many mark-sweep collections happened. |
| - unsigned int ms_count_; |
| + // Deopts all code that contains allocation instructions which are tenured or |
| + // not tenured. Moreover, it clears the pretenuring allocation site statistics. |
| + void ResetAllAllocationSitesDependentCode(PretenureFlag flag); |
| - // How many gc happened. |
| - unsigned int gc_count_; |
| + // Evaluates local pretenuring for the old space and calls |
| + // ResetAllTenuredAllocationSitesDependentCode if too many objects died in |
| + // the old space. |
| + void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc); |
| - // For post mortem debugging. |
| - static const int kRememberedUnmappedPages = 128; |
| - int remembered_unmapped_pages_index_; |
| - Address remembered_unmapped_pages_[kRememberedUnmappedPages]; |
| + // Called on heap tear-down. Frees all remaining ArrayBuffer backing stores. |
| + void TearDownArrayBuffers(); |
| -#define ROOT_ACCESSOR(type, name, camel_name) \ |
| - inline void set_##name(type* value); |
| - ROOT_LIST(ROOT_ACCESSOR) |
| -#undef ROOT_ACCESSOR |
| + // These correspond to the non-Helper versions. |
| + void RegisterNewArrayBufferHelper(std::map<void*, size_t>& live_buffers, |
| + void* data, size_t length); |
| + void UnregisterArrayBufferHelper( |
| + std::map<void*, size_t>& live_buffers, |
| + std::map<void*, size_t>& not_yet_discovered_buffers, void* data); |
| + void RegisterLiveArrayBufferHelper( |
| + std::map<void*, size_t>& not_yet_discovered_buffers, void* data); |
| + size_t FreeDeadArrayBuffersHelper( |
| + Isolate* isolate, std::map<void*, size_t>& live_buffers, |
| + std::map<void*, size_t>& not_yet_discovered_buffers); |
| + void TearDownArrayBuffersHelper( |
| + Isolate* isolate, std::map<void*, size_t>& live_buffers, |
| + std::map<void*, size_t>& not_yet_discovered_buffers); |
| -#ifdef DEBUG |
| - // If the --gc-interval flag is set to a positive value, this |
| - // variable holds the value indicating the number of allocations |
| - // remain until the next failure and garbage collection. |
| - int allocation_timeout_; |
| -#endif // DEBUG |
| + // Record statistics before and after garbage collection. |
| + void ReportStatisticsBeforeGC(); |
| + void ReportStatisticsAfterGC(); |
| - // Limit that triggers a global GC on the next (normally caused) GC. This |
| - // is checked when we have already decided to do a GC to help determine |
| - // which collector to invoke, before expanding a paged space in the old |
| - // generation and on every allocation in large object space. |
| - intptr_t old_generation_allocation_limit_; |
| + // Creates and installs the full-sized number string cache. |
| + int FullSizeNumberStringCacheLength(); |
| + // Flush the number to string cache. |
| + void FlushNumberStringCache(); |
| - // Indicates that an allocation has failed in the old generation since the |
| - // last GC. |
| - bool old_gen_exhausted_; |
| + // Sets used allocation sites entries to undefined. |
| + void FlushAllocationSitesScratchpad(); |
| - // Indicates that memory usage is more important than latency. |
| - // TODO(ulan): Merge it with memory reducer once chromium:490559 is fixed. |
| - bool optimize_for_memory_usage_; |
| + // Initializes the allocation sites scratchpad with undefined values. |
| + void InitializeAllocationSitesScratchpad(); |
| - // Indicates that inline bump-pointer allocation has been globally disabled |
| - // for all spaces. This is used to disable allocations in generated code. |
| - bool inline_allocation_disabled_; |
| + // Adds an allocation site to the scratchpad if there is space left. |
| + void AddAllocationSiteToScratchpad(AllocationSite* site, |
| + ScratchpadSlotMode mode); |
| - // Weak list heads, threaded through the objects. |
| - // List heads are initialized lazily and contain the undefined_value at start. |
| - Object* native_contexts_list_; |
| - Object* allocation_sites_list_; |
| + void UpdateSurvivalStatistics(int start_new_space_size); |
| - // List of encountered weak collections (JSWeakMap and JSWeakSet) during |
| - // marking. It is initialized during marking, destroyed after marking and |
| - // contains Smi(0) while marking is not active. |
| - Object* encountered_weak_collections_; |
| + // TODO(hpayer): Allocation site pretenuring may make this method obsolete. |
| + // Re-visit incremental marking heuristics. |
| + bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; } |
| - Object* encountered_weak_cells_; |
| + void ConfigureInitialOldGenerationSize(); |
| - StoreBufferRebuilder store_buffer_rebuilder_; |
| + void SelectScavengingVisitorsTable(); |
| - struct StringTypeTable { |
| - InstanceType type; |
| - int size; |
| - RootListIndex index; |
| - }; |
| + bool HasLowYoungGenerationAllocationRate(); |
| + bool HasLowOldGenerationAllocationRate(); |
| + double YoungGenerationMutatorUtilization(); |
| + double OldGenerationMutatorUtilization(); |
| - struct ConstantStringTable { |
| - const char* contents; |
| - RootListIndex index; |
| - }; |
| + void ReduceNewSpaceSize(); |
| - struct StructTable { |
| - InstanceType type; |
| - int size; |
| - RootListIndex index; |
| - }; |
| + bool TryFinalizeIdleIncrementalMarking( |
| + double idle_time_in_ms, size_t size_of_objects, |
| + size_t mark_compact_speed_in_bytes_per_ms); |
| - static const StringTypeTable string_type_table[]; |
| - static const ConstantStringTable constant_string_table[]; |
| - static const StructTable struct_table[]; |
| + GCIdleTimeHandler::HeapState ComputeHeapState(); |
| - struct GCCallbackPair { |
| - GCCallbackPair(v8::Isolate::GCCallback callback, GCType gc_type, |
| - bool pass_isolate) |
| - : callback(callback), gc_type(gc_type), pass_isolate(pass_isolate) {} |
| + bool PerformIdleTimeAction(GCIdleTimeAction action, |
| + GCIdleTimeHandler::HeapState heap_state, |
| + double deadline_in_ms); |
| - bool operator==(const GCCallbackPair& other) const { |
| - return other.callback == callback; |
| - } |
| + void IdleNotificationEpilogue(GCIdleTimeAction action, |
| + GCIdleTimeHandler::HeapState heap_state, |
| + double start_ms, double deadline_in_ms); |
| + void CheckAndNotifyBackgroundIdleNotification(double idle_time_in_ms, |
| + double now_ms); |
| - v8::Isolate::GCCallback callback; |
| - GCType gc_type; |
| - bool pass_isolate; |
| - }; |
| + void ClearObjectStats(bool clear_last_time_stats = false); |
| - List<GCCallbackPair> gc_epilogue_callbacks_; |
| - List<GCCallbackPair> gc_prologue_callbacks_; |
| + inline void UpdateAllocationsHash(HeapObject* object); |
| + inline void UpdateAllocationsHash(uint32_t value); |
| + inline void PrintAlloctionsHash(); |
| - // Code that should be run before and after each GC. Includes some |
| - // reporting/verification activities when compiled with DEBUG set. |
| - void GarbageCollectionPrologue(); |
| - void GarbageCollectionEpilogue(); |
| + void AddToRingBuffer(const char* string); |
| + void GetFromRingBuffer(char* buffer); |
| - void PreprocessStackTraces(); |
| + // Allocates a JS Map in the heap. |
| + MUST_USE_RESULT AllocationResult |
| + AllocateMap(InstanceType instance_type, int instance_size, |
| + ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND); |
| - // Pretenuring decisions are made based on feedback collected during new |
| - // space evacuation. Note that between feedback collection and calling this |
| - // method, objects in old space must not move. |
| - // Right now we only process pretenuring feedback in high promotion mode. |
| - bool ProcessPretenuringFeedback(); |
| + // Allocates and initializes a new JavaScript object based on a |
| + // constructor. |
| + // If allocation_site is non-null, then a memento is emitted after the object |
| + // that points to the site. |
| + MUST_USE_RESULT AllocationResult AllocateJSObject( |
| + JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED, |
| + AllocationSite* allocation_site = NULL); |
| - // Checks whether a global GC is necessary |
| - GarbageCollector SelectGarbageCollector(AllocationSpace space, |
| - const char** reason); |
| + // Allocates and initializes a new JavaScript object based on a map. |
| + // Passing an allocation site means that a memento will be created that |
| + // points to the site. |
| + MUST_USE_RESULT AllocationResult |
| + AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED, |
| + AllocationSite* allocation_site = NULL); |
| - // Make sure there is a filler value behind the top of the new space |
| - // so that the GC does not confuse some unintialized/stale memory |
| - // with the allocation memento of the object at the top |
| - void EnsureFillerObjectAtTop(); |
| + // Allocates a HeapNumber from value. |
| + MUST_USE_RESULT AllocationResult |
| + AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE, |
| + PretenureFlag pretenure = NOT_TENURED); |
| - // Ensure that we have swept all spaces in such a way that we can iterate |
| - // over all objects. May cause a GC. |
| - void MakeHeapIterable(); |
| +// Allocates SIMD values from the given lane values. |
| +#define SIMD_ALLOCATE_DECLARATION(TYPE, Type, type, lane_count, lane_type) \ |
| + AllocationResult Allocate##Type(lane_type lanes[lane_count], \ |
| + PretenureFlag pretenure = NOT_TENURED); |
| + SIMD128_TYPES(SIMD_ALLOCATE_DECLARATION) |
| +#undef SIMD_ALLOCATE_DECLARATION |
| - // Performs garbage collection operation. |
| - // Returns whether there is a chance that another major GC could |
| - // collect more garbage. |
| - bool CollectGarbage( |
| - GarbageCollector collector, const char* gc_reason, |
| - const char* collector_reason, |
| - const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); |
| + // Allocates a byte array of the specified length |
| + MUST_USE_RESULT AllocationResult |
| + AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED); |
| - // Performs garbage collection |
| - // Returns whether there is a chance another major GC could |
| - // collect more garbage. |
| - bool PerformGarbageCollection( |
| - GarbageCollector collector, |
| - const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); |
| + // Allocates a bytecode array with given contents. |
| + MUST_USE_RESULT AllocationResult |
| + AllocateBytecodeArray(int length, const byte* raw_bytecodes, int frame_size); |
| - inline void UpdateOldSpaceLimits(); |
| + // Copy the code and scope info part of the code object, but insert |
| + // the provided data as the relocation information. |
| + MUST_USE_RESULT AllocationResult CopyCode(Code* code, |
| + Vector<byte> reloc_info); |
| - // Selects the proper allocation space depending on the given object |
| - // size and pretenuring decision. |
| - static AllocationSpace SelectSpace(int object_size, |
| - PretenureFlag pretenure) { |
| - if (object_size > Page::kMaxRegularHeapObjectSize) return LO_SPACE; |
| - return (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE; |
| - } |
| + MUST_USE_RESULT AllocationResult CopyCode(Code* code); |
| - HeapObject* DoubleAlignForDeserialization(HeapObject* object, int size); |
| + // Allocates a fixed array initialized with undefined values |
| + MUST_USE_RESULT AllocationResult |
| + AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED); |
| // Allocate an uninitialized object. The memory is non-executable if the |
| // hardware and OS allow. This is the single choke-point for allocations |
| @@ -1902,12 +1939,6 @@ class Heap { |
| MUST_USE_RESULT AllocationResult |
| AllocatePartialMap(InstanceType instance_type, int instance_size); |
| - // Initializes a JSObject based on its map. |
| - void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties, |
| - Map* map); |
| - void InitializeAllocationMemento(AllocationMemento* memento, |
| - AllocationSite* allocation_site); |
| - |
| // Allocate a block of memory in the given space (filled with a filler). |
| // Used as a fall-back for generated code when the space is full. |
| MUST_USE_RESULT AllocationResult |
| @@ -1935,9 +1966,6 @@ class Heap { |
| MUST_USE_RESULT AllocationResult |
| AllocateRawTwoByteString(int length, PretenureFlag pretenure); |
| - bool CreateInitialMaps(); |
| - void CreateInitialObjects(); |
| - |
| // Allocates an internalized string in old space based on the character |
| // stream. |
| MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8( |
| @@ -2004,13 +2032,6 @@ class Heap { |
| MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray( |
| int length, PretenureFlag pretenure = NOT_TENURED); |
| - // These two Create*EntryStub functions are here and forced to not be inlined |
| - // because of a gcc-4.4 bug that assigns wrong vtable entries. |
| - NO_INLINE(void CreateJSEntryStub()); |
| - NO_INLINE(void CreateJSConstructEntryStub()); |
| - |
| - void CreateFixedStubs(); |
| - |
| // Allocate empty fixed array. |
| MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray(); |
| @@ -2040,94 +2061,125 @@ class Heap { |
| MUST_USE_RESULT AllocationResult InternalizeString(String* str); |
| - // Performs a minor collection in new generation. |
| - void Scavenge(); |
| + // The amount of external memory registered through the API kept alive |
| + // by global handles |
| + int64_t amount_of_external_allocated_memory_; |
| - // Commits from space if it is uncommitted. |
| - void EnsureFromSpaceIsCommitted(); |
| + // Caches the amount of external memory registered at the last global gc. |
| + int64_t amount_of_external_allocated_memory_at_last_global_gc_; |
| - // Uncommit unused semi space. |
| - bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); } |
| + // This can be calculated directly from a pointer to the heap; however, it is |
| + // more expedient to get at the isolate directly from within Heap methods. |
| + Isolate* isolate_; |
| - // Fill in bogus values in from space |
| - void ZapFromSpace(); |
| + Object* roots_[kRootListLength]; |
| - static String* UpdateNewSpaceReferenceInExternalStringTableEntry( |
| - Heap* heap, Object** pointer); |
| + size_t code_range_size_; |
| + int reserved_semispace_size_; |
| + int max_semi_space_size_; |
| + int initial_semispace_size_; |
| + int target_semispace_size_; |
| + intptr_t max_old_generation_size_; |
| + intptr_t initial_old_generation_size_; |
| + bool old_generation_size_configured_; |
| + intptr_t max_executable_size_; |
| + intptr_t maximum_committed_; |
| - Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front); |
| - static void ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page, |
| - StoreBufferEvent event); |
| + // For keeping track of how much data has survived |
| + // scavenge since last new space expansion. |
| + int survived_since_last_expansion_; |
| - // Performs a major collection in the whole heap. |
| - void MarkCompact(); |
| + // ... and since the last scavenge. |
| + int survived_last_scavenge_; |
| - // Code to be run before and after mark-compact. |
| - void MarkCompactPrologue(); |
| - void MarkCompactEpilogue(); |
| + int always_allocate_scope_depth_; |
| - void ProcessNativeContexts(WeakObjectRetainer* retainer); |
| - void ProcessAllocationSites(WeakObjectRetainer* retainer); |
| + // For keeping track of context disposals. |
| + int contexts_disposed_; |
| - // Deopts all code that contains allocation instructions which are tenured or |
| - // not tenured. Moreover, it clears the pretenuring allocation site statistics. |
| - void ResetAllAllocationSitesDependentCode(PretenureFlag flag); |
| + int global_ic_age_; |
| - // Evaluates local pretenuring for the old space and calls |
| - // ResetAllTenuredAllocationSitesDependentCode if too many objects died in |
| - // the old space. |
| - void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc); |
| + int scan_on_scavenge_pages_; |
| - // Called on heap tear-down. Frees all remaining ArrayBuffer backing stores. |
| - void TearDownArrayBuffers(); |
| + NewSpace new_space_; |
| + OldSpace* old_space_; |
| + OldSpace* code_space_; |
| + MapSpace* map_space_; |
| + LargeObjectSpace* lo_space_; |
| + HeapState gc_state_; |
| + int gc_post_processing_depth_; |
| + Address new_space_top_after_last_gc_; |
| - // These correspond to the non-Helper versions. |
| - void RegisterNewArrayBufferHelper(std::map<void*, size_t>& live_buffers, |
| - void* data, size_t length); |
| - void UnregisterArrayBufferHelper( |
| - std::map<void*, size_t>& live_buffers, |
| - std::map<void*, size_t>& not_yet_discovered_buffers, void* data); |
| - void RegisterLiveArrayBufferHelper( |
| - std::map<void*, size_t>& not_yet_discovered_buffers, void* data); |
| - size_t FreeDeadArrayBuffersHelper( |
| - Isolate* isolate, std::map<void*, size_t>& live_buffers, |
| - std::map<void*, size_t>& not_yet_discovered_buffers); |
| - void TearDownArrayBuffersHelper( |
| - Isolate* isolate, std::map<void*, size_t>& live_buffers, |
| - std::map<void*, size_t>& not_yet_discovered_buffers); |
| + // Returns the amount of external memory registered since last global gc. |
| + int64_t PromotedExternalMemorySize(); |
| - // Record statistics before and after garbage collection. |
| - void ReportStatisticsBeforeGC(); |
| - void ReportStatisticsAfterGC(); |
| + // How many "runtime allocations" happened. |
| + uint32_t allocations_count_; |
| - // Total RegExp code ever generated |
| - double total_regexp_code_generated_; |
| + // Running hash over allocations performed. |
| + uint32_t raw_allocations_hash_; |
| - int deferred_counters_[v8::Isolate::kUseCounterFeatureCount]; |
| + // Countdown counter, dumps allocation hash when 0. |
| + uint32_t dump_allocations_hash_countdown_; |
| - GCTracer* tracer_; |
| + // How many mark-sweep collections happened. |
| + unsigned int ms_count_; |
| - // Creates and installs the full-sized number string cache. |
| - int FullSizeNumberStringCacheLength(); |
| - // Flush the number to string cache. |
| - void FlushNumberStringCache(); |
| + // How many gc happened. |
| + unsigned int gc_count_; |
| - // Sets used allocation sites entries to undefined. |
| - void FlushAllocationSitesScratchpad(); |
| + // For post mortem debugging. |
| + int remembered_unmapped_pages_index_; |
| + Address remembered_unmapped_pages_[kRememberedUnmappedPages]; |
| - // Initializes the allocation sites scratchpad with undefined values. |
| - void InitializeAllocationSitesScratchpad(); |
| +#ifdef DEBUG |
| + // If the --gc-interval flag is set to a positive value, this |
| + // variable holds the value indicating the number of allocations |
| + // remain until the next failure and garbage collection. |
| + int allocation_timeout_; |
| +#endif // DEBUG |
| - // Adds an allocation site to the scratchpad if there is space left. |
| - void AddAllocationSiteToScratchpad(AllocationSite* site, |
| - ScratchpadSlotMode mode); |
| + // Limit that triggers a global GC on the next (normally caused) GC. This |
| + // is checked when we have already decided to do a GC to help determine |
| + // which collector to invoke, before expanding a paged space in the old |
| + // generation and on every allocation in large object space. |
| + intptr_t old_generation_allocation_limit_; |
| - void UpdateSurvivalStatistics(int start_new_space_size); |
| + // Indicates that an allocation has failed in the old generation since the |
| + // last GC. |
| + bool old_gen_exhausted_; |
| - static const int kYoungSurvivalRateHighThreshold = 90; |
| - static const int kYoungSurvivalRateAllowedDeviation = 15; |
| + // Indicates that memory usage is more important than latency. |
| + // TODO(ulan): Merge it with memory reducer once chromium:490559 is fixed. |
| + bool optimize_for_memory_usage_; |
| - static const int kOldSurvivalRateLowThreshold = 10; |
| + // Indicates that inline bump-pointer allocation has been globally disabled |
| + // for all spaces. This is used to disable allocations in generated code. |
| + bool inline_allocation_disabled_; |
| + |
| + // Weak list heads, threaded through the objects. |
| + // List heads are initialized lazily and contain the undefined_value at start. |
| + Object* native_contexts_list_; |
| + Object* allocation_sites_list_; |
| + |
| + // List of encountered weak collections (JSWeakMap and JSWeakSet) during |
| + // marking. It is initialized during marking, destroyed after marking and |
| + // contains Smi(0) while marking is not active. |
| + Object* encountered_weak_collections_; |
| + |
| + Object* encountered_weak_cells_; |
| + |
| + StoreBufferRebuilder store_buffer_rebuilder_; |
| + |
| + List<GCCallbackPair> gc_epilogue_callbacks_; |
| + List<GCCallbackPair> gc_prologue_callbacks_; |
| + |
| + // Total RegExp code ever generated |
| + double total_regexp_code_generated_; |
| + |
| + int deferred_counters_[v8::Isolate::kUseCounterFeatureCount]; |
| + |
| + GCTracer* tracer_; |
| int high_survival_rate_period_length_; |
| intptr_t promoted_objects_size_; |
| @@ -2146,46 +2198,6 @@ class Heap { |
| // of the allocation site. |
| unsigned int maximum_size_scavenges_; |
| - // TODO(hpayer): Allocation site pretenuring may make this method obsolete. |
| - // Re-visit incremental marking heuristics. |
| - bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; } |
| - |
| - void ConfigureInitialOldGenerationSize(); |
| - |
| - void SelectScavengingVisitorsTable(); |
| - |
| - bool HasLowYoungGenerationAllocationRate(); |
| - bool HasLowOldGenerationAllocationRate(); |
| - double YoungGenerationMutatorUtilization(); |
| - double OldGenerationMutatorUtilization(); |
| - |
| - void ReduceNewSpaceSize(); |
| - |
| - bool TryFinalizeIdleIncrementalMarking( |
| - double idle_time_in_ms, size_t size_of_objects, |
| - size_t mark_compact_speed_in_bytes_per_ms); |
| - |
| - GCIdleTimeHandler::HeapState ComputeHeapState(); |
| - |
| - bool PerformIdleTimeAction(GCIdleTimeAction action, |
| - GCIdleTimeHandler::HeapState heap_state, |
| - double deadline_in_ms); |
| - |
| - void IdleNotificationEpilogue(GCIdleTimeAction action, |
| - GCIdleTimeHandler::HeapState heap_state, |
| - double start_ms, double deadline_in_ms); |
| - void CheckAndNotifyBackgroundIdleNotification(double idle_time_in_ms, |
| - double now_ms); |
| - |
| - void ClearObjectStats(bool clear_last_time_stats = false); |
| - |
| - inline void UpdateAllocationsHash(HeapObject* object); |
| - inline void UpdateAllocationsHash(uint32_t value); |
| - inline void PrintAlloctionsHash(); |
| - |
| - void AddToRingBuffer(const char* string); |
| - void GetFromRingBuffer(char* buffer); |
| - |
| // Object counts and used memory by InstanceType |
| size_t object_counts_[OBJECT_STATS_COUNT]; |
| size_t object_counts_last_time_[OBJECT_STATS_COUNT]; |
| @@ -2248,7 +2260,6 @@ class Heap { |
| // deoptimization triggered by garbage collection. |
| int gcs_since_last_deopt_; |
| - static const int kAllocationSiteScratchpadSize = 256; |
| int allocation_sites_scratchpad_length_; |
| char trace_ring_buffer_[kTraceRingBufferSize]; |
| @@ -2258,9 +2269,6 @@ class Heap { |
| bool ring_buffer_full_; |
| size_t ring_buffer_end_; |
| - static const int kMaxMarkCompactsInIdleRound = 7; |
| - static const int kIdleScavengeThreshold = 5; |
| - |
| // Shared state read by the scavenge collector and set by ScavengeObject. |
| PromotionQueue promotion_queue_; |
| @@ -2304,7 +2312,6 @@ class Heap { |
| std::map<void*, size_t> live_array_buffers_for_scavenge_; |
| std::map<void*, size_t> not_yet_discovered_array_buffers_for_scavenge_; |
| - struct StrongRootsList; |
| StrongRootsList* strong_roots_list_; |
| friend class AlwaysAllocateScope; |