Unified Diff: src/heap/heap.h

Issue 1312503004: [heap] Enforce coding style decl order in {Heap} round #1. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Addressed comment and reverted back to regular static const double Created 5 years, 4 months ago
Index: src/heap/heap.h
diff --git a/src/heap/heap.h b/src/heap/heap.h
index 15b047c62e553b83b7bed625b427724b4d383249..f7eca4011a2b71d6dfe9c926a261f4693977d526 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -577,124 +577,256 @@ enum ArrayStorageAllocationMode {
class Heap {
public:
- // Configure heap size in MB before setup. Return false if the heap has been
- // set up already.
- bool ConfigureHeap(int max_semi_space_size, int max_old_space_size,
- int max_executable_size, size_t code_range_size);
- bool ConfigureHeapDefault();
+ // Declare all the root indices. This defines the root list order.
+ enum RootListIndex {
+#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
+ STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
+#undef ROOT_INDEX_DECLARATION
- // Prepares the heap, setting up memory areas that are needed in the isolate
- // without actually creating any objects.
- bool SetUp();
+#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
+ INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
+#undef STRING_INDEX_DECLARATION
- // Bootstraps the object heap with the core set of objects required to run.
- // Returns whether it succeeded.
- bool CreateHeapObjects();
+#define SYMBOL_INDEX_DECLARATION(name) k##name##RootIndex,
+ PRIVATE_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
+#undef SYMBOL_INDEX_DECLARATION
- // Destroys all memory allocated by the heap.
- void TearDown();
+#define SYMBOL_INDEX_DECLARATION(name, varname, description) k##name##RootIndex,
+ PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
+#undef SYMBOL_INDEX_DECLARATION
- // Set the stack limit in the roots_ array. Some architectures generate
- // code that looks here, because it is faster than loading from the static
- // jslimit_/real_jslimit_ variable in the StackGuard.
- void SetStackLimits();
+// Utility type maps
+#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
+ STRUCT_LIST(DECLARE_STRUCT_MAP)
+#undef DECLARE_STRUCT_MAP
+ kStringTableRootIndex,
- // Notifies the heap that it is ok to start marking or other activities that
- // should not happen during deserialization.
- void NotifyDeserializationComplete();
+#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
+ SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
+#undef ROOT_INDEX_DECLARATION
+ kRootListLength,
+ kStrongRootListLength = kStringTableRootIndex,
+ kSmiRootsStart = kStringTableRootIndex + 1
+ };
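This enum is generated with the X-macro pattern: each root list macro (STRONG_ROOT_LIST, INTERNALIZED_STRING_LIST, and so on) is handed a declaration macro whose token pasting emits one k...RootIndex enumerator per list entry. A minimal self-contained sketch of the pattern, using a made-up demo list rather than the real V8 ones:

    // Hypothetical list macro in the style of STRONG_ROOT_LIST.
    #define DEMO_ROOT_LIST(V)              \
      V(Map, free_space_map, FreeSpaceMap) \
      V(Oddball, true_value, TrueValue)

    enum DemoRootListIndex {
    #define DEMO_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
      DEMO_ROOT_LIST(DEMO_INDEX_DECLARATION)
    #undef DEMO_INDEX_DECLARATION
      kDemoRootListLength  // follows kFreeSpaceMapRootIndex, kTrueValueRootIndex
    };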
- // Returns whether SetUp has been called.
- bool HasBeenSetUp();
+ // Indicates whether live bytes adjustment is triggered
+ // - from within the GC code before sweeping started (SEQUENTIAL_TO_SWEEPER),
+ // - or from within GC code after sweeping started (CONCURRENT_TO_SWEEPER),
+ // - or from mutator code (CONCURRENT_TO_SWEEPER).
+ enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER };
- // Returns the maximum amount of memory reserved for the heap. For
- // the young generation, we reserve 4 times the amount needed for a
- // semi space. The young generation consists of two semi spaces and
- // we reserve twice the amount needed for those in order to ensure
- // that new space can be aligned to its size.
- intptr_t MaxReserved() {
- return 4 * reserved_semispace_size_ + max_old_generation_size_;
- }
- int MaxSemiSpaceSize() { return max_semi_space_size_; }
- int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
- int InitialSemiSpaceSize() { return initial_semispace_size_; }
- int TargetSemiSpaceSize() { return target_semispace_size_; }
- intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
- intptr_t MaxExecutableSize() { return max_executable_size_; }
+ enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT };
- // Returns the capacity of the heap in bytes w/o growing. Heap grows when
- // more spaces are needed until it reaches the limit.
- intptr_t Capacity();
+ enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
- // Returns the amount of memory currently committed for the heap.
- intptr_t CommittedMemory();
+ // ObjectStats are kept in two arrays, counts and sizes. Related stats are
+ // stored in a contiguous linear buffer. Stats groups are stored one after
+ // another.
+ enum {
+ FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
+ FIRST_FIXED_ARRAY_SUB_TYPE =
+ FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
+ FIRST_CODE_AGE_SUB_TYPE =
+ FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1,
+ OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1
+ };
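Slots 0 through LAST_TYPE of the counts/sizes arrays are indexed by InstanceType directly; the enumerators above then carve out contiguous sub-ranges for code kinds, fixed-array sub types, and code ages. A hypothetical helper (not part of the patch) showing how a slot for a given code kind would be located:

    // Sketch: stats slot for a Code object of the given kind, assuming
    // 0 <= kind < Code::NUMBER_OF_KINDS.
    int CodeKindStatsIndex(int kind) {
      return Heap::FIRST_CODE_KIND_SUB_TYPE + kind;
    }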
- // Returns the amount of memory currently committed for the old space.
- intptr_t CommittedOldGenerationMemory();
+ // Taking this lock prevents the GC from entering a phase that relocates
+ // object references.
+ class RelocationLock {
+ public:
+ explicit RelocationLock(Heap* heap) : heap_(heap) {
+ heap_->relocation_mutex_.Lock();
+ }
- // Returns the amount of executable memory currently committed for the heap.
- intptr_t CommittedMemoryExecutable();
+ ~RelocationLock() { heap_->relocation_mutex_.Unlock(); }
- // Returns the amount of physical memory currently committed for the heap.
- size_t CommittedPhysicalMemory();
+ private:
+ Heap* heap_;
+ };
- // Returns the maximum amount of memory ever committed for the heap.
- intptr_t MaximumCommittedMemory() { return maximum_committed_; }
+ // An optional version of the above lock that can be used for some critical
+ // sections on the mutator thread; only safe since the GC currently does not
+ // do concurrent compaction.
+ class OptionalRelocationLock {
+ public:
+ OptionalRelocationLock(Heap* heap, bool concurrent)
+ : heap_(heap), concurrent_(concurrent) {
+ if (concurrent_) heap_->relocation_mutex_.Lock();
+ }
- // Updates the maximum committed memory for the heap. Should be called
- // whenever a space grows.
- void UpdateMaximumCommitted();
+ ~OptionalRelocationLock() {
+ if (concurrent_) heap_->relocation_mutex_.Unlock();
+ }
- // Returns the available bytes in space w/o growing.
- // Heap doesn't guarantee that it can allocate an object that requires
- // all available bytes. Check MaxHeapObjectSize() instead.
- intptr_t Available();
+ private:
+ Heap* heap_;
+ bool concurrent_;
+ };
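Both classes are plain RAII guards: the constructor takes the relocation mutex (unconditionally, or only when |concurrent| is set) and the destructor releases it, so every exit path unlocks. A hypothetical usage sketch:

    void TouchRawAddresses(Heap* heap, bool running_concurrently) {
      // GC relocation phases are blocked for the rest of this scope.
      Heap::OptionalRelocationLock lock(heap, running_concurrently);
      // ... dereference raw object addresses safely here ...
    }  // mutex released automatically when |lock| goes out of scope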
- // Returns the size of all objects residing in the heap.
- intptr_t SizeOfObjects();
+ // Support for partial snapshots. After calling this we have a linear
+ // space to write objects in each space.
+ struct Chunk {
+ uint32_t size;
+ Address start;
+ Address end;
+ };
+ typedef List<Chunk> Reservation;
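Each Chunk describes one contiguous run of reserved memory ([start, end), with the size cached), and a Reservation is simply the list of such runs for a space. A hedged sketch of walking one, assuming V8's List exposes length() and operator[]:

    // Sketch: total bytes reserved across all chunks of one space.
    uint32_t TotalReserved(const Heap::Reservation& reservation) {
      uint32_t total = 0;
      for (int i = 0; i < reservation.length(); i++) {
        total += reservation[i].size;  // size == end - start for each chunk
      }
      return total;
    }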
- intptr_t old_generation_allocation_limit() const {
- return old_generation_allocation_limit_;
- }
+ static const intptr_t kMinimumOldGenerationAllocationLimit =
+ 8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
- // Return the starting address and a mask for the new space. And-masking an
- // address with the mask will result in the start address of the new space
- // for all addresses in either semispace.
- Address NewSpaceStart() { return new_space_.start(); }
- uintptr_t NewSpaceMask() { return new_space_.mask(); }
- Address NewSpaceTop() { return new_space_.top(); }
+ static const int kInitalOldGenerationLimitFactor = 2;
- NewSpace* new_space() { return &new_space_; }
- OldSpace* old_space() { return old_space_; }
- OldSpace* code_space() { return code_space_; }
- MapSpace* map_space() { return map_space_; }
- LargeObjectSpace* lo_space() { return lo_space_; }
- PagedSpace* paged_space(int idx) {
- switch (idx) {
- case OLD_SPACE:
- return old_space();
- case MAP_SPACE:
- return map_space();
- case CODE_SPACE:
- return code_space();
- case NEW_SPACE:
- case LO_SPACE:
- UNREACHABLE();
- }
- return NULL;
- }
- Space* space(int idx) {
- switch (idx) {
- case NEW_SPACE:
- return new_space();
- case LO_SPACE:
- return lo_space();
- default:
- return paged_space(idx);
- }
+#if V8_OS_ANDROID
+ // Don't apply pointer multiplier on Android since it has no swap space and
+ // should instead adapt its heap size based on available physical memory.
+ static const int kPointerMultiplier = 1;
+#else
+ static const int kPointerMultiplier = i::kPointerSize / 4;
+#endif
+
+ // The new space size has to be a power of 2. Sizes are in MB.
+ static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier;
+ static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier;
+ static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier;
+ static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier;
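These are MB values scaled by kPointerMultiplier, so a 64-bit desktop build (i::kPointerSize == 8, hence a multiplier of 2) ends up with, e.g., a 16 MB maximum semi-space on high-memory devices, while Android and 32-bit targets keep the base numbers. A hedged compile-time check under that 64-bit, non-Android assumption:

    // Sketch: only holds for a 64-bit, non-Android build.
    static_assert(Heap::kPointerMultiplier == 2, "i::kPointerSize / 4 == 2");
    static_assert(Heap::kMaxSemiSpaceSizeHighMemoryDevice == 16,  // MB
                  "8 MB base value scaled by the pointer multiplier");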
+
+ // The old space size has to be a multiple of Page::kPageSize.
+ // Sizes are in MB.
+ static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier;
+ static const int kMaxOldSpaceSizeMediumMemoryDevice =
+ 256 * kPointerMultiplier;
+ static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier;
+ static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier;
+
+ // The executable size has to be a multiple of Page::kPageSize.
+ // Sizes are in MB.
+ static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier;
+ static const int kMaxExecutableSizeMediumMemoryDevice =
+ 192 * kPointerMultiplier;
+ static const int kMaxExecutableSizeHighMemoryDevice =
+ 256 * kPointerMultiplier;
+ static const int kMaxExecutableSizeHugeMemoryDevice =
+ 256 * kPointerMultiplier;
+
+ static const int kTraceRingBufferSize = 512;
+ static const int kStacktraceBufferSize = 512;
+
+ static const double kMinHeapGrowingFactor;
+ static const double kMaxHeapGrowingFactor;
+ static const double kMaxHeapGrowingFactorMemoryConstrained;
+ static const double kMaxHeapGrowingFactorIdle;
+ static const double kTargetMutatorUtilization;
+
+ // Sloppy mode arguments object size.
+ static const int kSloppyArgumentsObjectSize =
+ JSObject::kHeaderSize + 2 * kPointerSize;
+
+ // The strict mode arguments object has no callee, so it is smaller.
+ static const int kStrictArgumentsObjectSize =
+ JSObject::kHeaderSize + 1 * kPointerSize;
+
+ // Indices for direct access into argument objects.
+ static const int kArgumentsLengthIndex = 0;
+
+ // callee is only valid in sloppy mode.
+ static const int kArgumentsCalleeIndex = 1;
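Together these four constants pin down the arguments object layout: a JSObject header followed by a length slot at index 0 and, in sloppy mode only, a callee slot at index 1. As a worked example, assuming a 64-bit build where JSObject::kHeaderSize is 3 pointers (24 bytes):

    // kSloppyArgumentsObjectSize = 24 + 2 * 8 = 40 bytes  (header, length, callee)
    // kStrictArgumentsObjectSize = 24 + 1 * 8 = 32 bytes  (header, length)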
+
+ static const int kNoGCFlags = 0;
+ static const int kReduceMemoryFootprintMask = 1;
+ static const int kAbortIncrementalMarkingMask = 2;
+ static const int kFinalizeIncrementalMarkingMask = 4;
+
+ // Making the heap iterable requires us to abort incremental marking.
+ static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;
+
+ // The roots that have an index less than this are always in old space.
+ static const int kOldSpaceRoots = 0x20;
+
+ STATIC_ASSERT(kUndefinedValueRootIndex ==
+ Internals::kUndefinedValueRootIndex);
+ STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
+ STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
+ STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
+ STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);
+
+ // Calculates the maximum amount of filler that could be required by the
+ // given alignment.
+ static int GetMaximumFillToAlign(AllocationAlignment alignment);
+ // Calculates the actual amount of filler required for a given address at the
+ // given alignment.
+ static int GetFillToAlign(Address address, AllocationAlignment alignment);
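The pair is split so a caller can reserve the pessimistic maximum up front and compute the exact filler once the allocation address is known. A generic sketch of the arithmetic for a power-of-two alignment (an illustration, not V8's exact per-AllocationAlignment rules):

    // Bytes of filler so that (address + result) is |alignment|-aligned,
    // for a power-of-two alignment.
    int FillToAlignSketch(uintptr_t address, int alignment) {
      int misalignment = static_cast<int>(address & (alignment - 1));
      return misalignment == 0 ? 0 : alignment - misalignment;
    }
    // The maximum over all addresses is alignment - 1.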
+
+ template <typename T>
+ static inline bool IsOneByte(T t, int chars);
+
+ // Callback function passed to Heap::Iterate etc. Copies an object if
+ // necessary; the object might be promoted to an old space. The caller must
+ // ensure the precondition that the object is (a) a heap object and (b) in
+ // the heap's from space.
+ static inline void ScavengePointer(HeapObject** p);
+ static inline void ScavengeObject(HeapObject** p, HeapObject* object);
+
+ // Slow part of scavenge object.
+ static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
+
+ static void FatalProcessOutOfMemory(const char* location,
+ bool take_snapshot = false);
+
+ static bool RootIsImmortalImmovable(int root_index);
+
+ // Checks whether the space is valid.
+ static bool IsValidAllocationSpace(AllocationSpace space);
+
+ // An object may have an AllocationSite associated with it through a trailing
+ // AllocationMemento. Its feedback should be updated when objects are found
+ // in the heap.
+ static inline void UpdateAllocationSiteFeedback(HeapObject* object,
+ ScratchpadSlotMode mode);
+
+ // Generated code can embed direct references to non-writable roots if
+ // they are in new space.
+ static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
+
+ // Zapping is needed for heap verification, and is always done in debug builds.
+ static inline bool ShouldZapGarbage() {
+#ifdef DEBUG
+ return true;
+#else
+#ifdef VERIFY_HEAP
+ return FLAG_verify_heap;
+#else
+ return false;
+#endif
+#endif
}
- // Returns name of the space.
- const char* GetSpaceName(int idx);
+ static double HeapGrowingFactor(double gc_speed, double mutator_speed);
+
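HeapGrowingFactor turns measured GC and mutator throughput into an old-generation growing factor, and the kMin/kMaxHeapGrowingFactor constants above bound the result so the allocation limit neither thrashes nor explodes. A hedged sketch of just that clamping step (the throughput formula itself lives in heap.cc and is not reproduced here):

    // Sketch: bound a throughput-derived estimate to the allowed range.
    double ClampGrowingFactor(double estimated_factor) {
      if (estimated_factor < Heap::kMinHeapGrowingFactor)
        return Heap::kMinHeapGrowingFactor;
      if (estimated_factor > Heap::kMaxHeapGrowingFactor)
        return Heap::kMaxHeapGrowingFactor;
      return estimated_factor;
    }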
+ // Copy block of memory from src to dst. Size of block should be aligned
+ // to pointer size.
+ static inline void CopyBlock(Address dst, Address src, int byte_size);
+
+ // Optimized version of memmove for blocks with pointer size aligned sizes and
+ // pointer size aligned addresses.
+ static inline void MoveBlock(Address dst, Address src, int byte_size);
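The alignment preconditions are what let these copies run one pointer-sized word at a time instead of byte-wise. A minimal sketch of such a copy (illustrative only; the real inline implementations live elsewhere):

    // Copies |byte_size| bytes, which must be a multiple of the word size.
    void CopyBlockSketch(uintptr_t* dst, const uintptr_t* src, int byte_size) {
      int words = byte_size / static_cast<int>(sizeof(uintptr_t));
      for (int i = 0; i < words; i++) dst[i] = src[i];
    }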
+
+ // Set the stack limit in the roots_ array. Some architectures generate
+ // code that looks here, because it is faster than loading from the static
+ // jslimit_/real_jslimit_ variable in the StackGuard.
+ void SetStackLimits();
+
+ // Notifies the heap that it is ok to start marking or other activities that
+ // should not happen during deserialization.
+ void NotifyDeserializationComplete();
+
+ // Returns whether SetUp has been called.
+ bool HasBeenSetUp();
+
+ intptr_t old_generation_allocation_limit() const {
+ return old_generation_allocation_limit_;
+ }
bool always_allocate() { return always_allocate_scope_depth_ != 0; }
Address always_allocate_scope_depth_address() {
@@ -721,30 +853,6 @@ class Heap {
return (CommittedOldGenerationMemory() + size) < MaxOldGenerationSize();
}
- // Returns a deep copy of the JavaScript object.
- // Properties and elements are copied too.
- // Optionally takes an AllocationSite to be appended in an AllocationMemento.
- MUST_USE_RESULT AllocationResult
- CopyJSObject(JSObject* source, AllocationSite* site = NULL);
-
- // Calculates the maximum amount of filler that could be required by the
- // given alignment.
- static int GetMaximumFillToAlign(AllocationAlignment alignment);
- // Calculates the actual amount of filler required for a given address at the
- // given alignment.
- static int GetFillToAlign(Address address, AllocationAlignment alignment);
-
- // Creates a filler object and returns a heap object immediately after it.
- MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object,
- int filler_size);
- // Creates a filler object if needed for alignment and returns a heap object
- // immediately after it. If any space is left after the returned object,
- // another filler object is created so the over allocated memory is iterable.
- MUST_USE_RESULT HeapObject* AlignWithFiller(HeapObject* object,
- int object_size,
- int allocation_size,
- AllocationAlignment alignment);
-
// Clear the Instanceof cache (used when a prototype changes).
inline void ClearInstanceofCache();
@@ -754,24 +862,10 @@ class Heap {
// FreeSpace objects have a null map after deserialization. Update the map.
void RepairFreeListsAfterDeserialization();
- template <typename T>
- static inline bool IsOneByte(T t, int chars);
-
// Move len elements within a given array from src_index index to dst_index
// index.
void MoveElements(FixedArray* array, int dst_index, int src_index, int len);
- // Sloppy mode arguments object size.
- static const int kSloppyArgumentsObjectSize =
- JSObject::kHeaderSize + 2 * kPointerSize;
- // The strict mode arguments object has no callee, so it is smaller.
- static const int kStrictArgumentsObjectSize =
- JSObject::kHeaderSize + 1 * kPointerSize;
- // Indices for direct access into argument objects.
- static const int kArgumentsLengthIndex = 0;
- // callee is only valid in sloppy mode.
- static const int kArgumentsCalleeIndex = 1;
-
// Finalizes an external string by deleting the associated external
// data and clearing the resource pointer.
inline void FinalizeExternalString(String* string);
@@ -782,12 +876,6 @@ class Heap {
bool CanMoveObjectStart(HeapObject* object);
- // Indicates whether live bytes adjustment is triggered
- // - from within the GC code before sweeping started (SEQUENTIAL_TO_SWEEPER),
- // - or from within GC code after sweeping started (CONCURRENT_TO_SWEEPER),
- // - or from mutator code (CONCURRENT_TO_SWEEPER).
- enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER };
-
// Maintain consistency of live bytes during incremental marking.
void AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode);
@@ -802,65 +890,18 @@ class Heap {
// Converts the given boolean condition to JavaScript boolean value.
inline Object* ToBoolean(bool condition);
- // Performs a garbage collection operation.
- // Returns whether there is a chance that another major GC could
- // collect more garbage.
- inline bool CollectGarbage(
- AllocationSpace space, const char* gc_reason = NULL,
- const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
-
- static const int kNoGCFlags = 0;
- static const int kReduceMemoryFootprintMask = 1;
- static const int kAbortIncrementalMarkingMask = 2;
- static const int kFinalizeIncrementalMarkingMask = 4;
-
- // Making the heap iterable requires us to abort incremental marking.
- static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;
-
- // Invoked when GC was requested via the stack guard.
- void HandleGCRequest();
-
// Attempt to over-approximate the weak closure by marking object groups and
// implicit references from global handles, but don't atomically complete
// marking. If we continue to mark incrementally, we might have marked
// objects that die later.
void OverApproximateWeakClosure(const char* gc_reason);
- // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
- // non-zero, then the slower precise sweeper is used, which leaves the heap
- // in a state where we can iterate over the heap visiting all objects.
- void CollectAllGarbage(
- int flags = kFinalizeIncrementalMarkingMask, const char* gc_reason = NULL,
- const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
-
- // Last hope GC, should try to squeeze as much as possible.
- void CollectAllAvailableGarbage(const char* gc_reason = NULL);
-
// Check whether the heap is currently iterable.
bool IsHeapIterable();
// Notify the heap that a context has been disposed.
int NotifyContextDisposed(bool dependant_context);
- // Start incremental marking and ensure that the idle time handler can perform
- // incremental steps.
- void StartIdleIncrementalMarking();
-
- // Starts incremental marking assuming incremental marking is currently
- // stopped.
- void StartIncrementalMarking(int gc_flags,
- const GCCallbackFlags gc_callback_flags,
- const char* reason = nullptr);
-
- // Performs incremental marking steps of step_size_in_bytes as long as
- // deadline_in_ms is not reached. step_size_in_bytes can be 0 to compute
- // an estimated increment. Returns the remaining time that cannot be used
- // for incremental marking anymore because a single step would exceed the
- // deadline.
- double AdvanceIncrementalMarking(
- intptr_t step_size_in_bytes, double deadline_in_ms,
- IncrementalMarking::StepActions step_actions);
-
void FinalizeIncrementalMarkingIfComplete(const char* comment);
inline void increment_scan_on_scavenge_pages() {
@@ -877,16 +918,6 @@ class Heap {
}
}
- PromotionQueue* promotion_queue() { return &promotion_queue_; }
-
- void AddGCPrologueCallback(v8::Isolate::GCCallback callback,
- GCType gc_type_filter, bool pass_isolate = true);
- void RemoveGCPrologueCallback(v8::Isolate::GCCallback callback);
-
- void AddGCEpilogueCallback(v8::Isolate::GCCallback callback,
- GCType gc_type_filter, bool pass_isolate = true);
- void RemoveGCEpilogueCallback(v8::Isolate::GCCallback callback);
-
// Heap root getters. We have versions with and without type::cast() here.
// You can't use type::cast during GC because the assert fails.
// TODO(1490): Try removing the unchecked accessors, now that GC marking does
@@ -944,46 +975,6 @@ class Heap {
// Number of mark-sweeps.
int ms_count() const { return ms_count_; }
- // Iterates over all roots in the heap.
- void IterateRoots(ObjectVisitor* v, VisitMode mode);
- // Iterates over all strong roots in the heap.
- void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
- // Iterates over entries in the smi roots list. Only interesting to the
- // serializer/deserializer, since GC does not care about smis.
- void IterateSmiRoots(ObjectVisitor* v);
- // Iterates over all the other roots in the heap.
- void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
-
- // Iterates over pointers to the from-semispace of new space found in the
- // memory interval from start to end within |object|.
- void IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
- Address end, bool record_slots,
- ObjectSlotCallback callback);
-
- // Returns whether the object resides in new space.
- inline bool InNewSpace(Object* object);
- inline bool InNewSpace(Address address);
- inline bool InNewSpacePage(Address address);
- inline bool InFromSpace(Object* object);
- inline bool InToSpace(Object* object);
-
- // Returns whether the object resides in old space.
- inline bool InOldSpace(Address address);
- inline bool InOldSpace(Object* object);
-
- // Checks whether an address/object is in the heap (including auxiliary
- // area and unused area).
- bool Contains(Address addr);
- bool Contains(HeapObject* value);
-
- // Checks whether an address/object is in a space.
- // Currently used by tests, serialization and heap verification only.
- bool InSpace(Address addr, AllocationSpace space);
- bool InSpace(HeapObject* value, AllocationSpace space);
-
- // Checks whether the space is valid.
- static bool IsValidAllocationSpace(AllocationSpace space);
-
// Checks whether the given object is allowed to be migrated from its
// current space into the given destination space. Used for debugging.
inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);
@@ -1017,35 +1008,7 @@ class Heap {
return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
}
- static bool RootIsImmortalImmovable(int root_index);
- void CheckHandleCount();
-
-#ifdef VERIFY_HEAP
- // Verify the heap is in its normal state before or after a GC.
- void Verify();
-#endif
-
-#ifdef DEBUG
- void Print();
- void PrintHandles();
-
- // Report heap statistics.
- void ReportHeapStatistics(const char* title);
- void ReportCodeStatistics(const char* title);
-#endif
-
- // Zapping is needed for heap verification, and is always done in debug builds.
- static inline bool ShouldZapGarbage() {
-#ifdef DEBUG
- return true;
-#else
-#ifdef VERIFY_HEAP
- return FLAG_verify_heap;
-#else
- return false;
-#endif
-#endif
- }
+ void CheckHandleCount();
// Number of "runtime allocations" done so far.
uint32_t allocations_count() { return allocations_count_; }
@@ -1060,6 +1023,7 @@ class Heap {
size_t object_count_last_gc(size_t index) {
return index < OBJECT_STATS_COUNT ? object_counts_last_time_[index] : 0;
}
+
size_t object_size_last_gc(size_t index) {
return index < OBJECT_STATS_COUNT ? object_sizes_last_time_[index] : 0;
}
@@ -1070,51 +1034,14 @@ class Heap {
// Write barrier support for address[start : start + len[ = o.
INLINE(void RecordWrites(Address address, int start, int len));
- enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
inline HeapState gc_state() { return gc_state_; }
inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
-#ifdef DEBUG
- void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
-
- void TracePathToObjectFrom(Object* target, Object* root);
- void TracePathToObject(Object* target);
- void TracePathToGlobal();
-#endif
-
- // Callback function passed to Heap::Iterate etc. Copies an object if
- // necessary; the object might be promoted to an old space. The caller must
- // ensure the precondition that the object is (a) a heap object and (b) in
- // the heap's from space.
- static inline void ScavengePointer(HeapObject** p);
- static inline void ScavengeObject(HeapObject** p, HeapObject* object);
-
- // Slow part of scavenge object.
- static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
-
- enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT };
-
// If an object has an AllocationMemento trailing it, return it, otherwise
// return NULL;
inline AllocationMemento* FindAllocationMemento(HeapObject* object);
- // An object may have an AllocationSite associated with it through a trailing
- // AllocationMemento. Its feedback should be updated when objects are found
- // in the heap.
- static inline void UpdateAllocationSiteFeedback(HeapObject* object,
- ScratchpadSlotMode mode);
-
- // Support for partial snapshots. After calling this we have a linear
- // space to write objects in each space.
- struct Chunk {
- uint32_t size;
- Address start;
- Address end;
- };
-
- typedef List<Chunk> Reservation;
-
// Returns false if not able to reserve.
bool ReserveSpace(Reservation* reservations);
@@ -1124,72 +1051,6 @@ class Heap {
void CreateApiObjects();
- inline intptr_t PromotedTotalSize() {
- int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
- if (total > std::numeric_limits<intptr_t>::max()) {
- // TODO(erikcorry): Use uintptr_t everywhere we do heap size calculations.
- return std::numeric_limits<intptr_t>::max();
- }
- if (total < 0) return 0;
- return static_cast<intptr_t>(total);
- }
-
- inline intptr_t OldGenerationSpaceAvailable() {
- return old_generation_allocation_limit_ - PromotedTotalSize();
- }
-
- inline intptr_t OldGenerationCapacityAvailable() {
- return max_old_generation_size_ - PromotedTotalSize();
- }
-
- static const intptr_t kMinimumOldGenerationAllocationLimit =
- 8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
-
- static const int kInitalOldGenerationLimitFactor = 2;
-
-#if V8_OS_ANDROID
- // Don't apply pointer multiplier on Android since it has no swap space and
- // should instead adapt its heap size based on available physical memory.
- static const int kPointerMultiplier = 1;
-#else
- static const int kPointerMultiplier = i::kPointerSize / 4;
-#endif
-
- // The new space size has to be a power of 2. Sizes are in MB.
- static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier;
- static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier;
- static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier;
- static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier;
-
- // The old space size has to be a multiple of Page::kPageSize.
- // Sizes are in MB.
- static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier;
- static const int kMaxOldSpaceSizeMediumMemoryDevice =
- 256 * kPointerMultiplier;
- static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier;
- static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier;
-
- // The executable size has to be a multiple of Page::kPageSize.
- // Sizes are in MB.
- static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier;
- static const int kMaxExecutableSizeMediumMemoryDevice =
- 192 * kPointerMultiplier;
- static const int kMaxExecutableSizeHighMemoryDevice =
- 256 * kPointerMultiplier;
- static const int kMaxExecutableSizeHugeMemoryDevice =
- 256 * kPointerMultiplier;
-
- static const int kTraceRingBufferSize = 512;
- static const int kStacktraceBufferSize = 512;
-
- static const double kMinHeapGrowingFactor;
- static const double kMaxHeapGrowingFactor;
- static const double kMaxHeapGrowingFactorMemoryConstrained;
- static const double kMaxHeapGrowingFactorIdle;
- static const double kTargetMutatorUtilization;
-
- static double HeapGrowingFactor(double gc_speed, double mutator_speed);
-
// Calculates the allocation limit based on a given growing factor and a
// given old generation size.
intptr_t CalculateOldGenerationAllocationLimit(double factor,
@@ -1205,63 +1066,14 @@ class Heap {
double gc_speed,
double mutator_speed);
- // Indicates whether inline bump-pointer allocation has been disabled.
- bool inline_allocation_disabled() { return inline_allocation_disabled_; }
-
- // Switch whether inline bump-pointer allocation should be used.
- void EnableInlineAllocation();
- void DisableInlineAllocation();
-
// Implements the corresponding V8 API function.
bool IdleNotification(double deadline_in_seconds);
bool IdleNotification(int idle_time_in_ms);
double MonotonicallyIncreasingTimeInMs();
- // Declare all the root indices. This defines the root list order.
- enum RootListIndex {
-#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
- STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
-#undef ROOT_INDEX_DECLARATION
-
-#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
- INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
-#undef STRING_INDEX_DECLARATION
-
-#define SYMBOL_INDEX_DECLARATION(name) k##name##RootIndex,
- PRIVATE_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
-#undef SYMBOL_INDEX_DECLARATION
-
-#define SYMBOL_INDEX_DECLARATION(name, varname, description) k##name##RootIndex,
- PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
-#undef SYMBOL_INDEX_DECLARATION
-
-// Utility type maps
-#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
- STRUCT_LIST(DECLARE_STRUCT_MAP)
-#undef DECLARE_STRUCT_MAP
- kStringTableRootIndex,
-
-#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
- SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
-#undef ROOT_INDEX_DECLARATION
- kRootListLength,
- kStrongRootListLength = kStringTableRootIndex,
- kSmiRootsStart = kStringTableRootIndex + 1
- };
-
Object* root(RootListIndex index) { return roots_[index]; }
- STATIC_ASSERT(kUndefinedValueRootIndex ==
- Internals::kUndefinedValueRootIndex);
- STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
- STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
- STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
- STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);
-
- // Generated code can embed direct references to non-writable roots if
- // they are in new space.
- static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
// Generated code can treat direct references to this root as constant.
bool RootCanBeTreatedAsConstant(RootListIndex root_index);
@@ -1273,43 +1085,9 @@ class Heap {
void RecordStats(HeapStats* stats, bool take_snapshot = false);
- // Copy block of memory from src to dst. Size of block should be aligned
- // to pointer size.
- static inline void CopyBlock(Address dst, Address src, int byte_size);
-
- // Optimized version of memmove for blocks with pointer size aligned sizes and
- // pointer size aligned addresses.
- static inline void MoveBlock(Address dst, Address src, int byte_size);
-
// Check new space expansion criteria and expand semispaces if they were hit.
void CheckNewSpaceExpansionCriteria();
- inline void IncrementPromotedObjectsSize(int object_size) {
- DCHECK(object_size > 0);
- promoted_objects_size_ += object_size;
- }
-
- inline void IncrementSemiSpaceCopiedObjectSize(int object_size) {
- DCHECK(object_size > 0);
- semi_space_copied_object_size_ += object_size;
- }
-
- inline intptr_t SurvivedNewSpaceObjectSize() {
- return promoted_objects_size_ + semi_space_copied_object_size_;
- }
-
- inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }
-
- inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; }
-
- inline void IncrementNodesPromoted() { nodes_promoted_++; }
-
- inline void IncrementYoungSurvivorsCounter(int survived) {
- DCHECK(survived >= 0);
- survived_last_scavenge_ = survived;
- survived_since_last_expansion_ += survived;
- }
-
inline bool HeapIsFullEnoughToStartIncrementalMarking(intptr_t limit) {
if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
@@ -1337,105 +1115,26 @@ class Heap {
void ClearNormalizedMapCaches();
- GCTracer* tracer() { return tracer_; }
-
- // Returns the size of objects residing in non new spaces.
- intptr_t PromotedSpaceSizeOfObjects();
-
- double total_regexp_code_generated() { return total_regexp_code_generated_; }
- void IncreaseTotalRegexpCodeGenerated(int size) {
- total_regexp_code_generated_ += size;
- }
-
- void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) {
- if (is_crankshafted) {
- crankshaft_codegen_bytes_generated_ += size;
- } else {
- full_codegen_bytes_generated_ += size;
- }
- }
-
- void UpdateNewSpaceAllocationCounter() {
- new_space_allocation_counter_ = NewSpaceAllocationCounter();
- }
-
- size_t NewSpaceAllocationCounter() {
- return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
- }
-
- // This should be used only for testing.
- void set_new_space_allocation_counter(size_t new_value) {
- new_space_allocation_counter_ = new_value;
- }
-
- void UpdateOldGenerationAllocationCounter() {
- old_generation_allocation_counter_ = OldGenerationAllocationCounter();
- }
-
- size_t OldGenerationAllocationCounter() {
- return old_generation_allocation_counter_ + PromotedSinceLastGC();
- }
-
- // This should be used only for testing.
- void set_old_generation_allocation_counter(size_t new_value) {
- old_generation_allocation_counter_ = new_value;
- }
-
- size_t PromotedSinceLastGC() {
- return PromotedSpaceSizeOfObjects() - old_generation_size_at_last_gc_;
- }
-
- // Update GC statistics that are tracked on the Heap.
- void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator,
- double marking_time);
-
- // Returns maximum GC pause.
- double get_max_gc_pause() { return max_gc_pause_; }
-
- // Returns maximum size of objects alive after GC.
- intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }
-
- // Returns minimal interval between two subsequent collections.
- double get_min_in_mutator() { return min_in_mutator_; }
-
void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
- MarkCompactCollector* mark_compact_collector() {
- return &mark_compact_collector_;
- }
-
- StoreBuffer* store_buffer() { return &store_buffer_; }
-
- IncrementalMarking* incremental_marking() { return &incremental_marking_; }
-
ExternalStringTable* external_string_table() {
return &external_string_table_;
}
bool concurrent_sweeping_enabled() { return concurrent_sweeping_enabled_; }
- inline Isolate* isolate();
-
- void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
- void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);
-
inline bool OldGenerationAllocationLimitReached();
void QueueMemoryChunkForFree(MemoryChunk* chunk);
void FilterStoreBufferEntriesOnAboutToBeFreedPages();
void FreeQueuedChunks();
- int gc_count() const { return gc_count_; }
-
bool RecentIdleNotificationHappened();
// Completely clear the Instanceof cache (to stop it keeping objects alive
// around a GC).
inline void CompletelyClearInstanceofCache();
- // The roots that have an index less than this are always in old space.
- static const int kOldSpaceRoots = 0x20;
-
inline uint32_t HashSeed();
inline Smi* NextScriptId();
@@ -1468,18 +1167,6 @@ class Heap {
return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
}
- // ObjectStats are kept in two arrays, counts and sizes. Related stats are
- // stored in a contiguous linear buffer. Stats groups are stored one after
- // another.
- enum {
- FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
- FIRST_FIXED_ARRAY_SUB_TYPE =
- FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
- FIRST_CODE_AGE_SUB_TYPE =
- FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1,
- OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1
- };
-
void RecordObjectStats(InstanceType type, size_t size) {
DCHECK(type <= LAST_TYPE);
object_counts_[type]++;
@@ -1515,39 +1202,6 @@ class Heap {
void RegisterStrongRoots(Object** start, Object** end);
void UnregisterStrongRoots(Object** start);
- // Taking this lock prevents the GC from entering a phase that relocates
- // object references.
- class RelocationLock {
- public:
- explicit RelocationLock(Heap* heap) : heap_(heap) {
- heap_->relocation_mutex_.Lock();
- }
-
- ~RelocationLock() { heap_->relocation_mutex_.Unlock(); }
-
- private:
- Heap* heap_;
- };
-
- // An optional version of the above lock that can be used for some critical
- // sections on the mutator thread; only safe since the GC currently does not
- // do concurrent compaction.
- class OptionalRelocationLock {
- public:
- OptionalRelocationLock(Heap* heap, bool concurrent)
- : heap_(heap), concurrent_(concurrent) {
- if (concurrent_) heap_->relocation_mutex_.Lock();
- }
-
- ~OptionalRelocationLock() {
- if (concurrent_) heap_->relocation_mutex_.Unlock();
- }
-
- private:
- Heap* heap_;
- bool concurrent_;
- };
-
void AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
Handle<DependentCode> dep);
@@ -1555,9 +1209,6 @@ class Heap {
void AddRetainedMap(Handle<Map> map);
- static void FatalProcessOutOfMemory(const char* location,
- bool take_snapshot = false);
-
// This event is triggered after successful allocation of a new object made
// by runtime. Allocations of target space for object evacuation do not
// trigger the event. In order to track ALL allocations one must turn off
@@ -1579,220 +1230,437 @@ class Heap {
// The backing store |data| is no longer owned by V8.
void UnregisterArrayBuffer(bool in_new_space, void* data);
- // A live ArrayBuffer was discovered during marking/scavenge.
- void RegisterLiveArrayBuffer(bool from_scavenge, void* data);
+ // A live ArrayBuffer was discovered during marking/scavenge.
+ void RegisterLiveArrayBuffer(bool from_scavenge, void* data);
+
+ // Frees all backing store pointers that weren't discovered in the previous
+ // marking or scavenge phase.
+ void FreeDeadArrayBuffers(bool from_scavenge);
+
+ // Prepare for a new scavenge phase. A new marking phase is implicitly
+ // prepared by finishing the previous one.
+ void PrepareArrayBufferDiscoveryInNewSpace();
+
+ // An ArrayBuffer moved from new space to old space.
+ void PromoteArrayBuffer(Object* buffer);
+
+ bool HasLowAllocationRate();
+ bool HasHighFragmentation();
+ bool HasHighFragmentation(intptr_t used, intptr_t committed);
+
+ bool ShouldOptimizeForMemoryUsage() { return optimize_for_memory_usage_; }
+
+ // ===========================================================================
+ // Initialization. ===========================================================
+ // ===========================================================================
+
+ // Configure heap size in MB before setup. Return false if the heap has been
+ // set up already.
+ bool ConfigureHeap(int max_semi_space_size, int max_old_space_size,
+ int max_executable_size, size_t code_range_size);
+ bool ConfigureHeapDefault();
+
+ // Prepares the heap, setting up memory areas that are needed in the isolate
+ // without actually creating any objects.
+ bool SetUp();
+
+ // Bootstraps the object heap with the core set of objects required to run.
+ // Returns whether it succeeded.
+ bool CreateHeapObjects();
+
+ // Destroys all memory allocated by the heap.
+ void TearDown();
+
+ // ===========================================================================
+ // Getters for spaces. =======================================================
+ // ===========================================================================
+
+ // Return the starting address and a mask for the new space. And-masking an
+ // address with the mask will result in the start address of the new space
+ // for all addresses in either semispace.
+ Address NewSpaceStart() { return new_space_.start(); }
+ uintptr_t NewSpaceMask() { return new_space_.mask(); }
+ Address NewSpaceTop() { return new_space_.top(); }
+
+ NewSpace* new_space() { return &new_space_; }
+ OldSpace* old_space() { return old_space_; }
+ OldSpace* code_space() { return code_space_; }
+ MapSpace* map_space() { return map_space_; }
+ LargeObjectSpace* lo_space() { return lo_space_; }
+
+ PagedSpace* paged_space(int idx) {
+ switch (idx) {
+ case OLD_SPACE:
+ return old_space();
+ case MAP_SPACE:
+ return map_space();
+ case CODE_SPACE:
+ return code_space();
+ case NEW_SPACE:
+ case LO_SPACE:
+ UNREACHABLE();
+ }
+ return NULL;
+ }
+
+ Space* space(int idx) {
+ switch (idx) {
+ case NEW_SPACE:
+ return new_space();
+ case LO_SPACE:
+ return lo_space();
+ default:
+ return paged_space(idx);
+ }
+ }
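Indexing spaces by integer makes generic iteration possible. A hypothetical walk over every space, assuming the AllocationSpace enum spans FIRST_SPACE through LAST_SPACE as in the rest of V8 and that each Space exposes CommittedMemory():

    // Sketch: sum committed memory across all spaces via the index accessor.
    intptr_t CommittedAcrossSpaces(Heap* heap) {
      intptr_t total = 0;
      for (int idx = FIRST_SPACE; idx <= LAST_SPACE; idx++) {
        total += heap->space(idx)->CommittedMemory();
      }
      return total;
    }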
+
+ // Returns name of the space.
+ const char* GetSpaceName(int idx);
+
+ // ===========================================================================
+ // Getters to other components. ==============================================
+ // ===========================================================================
+
+ GCTracer* tracer() { return tracer_; }
+
+ PromotionQueue* promotion_queue() { return &promotion_queue_; }
+
+ inline Isolate* isolate();
+
+ MarkCompactCollector* mark_compact_collector() {
+ return &mark_compact_collector_;
+ }
+
+ StoreBuffer* store_buffer() { return &store_buffer_; }
+
+ // ===========================================================================
+ // Inline allocation. ========================================================
+ // ===========================================================================
+
+ // Indicates whether inline bump-pointer allocation has been disabled.
+ bool inline_allocation_disabled() { return inline_allocation_disabled_; }
+
+ // Switch whether inline bump-pointer allocation should be used.
+ void EnableInlineAllocation();
+ void DisableInlineAllocation();
+
+ // ===========================================================================
+ // Methods triggering GCs. ===================================================
+ // ===========================================================================
+
+ // Performs a garbage collection operation.
+ // Returns whether there is a chance that another major GC could
+ // collect more garbage.
+ inline bool CollectGarbage(
+ AllocationSpace space, const char* gc_reason = NULL,
+ const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
+
+ // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
+ // non-zero, then the slower precise sweeper is used, which leaves the heap
+ // in a state where we can iterate over the heap visiting all objects.
+ void CollectAllGarbage(
+ int flags = kFinalizeIncrementalMarkingMask, const char* gc_reason = NULL,
+ const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
+
+ // Last hope GC, should try to squeeze as much as possible.
+ void CollectAllAvailableGarbage(const char* gc_reason = NULL);
+
+ // Invoked when GC was requested via the stack guard.
+ void HandleGCRequest();
+
+ // ===========================================================================
+ // Iterators. ================================================================
+ // ===========================================================================
+
+ // Iterates over all roots in the heap.
+ void IterateRoots(ObjectVisitor* v, VisitMode mode);
+ // Iterates over all strong roots in the heap.
+ void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
+ // Iterates over entries in the smi roots list. Only interesting to the
+ // serializer/deserializer, since GC does not care about smis.
+ void IterateSmiRoots(ObjectVisitor* v);
+ // Iterates over all the other roots in the heap.
+ void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
+
+ // Iterate pointers to from semispace of new space found in memory interval
+ // from start to end within |object|.
+ void IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
+ Address end, bool record_slots,
+ ObjectSlotCallback callback);
+
+ // ===========================================================================
+ // Incremental marking API. ==================================================
+ // ===========================================================================
+
+ // Start incremental marking and ensure that the idle time handler can perform
+ // incremental steps.
+ void StartIdleIncrementalMarking();
+
+ // Starts incremental marking assuming incremental marking is currently
+ // stopped.
+ void StartIncrementalMarking(int gc_flags,
+ const GCCallbackFlags gc_callback_flags,
+ const char* reason = nullptr);
+
+ // Performs incremental marking steps of step_size_in_bytes as long as
+ // deadline_in_ms is not reached. step_size_in_bytes can be 0 to compute
+ // an estimated increment. Returns the remaining time that cannot be used
+ // for incremental marking anymore because a single step would exceed the
+ // deadline.
+ double AdvanceIncrementalMarking(
+ intptr_t step_size_in_bytes, double deadline_in_ms,
+ IncrementalMarking::StepActions step_actions);
+
+ IncrementalMarking* incremental_marking() { return &incremental_marking_; }
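AdvanceIncrementalMarking is built for deadline-driven callers such as the idle time handler: pass a step size (or 0 for an estimated one) plus a wall-clock deadline, and it steps until another step would overshoot. A hypothetical driver, reusing FinalizeIncrementalMarkingIfComplete from earlier in this header:

    // Sketch: do as much marking as fits before |deadline_in_ms|.
    void MarkUntilDeadline(Heap* heap, double deadline_in_ms,
                           IncrementalMarking::StepActions step_actions) {
      heap->AdvanceIncrementalMarking(0 /* estimate step size */, deadline_in_ms,
                                      step_actions);
      if (heap->incremental_marking()->IsComplete()) {
        heap->FinalizeIncrementalMarkingIfComplete("idle-time deadline reached");
      }
    }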
+
+ // ===========================================================================
+ // Methods checking/returning the space of a given object/address. ===========
+ // ===========================================================================
+
+ // Returns whether the object resides in new space.
+ inline bool InNewSpace(Object* object);
+ inline bool InNewSpace(Address address);
+ inline bool InNewSpacePage(Address address);
+ inline bool InFromSpace(Object* object);
+ inline bool InToSpace(Object* object);
+
+ // Returns whether the object resides in old space.
+ inline bool InOldSpace(Address address);
+ inline bool InOldSpace(Object* object);
+
+ // Checks whether an address/object is in the heap (including auxiliary
+ // area and unused area).
+ bool Contains(Address addr);
+ bool Contains(HeapObject* value);
+
+ // Checks whether an address/object is in a space.
+ // Currently used by tests, serialization and heap verification only.
+ bool InSpace(Address addr, AllocationSpace space);
+ bool InSpace(HeapObject* value, AllocationSpace space);
+
+ // ===========================================================================
+ // GC statistics. ============================================================
+ // ===========================================================================
+
+ // Returns the maximum amount of memory reserved for the heap. For
+ // the young generation, we reserve 4 times the amount needed for a
+ // semi space. The young generation consists of two semi spaces and
+ // we reserve twice the amount needed for those in order to ensure
+ // that new space can be aligned to its size.
+ intptr_t MaxReserved() {
+ return 4 * reserved_semispace_size_ + max_old_generation_size_;
+ }
+ int MaxSemiSpaceSize() { return max_semi_space_size_; }
+ int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
+ int InitialSemiSpaceSize() { return initial_semispace_size_; }
+ int TargetSemiSpaceSize() { return target_semispace_size_; }
+ intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
+ intptr_t MaxExecutableSize() { return max_executable_size_; }
+
+ // Returns the capacity of the heap in bytes w/o growing. Heap grows when
+ // more spaces are needed until it reaches the limit.
+ intptr_t Capacity();
+
+ // Returns the amount of memory currently committed for the heap.
+ intptr_t CommittedMemory();
- // Frees all backing store pointers that weren't discovered in the previous
- // marking or scavenge phase.
- void FreeDeadArrayBuffers(bool from_scavenge);
+ // Returns the amount of memory currently committed for the old space.
+ intptr_t CommittedOldGenerationMemory();
- // Prepare for a new scavenge phase. A new marking phase is implicitly
- // prepared by finishing the previous one.
- void PrepareArrayBufferDiscoveryInNewSpace();
+ // Returns the amount of executable memory currently committed for the heap.
+ intptr_t CommittedMemoryExecutable();
- // An ArrayBuffer moved from new space to old space.
- void PromoteArrayBuffer(Object* buffer);
+ // Returns the amount of physical memory currently committed for the heap.
+ size_t CommittedPhysicalMemory();
- bool HasLowAllocationRate();
- bool HasHighFragmentation();
- bool HasHighFragmentation(intptr_t used, intptr_t committed);
+ // Returns the maximum amount of memory ever committed for the heap.
+ intptr_t MaximumCommittedMemory() { return maximum_committed_; }
- bool ShouldOptimizeForMemoryUsage() { return optimize_for_memory_usage_; }
+ // Updates the maximum committed memory for the heap. Should be called
+ // whenever a space grows.
+ void UpdateMaximumCommitted();
- private:
- static const int kInitialStringTableSize = 2048;
- static const int kInitialEvalCacheSize = 64;
- static const int kInitialNumberStringCacheSize = 256;
+ // Returns the available bytes in space w/o growing.
+ // Heap doesn't guarantee that it can allocate an object that requires
+ // all available bytes. Check MaxHeapObjectSize() instead.
+ intptr_t Available();
- Heap();
+ // Returns the size of all objects residing in the heap.
+ intptr_t SizeOfObjects();
- int current_gc_flags() { return current_gc_flags_; }
- void set_current_gc_flags(int flags) {
- current_gc_flags_ = flags;
- DCHECK(!ShouldFinalizeIncrementalMarking() ||
- !ShouldAbortIncrementalMarking());
- }
+ void UpdateSurvivalStatistics(int start_new_space_size);
- inline bool ShouldReduceMemory() const {
- return current_gc_flags_ & kReduceMemoryFootprintMask;
+ inline void IncrementPromotedObjectsSize(int object_size) {
+ DCHECK(object_size > 0);
+ promoted_objects_size_ += object_size;
}
+ inline intptr_t promoted_objects_size() { return promoted_objects_size_; }
- inline bool ShouldAbortIncrementalMarking() const {
- return current_gc_flags_ & kAbortIncrementalMarkingMask;
+ inline void IncrementSemiSpaceCopiedObjectSize(int object_size) {
+ DCHECK(object_size > 0);
+ semi_space_copied_object_size_ += object_size;
}
-
- inline bool ShouldFinalizeIncrementalMarking() const {
- return current_gc_flags_ & kFinalizeIncrementalMarkingMask;
+ inline intptr_t semi_space_copied_object_size() {
+ return semi_space_copied_object_size_;
}
- // Allocates a JS Map in the heap.
- MUST_USE_RESULT AllocationResult
- AllocateMap(InstanceType instance_type, int instance_size,
- ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
- // Allocates and initializes a new JavaScript object based on a
- // constructor.
- // If allocation_site is non-null, then a memento is emitted after the object
- // that points to the site.
- MUST_USE_RESULT AllocationResult
- AllocateJSObject(JSFunction* constructor,
- PretenureFlag pretenure = NOT_TENURED,
- AllocationSite* allocation_site = NULL);
+ inline intptr_t SurvivedNewSpaceObjectSize() {
+ return promoted_objects_size_ + semi_space_copied_object_size_;
+ }
- // Allocates and initializes a new JavaScript object based on a map.
- // Passing an allocation site means that a memento will be created that
- // points to the site.
- MUST_USE_RESULT AllocationResult
- AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED,
- AllocationSite* allocation_site = NULL);
+ inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }
- // Allocates a HeapNumber from value.
- MUST_USE_RESULT AllocationResult
- AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE,
- PretenureFlag pretenure = NOT_TENURED);
+ inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; }
-// Allocates SIMD values from the given lane values.
-#define SIMD_ALLOCATE_DECLARATION(TYPE, Type, type, lane_count, lane_type) \
- AllocationResult Allocate##Type(lane_type lanes[lane_count], \
- PretenureFlag pretenure = NOT_TENURED);
- SIMD128_TYPES(SIMD_ALLOCATE_DECLARATION)
-#undef SIMD_ALLOCATE_DECLARATION
+ inline void IncrementNodesPromoted() { nodes_promoted_++; }
- // Allocates a byte array of the specified length
- MUST_USE_RESULT AllocationResult
- AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED);
+ inline void IncrementYoungSurvivorsCounter(int survived) {
+ DCHECK(survived >= 0);
+ survived_last_scavenge_ = survived;
+ survived_since_last_expansion_ += survived;
+ }
- // Allocates a bytecode array with given contents.
- MUST_USE_RESULT AllocationResult
- AllocateBytecodeArray(int length, const byte* raw_bytecodes,
- int frame_size);
+ inline intptr_t PromotedTotalSize() {
+ int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
+ if (total > std::numeric_limits<intptr_t>::max()) {
+ // TODO(erikcorry): Use uintptr_t everywhere we do heap size calculations.
+ return std::numeric_limits<intptr_t>::max();
+ }
+ if (total < 0) return 0;
+ return static_cast<intptr_t>(total);
+ }
- // Copy the code and scope info part of the code object, but insert
- // the provided data as the relocation information.
- MUST_USE_RESULT AllocationResult
- CopyCode(Code* code, Vector<byte> reloc_info);
+ inline intptr_t OldGenerationSpaceAvailable() {
+ return old_generation_allocation_limit_ - PromotedTotalSize();
+ }
- MUST_USE_RESULT AllocationResult CopyCode(Code* code);
+ inline intptr_t OldGenerationCapacityAvailable() {
+ return max_old_generation_size_ - PromotedTotalSize();
+ }
- // Allocates a fixed array initialized with undefined values
- MUST_USE_RESULT AllocationResult
- AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED);
- // The amount of external memory registered through the API kept alive
- // by global handles
- int64_t amount_of_external_allocated_memory_;
+ void UpdateNewSpaceAllocationCounter() {
+ new_space_allocation_counter_ = NewSpaceAllocationCounter();
+ }
- // Caches the amount of external memory registered at the last global gc.
- int64_t amount_of_external_allocated_memory_at_last_global_gc_;
+ size_t NewSpaceAllocationCounter() {
+ return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
+ }
- // This can be calculated directly from a pointer to the heap; however, it is
- // more expedient to get at the isolate directly from within Heap methods.
- Isolate* isolate_;
+ // This should be used only for testing.
+ void set_new_space_allocation_counter(size_t new_value) {
+ new_space_allocation_counter_ = new_value;
+ }
- Object* roots_[kRootListLength];
+ void UpdateOldGenerationAllocationCounter() {
+ old_generation_allocation_counter_ = OldGenerationAllocationCounter();
+ }
- size_t code_range_size_;
- int reserved_semispace_size_;
- int max_semi_space_size_;
- int initial_semispace_size_;
- int target_semispace_size_;
- intptr_t max_old_generation_size_;
- intptr_t initial_old_generation_size_;
- bool old_generation_size_configured_;
- intptr_t max_executable_size_;
- intptr_t maximum_committed_;
+ size_t OldGenerationAllocationCounter() {
+ return old_generation_allocation_counter_ + PromotedSinceLastGC();
+ }
- // For keeping track of how much data has survived
- // scavenge since last new space expansion.
- int survived_since_last_expansion_;
+ // This should be used only for testing.
+ void set_old_generation_allocation_counter(size_t new_value) {
+ old_generation_allocation_counter_ = new_value;
+ }
- // ... and since the last scavenge.
- int survived_last_scavenge_;
+ size_t PromotedSinceLastGC() {
+ return PromotedSpaceSizeOfObjects() - old_generation_size_at_last_gc_;
+ }
- int always_allocate_scope_depth_;
+ // Update GC statistics that are tracked on the Heap.
+ void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator,
+ double marking_time);
- // For keeping track of context disposals.
- int contexts_disposed_;
+ // Returns maximum GC pause.
+ double get_max_gc_pause() { return max_gc_pause_; }
- int global_ic_age_;
+ // Returns maximum size of objects alive after GC.
+ intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }
- int scan_on_scavenge_pages_;
+ // Returns minimal interval between two subsequent collections.
+ double get_min_in_mutator() { return min_in_mutator_; }
- NewSpace new_space_;
- OldSpace* old_space_;
- OldSpace* code_space_;
- MapSpace* map_space_;
- LargeObjectSpace* lo_space_;
- HeapState gc_state_;
- int gc_post_processing_depth_;
- Address new_space_top_after_last_gc_;
+ int gc_count() const { return gc_count_; }
- // Returns the amount of external memory registered since last global gc.
- int64_t PromotedExternalMemorySize();
+ // Returns the size of objects residing in non new spaces.
+ intptr_t PromotedSpaceSizeOfObjects();
- // How many "runtime allocations" happened.
- uint32_t allocations_count_;
+ double total_regexp_code_generated() { return total_regexp_code_generated_; }
+ void IncreaseTotalRegexpCodeGenerated(int size) {
+ total_regexp_code_generated_ += size;
+ }
- // Running hash over allocations performed.
- uint32_t raw_allocations_hash_;
+ void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) {
+ if (is_crankshafted) {
+ crankshaft_codegen_bytes_generated_ += size;
+ } else {
+ full_codegen_bytes_generated_ += size;
+ }
+ }
- // Countdown counter, dumps allocation hash when 0.
- uint32_t dump_allocations_hash_countdown_;
+ // ===========================================================================
+ // Prologue/epilogue callback methods. ======================================
+ // ===========================================================================
- // How many mark-sweep collections happened.
- unsigned int ms_count_;
+ void AddGCPrologueCallback(v8::Isolate::GCCallback callback,
+ GCType gc_type_filter, bool pass_isolate = true);
+ void RemoveGCPrologueCallback(v8::Isolate::GCCallback callback);
- // How many gc happened.
- unsigned int gc_count_;
+ void AddGCEpilogueCallback(v8::Isolate::GCCallback callback,
+ GCType gc_type_filter, bool pass_isolate = true);
+ void RemoveGCEpilogueCallback(v8::Isolate::GCCallback callback);
- // For post mortem debugging.
- static const int kRememberedUnmappedPages = 128;
- int remembered_unmapped_pages_index_;
- Address remembered_unmapped_pages_[kRememberedUnmappedPages];
+ void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
+ void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);
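These four methods are the heap-side half of the usual observer pattern: a callback is registered together with a GCType filter, and CallGCPrologueCallbacks/CallGCEpilogueCallbacks invoke each entry whose filter matches the collection being run. A sketch of a matching embedder callback, registered through the public v8::Isolate API (the body is illustrative):

    #include "v8.h"

    // Observer matching the v8::Isolate::GCCallback signature used above.
    void OnFullGCPrologue(v8::Isolate* isolate, v8::GCType type,
                          v8::GCCallbackFlags flags) {
      // Only full GCs reach here because of the registration filter below,
      // e.g. a good point to drop embedder-side caches before mark-compact.
    }

    // Registration, filtered so scavenges do not invoke the callback:
    //   isolate->AddGCPrologueCallback(OnFullGCPrologue,
    //                                  v8::kGCTypeMarkSweepCompact);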
-#define ROOT_ACCESSOR(type, name, camel_name) \
- inline void set_##name(type* value);
- ROOT_LIST(ROOT_ACCESSOR)
-#undef ROOT_ACCESSOR
+ // ===========================================================================
+ // Allocation methods. =======================================================
+ // ===========================================================================
-#ifdef DEBUG
- // If the --gc-interval flag is set to a positive value, this
- // variable holds the value indicating the number of allocations
- // remain until the next failure and garbage collection.
- int allocation_timeout_;
-#endif // DEBUG
+ // Returns a deep copy of the JavaScript object.
+ // Properties and elements are copied too.
+ // Optionally takes an AllocationSite to be appended in an AllocationMemento.
+ MUST_USE_RESULT AllocationResult CopyJSObject(JSObject* source,
+ AllocationSite* site = NULL);
- // Limit that triggers a global GC on the next (normally caused) GC. This
- // is checked when we have already decided to do a GC to help determine
- // which collector to invoke, before expanding a paged space in the old
- // generation and on every allocation in large object space.
- intptr_t old_generation_allocation_limit_;
+ // Creates a filler object and returns a heap object immediately after it.
+ MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object,
+ int filler_size);
+ // Creates a filler object if needed for alignment and returns a heap object
+ // immediately after it. If any space is left after the returned object,
+ // another filler object is created so the over-allocated memory is iterable.
+ MUST_USE_RESULT HeapObject* AlignWithFiller(HeapObject* object,
+ int object_size,
+ int allocation_size,
+ AllocationAlignment alignment);
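The filler mechanics keep the heap iterable: AlignWithFiller() is handed a block of allocation_size bytes (object_size plus worst-case padding), places a pre-filler to reach the requested alignment, and covers any tail slack with another filler. A hedged sketch of the padding arithmetic only, with an assumed 8-byte alignment standing in for the real AllocationAlignment constants:

    #include <cstdint>

    // Bytes of filler needed so that addr + fill is aligned; alignment
    // must be a power of two. Mirrors the kind of computation behind
    // AlignWithFiller().
    uintptr_t FillToAlign(uintptr_t addr, uintptr_t alignment) {
      uintptr_t misalignment = addr & (alignment - 1);
      return misalignment == 0 ? 0 : alignment - misalignment;
    }

    // Example: an 8-byte-aligned object at base 0x1004 needs 4 bytes of
    // pre-filler; any remaining slack becomes a trailing filler so a heap
    // walk never steps into unformatted memory.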
- // Indicates that an allocation has failed in the old generation since the
- // last GC.
- bool old_gen_exhausted_;
+// =============================================================================
- // Indicates that memory usage is more important than latency.
- // TODO(ulan): Merge it with memory reducer once chromium:490559 is fixed.
- bool optimize_for_memory_usage_;
+#ifdef VERIFY_HEAP
+ // Verify the heap is in its normal state before or after a GC.
+ void Verify();
+#endif
- // Indicates that inline bump-pointer allocation has been globally disabled
- // for all spaces. This is used to disable allocations in generated code.
- bool inline_allocation_disabled_;
+#ifdef DEBUG
+ void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
- // Weak list heads, threaded through the objects.
- // List heads are initialized lazily and contain the undefined_value at start.
- Object* native_contexts_list_;
- Object* allocation_sites_list_;
+ void TracePathToObjectFrom(Object* target, Object* root);
+ void TracePathToObject(Object* target);
+ void TracePathToGlobal();
- // List of encountered weak collections (JSWeakMap and JSWeakSet) during
- // marking. It is initialized during marking, destroyed after marking and
- // contains Smi(0) while marking is not active.
- Object* encountered_weak_collections_;
+ void Print();
+ void PrintHandles();
- Object* encountered_weak_cells_;
+ // Report heap statistics.
+ void ReportHeapStatistics(const char* title);
+ void ReportCodeStatistics(const char* title);
+#endif
- StoreBufferRebuilder store_buffer_rebuilder_;
+ private:
+ struct StrongRootsList;
struct StringTypeTable {
InstanceType type;
@@ -1811,10 +1679,6 @@ class Heap {
RootListIndex index;
};
- static const StringTypeTable string_type_table[];
- static const ConstantStringTable constant_string_table[];
- static const StructTable struct_table[];
-
struct GCCallbackPair {
GCCallbackPair(v8::Isolate::GCCallback callback, GCType gc_type,
bool pass_isolate)
@@ -1829,8 +1693,64 @@ class Heap {
bool pass_isolate;
};
- List<GCCallbackPair> gc_epilogue_callbacks_;
- List<GCCallbackPair> gc_prologue_callbacks_;
+ static const int kInitialStringTableSize = 2048;
+ static const int kInitialEvalCacheSize = 64;
+ static const int kInitialNumberStringCacheSize = 256;
+
+ static const int kRememberedUnmappedPages = 128;
+
+ static const StringTypeTable string_type_table[];
+ static const ConstantStringTable constant_string_table[];
+ static const StructTable struct_table[];
+
+ static const int kYoungSurvivalRateHighThreshold = 90;
+ static const int kYoungSurvivalRateAllowedDeviation = 15;
+ static const int kOldSurvivalRateLowThreshold = 10;
+
+ static const int kMaxMarkCompactsInIdleRound = 7;
+ static const int kIdleScavengeThreshold = 5;
+
+ static const int kAllocationSiteScratchpadSize = 256;
+
+ Heap();
+
+ static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
+ Heap* heap, Object** pointer);
+
+ static void ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
+ StoreBufferEvent event);
+
+ // Selects the proper allocation space depending on the given object
+ // size and pretenuring decision.
+ static AllocationSpace SelectSpace(int object_size, PretenureFlag pretenure) {
+ if (object_size > Page::kMaxRegularHeapObjectSize) return LO_SPACE;
+ return (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
+ }
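For concreteness, the decision table (the 1 MB figure is an assumption chosen to exceed Page::kMaxRegularHeapObjectSize, which is smaller than a page):

    // SelectSpace(16, NOT_TENURED)      -> NEW_SPACE  (small, young)
    // SelectSpace(16, TENURED)          -> OLD_SPACE  (small, pretenured)
    // SelectSpace(1 << 20, NOT_TENURED) -> LO_SPACE   (exceeds the regular
    //                                      object cap; pretenuring ignored)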
+
+ int current_gc_flags() { return current_gc_flags_; }
+
+ void set_current_gc_flags(int flags) {
+ current_gc_flags_ = flags;
+ DCHECK(!ShouldFinalizeIncrementalMarking() ||
+ !ShouldAbortIncrementalMarking());
+ }
+
+ inline bool ShouldReduceMemory() const {
+ return current_gc_flags_ & kReduceMemoryFootprintMask;
+ }
+
+ inline bool ShouldAbortIncrementalMarking() const {
+ return current_gc_flags_ & kAbortIncrementalMarkingMask;
+ }
+
+ inline bool ShouldFinalizeIncrementalMarking() const {
+ return current_gc_flags_ & kFinalizeIncrementalMarkingMask;
+ }
+
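The three predicates above are plain bitmask tests on current_gc_flags_, and the DCHECK in set_current_gc_flags() rules out requesting "finalize" and "abort" of incremental marking at once. A self-contained sketch of the idiom with stand-in bit values (the real kReduceMemoryFootprintMask and friends are Heap constants with their own values):

    // Stand-in flag bits; the values are illustrative, not V8's.
    enum GCFlags : int {
      kNoGCFlags = 0,
      kReduceMemoryFootprintMask = 1 << 0,
      kAbortIncrementalMarkingMask = 1 << 1,
      kFinalizeIncrementalMarkingMask = 1 << 2,
    };

    bool ShouldReduceMemory(int flags) {
      return (flags & kReduceMemoryFootprintMask) != 0;
    }

    // Requests compose with bitwise OR, e.g. a memory-reducing GC that
    // also finalizes marking:
    //   set_current_gc_flags(kReduceMemoryFootprintMask |
    //                        kFinalizeIncrementalMarkingMask);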
+#define ROOT_ACCESSOR(type, name, camel_name) \
+ inline void set_##name(type* value);
+ ROOT_LIST(ROOT_ACCESSOR)
+#undef ROOT_ACCESSOR
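ROOT_LIST(ROOT_ACCESSOR) is the X-macro pattern: ROOT_LIST expands its macro argument once per root entry, so a single declaration template stamps out one setter per root and the list can never drift out of sync with its accessors. A minimal self-contained version of the same mechanism (list contents hypothetical):

    // Master list: invokes V once per entry with (type, name, camel_name).
    #define DEMO_ROOT_LIST(V) \
      V(int, answer, Answer)  \
      V(double, ratio, Ratio)

    // One expansion: declares set_answer(int) and set_ratio(double).
    #define DECLARE_SETTER(type, name, camel_name) void set_##name(type value);
    DEMO_ROOT_LIST(DECLARE_SETTER)
    #undef DECLARE_SETTER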
// Code that should be run before and after each GC. Includes some
// reporting/verification activities when compiled with DEBUG set.
@@ -1858,32 +1778,205 @@ class Heap {
// over all objects. May cause a GC.
void MakeHeapIterable();
- // Performs garbage collection operation.
- // Returns whether there is a chance that another major GC could
- // collect more garbage.
- bool CollectGarbage(
- GarbageCollector collector, const char* gc_reason,
- const char* collector_reason,
- const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
+ // Performs a garbage collection operation.
+ // Returns whether there is a chance that another major GC could
+ // collect more garbage.
+ bool CollectGarbage(
+ GarbageCollector collector, const char* gc_reason,
+ const char* collector_reason,
+ const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
+
+ // Performs a garbage collection.
+ // Returns whether there is a chance another major GC could
+ // collect more garbage.
+ bool PerformGarbageCollection(
+ GarbageCollector collector,
+ const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
+
+ inline void UpdateOldSpaceLimits();
+
+ // Initializes a JSObject based on its map.
+ void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
+ Map* map);
+ void InitializeAllocationMemento(AllocationMemento* memento,
+ AllocationSite* allocation_site);
+
+ bool CreateInitialMaps();
+ void CreateInitialObjects();
+
+ // These Create*EntryStub functions are here and forced to not be inlined
+ // because of a gcc-4.4 bug that assigns wrong vtable entries.
+ NO_INLINE(void CreateJSEntryStub());
+ NO_INLINE(void CreateJSConstructEntryStub());
+
+ void CreateFixedStubs();
+
+ HeapObject* DoubleAlignForDeserialization(HeapObject* object, int size);
+
+ // Performs a minor collection in new generation.
+ void Scavenge();
+
+ // Commits from space if it is uncommitted.
+ void EnsureFromSpaceIsCommitted();
+
+ // Uncommit unused semi space.
+ bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
+
+ // Fill in bogus values in from space
+ void ZapFromSpace();
+
+ Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
+
+ // Performs a major collection in the whole heap.
+ void MarkCompact();
+
+ // Code to be run before and after mark-compact.
+ void MarkCompactPrologue();
+ void MarkCompactEpilogue();
+
+ void ProcessNativeContexts(WeakObjectRetainer* retainer);
+ void ProcessAllocationSites(WeakObjectRetainer* retainer);
+
+ // Deopts all code that contains allocation instructions of the given
+ // pretenure mode. Moreover, it clears the pretenuring allocation site
+ // statistics.
+ void ResetAllAllocationSitesDependentCode(PretenureFlag flag);
+
+ // Evaluates local pretenuring for the old space and calls
+ // ResetAllTenuredAllocationSitesDependentCode if too many objects died in
+ // the old space.
+ void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);
+
+ // Called on heap tear-down. Frees all remaining ArrayBuffer backing stores.
+ void TearDownArrayBuffers();
+
+ // These correspond to the non-Helper versions.
+ void RegisterNewArrayBufferHelper(std::map<void*, size_t>& live_buffers,
+ void* data, size_t length);
+ void UnregisterArrayBufferHelper(
+ std::map<void*, size_t>& live_buffers,
+ std::map<void*, size_t>& not_yet_discovered_buffers, void* data);
+ void RegisterLiveArrayBufferHelper(
+ std::map<void*, size_t>& not_yet_discovered_buffers, void* data);
+ size_t FreeDeadArrayBuffersHelper(
+ Isolate* isolate, std::map<void*, size_t>& live_buffers,
+ std::map<void*, size_t>& not_yet_discovered_buffers);
+ void TearDownArrayBuffersHelper(
+ Isolate* isolate, std::map<void*, size_t>& live_buffers,
+ std::map<void*, size_t>& not_yet_discovered_buffers);
+
+ // Record statistics before and after garbage collection.
+ void ReportStatisticsBeforeGC();
+ void ReportStatisticsAfterGC();
+
+ // Creates and installs the full-sized number string cache.
+ int FullSizeNumberStringCacheLength();
+ // Flush the number to string cache.
+ void FlushNumberStringCache();
+
+ // Sets used allocation sites entries to undefined.
+ void FlushAllocationSitesScratchpad();
+
+ // Initializes the allocation sites scratchpad with undefined values.
+ void InitializeAllocationSitesScratchpad();
+
+ // Adds an allocation site to the scratchpad if there is space left.
+ void AddAllocationSiteToScratchpad(AllocationSite* site,
+ ScratchpadSlotMode mode);
+
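The scratchpad methods above manage a small fixed-capacity buffer: sites are appended while space remains, flushed back to undefined after they are consumed, and simply dropped once the buffer is full. A hedged sketch of that lifecycle (kAllocationSiteScratchpadSize reappears below as a constant; the struct itself is illustrative, not V8's layout):

    // Illustrative fixed-capacity scratchpad with the same add/flush shape.
    struct Scratchpad {
      static const int kCapacity = 256;  // cf. kAllocationSiteScratchpadSize
      void* slots[kCapacity];
      int length = 0;

      bool Add(void* site) {
        if (length == kCapacity) return false;  // full: drop the site
        slots[length++] = site;
        return true;
      }
      void Flush() { length = 0; }  // the real code re-fills with undefined
    };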
+ // TODO(hpayer): Allocation site pretenuring may make this method obsolete.
+ // Re-visit incremental marking heuristics.
+ bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }
+
+ void ConfigureInitialOldGenerationSize();
+
+ void SelectScavengingVisitorsTable();
+
+ bool HasLowYoungGenerationAllocationRate();
+ bool HasLowOldGenerationAllocationRate();
+ double YoungGenerationMutatorUtilization();
+ double OldGenerationMutatorUtilization();
+
+ void ReduceNewSpaceSize();
+
+ bool TryFinalizeIdleIncrementalMarking(
+ double idle_time_in_ms, size_t size_of_objects,
+ size_t mark_compact_speed_in_bytes_per_ms);
+
+ GCIdleTimeHandler::HeapState ComputeHeapState();
+
+ bool PerformIdleTimeAction(GCIdleTimeAction action,
+ GCIdleTimeHandler::HeapState heap_state,
+ double deadline_in_ms);
+
+ void IdleNotificationEpilogue(GCIdleTimeAction action,
+ GCIdleTimeHandler::HeapState heap_state,
+ double start_ms, double deadline_in_ms);
+ void CheckAndNotifyBackgroundIdleNotification(double idle_time_in_ms,
+ double now_ms);
+
+ void ClearObjectStats(bool clear_last_time_stats = false);
+
+ inline void UpdateAllocationsHash(HeapObject* object);
+ inline void UpdateAllocationsHash(uint32_t value);
+ inline void PrintAlloctionsHash();
+
+ void AddToRingBuffer(const char* string);
+ void GetFromRingBuffer(char* buffer);
+
+ // ===========================================================================
+ // Allocation methods. =======================================================
+ // ===========================================================================
+
+ // Allocates a JS Map in the heap.
+ MUST_USE_RESULT AllocationResult
+ AllocateMap(InstanceType instance_type, int instance_size,
+ ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
+
+ // Allocates and initializes a new JavaScript object based on a
+ // constructor.
+ // If allocation_site is non-null, then a memento is emitted after the object
+ // that points to the site.
+ MUST_USE_RESULT AllocationResult AllocateJSObject(
+ JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED,
+ AllocationSite* allocation_site = NULL);
+
+ // Allocates and initializes a new JavaScript object based on a map.
+ // Passing an allocation site means that a memento will be created that
+ // points to the site.
+ MUST_USE_RESULT AllocationResult
+ AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED,
+ AllocationSite* allocation_site = NULL);
+
+ // Allocates a HeapNumber from value.
+ MUST_USE_RESULT AllocationResult
+ AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE,
+ PretenureFlag pretenure = NOT_TENURED);
+
+// Allocates SIMD values from the given lane values.
+#define SIMD_ALLOCATE_DECLARATION(TYPE, Type, type, lane_count, lane_type) \
+ AllocationResult Allocate##Type(lane_type lanes[lane_count], \
+ PretenureFlag pretenure = NOT_TENURED);
+ SIMD128_TYPES(SIMD_ALLOCATE_DECLARATION)
+#undef SIMD_ALLOCATE_DECLARATION
+
+ // Allocates a byte array of the specified length
+ MUST_USE_RESULT AllocationResult
+ AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED);
- // Performs garbage collection
- // Returns whether there is a chance another major GC could
- // collect more garbage.
- bool PerformGarbageCollection(
- GarbageCollector collector,
- const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
+ // Allocates a bytecode array with given contents.
+ MUST_USE_RESULT AllocationResult
+ AllocateBytecodeArray(int length, const byte* raw_bytecodes, int frame_size);
- inline void UpdateOldSpaceLimits();
+ // Copy the code and scope info part of the code object, but insert
+ // the provided data as the relocation information.
+ MUST_USE_RESULT AllocationResult CopyCode(Code* code,
+ Vector<byte> reloc_info);
- // Selects the proper allocation space depending on the given object
- // size and pretenuring decision.
- static AllocationSpace SelectSpace(int object_size,
- PretenureFlag pretenure) {
- if (object_size > Page::kMaxRegularHeapObjectSize) return LO_SPACE;
- return (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
- }
+ MUST_USE_RESULT AllocationResult CopyCode(Code* code);
- HeapObject* DoubleAlignForDeserialization(HeapObject* object, int size);
+ // Allocates a fixed array initialized with undefined values
+ MUST_USE_RESULT AllocationResult
+ AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED);
// Allocate an uninitialized object. The memory is non-executable if the
// hardware and OS allow. This is the single choke-point for allocations
@@ -1902,12 +1995,6 @@ class Heap {
MUST_USE_RESULT AllocationResult
AllocatePartialMap(InstanceType instance_type, int instance_size);
- // Initializes a JSObject based on its map.
- void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
- Map* map);
- void InitializeAllocationMemento(AllocationMemento* memento,
- AllocationSite* allocation_site);
-
// Allocate a block of memory in the given space (filled with a filler).
// Used as a fall-back for generated code when the space is full.
MUST_USE_RESULT AllocationResult
@@ -1935,9 +2022,6 @@ class Heap {
MUST_USE_RESULT AllocationResult
AllocateRawTwoByteString(int length, PretenureFlag pretenure);
- bool CreateInitialMaps();
- void CreateInitialObjects();
-
// Allocates an internalized string in old space based on the character
// stream.
MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8(
@@ -2004,13 +2088,6 @@ class Heap {
MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray(
int length, PretenureFlag pretenure = NOT_TENURED);
- // These five Create*EntryStub functions are here and forced to not be inlined
- // because of a gcc-4.4 bug that assigns wrong vtable entries.
- NO_INLINE(void CreateJSEntryStub());
- NO_INLINE(void CreateJSConstructEntryStub());
-
- void CreateFixedStubs();
-
// Allocate empty fixed array.
MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray();
@@ -2040,94 +2117,125 @@ class Heap {
MUST_USE_RESULT AllocationResult InternalizeString(String* str);
- // Performs a minor collection in new generation.
- void Scavenge();
+ // The amount of external memory registered through the API kept alive
+ // by global handles.
+ int64_t amount_of_external_allocated_memory_;
- // Commits from space if it is uncommitted.
- void EnsureFromSpaceIsCommitted();
+ // Caches the amount of external memory registered at the last global gc.
+ int64_t amount_of_external_allocated_memory_at_last_global_gc_;
- // Uncommit unused semi space.
- bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
+ // This can be calculated directly from a pointer to the heap; however, it is
+ // more expedient to get at the isolate directly from within Heap methods.
+ Isolate* isolate_;
- // Fill in bogus values in from space
- void ZapFromSpace();
+ Object* roots_[kRootListLength];
- static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
- Heap* heap, Object** pointer);
+ size_t code_range_size_;
+ int reserved_semispace_size_;
+ int max_semi_space_size_;
+ int initial_semispace_size_;
+ int target_semispace_size_;
+ intptr_t max_old_generation_size_;
+ intptr_t initial_old_generation_size_;
+ bool old_generation_size_configured_;
+ intptr_t max_executable_size_;
+ intptr_t maximum_committed_;
- Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
- static void ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
- StoreBufferEvent event);
+ // For keeping track of how much data has survived
+ // scavenge since last new space expansion.
+ int survived_since_last_expansion_;
- // Performs a major collection in the whole heap.
- void MarkCompact();
+ // ... and since the last scavenge.
+ int survived_last_scavenge_;
- // Code to be run before and after mark-compact.
- void MarkCompactPrologue();
- void MarkCompactEpilogue();
+ int always_allocate_scope_depth_;
- void ProcessNativeContexts(WeakObjectRetainer* retainer);
- void ProcessAllocationSites(WeakObjectRetainer* retainer);
+ // For keeping track of context disposals.
+ int contexts_disposed_;
- // Deopts all code that contains allocation instruction which are tenured or
- // not tenured. Moreover it clears the pretenuring allocation site statistics.
- void ResetAllAllocationSitesDependentCode(PretenureFlag flag);
+ int global_ic_age_;
- // Evaluates local pretenuring for the old space and calls
- // ResetAllTenuredAllocationSitesDependentCode if too many objects died in
- // the old space.
- void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);
+ int scan_on_scavenge_pages_;
- // Called on heap tear-down. Frees all remaining ArrayBuffer backing stores.
- void TearDownArrayBuffers();
+ NewSpace new_space_;
+ OldSpace* old_space_;
+ OldSpace* code_space_;
+ MapSpace* map_space_;
+ LargeObjectSpace* lo_space_;
+ HeapState gc_state_;
+ int gc_post_processing_depth_;
+ Address new_space_top_after_last_gc_;
- // These correspond to the non-Helper versions.
- void RegisterNewArrayBufferHelper(std::map<void*, size_t>& live_buffers,
- void* data, size_t length);
- void UnregisterArrayBufferHelper(
- std::map<void*, size_t>& live_buffers,
- std::map<void*, size_t>& not_yet_discovered_buffers, void* data);
- void RegisterLiveArrayBufferHelper(
- std::map<void*, size_t>& not_yet_discovered_buffers, void* data);
- size_t FreeDeadArrayBuffersHelper(
- Isolate* isolate, std::map<void*, size_t>& live_buffers,
- std::map<void*, size_t>& not_yet_discovered_buffers);
- void TearDownArrayBuffersHelper(
- Isolate* isolate, std::map<void*, size_t>& live_buffers,
- std::map<void*, size_t>& not_yet_discovered_buffers);
+ // Returns the amount of external memory registered since last global gc.
+ int64_t PromotedExternalMemorySize();
- // Record statistics before and after garbage collection.
- void ReportStatisticsBeforeGC();
- void ReportStatisticsAfterGC();
+ // How many "runtime allocations" happened.
+ uint32_t allocations_count_;
- // Total RegExp code ever generated
- double total_regexp_code_generated_;
+ // Running hash over allocations performed.
+ uint32_t raw_allocations_hash_;
- int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];
+ // Countdown counter, dumps allocation hash when 0.
+ uint32_t dump_allocations_hash_countdown_;
- GCTracer* tracer_;
+ // How many mark-sweep collections happened.
+ unsigned int ms_count_;
- // Creates and installs the full-sized number string cache.
- int FullSizeNumberStringCacheLength();
- // Flush the number to string cache.
- void FlushNumberStringCache();
+ // How many gc happened.
+ unsigned int gc_count_;
- // Sets used allocation sites entries to undefined.
- void FlushAllocationSitesScratchpad();
+ // For post mortem debugging.
+ int remembered_unmapped_pages_index_;
+ Address remembered_unmapped_pages_[kRememberedUnmappedPages];
- // Initializes the allocation sites scratchpad with undefined values.
- void InitializeAllocationSitesScratchpad();
+#ifdef DEBUG
+ // If the --gc-interval flag is set to a positive value, this
+ // variable holds the number of allocations remaining until the
+ // next failure and garbage collection.
+ int allocation_timeout_;
+#endif // DEBUG
- // Adds an allocation site to the scratchpad if there is space left.
- void AddAllocationSiteToScratchpad(AllocationSite* site,
- ScratchpadSlotMode mode);
+ // Limit that triggers a global GC on the next (normally caused) GC. This
+ // is checked when we have already decided to do a GC to help determine
+ // which collector to invoke, before expanding a paged space in the old
+ // generation and on every allocation in large object space.
+ intptr_t old_generation_allocation_limit_;
- void UpdateSurvivalStatistics(int start_new_space_size);
+ // Indicates that an allocation has failed in the old generation since the
+ // last GC.
+ bool old_gen_exhausted_;
- static const int kYoungSurvivalRateHighThreshold = 90;
- static const int kYoungSurvivalRateAllowedDeviation = 15;
+ // Indicates that memory usage is more important than latency.
+ // TODO(ulan): Merge it with memory reducer once chromium:490559 is fixed.
+ bool optimize_for_memory_usage_;
- static const int kOldSurvivalRateLowThreshold = 10;
+ // Indicates that inline bump-pointer allocation has been globally disabled
+ // for all spaces. This is used to disable allocations in generated code.
+ bool inline_allocation_disabled_;
+
+ // Weak list heads, threaded through the objects.
+ // List heads are initialized lazily and contain the undefined_value at start.
+ Object* native_contexts_list_;
+ Object* allocation_sites_list_;
+
+ // List of encountered weak collections (JSWeakMap and JSWeakSet) during
+ // marking. It is initialized during marking, destroyed after marking and
+ // contains Smi(0) while marking is not active.
+ Object* encountered_weak_collections_;
+
+ Object* encountered_weak_cells_;
+
+ StoreBufferRebuilder store_buffer_rebuilder_;
+
+ List<GCCallbackPair> gc_epilogue_callbacks_;
+ List<GCCallbackPair> gc_prologue_callbacks_;
+
+ // Total RegExp code ever generated
+ double total_regexp_code_generated_;
+
+ int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];
+
+ GCTracer* tracer_;
int high_survival_rate_period_length_;
intptr_t promoted_objects_size_;
@@ -2146,46 +2254,6 @@ class Heap {
// of the allocation site.
unsigned int maximum_size_scavenges_;
- // TODO(hpayer): Allocation site pretenuring may make this method obsolete.
- // Re-visit incremental marking heuristics.
- bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }
-
- void ConfigureInitialOldGenerationSize();
-
- void SelectScavengingVisitorsTable();
-
- bool HasLowYoungGenerationAllocationRate();
- bool HasLowOldGenerationAllocationRate();
- double YoungGenerationMutatorUtilization();
- double OldGenerationMutatorUtilization();
-
- void ReduceNewSpaceSize();
-
- bool TryFinalizeIdleIncrementalMarking(
- double idle_time_in_ms, size_t size_of_objects,
- size_t mark_compact_speed_in_bytes_per_ms);
-
- GCIdleTimeHandler::HeapState ComputeHeapState();
-
- bool PerformIdleTimeAction(GCIdleTimeAction action,
- GCIdleTimeHandler::HeapState heap_state,
- double deadline_in_ms);
-
- void IdleNotificationEpilogue(GCIdleTimeAction action,
- GCIdleTimeHandler::HeapState heap_state,
- double start_ms, double deadline_in_ms);
- void CheckAndNotifyBackgroundIdleNotification(double idle_time_in_ms,
- double now_ms);
-
- void ClearObjectStats(bool clear_last_time_stats = false);
-
- inline void UpdateAllocationsHash(HeapObject* object);
- inline void UpdateAllocationsHash(uint32_t value);
- inline void PrintAlloctionsHash();
-
- void AddToRingBuffer(const char* string);
- void GetFromRingBuffer(char* buffer);
-
// Object counts and used memory by InstanceType
size_t object_counts_[OBJECT_STATS_COUNT];
size_t object_counts_last_time_[OBJECT_STATS_COUNT];
@@ -2248,7 +2316,6 @@ class Heap {
// deoptimization triggered by garbage collection.
int gcs_since_last_deopt_;
- static const int kAllocationSiteScratchpadSize = 256;
int allocation_sites_scratchpad_length_;
char trace_ring_buffer_[kTraceRingBufferSize];
@@ -2258,9 +2325,6 @@ class Heap {
bool ring_buffer_full_;
size_t ring_buffer_end_;
- static const int kMaxMarkCompactsInIdleRound = 7;
- static const int kIdleScavengeThreshold = 5;
-
// Shared state read by the scavenge collector and set by ScavengeObject.
PromotionQueue promotion_queue_;
@@ -2304,7 +2368,6 @@ class Heap {
std::map<void*, size_t> live_array_buffers_for_scavenge_;
std::map<void*, size_t> not_yet_discovered_array_buffers_for_scavenge_;
- struct StrongRootsList;
StrongRootsList* strong_roots_list_;
friend class AlwaysAllocateScope;