Index: src/heap.h
===================================================================
--- src/heap.h (revision 9327)
+++ src/heap.h (working copy)
@@ -32,11 +32,15 @@
 #include "allocation.h"
 #include "globals.h"
+#include "incremental-marking.h"
 #include "list.h"
 #include "mark-compact.h"
+#include "objects-visiting.h"
 #include "spaces.h"
 #include "splay-tree-inl.h"
+#include "store-buffer.h"
 #include "v8-counters.h"
+#include "v8globals.h"
 namespace v8 {
 namespace internal {
@@ -49,13 +53,12 @@
 // Defines all the roots in Heap.
 #define STRONG_ROOT_LIST(V) \
-  /* Put the byte array map early. We need it to be in place by the time */ \
-  /* the deserializer hits the next page, since it wants to put a byte */ \
-  /* array in the unused space at the end of the page. */ \
   V(Map, byte_array_map, ByteArrayMap) \
+  V(Map, free_space_map, FreeSpaceMap) \
   V(Map, one_pointer_filler_map, OnePointerFillerMap) \
   V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
   /* Cluster the most popular ones in a few cache lines here at the top. */ \
+  V(Smi, store_buffer_top, StoreBufferTop) \
   V(Object, undefined_value, UndefinedValue) \
   V(Object, the_hole_value, TheHoleValue) \
   V(Object, null_value, NullValue) \
@@ -238,12 +241,28 @@
 typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
                                                       Object** pointer);
-typedef bool (*DirtyRegionCallback)(Heap* heap,
-                                    Address start,
-                                    Address end,
-                                    ObjectSlotCallback copy_object_func);
+class StoreBufferRebuilder {
+ public:
+  explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
+      : store_buffer_(store_buffer) {
+  }
+  void Callback(MemoryChunk* page, StoreBufferEvent event);
+ private:
+  StoreBuffer* store_buffer_;
+
+  // We record in this variable how full the store buffer was when we started
+  // iterating over the current page, finding pointers to new space. If the
+  // store buffer overflows again we can exempt the page from the store buffer
+  // by rewinding to this point instead of having to search the store buffer.
+  Object*** start_of_current_page_;
+  // The current page we are scanning in the store buffer iterator.
+  MemoryChunk* current_page_;
+};
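
(The rewind optimization described in the comment above is easiest to see in code. The sketch below is illustrative only and is not part of this patch; the real body lives in heap.cc, and the event names and the StoreBuffer Top()/SetTop() calls are assumed from the store-buffer.h introduced in this change.)

    void StoreBufferRebuilder::Callback(MemoryChunk* page,
                                        StoreBufferEvent event) {
      if (event == kStoreBufferStartScanningPagesEvent) {
        start_of_current_page_ = NULL;
        current_page_ = NULL;
      } else if (event == kStoreBufferScanningPageEvent) {
        // Remember how full the buffer was when this page's scan began, so
        // that a later overflow can discard just this page's entries.
        start_of_current_page_ = store_buffer_->Top();
        current_page_ = page;
      } else if (event == kStoreBufferFullEvent) {
        // Overflow while scanning current_page_: mark the page for
        // scan-on-scavenge (its pointers then need not sit in the buffer
        // at all) and rewind to where this page's entries began.
        current_page_->set_scan_on_scavenge(true);
        store_buffer_->SetTop(start_of_current_page_);
      }
    }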
+
+
+
 // The all static Heap captures the interface to the global object heap.
 // All JavaScript contexts by this process share the same object heap.
@@ -259,22 +278,37 @@
   PromotionQueue() : front_(NULL), rear_(NULL) { }
   void Initialize(Address start_address) {
+    // Assumes that a NewSpacePage exactly fits a number of promotion queue
+    // entries (where each is a pair of intptr_t). This allows us to simplify
+    // the test for when to switch pages.
+    ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
+           == 0);
+    ASSERT(NewSpacePage::IsAtEnd(start_address));
     front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
   }
-  bool is_empty() { return front_ <= rear_; }
+  bool is_empty() { return front_ == rear_; }
   inline void insert(HeapObject* target, int size);
   void remove(HeapObject** target, int* size) {
+    ASSERT(!is_empty());
+    if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
+      NewSpacePage* front_page =
+          NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
+      ASSERT(!front_page->prev_page()->is_anchor());
+      front_ =
+          reinterpret_cast<intptr_t*>(front_page->prev_page()->body_limit());
+    }
     *target = reinterpret_cast<HeapObject*>(*(--front_));
     *size = static_cast<int>(*(--front_));
     // Assert no underflow.
-    ASSERT(front_ >= rear_);
+    SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
+                                reinterpret_cast<Address>(front_));
   }
  private:
-  // The front of the queue is higher in memory than the rear.
+  // The front of the queue is higher in the memory page chain than the rear.
   intptr_t* front_;
   intptr_t* rear_;
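
(The divisibility ASSERT in Initialize() is what makes the page hop above cheap: every entry is a (target, size) pair of intptr_t, so if the page body size is a multiple of 2 * kPointerSize an entry can never straddle a page boundary, and the single IsAtStart() test in remove() suffices. For illustration, here is a sketch of the matching insert() declared above; the actual inline definition is elsewhere in this change and may differ.)

    void PromotionQueue::insert(HeapObject* target, int size) {
      if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(rear_))) {
        // rear_ reached the start of a page body; continue at the limit
        // of the previous page in the semispace chain, mirroring remove().
        NewSpacePage* rear_page =
            NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
        ASSERT(!rear_page->prev_page()->is_anchor());
        rear_ = reinterpret_cast<intptr_t*>(
            rear_page->prev_page()->body_limit());
      }
      *(--rear_) = reinterpret_cast<intptr_t>(target);
      *(--rear_) = size;
    }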
@@ -282,6 +316,11 @@
 };
+typedef void (*ScavengingCallback)(Map* map,
+                                   HeapObject** slot,
+                                   HeapObject* object);
+
+
 // External strings table is a place where all external strings are
 // registered. We need to keep track of such strings to properly
 // finalize them.
@@ -327,8 +366,8 @@
   // Configure heap size before setup. Return false if the heap has been
   // setup already.
   bool ConfigureHeap(int max_semispace_size,
-                     int max_old_gen_size,
-                     int max_executable_size);
+                     intptr_t max_old_gen_size,
+                     intptr_t max_executable_size);
   bool ConfigureHeapDefault();
   // Initializes the global object heap. If create_heap_objects is true,
@@ -885,13 +924,24 @@
   // collect more garbage.
   inline bool CollectGarbage(AllocationSpace space);
-  // Performs a full garbage collection. Force compaction if the
-  // parameter is true.
-  void CollectAllGarbage(bool force_compaction);
+  static const int kNoGCFlags = 0;
+  static const int kMakeHeapIterableMask = 1;
+  // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
+  // non-zero, then the slower precise sweeper is used, which leaves the heap
+  // in a state where we can iterate over the heap visiting all objects.
+  void CollectAllGarbage(int flags);
+
   // Last hope GC, should try to squeeze as much as possible.
   void CollectAllAvailableGarbage();
+  // Check whether the heap is currently iterable.
+  bool IsHeapIterable();
+
+  // Ensure that we have swept all spaces in such a way that we can iterate
+  // over all objects. May cause a GC.
+  void EnsureHeapIsIterable();
+
   // Notify the heap that a context has been disposed.
   int NotifyContextDisposed() { return ++contexts_disposed_; }
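
(A hypothetical call site for the flags API introduced in the hunk above, showing how the bit mask replaces the old force_compaction boolean; illustrative only.)

    // An ordinary full GC; lazy sweeping may leave pages only partially
    // swept afterwards.
    heap->CollectAllGarbage(Heap::kNoGCFlags);

    // A full GC before walking every object, e.g. with a HeapIterator;
    // forces the precise sweeper so all spaces become iterable.
    heap->CollectAllGarbage(Heap::kMakeHeapIterableMask);

    // Or, when a GC may not even be needed:
    heap->EnsureHeapIsIterable();  // Collects only if some space is unswept.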
@@ -899,6 +949,20 @@
   // ensure correct callback for weak global handles.
   void PerformScavenge();
+  inline void increment_scan_on_scavenge_pages() {
+    scan_on_scavenge_pages_++;
+    if (FLAG_gc_verbose) {
+      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
+    }
+  }
+
+  inline void decrement_scan_on_scavenge_pages() {
+    scan_on_scavenge_pages_--;
+    if (FLAG_gc_verbose) {
+      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
+    }
+  }
+
   PromotionQueue* promotion_queue() { return &promotion_queue_; }
 #ifdef DEBUG
@@ -925,6 +989,8 @@
   // Heap root getters. We have versions with and without type::cast() here.
   // You can't use type::cast during GC because the assert fails.
+  // TODO(1490): Try removing the unchecked accessors, now that GC marking does
+  // not corrupt the stack.
 #define ROOT_ACCESSOR(type, name, camel_name) \
   type* name() { \
     return type::cast(roots_[k##camel_name##RootIndex]); \
@@ -965,60 +1031,31 @@
   // Iterates over all the other roots in the heap.
   void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
-  enum ExpectedPageWatermarkState {
-    WATERMARK_SHOULD_BE_VALID,
-    WATERMARK_CAN_BE_INVALID
-  };
-
-  // For each dirty region on a page in use from an old space call
-  // visit_dirty_region callback.
-  // If either visit_dirty_region or callback can cause an allocation
-  // in old space and changes in allocation watermark then
-  // can_preallocate_during_iteration should be set to true.
-  // All pages will be marked as having invalid watermark upon
-  // iteration completion.
-  void IterateDirtyRegions(
-      PagedSpace* space,
-      DirtyRegionCallback visit_dirty_region,
-      ObjectSlotCallback callback,
-      ExpectedPageWatermarkState expected_page_watermark_state);
-
-  // Interpret marks as a bitvector of dirty marks for regions of size
-  // Page::kRegionSize aligned by Page::kRegionAlignmentMask and covering
-  // memory interval from start to top. For each dirty region call a
-  // visit_dirty_region callback. Return updated bitvector of dirty marks.
-  uint32_t IterateDirtyRegions(uint32_t marks,
-                               Address start,
-                               Address end,
-                               DirtyRegionCallback visit_dirty_region,
-                               ObjectSlotCallback callback);
-
   // Iterate pointers to from semispace of new space found in memory interval
   // from start to end.
-  // Update dirty marks for page containing start address.
   void IterateAndMarkPointersToFromSpace(Address start,
                                          Address end,
                                          ObjectSlotCallback callback);
   // Iterate pointers to new space found in memory interval from start to end.
-  // Return true if pointers to new space was found.
-  static bool IteratePointersInDirtyRegion(Heap* heap,
-                                           Address start,
-                                           Address end,
-                                           ObjectSlotCallback callback);
+  static void IteratePointersToNewSpace(Heap* heap,
+                                        Address start,
+                                        Address end,
+                                        ObjectSlotCallback callback);
   // Iterate pointers to new space found in memory interval from start to end.
   // This interval is considered to belong to the map space.
-  // Return true if pointers to new space was found.
-  static bool IteratePointersInDirtyMapsRegion(Heap* heap,
-                                               Address start,
-                                               Address end,
-                                               ObjectSlotCallback callback);
+  static void IteratePointersFromMapsToNewSpace(Heap* heap,
+                                                Address start,
+                                                Address end,
+                                                ObjectSlotCallback callback);
   // Returns whether the object resides in new space.
   inline bool InNewSpace(Object* object);
+  inline bool InNewSpace(Address addr);
+  inline bool InNewSpacePage(Address addr);
   inline bool InFromSpace(Object* object);
   inline bool InToSpace(Object* object);
@@ -1057,12 +1094,20 @@
     roots_[kEmptyScriptRootIndex] = script;
   }
+  void public_set_store_buffer_top(Address* top) {
+    roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
+  }
+
   // Update the next script id.
   inline void SetLastScriptId(Object* last_script_id);
   // Generated code can embed this address to get access to the roots.
   Object** roots_address() { return roots_; }
+  Address* store_buffer_top_address() {
+    return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
+  }
+
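(Keeping the store buffer top in the roots array lets generated code implement the new-space write barrier as an append-and-bump through this address. The C++ rendering below is an editor's sketch of what the emitted code does, not an API from this patch; the helper name is invented and overflow handling is omitted.)

    inline void RecordWriteSketch(Heap* heap, Address slot) {
      // The root slot holds the current top of the store buffer.
      Address* top =
          reinterpret_cast<Address*>(*heap->store_buffer_top_address());
      *top++ = slot;  // Append the address of the updated slot.
      *heap->store_buffer_top_address() = reinterpret_cast<Address>(top);
      // When the buffer fills up, a limit check (or a guard page) diverts
      // to the store buffer's slow path, which compacts and deduplicates.
    }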
   // Get address of global contexts list for serialization support.
   Object** global_contexts_list_address() {
     return &global_contexts_list_;
@@ -1075,6 +1120,10 @@
   // Verify the heap is in its normal state before or after a GC.
   void Verify();
+  void OldPointerSpaceCheckStoreBuffer();
+  void MapSpaceCheckStoreBuffer();
+  void LargeObjectSpaceCheckStoreBuffer();
+
   // Report heap statistics.
   void ReportHeapStatistics(const char* title);
   void ReportCodeStatistics(const char* title);
@@ -1170,24 +1219,53 @@
   MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length,
                                                      PretenureFlag pretenure);
+  inline intptr_t PromotedTotalSize() {
+    return PromotedSpaceSize() + PromotedExternalMemorySize();
+  }
+
   // True if we have reached the allocation limit in the old generation that
   // should force the next GC (caused normally) to be a full one.
-  bool OldGenerationPromotionLimitReached() {
-    return (PromotedSpaceSize() + PromotedExternalMemorySize())
-           > old_gen_promotion_limit_;
+  inline bool OldGenerationPromotionLimitReached() {
+    return PromotedTotalSize() > old_gen_promotion_limit_;
   }
-  intptr_t OldGenerationSpaceAvailable() {
-    return old_gen_allocation_limit_ -
-           (PromotedSpaceSize() + PromotedExternalMemorySize());
+  inline intptr_t OldGenerationSpaceAvailable() {
+    return old_gen_allocation_limit_ - PromotedTotalSize();
   }
-  // True if we have reached the allocation limit in the old generation that
-  // should artificially cause a GC right now.
-  bool OldGenerationAllocationLimitReached() {
-    return OldGenerationSpaceAvailable() < 0;
+  static const intptr_t kMinimumPromotionLimit = 5 * Page::kPageSize;
+  static const intptr_t kMinimumAllocationLimit =
+      8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
+
+  // When we sweep lazily we initially guess that there is no garbage on the
+  // heap and set the limits for the next GC accordingly. As we sweep we find
+  // out that some of the pages contained garbage and we have to adjust
+  // downwards the size of the heap. This means the limits that control the
+  // timing of the next GC also need to be adjusted downwards.
+  void LowerOldGenLimits(intptr_t adjustment) {
+    size_of_old_gen_at_last_old_space_gc_ -= adjustment;
+    old_gen_promotion_limit_ =
+        OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
+    old_gen_allocation_limit_ =
+        OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
   }
+  intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
+    intptr_t limit =
+        Max(old_gen_size + old_gen_size / 3, kMinimumPromotionLimit);
+    limit += new_space_.Capacity();
+    limit *= old_gen_limit_factor_;
+    return limit;
+  }
+
+  intptr_t OldGenAllocationLimit(intptr_t old_gen_size) {
+    intptr_t limit =
+        Max(old_gen_size + old_gen_size / 2, kMinimumAllocationLimit);
+    limit += new_space_.Capacity();
+    limit *= old_gen_limit_factor_;
+    return limit;
+  }
+
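(A worked example of the two limits, assuming 1 MB pages, an 8 MB new-space capacity, and old_gen_limit_factor_ == 1; the numbers are illustrative.)

    // size_of_old_gen_at_last_old_space_gc_ = 30 MB:
    //   OldGenPromotionLimit  = Max(30 + 30/3, 5 MB) + 8 = 48 MB
    //   OldGenAllocationLimit = Max(30 + 30/2, 8 MB) + 8 = 53 MB
    // If the heuristics raise old_gen_limit_factor_ to 3, both limits
    // scale up accordingly: (40 + 8) * 3 = 144 MB and (45 + 8) * 3 = 159 MB.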
   // Can be called when the embedding application is idle.
   bool IdleNotification();
@@ -1224,18 +1302,10 @@
   // by pointer size.
   static inline void CopyBlock(Address dst, Address src, int byte_size);
-  inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
-                                                      Address src,
-                                                      int byte_size);
-
   // Optimized version of memmove for blocks with pointer size aligned sizes and
   // pointer size aligned addresses.
   static inline void MoveBlock(Address dst, Address src, int byte_size);
-  inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
-                                                      Address src,
-                                                      int byte_size);
-
   // Check new space expansion criteria and expand semispaces if it was hit.
   void CheckNewSpaceExpansionCriteria();
@@ -1244,9 +1314,31 @@
     survived_since_last_expansion_ += survived;
   }
+  inline bool NextGCIsLikelyToBeFull() {
+    if (FLAG_gc_global) return true;
+
+    intptr_t total_promoted = PromotedTotalSize();
+
+    intptr_t adjusted_promotion_limit =
+        old_gen_promotion_limit_ - new_space_.Capacity();
+
+    if (total_promoted >= adjusted_promotion_limit) return true;
+
+    intptr_t adjusted_allocation_limit =
+        old_gen_allocation_limit_ - new_space_.Capacity() / 5;
+
+    if (PromotedSpaceSize() >= adjusted_allocation_limit) return true;
+
+    return false;
+  }
+
+
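(Continuing the worked example above, the predicate asks whether promoting essentially the whole new space could push the old generation over a limit; with a 48 MB promotion limit, a 53 MB allocation limit, and an 8 MB new space:)

    //   total promoted  >= 48 - 8   = 40.0 MB  --> next GC likely full
    //   promoted space  >= 53 - 8/5 = 51.4 MB  --> next GC likely full
    // Subtracting the new-space capacity anticipates that a scavenge can
    // promote every surviving new-space object into the old generation.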
   void UpdateNewSpaceReferencesInExternalStringTable(
       ExternalStringTableUpdaterCallback updater_func);
+  void UpdateReferencesInExternalStringTable(
+      ExternalStringTableUpdaterCallback updater_func);
+
   void ProcessWeakReferences(WeakObjectRetainer* retainer);
   // Helper function that governs the promotion policy from new space to
@@ -1263,6 +1355,9 @@
   GCTracer* tracer() { return tracer_; }
+  // Returns the size of objects residing in non new spaces.
+  intptr_t PromotedSpaceSize();
+
   double total_regexp_code_generated() { return total_regexp_code_generated_; }
   void IncreaseTotalRegexpCodeGenerated(int size) {
     total_regexp_code_generated_ += size;
@@ -1281,6 +1376,18 @@
     return &mark_compact_collector_;
   }
+  StoreBuffer* store_buffer() {
+    return &store_buffer_;
+  }
+
+  Marking* marking() {
+    return &marking_;
+  }
+
+  IncrementalMarking* incremental_marking() {
+    return &incremental_marking_;
+  }
+
   ExternalStringTable* external_string_table() {
     return &external_string_table_;
   }
@@ -1291,16 +1398,34 @@
   }
   inline Isolate* isolate();
-  bool is_safe_to_read_maps() { return is_safe_to_read_maps_; }
-  void CallGlobalGCPrologueCallback() {
+  inline void CallGlobalGCPrologueCallback() {
     if (global_gc_prologue_callback_ != NULL) global_gc_prologue_callback_();
   }
-  void CallGlobalGCEpilogueCallback() {
+  inline void CallGlobalGCEpilogueCallback() {
     if (global_gc_epilogue_callback_ != NULL) global_gc_epilogue_callback_();
   }
+  inline bool OldGenerationAllocationLimitReached();
+
+  inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
+    scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
+  }
+
+  bool ShouldWeGiveBackAPageToTheOS() {
+    last_empty_page_was_given_back_to_the_os_ =
+        !last_empty_page_was_given_back_to_the_os_;
+    return last_empty_page_was_given_back_to_the_os_;
+  }
+
+  void QueueMemoryChunkForFree(MemoryChunk* chunk);
+  void FreeQueuedChunks();
+
+  // Completely clear the Instanceof cache (to stop it keeping objects alive
+  // around a GC).
+  inline void CompletelyClearInstanceofCache();
+
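(DoScavengeObject above is the dispatch point for the ScavengingCallback typedef introduced earlier: the map's visitor id selects a specialized copy routine from scavenging_visitors_table_, and SelectScavengingVisitorsTable(), declared further down, swaps the whole table at once. The call site below is hypothetical, for illustration only.)

    // Scavenge one slot that is known to point into from-space.
    void ScavengeSlot(Heap* heap, HeapObject** slot) {
      HeapObject* object = *slot;
      // Dispatches through the visitor table; the chosen callback copies
      // or promotes the object and rewrites *slot with its new location.
      heap->DoScavengeObject(object->map(), slot, object);
    }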
  private:
   Heap();
@@ -1308,12 +1433,12 @@
   // more expedient to get at the isolate directly from within Heap methods.
   Isolate* isolate_;
+  intptr_t code_range_size_;
   int reserved_semispace_size_;
   int max_semispace_size_;
   int initial_semispace_size_;
   intptr_t max_old_generation_size_;
   intptr_t max_executable_size_;
-  intptr_t code_range_size_;
   // For keeping track of how much data has survived
   // scavenge since last new space expansion.
@@ -1328,6 +1453,8 @@
   // For keeping track of context disposals.
   int contexts_disposed_;
+  int scan_on_scavenge_pages_;
+
 #if defined(V8_TARGET_ARCH_X64)
   static const int kMaxObjectSizeInNewSpace = 1024*KB;
 #else
@@ -1344,13 +1471,9 @@
   HeapState gc_state_;
   int gc_post_processing_depth_;
-  // Returns the size of object residing in non new spaces.
-  intptr_t PromotedSpaceSize();
-
   // Returns the amount of external memory registered since last global gc.
   int PromotedExternalMemorySize();
-  int mc_count_;  // how many mark-compact collections happened
   int ms_count_;  // how many mark-sweep collections happened
   unsigned int gc_count_;  // how many gc happened
@@ -1389,6 +1512,13 @@
   // every allocation in large object space.
   intptr_t old_gen_allocation_limit_;
+  // Sometimes the heuristics dictate that those limits are increased. This
+  // variable records that fact.
+  int old_gen_limit_factor_;
+
+  // Used to adjust the limits that control the timing of the next GC.
+  intptr_t size_of_old_gen_at_last_old_space_gc_;
+
   // Limit on the amount of externally allocated memory allowed
   // between global GCs. If reached a global GC is forced.
   intptr_t external_allocation_limit_;
@@ -1408,6 +1538,8 @@
   Object* global_contexts_list_;
+  StoreBufferRebuilder store_buffer_rebuilder_;
+
   struct StringTypeTable {
     InstanceType type;
     int size;
@@ -1465,13 +1597,11 @@
   // Support for computing object sizes during GC.
   HeapObjectCallback gc_safe_size_of_old_object_;
   static int GcSafeSizeOfOldObject(HeapObject* object);
-  static int GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object);
   // Update the GC state. Called from the mark-compact collector.
   void MarkMapPointersAsEncoded(bool encoded) {
-    gc_safe_size_of_old_object_ = encoded
-        ? &GcSafeSizeOfOldObjectWithEncodedMap
-        : &GcSafeSizeOfOldObject;
+    ASSERT(!encoded);
+    gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject;
   }
   // Checks whether a global GC is necessary
@@ -1483,11 +1613,10 @@
   bool PerformGarbageCollection(GarbageCollector collector,
                                 GCTracer* tracer);
-  static const intptr_t kMinimumPromotionLimit = 2 * MB;
-  static const intptr_t kMinimumAllocationLimit = 8 * MB;
   inline void UpdateOldSpaceLimits();
+
   // Allocate an uninitialized object in map space. The behavior is identical
   // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
   // have to test the allocation space argument and (b) can reduce code size
@@ -1522,8 +1651,6 @@
   // Allocate empty fixed double array.
   MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();
-  void SwitchScavengingVisitorsTableIfProfilingWasEnabled();
-
   // Performs a minor collection in new generation.
   void Scavenge();
@@ -1532,17 +1659,16 @@
       Object** pointer);
   Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
+  static void ScavengeStoreBufferCallback(Heap* heap,
+                                          MemoryChunk* page,
+                                          StoreBufferEvent event);
   // Performs a major collection in the whole heap.
   void MarkCompact(GCTracer* tracer);
   // Code to be run before and after mark-compact.
-  void MarkCompactPrologue(bool is_compacting);
+  void MarkCompactPrologue();
-  // Completely clear the Instanceof cache (to stop it keeping objects alive
-  // around a GC).
-  inline void CompletelyClearInstanceofCache();
-
   // Record statistics before and after garbage collection.
   void ReportStatisticsBeforeGC();
   void ReportStatisticsAfterGC();
@@ -1621,6 +1747,8 @@
     return high_survival_rate_period_length_ > 0;
   }
+  void SelectScavengingVisitorsTable();
+
   static const int kInitialSymbolTableSize = 2048;
   static const int kInitialEvalCacheSize = 64;
@@ -1640,11 +1768,12 @@
   MarkCompactCollector mark_compact_collector_;
-  // This field contains the meaning of the WATERMARK_INVALIDATED flag.
-  // Instead of clearing this flag from all pages we just flip
-  // its meaning at the beginning of a scavenge.
-  intptr_t page_watermark_invalidated_mark_;
+  StoreBuffer store_buffer_;
+  Marking marking_;
+
+  IncrementalMarking incremental_marking_;
+
   int number_idle_notifications_;
   unsigned int last_idle_notification_gc_count_;
   bool last_idle_notification_gc_count_init_;
@@ -1658,8 +1787,11 @@
   ExternalStringTable external_string_table_;
-  bool is_safe_to_read_maps_;
+  VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
+  bool last_empty_page_was_given_back_to_the_os_;
+  MemoryChunk* chunks_queued_for_free_;
+
   friend class Factory;
   friend class GCTracer;
   friend class DisallowAllocationFailure;
@@ -1757,29 +1889,6 @@
     }
   }
 };
-
-
-// Visitor class to verify interior pointers in spaces that use region marks
-// to keep track of intergenerational references.
-// As VerifyPointersVisitor but also checks that dirty marks are set
-// for regions covering intergenerational references.
-class VerifyPointersAndDirtyRegionsVisitor: public ObjectVisitor {
- public:
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** current = start; current < end; current++) {
-      if ((*current)->IsHeapObject()) {
-        HeapObject* object = HeapObject::cast(*current);
-        ASSERT(HEAP->Contains(object));
-        ASSERT(object->map()->IsMap());
-        if (HEAP->InNewSpace(object)) {
-          ASSERT(HEAP->InToSpace(object));
-          Address addr = reinterpret_cast<Address>(current);
-          ASSERT(Page::FromAddress(addr)->IsRegionDirty(addr));
-        }
-      }
-    }
-  }
-};
 #endif
@@ -2112,16 +2221,6 @@
   // Sets the full GC count.
   void set_full_gc_count(int count) { full_gc_count_ = count; }
-  // Sets the flag that this is a compacting full GC.
-  void set_is_compacting() { is_compacting_ = true; }
-  bool is_compacting() const { return is_compacting_; }
-
-  // Increment and decrement the count of marked objects.
-  void increment_marked_count() { ++marked_count_; }
-  void decrement_marked_count() { --marked_count_; }
-
-  int marked_count() { return marked_count_; }
-
   void increment_promoted_objects_size(int object_size) {
     promoted_objects_size_ += object_size;
   }
@@ -2146,23 +2245,6 @@
   // A count (including this one) of the number of full garbage collections.
   int full_gc_count_;
-  // True if the current GC is a compacting full collection, false
-  // otherwise.
-  bool is_compacting_;
-
-  // True if the *previous* full GC cwas a compacting collection (will be
-  // false if there has not been a previous full GC).
-  bool previous_has_compacted_;
-
-  // On a full GC, a count of the number of marked objects. Incremented
-  // when an object is marked and decremented when an object's mark bit is
-  // cleared. Will be zero on a scavenge collection.
-  int marked_count_;
-
-  // The count from the end of the previous full GC. Will be zero if there
-  // was no previous full GC.
-  int previous_marked_count_;
-
   // Amounts of time spent in different scopes during GC.
   double scopes_[Scope::kNumberOfScopes];
@@ -2181,6 +2263,13 @@
   // Size of objects promoted during the current collection.
   intptr_t promoted_objects_size_;
+  // Incremental marking steps counters.
+  int steps_count_;
+  double steps_took_;
+  double longest_step_;
+  int steps_count_since_last_gc_;
+  double steps_took_since_last_gc_;
+
   Heap* heap_;
 };
@@ -2292,6 +2381,46 @@
 };
+// Intrusive object marking uses the least significant bit of a heap
+// object's map word to mark objects. Normally all map words have the
+// least significant bit set because they contain a tagged map pointer.
+// If the bit is not set, the object is marked. All objects should be
+// unmarked before resuming JavaScript execution.
+class IntrusiveMarking {
+ public:
+  static bool IsMarked(HeapObject* object) {
+    return (object->map_word().ToRawValue() & kNotMarkedBit) == 0;
+  }
+
+  static void ClearMark(HeapObject* object) {
+    uintptr_t map_word = object->map_word().ToRawValue();
+    object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit));
+    ASSERT(!IsMarked(object));
+  }
+
+  static void SetMark(HeapObject* object) {
+    uintptr_t map_word = object->map_word().ToRawValue();
+    object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit));
+    ASSERT(IsMarked(object));
+  }
+
+  static Map* MapOfMarkedObject(HeapObject* object) {
+    uintptr_t map_word = object->map_word().ToRawValue();
+    return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap();
+  }
+
+  static int SizeOfMarkedObject(HeapObject* object) {
+    return object->SizeFromMap(MapOfMarkedObject(object));
+  }
+
+ private:
+  static const uintptr_t kNotMarkedBit = 0x1;
+  STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0);
+};
+
+
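(A worked example of the map-word trick above; addresses are illustrative. Heap pointers carry kHeapObjectTag == 1, so a map word's low bit is normally set, and the class uses the bit's absence as the mark.)

    //   unmarked map word: 0x2f80a1   (tagged Map pointer, low bit set)
    //   SetMark():         0x2f80a0   IsMarked() now returns true
    //   ClearMark():       0x2f80a1   IsMarked() back to false
    // MapOfMarkedObject() ORs the bit back in before ToMap(), so a marked
    // object's map, and hence its size, remains recoverable; that is what
    // SizeOfMarkedObject() relies on.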
 #if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
 // Helper class for tracing paths to a search target Object from all roots.
 // The TracePathFrom() method can be used to trace paths from a specific
@@ -2350,7 +2479,6 @@
 };
 #endif  // DEBUG || LIVE_OBJECT_LIST
-
 } }  // namespace v8::internal
 #undef HEAP