OLD | NEW |
---|---|
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
7 | 7 |
8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
9 #include "src/atomic-utils.h" | 9 #include "src/atomic-utils.h" |
10 #include "src/base/atomicops.h" | 10 #include "src/base/atomicops.h" |
(...skipping 289 matching lines...) | |
300 POINTERS_FROM_HERE_ARE_INTERESTING, | 300 POINTERS_FROM_HERE_ARE_INTERESTING, |
301 SCAN_ON_SCAVENGE, | 301 SCAN_ON_SCAVENGE, |
302 IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE. | 302 IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE. |
303 IN_TO_SPACE, // All pages in new space have one of these two set. | 303 IN_TO_SPACE, // All pages in new space have one of these two set. |
304 NEW_SPACE_BELOW_AGE_MARK, | 304 NEW_SPACE_BELOW_AGE_MARK, |
305 EVACUATION_CANDIDATE, | 305 EVACUATION_CANDIDATE, |
306 RESCAN_ON_EVACUATION, | 306 RESCAN_ON_EVACUATION, |
307 NEVER_EVACUATE, // May contain immortal immutables. | 307 NEVER_EVACUATE, // May contain immortal immutables. |
308 POPULAR_PAGE, // Slots buffer of this page overflowed on the previous GC. | 308 POPULAR_PAGE, // Slots buffer of this page overflowed on the previous GC. |
309 | 309 |
310 // WAS_SWEPT indicates that marking bits have been cleared by the sweeper, | |
311 // otherwise marking bits are still intact. | |
312 WAS_SWEPT, | |
313 | |
314 // Large objects can have a progress bar in their page header. These objects | 310 // Large objects can have a progress bar in their page header. These objects |
315 // are scanned in increments and will be kept black while being scanned. | 311 // are scanned in increments and will be kept black while being scanned. |
316 // Even if the mutator writes to them they will be kept black and a white | 312 // Even if the mutator writes to them they will be kept black and a white |
317 // to grey transition is performed in the value. | 313 // to grey transition is performed in the value. |
318 HAS_PROGRESS_BAR, | 314 HAS_PROGRESS_BAR, |
319 | 315 |
320 // This flag is intended to be used for testing. Works only when both | 316 // This flag is intended to be used for testing. Works only when both |
321 // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection | 317 // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection |
322 // are set. It forces the page to become an evacuation candidate at next | 318 // are set. It forces the page to become an evacuation candidate at next |
323 // candidates selection cycle. | 319 // candidates selection cycle. |
(...skipping 21 matching lines...) | |
345 // |kCompactingAborted|: Parallel compaction has been aborted, which should | 341 // |kCompactingAborted|: Parallel compaction has been aborted, which should |
346 // for now only happen in OOM scenarios. | 342 // for now only happen in OOM scenarios. |
347 enum ParallelCompactingState { | 343 enum ParallelCompactingState { |
348 kCompactingDone, | 344 kCompactingDone, |
349 kCompactingInProgress, | 345 kCompactingInProgress, |
350 kCompactingFinalize, | 346 kCompactingFinalize, |
351 kCompactingAborted, | 347 kCompactingAborted, |
352 }; | 348 }; |
353 | 349 |
354 // |kSweepingDone|: The page state when sweeping is complete or sweeping must | 350 // |kSweepingDone|: The page state when sweeping is complete or sweeping must |
355 // not be performed on that page. | 351 // not be performed on that page. Sweeper threads that are done with their |
356 // |kSweepingFinalize|: A sweeper thread is done sweeping this page and will | 352 // work will set this value and not touch the page anymore. |
357 // not touch the page memory anymore. | 353 // |kSweepingPending|: This page is ready for parallel sweeping. |
358 // |kSweepingInProgress|: This page is currently swept by a sweeper thread. | 354 // |kSweepingInProgress|: This page is currently swept by a sweeper thread. |
359 // |kSweepingPending|: This page is ready for parallel sweeping. | 355 enum ConcurrentSweepingState { |
360 enum ParallelSweepingState { | |
361 kSweepingDone, | 356 kSweepingDone, |
362 kSweepingFinalize, | 357 kSweepingPending, |
363 kSweepingInProgress, | 358 kSweepingInProgress, |
364 kSweepingPending | |
365 }; | 359 }; |
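To make the lifecycle encoded by the renamed enum concrete, here is a minimal standalone sketch, assuming one thread schedules pages and sweeper tasks claim them via an atomic state; the types and functions below are illustrative stand-ins, not V8 code.

```cpp
// Standalone sketch (not V8 source): models the page sweeping lifecycle
// implied by ConcurrentSweepingState using std::atomic.
#include <atomic>
#include <cassert>

enum class ConcurrentSweepingState { kSweepingDone, kSweepingPending, kSweepingInProgress };

struct PageModel {
  std::atomic<ConcurrentSweepingState> state{ConcurrentSweepingState::kSweepingDone};
};

// A sweeper task claims a pending page, sweeps it, and marks it done.
void SweepPage(PageModel* page) {
  ConcurrentSweepingState expected = ConcurrentSweepingState::kSweepingPending;
  // Only one sweeper wins the kSweepingPending -> kSweepingInProgress transition.
  if (!page->state.compare_exchange_strong(
          expected, ConcurrentSweepingState::kSweepingInProgress)) {
    return;  // Another thread already claimed this page.
  }
  // ... free dead memory on the page here ...
  page->state.store(ConcurrentSweepingState::kSweepingDone);
}

int main() {
  PageModel page;
  // The scheduling thread marks the page as ready for parallel sweeping.
  page.state.store(ConcurrentSweepingState::kSweepingPending);
  SweepPage(&page);
  assert(page.state.load() == ConcurrentSweepingState::kSweepingDone);
}
```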
366 | 360 |
367 // Every n write barrier invocations we go to runtime even though | 361 // Every n write barrier invocations we go to runtime even though |
368 // we could have handled it in generated code. This lets us check | 362 // we could have handled it in generated code. This lets us check |
369 // whether we have hit the limit and should do some more marking. | 363 // whether we have hit the limit and should do some more marking. |
370 static const int kWriteBarrierCounterGranularity = 500; | 364 static const int kWriteBarrierCounterGranularity = 500; |
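A hedged sketch of the counter idea described above; the struct and function names are hypothetical, and this is only a model of "call into the runtime every n-th write barrier invocation", not V8's generated-code barrier.

```cpp
#include <cstdio>

// Illustrative only: every n-th write barrier invocation takes the slow
// (runtime) path so marking limits can be re-checked.
static const int kWriteBarrierCounterGranularity = 500;

struct PageCounterModel {
  int write_barrier_counter = kWriteBarrierCounterGranularity;
};

// Hypothetical fast-path hook: returns true when the runtime path should run.
bool RecordWriteAndMaybeEnterRuntime(PageCounterModel* page) {
  if (--page->write_barrier_counter > 0) return false;  // stay in generated code
  page->write_barrier_counter = kWriteBarrierCounterGranularity;
  return true;  // every n-th invocation: let the runtime check marking progress
}

int main() {
  PageCounterModel page;
  int runtime_calls = 0;
  for (int i = 0; i < 1500; i++) {
    if (RecordWriteAndMaybeEnterRuntime(&page)) runtime_calls++;
  }
  std::printf("runtime calls: %d\n", runtime_calls);  // prints 3
}
```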
371 | 365 |
372 static const int kPointersToHereAreInterestingMask = | 366 static const int kPointersToHereAreInterestingMask = |
373 1 << POINTERS_TO_HERE_ARE_INTERESTING; | 367 1 << POINTERS_TO_HERE_ARE_INTERESTING; |
374 | 368 |
(...skipping 173 matching lines...) | |
548 // Set or clear multiple flags at a time. The flags in the mask | 542 // Set or clear multiple flags at a time. The flags in the mask |
549 // are set to the value in "flags", the rest retain the current value | 543 // are set to the value in "flags", the rest retain the current value |
550 // in flags_. | 544 // in flags_. |
551 void SetFlags(intptr_t flags, intptr_t mask) { | 545 void SetFlags(intptr_t flags, intptr_t mask) { |
552 flags_ = (flags_ & ~mask) | (flags & mask); | 546 flags_ = (flags_ & ~mask) | (flags & mask); |
553 } | 547 } |
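The masked update in SetFlags can be read as: bits selected by mask take their value from flags, while all other bits keep their current value in flags_. A quick standalone check with made-up bit patterns:

```cpp
// Standalone illustration of the masked flag update used by SetFlags().
#include <cassert>
#include <cstdint>

int main() {
  intptr_t flags_ = 0b1010;       // current flags: bits 3 and 1 set
  const intptr_t mask = 0b0110;   // only bits 2 and 1 may change
  const intptr_t flags = 0b0100;  // requested values for the masked bits
  flags_ = (flags_ & ~mask) | (flags & mask);
  assert(flags_ == 0b1100);       // bit 3 kept, bit 2 set, bit 1 cleared
}
```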
554 | 548 |
555 // Return all current flags. | 549 // Return all current flags. |
556 intptr_t GetFlags() { return flags_; } | 550 intptr_t GetFlags() { return flags_; } |
557 | 551 |
558 AtomicValue<ParallelSweepingState>& parallel_sweeping_state() { | 552 AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() { |
559 return parallel_sweeping_; | 553 return concurrent_sweeping_; |
560 } | 554 } |
561 | 555 |
562 AtomicValue<ParallelCompactingState>& parallel_compaction_state() { | 556 AtomicValue<ParallelCompactingState>& parallel_compaction_state() { |
563 return parallel_compaction_; | 557 return parallel_compaction_; |
564 } | 558 } |
565 | 559 |
566 bool TryLock() { return mutex_->TryLock(); } | 560 bool TryLock() { return mutex_->TryLock(); } |
567 | 561 |
568 base::Mutex* mutex() { return mutex_; } | 562 base::Mutex* mutex() { return mutex_; } |
569 | 563 |
570 // WaitUntilSweepingCompleted only works when concurrent sweeping is in | |
571 // progress. In particular, when we know that right before this call a | |
572 // sweeper thread was sweeping this page. | |
573 void WaitUntilSweepingCompleted() { | |
574 mutex_->Lock(); | |
575 mutex_->Unlock(); | |
576 DCHECK(SweepingCompleted()); | |
577 } | |
578 | |
579 bool SweepingCompleted() { | |
580 return parallel_sweeping_state().Value() <= kSweepingFinalize; | |
581 } | |
582 | |
583 // Manage live byte count (count of bytes known to be live, | 564 // Manage live byte count (count of bytes known to be live, |
584 // because they are marked black). | 565 // because they are marked black). |
585 void ResetLiveBytes() { | 566 void ResetLiveBytes() { |
586 if (FLAG_gc_verbose) { | 567 if (FLAG_gc_verbose) { |
587 PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this), | 568 PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this), |
588 live_byte_count_); | 569 live_byte_count_); |
589 } | 570 } |
590 live_byte_count_ = 0; | 571 live_byte_count_ = 0; |
591 } | 572 } |
592 | 573 |
(...skipping 158 matching lines...) | |
751 // Count of bytes marked black on page. | 732 // Count of bytes marked black on page. |
752 int live_byte_count_; | 733 int live_byte_count_; |
753 SlotsBuffer* slots_buffer_; | 734 SlotsBuffer* slots_buffer_; |
754 SkipList* skip_list_; | 735 SkipList* skip_list_; |
755 intptr_t write_barrier_counter_; | 736 intptr_t write_barrier_counter_; |
756 // Assuming the initial allocation on a page is sequential, | 737 // Assuming the initial allocation on a page is sequential, |
757 // count highest number of bytes ever allocated on the page. | 738 // count highest number of bytes ever allocated on the page. |
758 AtomicValue<intptr_t> high_water_mark_; | 739 AtomicValue<intptr_t> high_water_mark_; |
759 | 740 |
760 base::Mutex* mutex_; | 741 base::Mutex* mutex_; |
761 AtomicValue<ParallelSweepingState> parallel_sweeping_; | 742 AtomicValue<ConcurrentSweepingState> concurrent_sweeping_; |
762 AtomicValue<ParallelCompactingState> parallel_compaction_; | 743 AtomicValue<ParallelCompactingState> parallel_compaction_; |
763 | 744 |
764 // PagedSpace free-list statistics. | 745 // PagedSpace free-list statistics. |
765 AtomicNumber<intptr_t> available_in_small_free_list_; | 746 AtomicNumber<intptr_t> available_in_small_free_list_; |
766 AtomicNumber<intptr_t> available_in_medium_free_list_; | 747 AtomicNumber<intptr_t> available_in_medium_free_list_; |
767 AtomicNumber<intptr_t> available_in_large_free_list_; | 748 AtomicNumber<intptr_t> available_in_large_free_list_; |
768 AtomicNumber<intptr_t> available_in_huge_free_list_; | 749 AtomicNumber<intptr_t> available_in_huge_free_list_; |
769 AtomicNumber<intptr_t> non_available_small_blocks_; | 750 AtomicNumber<intptr_t> non_available_small_blocks_; |
770 | 751 |
771 // next_chunk_ holds a pointer of type MemoryChunk | 752 // next_chunk_ holds a pointer of type MemoryChunk |
(...skipping 85 matching lines...) | |
857 // Page size mask. | 838 // Page size mask. |
858 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; | 839 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; |
859 | 840 |
860 inline void ClearGCFields(); | 841 inline void ClearGCFields(); |
861 | 842 |
862 static inline Page* Initialize(Heap* heap, MemoryChunk* chunk, | 843 static inline Page* Initialize(Heap* heap, MemoryChunk* chunk, |
863 Executability executable, PagedSpace* owner); | 844 Executability executable, PagedSpace* owner); |
864 | 845 |
865 void InitializeAsAnchor(PagedSpace* owner); | 846 void InitializeAsAnchor(PagedSpace* owner); |
866 | 847 |
867 bool WasSwept() { return IsFlagSet(WAS_SWEPT); } | 848 // WaitUntilSweepingCompleted only works when concurrent sweeping is in |
868 void SetWasSwept() { SetFlag(WAS_SWEPT); } | 849 // progress. In particular, when we know that right before this call a |
869 void ClearWasSwept() { ClearFlag(WAS_SWEPT); } | 850 // sweeper thread was sweeping this page. |
851 void WaitUntilSweepingCompleted() { | |
852 mutex_->Lock(); | |
853 mutex_->Unlock(); | |
854 DCHECK(SweepingCompleted()); | |
855 } | |
856 | |
857 bool SweepingCompleted() { | |
Hannes Payer (out of office) 2016/01/25 11:15:44
Let's rename that to SweepingDone to reflect the a
Michael Lippautz 2016/01/25 11:46:39
Done.
| |
858 return concurrent_sweeping_state().Value() == kSweepingDone; | |
859 } | |
870 | 860 |
871 void ResetFreeListStatistics(); | 861 void ResetFreeListStatistics(); |
872 | 862 |
873 int LiveBytesFromFreeList() { | 863 int LiveBytesFromFreeList() { |
874 return static_cast<int>( | 864 return static_cast<int>( |
875 area_size() - non_available_small_blocks() - | 865 area_size() - non_available_small_blocks() - |
876 available_in_small_free_list() - available_in_medium_free_list() - | 866 available_in_small_free_list() - available_in_medium_free_list() - |
877 available_in_large_free_list() - available_in_huge_free_list()); | 867 available_in_large_free_list() - available_in_huge_free_list()); |
878 } | 868 } |
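As a quick sanity check of this estimate (the numbers are made up): with a 16 KB usable area, 1 KB of non-available small blocks, and 3 KB total spread across the four free lists, LiveBytesFromFreeList would report 16 KB - 1 KB - 3 KB = 12 KB of live bytes.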
879 | 869 |
(...skipping 1189 matching lines...) | |
2069 int old_linear_size = static_cast<int>(limit() - top()); | 2059 int old_linear_size = static_cast<int>(limit() - top()); |
2070 Free(top(), old_linear_size); | 2060 Free(top(), old_linear_size); |
2071 SetTopAndLimit(NULL, NULL); | 2061 SetTopAndLimit(NULL, NULL); |
2072 } | 2062 } |
2073 | 2063 |
2074 void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); } | 2064 void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); } |
2075 | 2065 |
2076 void IncreaseCapacity(int size); | 2066 void IncreaseCapacity(int size); |
2077 | 2067 |
2078 // Releases an unused page and shrinks the space. | 2068 // Releases an unused page and shrinks the space. |
2079 void ReleasePage(Page* page); | 2069 void ReleasePage(Page* page, bool evict_free_list_items); |
2080 | 2070 |
2081 // The dummy page that anchors the linked list of pages. | 2071 // The dummy page that anchors the linked list of pages. |
2082 Page* anchor() { return &anchor_; } | 2072 Page* anchor() { return &anchor_; } |
2083 | 2073 |
2084 #ifdef VERIFY_HEAP | 2074 #ifdef VERIFY_HEAP |
2085 // Verify integrity of this space. | 2075 // Verify integrity of this space. |
2086 virtual void Verify(ObjectVisitor* visitor); | 2076 virtual void Verify(ObjectVisitor* visitor); |
2087 | 2077 |
2088 // Overridden by subclasses to verify space-specific object | 2078 // Overridden by subclasses to verify space-specific object |
2089 // properties (e.g., only maps or free-list nodes are in map space). | 2079 // properties (e.g., only maps or free-list nodes are in map space). |
2090 virtual void VerifyObject(HeapObject* obj) {} | 2080 virtual void VerifyObject(HeapObject* obj) {} |
2091 #endif | 2081 #endif |
2092 | 2082 |
2093 #ifdef DEBUG | 2083 #ifdef DEBUG |
2094 // Print meta info and objects in this space. | 2084 // Print meta info and objects in this space. |
2095 void Print() override; | 2085 void Print() override; |
2096 | 2086 |
2097 // Reports statistics for the space | 2087 // Reports statistics for the space |
2098 void ReportStatistics(); | 2088 void ReportStatistics(); |
2099 | 2089 |
2100 // Report code object related statistics | 2090 // Report code object related statistics |
2101 void CollectCodeStatistics(); | 2091 void CollectCodeStatistics(); |
2102 static void ReportCodeStatistics(Isolate* isolate); | 2092 static void ReportCodeStatistics(Isolate* isolate); |
2103 static void ResetCodeStatistics(Isolate* isolate); | 2093 static void ResetCodeStatistics(Isolate* isolate); |
2104 #endif | 2094 #endif |
2105 | 2095 |
2106 // Evacuation candidates are swept by evacuator. Needs to return a valid | |
2107 // result before _and_ after evacuation has finished. | |
2108 static bool ShouldBeSweptBySweeperThreads(Page* p) { | |
2109 return !p->IsEvacuationCandidate() && | |
2110 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept(); | |
2111 } | |
2112 | |
2113 // This function tries to steal size_in_bytes memory from the sweeper threads' | 2096 // This function tries to steal size_in_bytes memory from the sweeper threads' |
2114 // free-lists. If it does not succeed stealing enough memory, it will wait | 2097 // free-lists. If it does not succeed stealing enough memory, it will wait |
2115 // for the sweeper threads to finish sweeping. | 2098 // for the sweeper threads to finish sweeping. |
2116 // It returns true when sweeping is completed and false otherwise. | 2099 // It returns true when sweeping is completed and false otherwise. |
2117 bool EnsureSweeperProgress(intptr_t size_in_bytes); | 2100 bool EnsureSweeperProgress(intptr_t size_in_bytes); |
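A hedged sketch of how an allocation slow path might rely on this contract; SpaceModel, SlowAllocate, and TryAllocateFromFreeList are made-up stand-ins, not the actual PagedSpace code:

```cpp
// Illustrative slow-path allocation built around the EnsureSweeperProgress
// contract described above.
#include <cstdint>
#include <cstdio>
#include <cstdlib>

struct SpaceModel {
  bool sweeping_done = false;
  bool EnsureSweeperProgress(intptr_t /*size_in_bytes*/) {
    // The real code steals memory from the sweepers' free lists and may wait
    // for sweeping to finish; this stub simply pretends sweeping completed.
    sweeping_done = true;
    return sweeping_done;
  }
  void* TryAllocateFromFreeList(intptr_t size_in_bytes) {
    // The real code carves the request out of the space's free list; the stub
    // only succeeds once sweeping has "completed".
    return sweeping_done ? std::malloc(size_in_bytes) : nullptr;
  }
};

void* SlowAllocate(SpaceModel* space, intptr_t size_in_bytes) {
  if (void* result = space->TryAllocateFromFreeList(size_in_bytes)) return result;
  if (space->EnsureSweeperProgress(size_in_bytes)) {
    // Sweeping is complete, so all swept memory is on the free lists now.
    return space->TryAllocateFromFreeList(size_in_bytes);
  }
  return nullptr;  // the caller would expand the space or trigger a full GC
}

int main() {
  SpaceModel space;
  void* block = SlowAllocate(&space, 64);
  std::printf("allocated: %p\n", block);
  std::free(block);
}
```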
2118 | 2101 |
2119 Page* FirstPage() { return anchor_.next_page(); } | 2102 Page* FirstPage() { return anchor_.next_page(); } |
2120 Page* LastPage() { return anchor_.prev_page(); } | 2103 Page* LastPage() { return anchor_.prev_page(); } |
2121 | 2104 |
2122 void EvictEvacuationCandidatesFromLinearAllocationArea(); | 2105 void EvictEvacuationCandidatesFromLinearAllocationArea(); |
(...skipping 1088 matching lines...) | |
3211 count = 0; | 3194 count = 0; |
3212 } | 3195 } |
3213 // Must be small, since an iteration is used for lookup. | 3196 // Must be small, since an iteration is used for lookup. |
3214 static const int kMaxComments = 64; | 3197 static const int kMaxComments = 64; |
3215 }; | 3198 }; |
3216 #endif | 3199 #endif |
3217 } // namespace internal | 3200 } // namespace internal |
3218 } // namespace v8 | 3201 } // namespace v8 |
3219 | 3202 |
3220 #endif // V8_HEAP_SPACES_H_ | 3203 #endif // V8_HEAP_SPACES_H_ |