| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_HEAP_MARK_COMPACT_H_ | 5 #ifndef V8_HEAP_MARK_COMPACT_H_ |
| 6 #define V8_HEAP_MARK_COMPACT_H_ | 6 #define V8_HEAP_MARK_COMPACT_H_ |
| 7 | 7 |
| 8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
| 9 #include "src/heap/spaces.h" | 9 #include "src/heap/spaces.h" |
| 10 | 10 |
| (...skipping 388 matching lines...) |
| 399 void RecordCodeTargetPatch(Address pc, Code* target); | 399 void RecordCodeTargetPatch(Address pc, Code* target); |
| 400 INLINE(void RecordSlot(HeapObject* object, Object** slot, Object* target)); | 400 INLINE(void RecordSlot(HeapObject* object, Object** slot, Object* target)); |
| 401 INLINE(void ForceRecordSlot(HeapObject* object, Object** slot, | 401 INLINE(void ForceRecordSlot(HeapObject* object, Object** slot, |
| 402 Object* target)); | 402 Object* target)); |
| 403 | 403 |
| 404 void UpdateSlots(SlotsBuffer* buffer); | 404 void UpdateSlots(SlotsBuffer* buffer); |
| 405 void UpdateSlotsRecordedIn(SlotsBuffer* buffer); | 405 void UpdateSlotsRecordedIn(SlotsBuffer* buffer); |
| 406 | 406 |
| 407 void MigrateObject(HeapObject* dst, HeapObject* src, int size, | 407 void MigrateObject(HeapObject* dst, HeapObject* src, int size, |
| 408 AllocationSpace to_old_space, | 408 AllocationSpace to_old_space, |
| 409 SlotsBuffer** evacuation_slots_buffer); | 409 SlotsBuffer** evacuation_slots_buffer, |
| | 410 LocalStoreBuffer* local_store_buffer); |
| 410 | 411 |
| 411 void InvalidateCode(Code* code); | 412 void InvalidateCode(Code* code); |
| 412 | 413 |
| 413 void ClearMarkbits(); | 414 void ClearMarkbits(); |
| 414 | 415 |
| 415 bool is_compacting() const { return compacting_; } | 416 bool is_compacting() const { return compacting_; } |
| 416 | 417 |
| 417 MarkingParity marking_parity() { return marking_parity_; } | 418 MarkingParity marking_parity() { return marking_parity_; } |
| 418 | 419 |
| 419 // Concurrent and parallel sweeping support. If required_freed_bytes is set | 420 // Concurrent and parallel sweeping support. If required_freed_bytes is set |
| 420 // to a value larger than 0, then sweeping returns after a block of at least | 421 // to a value larger than 0, then sweeping returns after a block of at least |
| 421 // required_freed_bytes has been freed. If required_freed_bytes is set to | 422 // required_freed_bytes has been freed. If required_freed_bytes is set to |
| 422 // zero, then the whole given space is swept. It returns the size of the | 423 // zero, then the whole given space is swept. It returns the size of the |
| 423 // largest contiguous freed memory chunk. | 424 // largest contiguous freed memory chunk. |
| 424 int SweepInParallel(PagedSpace* space, int required_freed_bytes); | 425 int SweepInParallel(PagedSpace* space, int required_freed_bytes, |
| | 426 int max_pages = 0); |
| 425 | 427 |
| 426 // Sweeps a given page concurrently with the sweeper threads. It returns the | 428 // Sweeps a given page concurrently with the sweeper threads. It returns the |
| 427 // size of the largest contiguous freed memory chunk. | 429 // size of the largest contiguous freed memory chunk. |
| 428 int SweepInParallel(Page* page, PagedSpace* space); | 430 int SweepInParallel(Page* page, PagedSpace* space); |
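The required_freed_bytes contract above lends itself to a small usage sketch. The helper below is hypothetical (not V8 code) and assumes the V8 internal headers are available; it only illustrates how a caller might combine the "stop early" and "sweep everything" modes of the space-level overload.

// Hypothetical caller (illustrative only): try to free at least `needed`
// contiguous bytes from `space`, falling back to sweeping the whole space.
static bool TryFreeContiguousBytes(MarkCompactCollector* collector,
                                   PagedSpace* space, int needed) {
  // With required_freed_bytes > 0, sweeping stops early once a block of at
  // least that size has been freed; the return value is the size of the
  // largest contiguous chunk freed so far.
  if (collector->SweepInParallel(space, needed) >= needed) return true;
  // With required_freed_bytes == 0, the whole given space is swept.
  return collector->SweepInParallel(space, 0) >= needed;
}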
| 429 | 431 |
| 430 // Ensures that sweeping is finished. | 432 // Ensures that sweeping is finished. |
| 431 // | 433 // |
| 432 // Note: Can only be called safely from main thread. | 434 // Note: Can only be called safely from main thread. |
| 433 void EnsureSweepingCompleted(); | 435 void EnsureSweepingCompleted(); |
| 434 | 436 |
| (...skipping 69 matching lines...) |
| 504 } | 506 } |
| 505 | 507 |
| 506 private: | 508 private: |
| 507 class CompactionTask; | 509 class CompactionTask; |
| 508 class EvacuateNewSpaceVisitor; | 510 class EvacuateNewSpaceVisitor; |
| 509 class EvacuateOldSpaceVisitor; | 511 class EvacuateOldSpaceVisitor; |
| 510 class EvacuateVisitorBase; | 512 class EvacuateVisitorBase; |
| 511 class HeapObjectVisitor; | 513 class HeapObjectVisitor; |
| 512 class SweeperTask; | 514 class SweeperTask; |
| 513 | 515 |
| 514 static const int kInitialLocalPretenuringFeedbackCapacity = 256; | |
| 515 | |
| 516 explicit MarkCompactCollector(Heap* heap); | 516 explicit MarkCompactCollector(Heap* heap); |
| 517 | 517 |
| 518 bool WillBeDeoptimized(Code* code); | 518 bool WillBeDeoptimized(Code* code); |
| 519 void EvictPopularEvacuationCandidate(Page* page); | 519 void EvictPopularEvacuationCandidate(Page* page); |
| 520 void ClearInvalidStoreAndSlotsBufferEntries(); | 520 void ClearInvalidStoreAndSlotsBufferEntries(); |
| 521 | 521 |
| 522 void StartSweeperThreads(); | 522 void StartSweeperThreads(); |
| 523 | 523 |
| 524 void ComputeEvacuationHeuristics(int area_size, | 524 void ComputeEvacuationHeuristics(int area_size, |
| 525 int* target_fragmentation_percent, | 525 int* target_fragmentation_percent, |
| (...skipping 166 matching lines...) |
| 692 // their space's free list. Active eden semispace is compacted by | 692 // their space's free list. Active eden semispace is compacted by |
| 693 // evacuation. | 693 // evacuation. |
| 694 // | 694 // |
| 695 | 695 |
| 696 // If we are not compacting the heap, we simply sweep the spaces except | 696 // If we are not compacting the heap, we simply sweep the spaces except |
| 697 // for the large object space, clearing mark bits and adding unmarked | 697 // for the large object space, clearing mark bits and adding unmarked |
| 698 // regions to each space's free list. | 698 // regions to each space's free list. |
| 699 void SweepSpaces(); | 699 void SweepSpaces(); |
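Since the comment describes sweeping as clearing mark bits and turning unmarked regions into free-list entries, here is a small self-contained toy model of that idea. It uses standard C++ only; the names and data layout are stand-ins, not V8 types, and the real sweeper in mark-compact.cc operates on Page bitmaps and per-space free lists.

#include <cstddef>
#include <vector>

// Toy model of sweeping one "page": walk the mark bits, collect unmarked
// runs as free regions, and clear the mark bits of live cells for the next
// GC cycle. Purely illustrative.
struct Region { size_t offset, size; };

std::vector<Region> SweepToyPage(std::vector<bool>& mark_bits,
                                 size_t cell_size) {
  std::vector<Region> free_list;
  size_t run_start = 0, run_cells = 0;
  for (size_t i = 0; i < mark_bits.size(); ++i) {
    if (!mark_bits[i]) {
      if (run_cells == 0) run_start = i;  // start a new unmarked run
      ++run_cells;
    } else {
      if (run_cells > 0)
        free_list.push_back({run_start * cell_size, run_cells * cell_size});
      run_cells = 0;
      mark_bits[i] = false;  // clear the mark bit of the surviving cell
    }
  }
  if (run_cells > 0)  // flush a trailing unmarked run
    free_list.push_back({run_start * cell_size, run_cells * cell_size});
  return free_list;
}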
| 700 | 700 |
| 701 void EvacuateNewSpacePrologue(); | 701 void EvacuateNewSpacePrologue(); |
| 702 | 702 void EvacuateNewSpaceEpilogue(); |
| 703 // Returns local pretenuring feedback. | |
| 704 HashMap* EvacuateNewSpaceInParallel(); | |
| 705 | 703 |
| 706 void AddEvacuationSlotsBufferSynchronized( | 704 void AddEvacuationSlotsBufferSynchronized( |
| 707 SlotsBuffer* evacuation_slots_buffer); | 705 SlotsBuffer* evacuation_slots_buffer); |
| 708 | 706 |
| | 707 bool EvacuateSinglePage(MemoryChunk* p, HeapObjectVisitor* visitor, |
| | 708 CompactionSpaceCollection* compaction_spaces); |
| 709 void EvacuatePages(CompactionSpaceCollection* compaction_spaces, | 709 void EvacuatePages(CompactionSpaceCollection* compaction_spaces, |
| 710 SlotsBuffer** evacuation_slots_buffer); | 710 SlotsBuffer** evacuation_slots_buffer); |
| 711 | |
| 712 void EvacuatePagesInParallel(); | 711 void EvacuatePagesInParallel(); |
| 713 | 712 |
| 714 // The number of parallel compaction tasks, including the main thread. | 713 // The number of parallel compaction tasks, including the main thread. |
| 715 int NumberOfParallelCompactionTasks(); | 714 int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes); |
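The new (pages, live_bytes) signature suggests the task count is derived from the amount of evacuation work. The formula below is purely illustrative (the constants and the shape of the real heuristic in mark-compact.cc are assumptions), but it shows the usual bounds: at least the main thread, at most the available work or hardware parallelism.

#include <algorithm>
#include <cstdint>

// Illustrative heuristic only; constants and formula are assumptions.
int GuessCompactionTasks(int pages, intptr_t live_bytes, int num_cores) {
  const intptr_t kLiveBytesPerTask = 4 * 1024 * 1024;  // assumed granularity
  // Roughly one task per chunk of live data, but never more tasks than pages.
  intptr_t by_bytes = live_bytes / kLiveBytesPerTask + 1;
  int tasks = static_cast<int>(std::min<intptr_t>(pages, by_bytes));
  // Always include the main thread; never exceed the available cores.
  return std::max(1, std::min(tasks, num_cores));
}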
| 716 | 715 |
| 717 | 716 |
| 718 void StartParallelCompaction(CompactionSpaceCollection** compaction_spaces, | 717 void StartParallelCompaction(CompactionSpaceCollection** compaction_spaces, |
| 719 uint32_t* task_ids, int len); | 718 uint32_t* task_ids, int len); |
| 720 void WaitUntilCompactionCompleted(uint32_t* task_ids, int len); | 719 void WaitUntilCompactionCompleted(uint32_t* task_ids, int len); |
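StartParallelCompaction and WaitUntilCompactionCompleted describe a fork-join pattern. Below is a minimal sketch of that shape with std::thread as a stand-in; V8 actually uses cancelable platform tasks identified by the uint32_t task ids, which is not reproduced here.

#include <thread>
#include <vector>

// Fork-join sketch: helper threads compact their partitions while the main
// thread also participates, then everything is joined (the analogue of
// WaitUntilCompactionCompleted). Illustrative only.
void RunCompactionTasksSketch(int num_tasks,
                              void (*compact_partition)(int task_id)) {
  std::vector<std::thread> helpers;
  for (int i = 1; i < num_tasks; i++) {
    helpers.emplace_back(compact_partition, i);  // "StartParallelCompaction"
  }
  compact_partition(0);  // the main thread counts as one of the tasks
  for (std::thread& t : helpers) t.join();  // wait for all helpers
}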
| 721 | 720 |
| 722 void EvacuateNewSpaceAndCandidates(); | 721 void EvacuateNewSpaceAndCandidates(); |
| 723 | 722 |
| 724 void UpdatePointersAfterEvacuation(); | 723 void UpdatePointersAfterEvacuation(); |
| 725 | 724 |
| (...skipping 19 matching lines...) |
| 745 void StartSweepSpace(PagedSpace* space); | 744 void StartSweepSpace(PagedSpace* space); |
| 746 | 745 |
| 747 // Finalizes the parallel sweeping phase. Marks all the pages that were | 746 // Finalizes the parallel sweeping phase. Marks all the pages that were |
| 748 // swept in parallel. | 747 // swept in parallel. |
| 749 void ParallelSweepSpacesComplete(); | 748 void ParallelSweepSpacesComplete(); |
| 750 | 749 |
| 751 void ParallelSweepSpaceComplete(PagedSpace* space); | 750 void ParallelSweepSpaceComplete(PagedSpace* space); |
| 752 | 751 |
| 753 // Updates store buffer and slot buffer for a pointer in a migrating object. | 752 // Updates store buffer and slot buffer for a pointer in a migrating object. |
| 754 void RecordMigratedSlot(Object* value, Address slot, | 753 void RecordMigratedSlot(Object* value, Address slot, |
| 755 SlotsBuffer** evacuation_slots_buffer); | 754 SlotsBuffer** evacuation_slots_buffer, |
| | 755 LocalStoreBuffer* local_store_buffer); |
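As described above, a migrated object's outgoing pointers are recorded in one of two places. The routing below is a hedged reading of that comment (the new LocalStoreBuffer parameter presumably lets each parallel compaction task record old-to-new pointers without synchronizing on the shared store buffer); the predicate names are stand-ins, not V8 API.

// Toy classification only; the real logic lives in
// MarkCompactCollector::RecordMigratedSlot in mark-compact.cc.
enum class SlotRecord { kNone, kStoreBuffer, kSlotsBuffer };

SlotRecord ClassifyMigratedSlot(bool value_in_new_space,
                                bool value_on_evacuation_candidate) {
  // Old-to-new pointers are tracked by the store buffer (presumably via the
  // per-task LocalStoreBuffer during parallel evacuation).
  if (value_in_new_space) return SlotRecord::kStoreBuffer;
  // Pointers into evacuation candidates must be updated after compaction,
  // so they go into the evacuation slots buffer.
  if (value_on_evacuation_candidate) return SlotRecord::kSlotsBuffer;
  return SlotRecord::kNone;  // nothing to record
}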
| 756 | 756 |
| 757 // Adds the code entry slot to the slots buffer. | 757 // Adds the code entry slot to the slots buffer. |
| 758 void RecordMigratedCodeEntrySlot(Address code_entry, Address code_entry_slot, | 758 void RecordMigratedCodeEntrySlot(Address code_entry, Address code_entry_slot, |
| 759 SlotsBuffer** evacuation_slots_buffer); | 759 SlotsBuffer** evacuation_slots_buffer); |
| 760 | 760 |
| 761 // Adds the slot of a moved code object. | 761 // Adds the slot of a moved code object. |
| 762 void RecordMigratedCodeObjectSlot(Address code_object, | 762 void RecordMigratedCodeObjectSlot(Address code_object, |
| 763 SlotsBuffer** evacuation_slots_buffer); | 763 SlotsBuffer** evacuation_slots_buffer); |
| 764 | 764 |
| 765 #ifdef DEBUG | 765 #ifdef DEBUG |
| 766 friend class MarkObjectVisitor; | 766 friend class MarkObjectVisitor; |
| 767 static void VisitObject(HeapObject* obj); | 767 static void VisitObject(HeapObject* obj); |
| 768 | 768 |
| 769 friend class UnmarkObjectVisitor; | 769 friend class UnmarkObjectVisitor; |
| 770 static void UnmarkObject(HeapObject* obj); | 770 static void UnmarkObject(HeapObject* obj); |
| 771 #endif | 771 #endif |
| 772 | 772 |
| 773 Heap* heap_; | 773 Heap* heap_; |
| 774 base::VirtualMemory* marking_deque_memory_; | 774 base::VirtualMemory* marking_deque_memory_; |
| 775 size_t marking_deque_memory_committed_; | 775 size_t marking_deque_memory_committed_; |
| 776 MarkingDeque marking_deque_; | 776 MarkingDeque marking_deque_; |
| 777 CodeFlusher* code_flusher_; | 777 CodeFlusher* code_flusher_; |
| 778 bool have_code_to_deoptimize_; | 778 bool have_code_to_deoptimize_; |
| 779 | 779 |
| 780 List<Page*> evacuation_candidates_; | 780 List<Page*> evacuation_candidates_; |
| 781 | 781 List<NewSpacePage*> newspace_evacuation_candidates_; |
| 782 List<MemoryChunk*> newspace_evacuation_candidates_; | |
| 783 | 782 |
| 784 // The evacuation_slots_buffers_ are used by the compaction threads. | 783 // The evacuation_slots_buffers_ are used by the compaction threads. |
| 785 // When a compaction task finishes, it uses | 784 // When a compaction task finishes, it uses |
| 786 // AddEvacuationSlotsBufferSynchronized to add its slots buffer to the | 785 // AddEvacuationSlotsBufferSynchronized to add its slots buffer to the |
| 787 // evacuation_slots_buffers_ list using the evacuation_slots_buffers_mutex_ | 786 // evacuation_slots_buffers_ list using the evacuation_slots_buffers_mutex_ |
| 788 // lock. | 787 // lock. |
| 789 base::Mutex evacuation_slots_buffers_mutex_; | 788 base::Mutex evacuation_slots_buffers_mutex_; |
| 790 List<SlotsBuffer*> evacuation_slots_buffers_; | 789 List<SlotsBuffer*> evacuation_slots_buffers_; |
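The handover described in the comment is a mutex-guarded list append. A standard-library sketch of the same pattern follows; std::mutex and std::vector stand in for V8's base::Mutex and List, and the real AddEvacuationSlotsBufferSynchronized is defined in mark-compact.cc.

#include <mutex>
#include <vector>

// Each finished compaction task hands its slots buffer to the collector so
// the main thread can process all of them later. Illustrative stand-in types.
struct EvacuationSlotsSink {
  std::mutex mutex;            // analogue of evacuation_slots_buffers_mutex_
  std::vector<void*> buffers;  // analogue of evacuation_slots_buffers_

  void AddSynchronized(void* evacuation_slots_buffer) {
    std::lock_guard<std::mutex> guard(mutex);
    buffers.push_back(evacuation_slots_buffer);
  }
};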
| 791 | 790 |
| 792 base::SmartPointer<FreeList> free_list_old_space_; | 791 base::SmartPointer<FreeList> free_list_old_space_; |
| (...skipping 102 matching lines...) |
| 895 private: | 894 private: |
| 896 MarkCompactCollector* collector_; | 895 MarkCompactCollector* collector_; |
| 897 }; | 896 }; |
| 898 | 897 |
| 899 | 898 |
| 900 const char* AllocationSpaceName(AllocationSpace space); | 899 const char* AllocationSpaceName(AllocationSpace space); |
| 901 } // namespace internal | 900 } // namespace internal |
| 902 } // namespace v8 | 901 } // namespace v8 |
| 903 | 902 |
| 904 #endif // V8_HEAP_MARK_COMPACT_H_ | 903 #endif // V8_HEAP_MARK_COMPACT_H_ |