OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_MARK_COMPACT_H_ | 5 #ifndef V8_HEAP_MARK_COMPACT_H_ |
6 #define V8_HEAP_MARK_COMPACT_H_ | 6 #define V8_HEAP_MARK_COMPACT_H_ |
7 | 7 |
8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
9 #include "src/heap/spaces.h" | 9 #include "src/heap/spaces.h" |
10 | 10 |
(...skipping 388 matching lines...)
399 void RecordCodeTargetPatch(Address pc, Code* target); | 399 void RecordCodeTargetPatch(Address pc, Code* target); |
400 INLINE(void RecordSlot(HeapObject* object, Object** slot, Object* target)); | 400 INLINE(void RecordSlot(HeapObject* object, Object** slot, Object* target)); |
401 INLINE(void ForceRecordSlot(HeapObject* object, Object** slot, | 401 INLINE(void ForceRecordSlot(HeapObject* object, Object** slot, |
402 Object* target)); | 402 Object* target)); |
403 | 403 |
404 void UpdateSlots(SlotsBuffer* buffer); | 404 void UpdateSlots(SlotsBuffer* buffer); |
405 void UpdateSlotsRecordedIn(SlotsBuffer* buffer); | 405 void UpdateSlotsRecordedIn(SlotsBuffer* buffer); |
406 | 406 |
407 void MigrateObject(HeapObject* dst, HeapObject* src, int size, | 407 void MigrateObject(HeapObject* dst, HeapObject* src, int size, |
408 AllocationSpace to_old_space, | 408 AllocationSpace to_old_space, |
409 SlotsBuffer** evacuation_slots_buffer); | 409 SlotsBuffer** evacuation_slots_buffer, |
| 410 LocalStoreBuffer* local_store_buffer); |
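The new local_store_buffer parameter suggests that each evacuation task now records old-to-new slots into a thread-local buffer rather than locking the global store buffer per slot. A minimal sketch of that idea, with the Record/Process names assumed for illustration (not necessarily the actual LocalStoreBuffer API):

    // Hypothetical sketch: buffer old-to-new slot addresses per task and merge
    // them into the global store buffer in one step after evacuation, instead
    // of taking a lock for every recorded slot.
    class LocalStoreBufferSketch {
     public:
      void Record(Address slot) { slots_.Add(slot); }
      void Process(StoreBuffer* store_buffer) {
        // Exact StoreBuffer entry point assumed; the point is a single merge
        // pass once the task has finished migrating its objects.
        for (int i = 0; i < slots_.length(); i++) store_buffer->Mark(slots_[i]);
      }
     private:
      List<Address> slots_;
    };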
410 | 411 |
411 void InvalidateCode(Code* code); | 412 void InvalidateCode(Code* code); |
412 | 413 |
413 void ClearMarkbits(); | 414 void ClearMarkbits(); |
414 | 415 |
415 bool is_compacting() const { return compacting_; } | 416 bool is_compacting() const { return compacting_; } |
416 | 417 |
417 MarkingParity marking_parity() { return marking_parity_; } | 418 MarkingParity marking_parity() { return marking_parity_; } |
418 | 419 |
419 // Concurrent and parallel sweeping support. If required_freed_bytes was set | 420 // Concurrent and parallel sweeping support. If required_freed_bytes was set |
(...skipping 84 matching lines...)
504 } | 505 } |
505 | 506 |
506 private: | 507 private: |
507 class CompactionTask; | 508 class CompactionTask; |
508 class EvacuateNewSpaceVisitor; | 509 class EvacuateNewSpaceVisitor; |
509 class EvacuateOldSpaceVisitor; | 510 class EvacuateOldSpaceVisitor; |
510 class EvacuateVisitorBase; | 511 class EvacuateVisitorBase; |
511 class HeapObjectVisitor; | 512 class HeapObjectVisitor; |
512 class SweeperTask; | 513 class SweeperTask; |
513 | 514 |
514 static const int kInitialLocalPretenuringFeedbackCapacity = 256; | |
515 | |
516 explicit MarkCompactCollector(Heap* heap); | 515 explicit MarkCompactCollector(Heap* heap); |
517 | 516 |
518 bool WillBeDeoptimized(Code* code); | 517 bool WillBeDeoptimized(Code* code); |
519 void EvictPopularEvacuationCandidate(Page* page); | 518 void EvictPopularEvacuationCandidate(Page* page); |
520 void ClearInvalidStoreAndSlotsBufferEntries(); | 519 void ClearInvalidStoreAndSlotsBufferEntries(); |
521 | 520 |
522 void StartSweeperThreads(); | 521 void StartSweeperThreads(); |
523 | 522 |
524 void ComputeEvacuationHeuristics(int area_size, | 523 void ComputeEvacuationHeuristics(int area_size, |
525 int* target_fragmentation_percent, | 524 int* target_fragmentation_percent, |
(...skipping 166 matching lines...)
692 // their space's free list. Active eden semispace is compacted by | 691 // their space's free list. Active eden semispace is compacted by |
693 // evacuation. | 692 // evacuation. |
694 // | 693 // |
695 | 694 |
696 // If we are not compacting the heap, we simply sweep the spaces except | 695 // If we are not compacting the heap, we simply sweep the spaces except |
697 // for the large object space, clearing mark bits and adding unmarked | 696 // for the large object space, clearing mark bits and adding unmarked |
698 // regions to each space's free list. | 697 // regions to each space's free list. |
699 void SweepSpaces(); | 698 void SweepSpaces(); |
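As a rough illustration of the non-compacting sweep described in the comment above, a simplified sketch; FreeUnmarkedRanges is a hypothetical helper standing in for the real free-list handling:

    // Simplified sketch: walk every page of a paged space, return unmarked
    // regions to the space's free list, and clear the mark bits for the next
    // GC cycle. The real sweeper derives free ranges from the mark bitmap.
    static void SweepSpaceSketch(PagedSpace* space) {
      PageIterator it(space);
      while (it.has_next()) {
        Page* page = it.next();
        FreeUnmarkedRanges(space, page);  // hypothetical; built on space->Free()
        Bitmap::Clear(page);
      }
    }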
700 | 699 |
701 void EvacuateNewSpacePrologue(); | 700 void EvacuateNewSpacePrologue(); |
702 | 701 void EvacuateNewSpaceEpilogue(); |
703 // Returns local pretenuring feedback. | |
704 HashMap* EvacuateNewSpaceInParallel(); | |
705 | 702 |
706 void AddEvacuationSlotsBufferSynchronized( | 703 void AddEvacuationSlotsBufferSynchronized( |
707 SlotsBuffer* evacuation_slots_buffer); | 704 SlotsBuffer* evacuation_slots_buffer); |
708 | 705 |
709 void EvacuatePages(CompactionSpaceCollection* compaction_spaces, | 706 void EvacuatePages(CompactionSpaceCollection* compaction_spaces, |
710 SlotsBuffer** evacuation_slots_buffer); | 707 SlotsBuffer** evacuation_slots_buffer); |
711 | 708 |
712 void EvacuatePagesInParallel(); | 709 void EvacuatePagesInParallel(); |
713 | 710 |
714 // The number of parallel compaction tasks, including the main thread. | 711 // The number of parallel compaction tasks, including the main thread. |
(...skipping 30 matching lines...)
745 void StartSweepSpace(PagedSpace* space); | 742 void StartSweepSpace(PagedSpace* space); |
746 | 743 |
747 // Finalizes the parallel sweeping phase. Marks all the pages that were | 744 // Finalizes the parallel sweeping phase. Marks all the pages that were |
748 // swept in parallel. | 745 // swept in parallel. |
749 void ParallelSweepSpacesComplete(); | 746 void ParallelSweepSpacesComplete(); |
750 | 747 |
751 void ParallelSweepSpaceComplete(PagedSpace* space); | 748 void ParallelSweepSpaceComplete(PagedSpace* space); |
752 | 749 |
753 // Updates store buffer and slot buffer for a pointer in a migrating object. | 750 // Updates store buffer and slot buffer for a pointer in a migrating object. |
754 void RecordMigratedSlot(Object* value, Address slot, | 751 void RecordMigratedSlot(Object* value, Address slot, |
755 SlotsBuffer** evacuation_slots_buffer); | 752 SlotsBuffer** evacuation_slots_buffer, |
| 753 LocalStoreBuffer* local_store_buffer); |
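A hedged sketch of the dispatch implied by this declaration: a pointer inside a migrated object goes into the thread-local store buffer when it points to new space, or into the per-task evacuation slots buffer when it points onto an evacuation candidate page. The Record call and the allocator handling are assumptions; the real logic lives in mark-compact.cc:

    // Illustrative only; names and conditions assumed from the declaration.
    void RecordMigratedSlotSketch(Heap* heap, SlotsBufferAllocator* allocator,
                                  Object* value, Address slot,
                                  SlotsBuffer** evacuation_slots_buffer,
                                  LocalStoreBuffer* local_store_buffer) {
      if (heap->InNewSpace(value)) {
        // Old-to-new pointer: remember it locally, merged after evacuation.
        local_store_buffer->Record(slot);
      } else if (value->IsHeapObject() &&
                 Page::FromAddress(reinterpret_cast<Address>(value))
                     ->IsEvacuationCandidate()) {
        // Old-to-old pointer into a page being evacuated: keep it in the
        // per-task slots buffer so it can be updated after the page moves.
        SlotsBuffer::AddTo(allocator, evacuation_slots_buffer,
                           reinterpret_cast<Object**>(slot),
                           SlotsBuffer::IGNORE_OVERFLOW);
      }
    }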
756 | 754 |
757 // Adds the code entry slot to the slots buffer. | 755 // Adds the code entry slot to the slots buffer. |
758 void RecordMigratedCodeEntrySlot(Address code_entry, Address code_entry_slot, | 756 void RecordMigratedCodeEntrySlot(Address code_entry, Address code_entry_slot, |
759 SlotsBuffer** evacuation_slots_buffer); | 757 SlotsBuffer** evacuation_slots_buffer); |
760 | 758 |
761 // Adds the slot of a moved code object. | 759 // Adds the slot of a moved code object. |
762 void RecordMigratedCodeObjectSlot(Address code_object, | 760 void RecordMigratedCodeObjectSlot(Address code_object, |
763 SlotsBuffer** evacuation_slots_buffer); | 761 SlotsBuffer** evacuation_slots_buffer); |
764 | 762 |
765 #ifdef DEBUG | 763 #ifdef DEBUG |
766 friend class MarkObjectVisitor; | 764 friend class MarkObjectVisitor; |
767 static void VisitObject(HeapObject* obj); | 765 static void VisitObject(HeapObject* obj); |
768 | 766 |
769 friend class UnmarkObjectVisitor; | 767 friend class UnmarkObjectVisitor; |
770 static void UnmarkObject(HeapObject* obj); | 768 static void UnmarkObject(HeapObject* obj); |
771 #endif | 769 #endif |
772 | 770 |
773 Heap* heap_; | 771 Heap* heap_; |
774 base::VirtualMemory* marking_deque_memory_; | 772 base::VirtualMemory* marking_deque_memory_; |
775 size_t marking_deque_memory_committed_; | 773 size_t marking_deque_memory_committed_; |
776 MarkingDeque marking_deque_; | 774 MarkingDeque marking_deque_; |
777 CodeFlusher* code_flusher_; | 775 CodeFlusher* code_flusher_; |
778 bool have_code_to_deoptimize_; | 776 bool have_code_to_deoptimize_; |
779 | 777 |
780 List<Page*> evacuation_candidates_; | 778 List<MemoryChunk*> evacuation_candidates_; |
781 | |
782 List<MemoryChunk*> newspace_evacuation_candidates_; | |
783 | 779 |
784 // The evacuation_slots_buffers_ are used by the compaction threads. | 780 // The evacuation_slots_buffers_ are used by the compaction threads. |
785 // When a compaction task finishes, it uses | 781 // When a compaction task finishes, it uses |
786 // AddEvacuationSlotsBufferSynchronized to add its slots buffer to the | 782 // AddEvacuationSlotsBufferSynchronized to add its slots buffer to the |
787 // evacuation_slots_buffers_ list using the evacuation_slots_buffers_mutex_ | 783 // evacuation_slots_buffers_ list using the evacuation_slots_buffers_mutex_ |
788 // lock. | 784 // lock. |
789 base::Mutex evacuation_slots_buffers_mutex_; | 785 base::Mutex evacuation_slots_buffers_mutex_; |
790 List<SlotsBuffer*> evacuation_slots_buffers_; | 786 List<SlotsBuffer*> evacuation_slots_buffers_; |
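The synchronized add mentioned in this comment presumably reduces to a lock guard around the list append; a sketch under that assumption:

    // Sketch: tasks hand their slots buffer to the collector while holding
    // evacuation_slots_buffers_mutex_, so the append itself stays trivial.
    void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
        SlotsBuffer* evacuation_slots_buffer) {
      base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_);
      evacuation_slots_buffers_.Add(evacuation_slots_buffer);
    }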
791 | 787 |
792 base::SmartPointer<FreeList> free_list_old_space_; | 788 base::SmartPointer<FreeList> free_list_old_space_; |
(...skipping 102 matching lines...)
895 private: | 891 private: |
896 MarkCompactCollector* collector_; | 892 MarkCompactCollector* collector_; |
897 }; | 893 }; |
898 | 894 |
899 | 895 |
900 const char* AllocationSpaceName(AllocationSpace space); | 896 const char* AllocationSpaceName(AllocationSpace space); |
901 } // namespace internal | 897 } // namespace internal |
902 } // namespace v8 | 898 } // namespace v8 |
903 | 899 |
904 #endif // V8_HEAP_MARK_COMPACT_H_ | 900 #endif // V8_HEAP_MARK_COMPACT_H_ |