Chromium Code Reviews

Side by Side Diff: src/heap/spaces.h

Issue 1330463002: [not for landing, heap] Parallel compaction in main GC pause (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Rebase: Remove obsolete changes Created 5 years, 3 months ago
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #ifndef V8_HEAP_SPACES_H_
6 #define V8_HEAP_SPACES_H_
7
8 #include "src/allocation.h"
9 #include "src/atomic-utils.h"
10 #include "src/base/atomicops.h"
(...skipping 250 matching lines...)
261
262 class SkipList;
263 class SlotsBuffer;
264
265 // MemoryChunk represents a memory region owned by a specific space.
266 // It is divided into the header and the body. Chunk start is always
267 // 1MB aligned. Start of the body is aligned so it can accommodate
268 // any heap object.
269 class MemoryChunk {
270 public:
271 // |kCompactingDone|: Initial compaction state of a |MemoryChunk|.
272 // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to
273 // be finalized.
274 // |kCompactingInProgress|: Parallel compaction is currently in progress.
275 // |kCompactingAborted|: Parallel compaction has been aborted, which should
276 // for now only happen in OOM scenarios.
277 // |kNoEvacuationCandidate|: Page was not an evacuation candidate by the
278 // time the compactor inspected it.
279 enum ParallelCompactingState {
280 kCompactingDone,
281 kCompactingFinalize,
282 kCompactingInProgress,
283 kCompactingAborted,
284 kNoEvacuationCandidate,
285 };
286
287 // Only works if the pointer is in the first kPageSize of the MemoryChunk.
288 static MemoryChunk* FromAddress(Address a) {
289 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
290 }
291 static const MemoryChunk* FromAddress(const byte* a) {
292 return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) &
293 ~kAlignmentMask);
294 }
295
296 // Only works for addresses in pointer spaces, not data or code spaces.
(...skipping 170 matching lines...)
467
468 ParallelSweepingState parallel_sweeping() {
469 return static_cast<ParallelSweepingState>(
470 base::Acquire_Load(&parallel_sweeping_));
471 }
472
473 void set_parallel_sweeping(ParallelSweepingState state) {
474 base::Release_Store(&parallel_sweeping_, state);
475 }
476
477 AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
478 return parallel_compaction_;
479 }
480
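
For illustration, this is how a compaction task could drive the state machine above, assuming the Value()/SetValue()/TrySetValue() interface that AtomicValue exposes elsewhere in this file; the actual transitions happen in src/heap/mark-compact.cc and may differ in detail:

// Illustrative sketch only; the real driver lives in mark-compact.cc.
bool TryClaimPageForCompaction(MemoryChunk* chunk) {
  // Exactly one task wins the CAS from kCompactingDone to
  // kCompactingInProgress and gets to evacuate this page.
  return chunk->parallel_compaction_state().TrySetValue(
      MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress);
}

void FinishPageCompaction(MemoryChunk* chunk, bool out_of_memory) {
  // A successfully compacted page still needs finalization in the main
  // pause; an OOM while compacting records kCompactingAborted instead.
  chunk->parallel_compaction_state().SetValue(
      out_of_memory ? MemoryChunk::kCompactingAborted
                    : MemoryChunk::kCompactingFinalize);
}
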
481 bool TryLock() { return mutex_->TryLock(); }
482
483 base::Mutex* mutex() { return mutex_; }
484
485 // WaitUntilSweepingCompleted only works when concurrent sweeping is in
486 // progress, in particular when we know that right before this call a
487 // sweeper thread was sweeping this page.
488 void WaitUntilSweepingCompleted() {
489 mutex_->Lock();
490 mutex_->Unlock();
(...skipping 88 matching lines...)
579 kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_;
580 + kPointerSize; // SkipList* skip_list_;
581
582 static const size_t kMinHeaderSize =
583 kWriteBarrierCounterOffset +
584 kIntptrSize // intptr_t write_barrier_counter_
585 + kIntSize // int progress_bar_
586 + kPointerSize // AtomicValue high_water_mark_
587 + kPointerSize // base::Mutex* mutex_
588 + kPointerSize // base::AtomicWord parallel_sweeping_
589 + kPointerSize // AtomicValue parallel_compaction_
590 + 5 * kPointerSize // AtomicNumber free-list statistics
591 + kPointerSize // base::AtomicWord next_chunk_
592 + kPointerSize; // base::AtomicWord prev_chunk_
593
594 // We add some more space to the computed header size to account for missing
595 // alignment requirements in our computation.
596 // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
597 static const size_t kHeaderSize = kMinHeaderSize + kIntSize;
598
599 static const int kBodyOffset =
(...skipping 108 matching lines...)
708 // to another chunk. See the comment to Page::FromAllocationTop.
709 MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
710 intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
711 intptr_t old_mark = 0;
712 do {
713 old_mark = chunk->high_water_mark_.Value();
714 } while ((new_mark > old_mark) &&
715 !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
716 }
717
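The do/while above is a lock-free "monotonic maximum": the mark only ever grows, a failed CAS means another thread raced us, and we retry only while our value is still larger. The same pattern in portable C++11, for illustration:

#include <atomic>
#include <cstdint>

// Raise *mark to new_mark unless another thread already raised it higher.
void UpdateMax(std::atomic<intptr_t>* mark, intptr_t new_mark) {
  intptr_t old_mark = mark->load();
  // compare_exchange_weak reloads old_mark on failure, so each iteration
  // re-evaluates the condition against the freshest value.
  while (new_mark > old_mark &&
         !mark->compare_exchange_weak(old_mark, new_mark)) {
  }
}
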
718
719 #define FRAGMENTATION_STATS_ACCESSORS(type, name) \
720 type name() { return name##_.Value(); } \
721 void set_##name(type name) { name##_.SetValue(name); } \
722 void add_##name(type name) { name##_.Increment(name); }
723
724 FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
725 FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
726 FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
727 FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
728 FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)
729
730 #undef FRAGMENTATION_STATS_ACCESSORS
731
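For reference, each FRAGMENTATION_STATS_ACCESSORS(type, name) invocation above expands to three thin wrappers over the corresponding AtomicNumber field, e.g. for non_available_small_blocks:

intptr_t non_available_small_blocks() {
  return non_available_small_blocks_.Value();
}
void set_non_available_small_blocks(intptr_t non_available_small_blocks) {
  non_available_small_blocks_.SetValue(non_available_small_blocks);
}
void add_non_available_small_blocks(intptr_t non_available_small_blocks) {
  non_available_small_blocks_.Increment(non_available_small_blocks);
}
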
732 protected:
733 size_t size_;
734 intptr_t flags_;
735
736 // Start and end of allocatable memory on this chunk.
737 Address area_start_;
738 Address area_end_;
739
740 // If the chunk needs to remember its memory reservation, it is stored here.
741 base::VirtualMemory reservation_;
(...skipping 12 matching lines...)
754 intptr_t write_barrier_counter_;
755 // Used by the incremental marker to keep track of the scanning progress in
756 // large objects that have a progress bar and are scanned in increments.
757 int progress_bar_;
758 // Assuming the initial allocation on a page is sequential,
759 // count highest number of bytes ever allocated on the page.
760 AtomicValue<intptr_t> high_water_mark_;
761
762 base::Mutex* mutex_;
763 base::AtomicWord parallel_sweeping_;
764 AtomicValue<ParallelCompactingState> parallel_compaction_;
765
766 // PagedSpace free-list statistics.
767 AtomicNumber<intptr_t> available_in_small_free_list_;
768 AtomicNumber<intptr_t> available_in_medium_free_list_;
769 AtomicNumber<intptr_t> available_in_large_free_list_;
770 AtomicNumber<intptr_t> available_in_huge_free_list_;
771 AtomicNumber<intptr_t> non_available_small_blocks_;
772
773 // next_chunk_ holds a pointer of type MemoryChunk
774 base::AtomicWord next_chunk_;
(...skipping 275 matching lines...)
1050 }
1051
1052 Address start;
1053 size_t size;
1054 };
1055
1056 // The global mutex guards free_list_ and allocation_list_ as GC threads may
1057 // access both lists concurrently with the main thread.
1058 base::Mutex code_range_mutex_;
1059
1060 // ReserveBlock can be called concurrently. TODO: Merge this into a single
1061 // lock for CodeRange.
1062 base::Mutex reserve_block_mutex_;
1063
1064 // Freed blocks of memory are added to the free list. When the allocation
1065 // list is exhausted, the free list is sorted and merged to make the new
1066 // allocation list.
1067 List<FreeBlock> free_list_;
1068
1069 // Memory is allocated from the free blocks on the allocation list.
1070 // The block at current_allocation_block_index_ is the current block.
1071 List<FreeBlock> allocation_list_;
1072 int current_allocation_block_index_;
1073
(...skipping 572 matching lines...)
1646 // and limit when the object we need to allocate is 256-2047 words in size.
1647 // These spaces are called large.
1648 // At least 16384 words. This list is for objects of 2048 words or larger.
1649 // Empty pages are added to this list. These spaces are called huge.
1650 class FreeList {
1651 public:
1652 explicit FreeList(PagedSpace* owner);
1653
1654 intptr_t Concatenate(FreeList* free_list);
1655
1656 // Evenly divide available blocks into {free_lists_to_fill}.
1657 void Divide(FreeList** free_lists_to_fill, size_t num);
1658
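The intended call pattern for Divide, as a sketch (the implementation is in src/heap/spaces.cc, and the helper name below is hypothetical): before spawning compaction tasks, the main thread hands out the space's free memory to one task-local list each, so tasks can allocate without contending on a lock:

// Hypothetical helper: spread |source|'s blocks over |num| task-local lists.
void SplitFreeListForTasks(FreeList* source, FreeList** task_lists,
                           size_t num) {
  source->Divide(task_lists, num);
  // |source| has handed its blocks to task_lists[0..num-1]; each task now
  // allocates from its own list during parallel compaction.
}
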
1659 // Clear the free list.
1660 void Reset();
1661
1662 // Return the number of bytes available on the free list.
1663 intptr_t available() {
1664 return small_list_.available() + medium_list_.available() +
1665 large_list_.available() + huge_list_.available();
1666 }
1667
1668 // Place a node on the free list. The block of size 'size_in_bytes'
(...skipping 40 matching lines...)
1709 void RepairLists(Heap* heap);
1710
1711 intptr_t EvictFreeListItems(Page* p);
1712 bool ContainsPageFreeListItems(Page* p);
1713
1714 FreeListCategory* small_list() { return &small_list_; }
1715 FreeListCategory* medium_list() { return &medium_list_; }
1716 FreeListCategory* large_list() { return &large_list_; }
1717 FreeListCategory* huge_list() { return &huge_list_; }
1718
1719
1720 private:
1721 // The size range of blocks, in bytes.
1722 static const int kMinBlockSize = 3 * kPointerSize;
1723 static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
1724
1725 static const int kSmallListMin = 0x1f * kPointerSize;
1726 static const int kSmallListMax = 0xff * kPointerSize;
1727 static const int kMediumListMax = 0x7ff * kPointerSize;
1728 static const int kLargeListMax = 0x3fff * kPointerSize;
1729 static const int kSmallAllocationMax = kSmallListMin;
1730 static const int kMediumAllocationMax = kSmallListMax;
1731 static const int kLargeAllocationMax = kMediumListMax;
1732
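Based on the constants above, the size-to-category mapping looks roughly like the following sketch (hedged; the authoritative logic is FreeList::Free and FindNodeFor in src/heap/spaces.cc):

// Hypothetical illustration of the free-list size classes.
const char* CategoryFor(int size_in_bytes) {
  if (size_in_bytes <= kSmallListMin) return "tiny (tracked as waste)";
  if (size_in_bytes <= kSmallListMax) return "small";   // up to 255 words
  if (size_in_bytes <= kMediumListMax) return "medium"; // up to 2047 words
  if (size_in_bytes <= kLargeListMax) return "large";   // up to 16383 words
  return "huge";
}
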
1733 FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
1734
1735 PagedSpace* owner_;
1736 Heap* heap_;
1737 FreeListCategory small_list_;
1738 FreeListCategory medium_list_;
1739 FreeListCategory large_list_;
1740 FreeListCategory huge_list_;
1741
1742 friend class CompactionSpace;
1743
1744 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
1745 };
1746
1747
1748 class AllocationResult {
1749 public:
1750 // Implicit constructor from Object*.
1751 AllocationResult(Object* object) // NOLINT
1752 : object_(object) {
1753 // AllocationResults can't return Smis, which are used to represent
2783 IncreaseCapacity(size_in_bytes);
2784 Free(start, size_in_bytes);
2785 }
2786
2787 protected:
2788 // The space is temporary and not included in any snapshots.
2789 virtual bool snapshotable() { return false; }
2790 };
2791
2792
2793 class CompactionSpaces : public Malloced {
2794 public:
2795 explicit CompactionSpaces(Heap* heap)
2796 : old_space_(new CompactionSpace(heap, OLD_SPACE,
2797 Executability::NOT_EXECUTABLE)),
2798 code_space_(
2799 new CompactionSpace(heap, CODE_SPACE, Executability::EXECUTABLE)) {}
2800
2801 ~CompactionSpaces() {
2802 delete old_space_;
2803 delete code_space_;
2804 }
2805
2806 CompactionSpace* Get(AllocationSpace space) {
2807 switch (space) {
2808 case OLD_SPACE:
2809 DCHECK(old_space_ != nullptr);
2810 return old_space_;
2811 case CODE_SPACE:
2812 DCHECK(code_space_ != nullptr);
2813 return code_space_;
2814 default:
2815 UNREACHABLE();
2816 }
2817 UNREACHABLE();
2818 return nullptr;
2819 }
2820
2821 private:
2822 CompactionSpace* old_space_;
2823 CompactionSpace* code_space_;
2824 };
2825
2826
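A sketch of the intended per-task usage, based only on the interface above (the real call sites are in src/heap/mark-compact.cc, and the function name is hypothetical):

void RunCompactionTask(Heap* heap) {
  CompactionSpaces* spaces = new CompactionSpaces(heap);
  CompactionSpace* old_space = spaces->Get(OLD_SPACE);    // regular objects
  CompactionSpace* code_space = spaces->Get(CODE_SPACE);  // executable code
  // ... evacuate live objects into old_space and code_space ...
  delete spaces;  // tears down both task-local spaces
}
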
2827 // -----------------------------------------------------------------------------
2828 // Old object space (includes the old space of objects and code space)
2829
2830 class OldSpace : public PagedSpace {
2831 public:
2832 // Creates an old space object. The constructor does not allocate pages
2833 // from the OS.
2834 OldSpace(Heap* heap, AllocationSpace id, Executability executable)
2835 : PagedSpace(heap, id, executable) {}
2836 };
(...skipping 180 matching lines...)
3017 count = 0;
3018 }
3019 // Must be small, since an iteration is used for lookup.
3020 static const int kMaxComments = 64;
3021 };
3022 #endif
3023 }
3024 } // namespace v8::internal
3025
3026 #endif // V8_HEAP_SPACES_H_