Chromium Code Reviews

Unified Diff: src/heap/spaces.h

Issue 1347873003: Revert of [heap] Introduce parallel compaction algorithm. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@counters-2nd-try
Patch Set: Created 5 years, 3 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #ifndef V8_HEAP_SPACES_H_
 #define V8_HEAP_SPACES_H_

 #include "src/allocation.h"
 #include "src/atomic-utils.h"
 #include "src/base/atomicops.h"
(...skipping 250 matching lines...)

 class SkipList;
 class SlotsBuffer;

 // MemoryChunk represents a memory region owned by a specific space.
 // It is divided into the header and the body. Chunk start is always
 // 1MB aligned. Start of the body is aligned so it can accommodate
 // any heap object.
 class MemoryChunk {
  public:
-  // |kCompactingDone|: Initial compaction state of a |MemoryChunk|.
-  // |kCompactingInProgress|: Parallel compaction is currently in progress.
-  // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to
-  //                        be finalized.
-  // |kCompactingAborted|: Parallel compaction has been aborted, which should
-  //                       for now only happen in OOM scenarios.
-  enum ParallelCompactingState {
-    kCompactingDone,
-    kCompactingInProgress,
-    kCompactingFinalize,
-    kCompactingAborted,
-  };
-
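The four states being removed form a small per-chunk state machine: a chunk starts out kCompactingDone, a compaction task might claim it by moving it to kCompactingInProgress, and the chunk ends up kCompactingFinalize on success or kCompactingAborted on OOM. A minimal standalone sketch of such transitions, using std::atomic in place of V8's AtomicValue (the helper names here are hypothetical, not V8 API):

#include <atomic>

enum ParallelCompactingState {
  kCompactingDone,
  kCompactingInProgress,
  kCompactingFinalize,
  kCompactingAborted,
};

struct ChunkStateSketch {
  std::atomic<ParallelCompactingState> state{kCompactingDone};

  // Returns true if this task won the race to compact the chunk.
  bool TryClaimForCompaction() {
    ParallelCompactingState expected = kCompactingDone;
    return state.compare_exchange_strong(expected, kCompactingInProgress);
  }

  // Called by the winning task once evacuation of the chunk succeeded.
  void MarkForFinalization() { state.store(kCompactingFinalize); }

  // Called when evacuation ran out of memory.
  void Abort() { state.store(kCompactingAborted); }
};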
   // Only works if the pointer is in the first kPageSize of the MemoryChunk.
   static MemoryChunk* FromAddress(Address a) {
     return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
   }
   static const MemoryChunk* FromAddress(const byte* a) {
     return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) &
                                                 ~kAlignmentMask);
   }

   // Only works for addresses in pointer spaces, not data or code spaces.
(...skipping 170 matching lines...)
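Because chunk starts are 1MB aligned (per the class comment above), FromAddress() recovers the owning chunk by masking off the low bits of any pointer into the chunk's first kPageSize bytes. A standalone sketch of the arithmetic, taking the 2^20 alignment from the comment (V8 derives its actual kAlignmentMask elsewhere in this file):

#include <cassert>
#include <cstdint>

constexpr uintptr_t kChunkAlignment = uintptr_t{1} << 20;  // 1MB, per the class comment
constexpr uintptr_t kAlignmentMaskSketch = kChunkAlignment - 1;

uintptr_t ChunkFromAddress(uintptr_t addr) {
  return addr & ~kAlignmentMaskSketch;  // clear the low 20 bits
}

int main() {
  uintptr_t chunk = uintptr_t{1} << 30;  // some 1MB-aligned chunk start
  assert(ChunkFromAddress(chunk + 0x1234) == chunk);
}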

   ParallelSweepingState parallel_sweeping() {
     return static_cast<ParallelSweepingState>(
         base::Acquire_Load(&parallel_sweeping_));
   }

   void set_parallel_sweeping(ParallelSweepingState state) {
     base::Release_Store(&parallel_sweeping_, state);
   }

-  AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
-    return parallel_compaction_;
-  }
-
   bool TryLock() { return mutex_->TryLock(); }

   base::Mutex* mutex() { return mutex_; }

   // WaitUntilSweepingCompleted only works when concurrent sweeping is in
   // progress. In particular, when we know that right before this call a
   // sweeper thread was sweeping this page.
   void WaitUntilSweepingCompleted() {
     mutex_->Lock();
     mutex_->Unlock();
(...skipping 88 matching lines...)
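The empty-looking Lock()/Unlock() pair in WaitUntilSweepingCompleted() is a deliberate idiom: a sweeper thread holds the page mutex for the whole sweep, so acquiring and immediately releasing the mutex blocks the caller exactly until the in-progress sweep finishes. A standalone sketch with std::mutex standing in for base::Mutex:

#include <mutex>

std::mutex page_mutex;  // stands in for MemoryChunk::mutex_

void SweeperThread() {
  std::lock_guard<std::mutex> guard(page_mutex);  // held for the whole sweep
  // ... sweep the page ...
}

void WaitUntilSweepingCompleted() {
  page_mutex.lock();    // blocks until the sweeper releases the mutex
  page_mutex.unlock();  // nothing to protect; locking was only for waiting
}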
       kSlotsBufferOffset + kPointerSize  // SlotsBuffer* slots_buffer_;
       + kPointerSize;                    // SkipList* skip_list_;

   static const size_t kMinHeaderSize =
       kWriteBarrierCounterOffset +
       kIntptrSize         // intptr_t write_barrier_counter_
       + kIntSize          // int progress_bar_
       + kPointerSize      // AtomicValue high_water_mark_
       + kPointerSize      // base::Mutex* mutex_
       + kPointerSize      // base::AtomicWord parallel_sweeping_
-      + kPointerSize      // AtomicValue parallel_compaction_
       + 5 * kPointerSize  // AtomicNumber free-list statistics
       + kPointerSize      // base::AtomicWord next_chunk_
       + kPointerSize;     // base::AtomicWord prev_chunk_

   // We add some more space to the computed header size to account for missing
   // alignment requirements in our computation.
   // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
   static const size_t kHeaderSize = kMinHeaderSize + kIntSize;

   static const int kBodyOffset =
(...skipping 140 matching lines...)
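For illustration, on an LP64 target the tail of the header (everything after kWriteBarrierCounterOffset) sums as follows once the revert drops the 8-byte parallel_compaction_ slot. The size constants below are assumptions for the sketch, not V8's definitions:

#include <cstddef>

constexpr size_t kPointerSize = 8, kIntptrSize = 8, kIntSize = 4;  // LP64 assumption
constexpr size_t kTailFields =
    kIntptrSize          // write_barrier_counter_
    + kIntSize           // progress_bar_
    + kPointerSize       // high_water_mark_
    + kPointerSize       // mutex_
    + kPointerSize       // parallel_sweeping_
    + 5 * kPointerSize   // free-list statistics
    + kPointerSize       // next_chunk_
    + kPointerSize;      // prev_chunk_
static_assert(kTailFields == 92, "8 + 4 + 8 + 8 + 8 + 40 + 8 + 8");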
   intptr_t write_barrier_counter_;
   // Used by the incremental marker to keep track of the scanning progress in
   // large objects that have a progress bar and are scanned in increments.
   int progress_bar_;
   // Assuming the initial allocation on a page is sequential,
   // count highest number of bytes ever allocated on the page.
   AtomicValue<intptr_t> high_water_mark_;

   base::Mutex* mutex_;
   base::AtomicWord parallel_sweeping_;
-  AtomicValue<ParallelCompactingState> parallel_compaction_;

   // PagedSpace free-list statistics.
   AtomicNumber<intptr_t> available_in_small_free_list_;
   AtomicNumber<intptr_t> available_in_medium_free_list_;
   AtomicNumber<intptr_t> available_in_large_free_list_;
   AtomicNumber<intptr_t> available_in_huge_free_list_;
   AtomicNumber<intptr_t> non_available_small_blocks_;

   // next_chunk_ holds a pointer of type MemoryChunk
   base::AtomicWord next_chunk_;
(...skipping 240 matching lines...)
   // Allocates a chunk of memory from the large-object portion of
   // the code range. On platforms with no separate code range, should
   // not be called.
   MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
                                             const size_t commit_size,
                                             size_t* allocated);
   bool CommitRawMemory(Address start, size_t length);
   bool UncommitRawMemory(Address start, size_t length);
   void FreeRawMemory(Address buf, size_t length);

+  void ReserveEmergencyBlock();
+  void ReleaseEmergencyBlock();
+
  private:
   // Frees the range of virtual memory, and frees the data structures used to
   // manage it.
   void TearDown();

   Isolate* isolate_;

   // The reserved range of virtual memory that all code objects are put in.
   base::VirtualMemory* code_range_;
   // Plain old data class, just a struct plus a constructor.
(...skipping 22 matching lines...)
   // Freed blocks of memory are added to the free list. When the allocation
   // list is exhausted, the free list is sorted and merged to make the new
   // allocation list.
   List<FreeBlock> free_list_;

   // Memory is allocated from the free blocks on the allocation list.
   // The block at current_allocation_block_index_ is the current block.
   List<FreeBlock> allocation_list_;
   int current_allocation_block_index_;

+  // The emergency block guarantees that we can always allocate a page for
+  // evacuation candidates when code space is compacted. The emergency block
+  // is reserved immediately after GC and is released immediately before
+  // allocating a page for evacuation.
+  FreeBlock emergency_block_;
+
   // Finds a block on the allocation list that contains at least the
   // requested amount of memory. If none is found, sorts and merges
   // the existing free memory blocks, and searches again.
   // If none can be found, returns false.
   bool GetNextAllocationBlock(size_t requested);
   // Compares the start addresses of two free blocks.
   static int CompareFreeBlockAddress(const FreeBlock* left,
                                      const FreeBlock* right);
   bool ReserveBlock(const size_t requested_size, FreeBlock* block);
   void ReleaseBlock(const FreeBlock* block);
(...skipping 912 matching lines...)
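The comments above describe a two-list scheme: allocation is served from allocation_list_, and only when it runs dry is free_list_ sorted by start address, coalesced, and promoted to become the new allocation list. A simplified standalone sketch of that refill step (V8's version works on List<FreeBlock> and advances current_allocation_block_index_; this sketch only reports whether a large-enough block exists):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

struct FreeBlock {
  uintptr_t start;
  size_t size;
};

struct CodeRangeSketch {
  std::vector<FreeBlock> free_list_;
  std::vector<FreeBlock> allocation_list_;

  // Returns true if, possibly after one sort-and-merge refill, some block on
  // the allocation list can satisfy the request.
  bool GetNextAllocationBlock(size_t requested) {
    for (const FreeBlock& b : allocation_list_)
      if (b.size >= requested) return true;

    // Allocation list exhausted: sort freed blocks by address, coalesce
    // adjacent ones, and promote the result to be the new allocation list.
    std::sort(free_list_.begin(), free_list_.end(),
              [](const FreeBlock& l, const FreeBlock& r) {
                return l.start < r.start;
              });
    allocation_list_.clear();
    for (const FreeBlock& b : free_list_) {
      if (!allocation_list_.empty() &&
          allocation_list_.back().start + allocation_list_.back().size ==
              b.start) {
        allocation_list_.back().size += b.size;  // merge with predecessor
      } else {
        allocation_list_.push_back(b);
      }
    }
    free_list_.clear();

    for (const FreeBlock& b : allocation_list_)
      if (b.size >= requested) return true;
    return false;  // even after merging, nothing is large enough
  }
};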
   void EvictEvacuationCandidatesFromFreeLists();

   bool CanExpand(size_t size);

   // Returns the number of total pages in this space.
   int CountTotalPages();

   // Return size of allocatable area on a page in this space.
   inline int AreaSize() { return area_size_; }

+  void CreateEmergencyMemory();
+  void FreeEmergencyMemory();
+  void UseEmergencyMemory();
+  intptr_t MaxEmergencyMemoryAllocated();
+
+  bool HasEmergencyMemory() { return emergency_memory_ != NULL; }
+
   // Merges {other} into the current space. Note that this modifies {other},
   // e.g., removes its bump pointer area and resets statistics.
   void MergeCompactionSpace(CompactionSpace* other);

-  void MoveOverFreeMemory(PagedSpace* other);
-
  protected:
   // PagedSpaces that should be included in snapshots have different, i.e.,
   // smaller, initial pages.
   virtual bool snapshotable() { return true; }

   FreeList* free_list() { return &free_list_; }

   bool HasPages() { return anchor_.next_page() != &anchor_; }

   // Cleans up the space, frees all pages in this space except those belonging
(...skipping 40 matching lines...)
   // The number of free bytes which could be reclaimed by advancing the
   // concurrent sweeper threads.
   intptr_t unswept_free_bytes_;

   // The sweeper threads iterate over the list of pointer and data space pages
   // and sweep these pages concurrently. They will stop sweeping after the
   // end_of_unswept_pages_ page.
   Page* end_of_unswept_pages_;

+  // Emergency memory is the memory of a full page for a given space,
+  // allocated conservatively before evacuating a page. If compaction fails
+  // due to an out-of-memory error, the emergency memory can be used to
+  // complete compaction. If not used, the emergency memory is released
+  // after compaction.
+  MemoryChunk* emergency_memory_;
+
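Taken together with the CreateEmergencyMemory()/UseEmergencyMemory()/FreeEmergencyMemory() declarations restored earlier, the comment above describes a reserve-early, use-on-OOM protocol. A standalone sketch of that lifecycle (the allocation hooks are stand-ins for this sketch, not V8's allocator, and the method bodies are illustrative):

#include <cstdlib>

struct MemoryChunk;  // opaque in this sketch

static MemoryChunk* AllocateFullPage() {
  return static_cast<MemoryChunk*>(std::malloc(1 << 20));  // stand-in for a real page
}
static void FreeFullPage(MemoryChunk* chunk) { std::free(chunk); }

struct PagedSpaceSketch {
  MemoryChunk* emergency_memory_ = nullptr;

  // Before evacuating a page: conservatively reserve one full page.
  void CreateEmergencyMemory() { emergency_memory_ = AllocateFullPage(); }

  bool HasEmergencyMemory() { return emergency_memory_ != nullptr; }

  // Compaction hit OOM: hand the reserved page over so evacuation can finish.
  void UseEmergencyMemory() {
    // ... link emergency_memory_ into this space's page list ...
    emergency_memory_ = nullptr;
  }

  // Compaction succeeded without it: return the page to the allocator.
  void FreeEmergencyMemory() {
    FreeFullPage(emergency_memory_);
    emergency_memory_ = nullptr;
  }
};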
   // Mutex guarding any concurrent access to the space.
   base::Mutex space_mutex_;

   friend class MarkCompactCollector;
   friend class PageIterator;
 };


 class NumberAndSizeInfo BASE_EMBEDDED {
  public:
(...skipping 678 matching lines...)
     IncreaseCapacity(size_in_bytes);
     Free(start, size_in_bytes);
   }

  protected:
   // The space is temporary and not included in any snapshots.
   virtual bool snapshotable() { return false; }
 };


-// A collection of |CompactionSpace|s used by a single compaction task.
-class CompactionSpaceCollection : public Malloced {
- public:
-  explicit CompactionSpaceCollection(Heap* heap)
-      : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
-        code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
-
-  CompactionSpace* Get(AllocationSpace space) {
-    switch (space) {
-      case OLD_SPACE:
-        return &old_space_;
-      case CODE_SPACE:
-        return &code_space_;
-      default:
-        UNREACHABLE();
-    }
-    UNREACHABLE();
-    return nullptr;
-  }
-
- private:
-  CompactionSpace old_space_;
-  CompactionSpace code_space_;
-};
-
-
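For reference, the intended use of the class being removed: each compaction task owns a private collection of spaces to allocate into, which the main thread later merges back. A hypothetical fragment, not a self-contained program; `heap` and the MergeCompactionSpace() call mirror names elsewhere in this file, while the surrounding task body is illustrative only:

// Hypothetical per-task usage of CompactionSpaceCollection.
CompactionSpaceCollection* task_spaces = new CompactionSpaceCollection(heap);
CompactionSpace* old_space = task_spaces->Get(OLD_SPACE);
// ... evacuate live objects, allocating from old_space ...
heap->old_space()->MergeCompactionSpace(old_space);
delete task_spaces;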
 // -----------------------------------------------------------------------------
 // Old object space (includes the old space of objects and code space)

 class OldSpace : public PagedSpace {
  public:
   // Creates an old space object. The constructor does not allocate pages
   // from the OS.
   OldSpace(Heap* heap, AllocationSpace id, Executability executable)
       : PagedSpace(heap, id, executable) {}
 };
(...skipping 180 matching lines...)
     count = 0;
   }
   // Must be small, since an iteration is used for lookup.
   static const int kMaxComments = 64;
 };
 #endif
 }
 }  // namespace v8::internal

 #endif  // V8_HEAP_SPACES_H_