Chromium Code Reviews

Unified Diff: src/heap/spaces.h

Issue 1356533002: Reland "[heap] Introduce parallel compaction algorithm." (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Fix accounting for moved free list memory Created 5 years, 3 months ago
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_H_
#define V8_HEAP_SPACES_H_

#include "src/allocation.h"
#include "src/atomic-utils.h"
#include "src/base/atomicops.h"
(...skipping 250 matching lines...)

class SkipList;
class SlotsBuffer;

// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk {
 public:
+  // |kCompactingDone|: Initial compaction state of a |MemoryChunk|.
+  // |kCompactingInProgress|: Parallel compaction is currently in progress.
+  // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to
+  //   be finalized.
+  // |kCompactingAborted|: Parallel compaction has been aborted, which should
+  //   for now only happen in OOM scenarios.
+  enum ParallelCompactingState {
+    kCompactingDone,
+    kCompactingInProgress,
+    kCompactingFinalize,
+    kCompactingAborted,
+  };
+
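A note on the intended protocol (a sketch, not part of this header): a compaction task claims a chunk by atomically moving it from kCompactingDone to kCompactingInProgress, so exactly one task wins each chunk, and the main thread later finalizes or aborts. Assuming AtomicValue (src/atomic-utils.h) exposes TrySetValue/SetValue, and with a hypothetical helper name, the claim step would look like:

    // Sketch only; TryClaimForCompaction is illustrative, not this CL's API.
    // The parallel_compaction_state() accessor is added further down in
    // this diff.
    bool TryClaimForCompaction(MemoryChunk* chunk) {
      // Succeeds for exactly one task per chunk:
      // kCompactingDone -> kCompactingInProgress.
      return chunk->parallel_compaction_state().TrySetValue(
          MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress);
    }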
  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromAddress(Address a) {
    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
  }
  static const MemoryChunk* FromAddress(const byte* a) {
    return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) &
                                                ~kAlignmentMask);
  }
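FromAddress works because chunk starts are 1MB aligned (see the class comment above): clearing the low 20 bits of any pointer within the first megabyte of a chunk yields the chunk header. A worked example under that assumption; for large-object chunks that span more than 1MB, addresses past the first kPageSize would mask back to the wrong header, which is the restriction the comment states:

    #include <cstdint>

    // Worked example, assuming 1MB alignment (kAlignmentMask == 0xFFFFF):
    const uintptr_t kAlignmentMask = (uintptr_t{1} << 20) - 1;
    const uintptr_t interior = 0x40372AB8;               // pointer into a chunk
    const uintptr_t chunk = interior & ~kAlignmentMask;  // == 0x40300000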

  // Only works for addresses in pointer spaces, not data or code spaces.
(...skipping 170 matching lines...)

  ParallelSweepingState parallel_sweeping() {
    return static_cast<ParallelSweepingState>(
        base::Acquire_Load(&parallel_sweeping_));
  }

  void set_parallel_sweeping(ParallelSweepingState state) {
    base::Release_Store(&parallel_sweeping_, state);
  }

+  AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
+    return parallel_compaction_;
+  }
+
  bool TryLock() { return mutex_->TryLock(); }

  base::Mutex* mutex() { return mutex_; }

  // WaitUntilSweepingCompleted only works when concurrent sweeping is in
  // progress. In particular, it should only be used when we know that right
  // before this call a sweeper thread was sweeping this page.
  void WaitUntilSweepingCompleted() {
    mutex_->Lock();
    mutex_->Unlock();
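The Lock()/Unlock() pair works because a sweeper thread holds the page's mutex for the entire duration of the sweep; acquiring the mutex therefore blocks exactly until the in-progress sweep finishes. A sketch of the sweeper's side under that assumption (SweepPage is an illustrative name, not this CL's API):

    // Illustrative sweeper side of the handshake.
    void SweepPage(Page* page) {
      // Held for the whole sweep, so WaitUntilSweepingCompleted()'s
      // Lock()/Unlock() pair blocks until this function returns.
      base::LockGuard<base::Mutex> guard(page->mutex());
      // ... free dead objects and rebuild the page's free lists ...
    }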
(...skipping 88 matching lines...)
      kSlotsBufferOffset + kPointerSize  // SlotsBuffer* slots_buffer_;
      + kPointerSize;                    // SkipList* skip_list_;

  static const size_t kMinHeaderSize =
      kWriteBarrierCounterOffset +
      kIntptrSize          // intptr_t write_barrier_counter_
      + kIntSize           // int progress_bar_
      + kPointerSize       // AtomicValue high_water_mark_
      + kPointerSize       // base::Mutex* mutex_
      + kPointerSize       // base::AtomicWord parallel_sweeping_
+     + kPointerSize       // AtomicValue parallel_compaction_
      + 5 * kPointerSize   // AtomicNumber free-list statistics
      + kPointerSize       // base::AtomicWord next_chunk_
      + kPointerSize;      // base::AtomicWord prev_chunk_

  // We add some more space to the computed header size to account for missing
  // alignment requirements in our computation.
  // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
  static const size_t kHeaderSize = kMinHeaderSize + kIntSize;

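kMinHeaderSize is maintained by hand: every field of MemoryChunk must be mirrored by a size term above, which is why this CL adds a kPointerSize line for parallel_compaction_ alongside the field itself. A hypothetical compile-time guard, not part of this change, could catch drift; it would have to live in spaces.cc, where MemoryChunk is a complete type:

    // Hypothetical guard, not in this CL: fails to compile if a field is
    // added to MemoryChunk without updating the size accounting above.
    STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);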
  static const int kBodyOffset =
(...skipping 140 matching lines...)
  intptr_t write_barrier_counter_;
  // Used by the incremental marker to keep track of the scanning progress in
  // large objects that have a progress bar and are scanned in increments.
  int progress_bar_;
  // Assuming the initial allocation on a page is sequential,
  // count highest number of bytes ever allocated on the page.
  AtomicValue<intptr_t> high_water_mark_;

  base::Mutex* mutex_;
  base::AtomicWord parallel_sweeping_;
+ AtomicValue<ParallelCompactingState> parallel_compaction_;

  // PagedSpace free-list statistics.
  AtomicNumber<intptr_t> available_in_small_free_list_;
  AtomicNumber<intptr_t> available_in_medium_free_list_;
  AtomicNumber<intptr_t> available_in_large_free_list_;
  AtomicNumber<intptr_t> available_in_huge_free_list_;
  AtomicNumber<intptr_t> non_available_small_blocks_;

  // next_chunk_ holds a pointer of type MemoryChunk
  base::AtomicWord next_chunk_;
(...skipping 240 matching lines...)
  // Allocates a chunk of memory from the large-object portion of
  // the code range. On platforms with no separate code range, should
  // not be called.
  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
                                            const size_t commit_size,
                                            size_t* allocated);
  bool CommitRawMemory(Address start, size_t length);
  bool UncommitRawMemory(Address start, size_t length);
  void FreeRawMemory(Address buf, size_t length);

- void ReserveEmergencyBlock();
- void ReleaseEmergencyBlock();
-
 private:
  // Frees the range of virtual memory, and frees the data structures used to
  // manage it.
  void TearDown();

  Isolate* isolate_;

  // The reserved range of virtual memory that all code objects are put in.
  base::VirtualMemory* code_range_;
  // Plain old data class, just a struct plus a constructor.
(...skipping 22 matching lines...)
  // Freed blocks of memory are added to the free list. When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  List<FreeBlock> free_list_;

  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  List<FreeBlock> allocation_list_;
  int current_allocation_block_index_;

- // Emergency block guarantees that we can always allocate a page for
- // evacuation candidates when code space is compacted. Emergency block is
- // reserved immediately after GC and is released immediately before
- // allocating a page for evacuation.
- FreeBlock emergency_block_;
-
  // Finds a block on the allocation list that contains at least the
  // requested amount of memory. If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, returns false.
  bool GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);
  bool ReserveBlock(const size_t requested_size, FreeBlock* block);
  void ReleaseBlock(const FreeBlock* block);
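The refill step that GetNextAllocationBlock's comment describes (sort the free list by address, coalesce adjacent blocks, swap the result in as the new allocation list) can be modeled as below. This is a simplified sketch of the documented behavior using std:: containers, not the CL's code:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct FreeBlock { uintptr_t start; size_t size; };

    // Simplified model of the refill: when allocation_list_ is exhausted,
    // sort free blocks by address, merge neighbors, and make the merged
    // list the new allocation list.
    bool RefillAllocationList(std::vector<FreeBlock>* free_list,
                              std::vector<FreeBlock>* allocation_list) {
      if (free_list->empty()) return false;
      std::sort(free_list->begin(), free_list->end(),
                [](const FreeBlock& a, const FreeBlock& b) {
                  return a.start < b.start;  // cf. CompareFreeBlockAddress
                });
      std::vector<FreeBlock> merged;
      for (const FreeBlock& b : *free_list) {
        if (!merged.empty() &&
            merged.back().start + merged.back().size == b.start) {
          merged.back().size += b.size;  // coalesce adjacent blocks
        } else {
          merged.push_back(b);
        }
      }
      free_list->clear();
      *allocation_list = std::move(merged);
      return true;
    }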
(...skipping 461 matching lines...)
  // Merge {other} into {this}.
  void Merge(const AllocationStats& other) {
    capacity_ += other.capacity_;
    size_ += other.size_;
    waste_ += other.waste_;
    if (other.max_capacity_ > max_capacity_) {
      max_capacity_ = other.max_capacity_;
    }
  }

+  void DecreaseCapacity(intptr_t size_in_bytes) {
+    capacity_ -= size_in_bytes;
+    DCHECK_GE(capacity_, 0);
+  }
+
+  void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; }
+
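These two new methods are the primitive behind the patch-set title, "Fix accounting for moved free list memory": when N bytes of free-list memory move from one space to another, the donor's capacity must drop and the recipient's must grow by the same amount, keeping heap-wide totals constant. A minimal sketch (MoveFreeBytes is a hypothetical helper; the real call sites are in spaces.cc):

    // Illustrative: balanced capacity accounting for moved free memory.
    void MoveFreeBytes(AllocationStats* from, AllocationStats* to,
                       intptr_t n) {
      from->DecreaseCapacity(n);  // DCHECKs that capacity_ stays >= 0
      to->IncreaseCapacity(n);
    }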
 private:
  intptr_t capacity_;
  intptr_t max_capacity_;
  intptr_t size_;
  intptr_t waste_;
};


// -----------------------------------------------------------------------------
// Free lists for old object spaces

// The free list category holds a pointer to the top element and a pointer to
// the end element of the linked list of free memory blocks.
class FreeListCategory {
 public:
-  FreeListCategory() : top_(0), end_(NULL), available_(0) {}
+  explicit FreeListCategory(FreeList* owner)
+      : top_(0), end_(NULL), available_(0), owner_(owner) {}

  intptr_t Concatenate(FreeListCategory* category);

  void Reset();

  void Free(FreeSpace* node, int size_in_bytes);

  FreeSpace* PickNodeFromList(int* node_size);
  FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size);

(...skipping 19 matching lines...)

  base::Mutex* mutex() { return &mutex_; }

  bool IsEmpty() { return top() == 0; }

#ifdef DEBUG
  intptr_t SumFreeList();
  int FreeListLength();
#endif

+  FreeList* owner() { return owner_; }
+
 private:
  // top_ points to the top FreeSpace* in the free list category.
  base::AtomicWord top_;
  FreeSpace* end_;
  base::Mutex mutex_;

  // Total available bytes in all blocks of this free list category.
  int available_;
+
+  FreeList* owner_;
};

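The new owner_ back-pointer matters for Concatenate(): when all free nodes move from one category to another, each category can now reach its owning FreeList (and, via FreeList::owner() below, the owning PagedSpace), so the moved bytes can be debited and credited on the right space's statistics. A sketch of that flow, assuming the elided accessors include available() (ConcatenateSketch is illustrative, not the CL's code):

    // Illustrative accounting flow for concatenation; the real
    // Concatenate() also splices the FreeSpace node lists.
    intptr_t ConcatenateSketch(FreeListCategory* to, FreeListCategory* from) {
      base::LockGuard<base::Mutex> to_guard(to->mutex());
      base::LockGuard<base::Mutex> from_guard(from->mutex());
      intptr_t moved = from->available();  // assumed accessor (elided above)
      // ... splice from's node list onto to's, then from->Reset() ...
      // to->owner() / from->owner() let each side update its FreeList's
      // (and ultimately its PagedSpace's) byte counts by |moved|.
      return moved;
    }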

// The free list for the old space. The free list is organized in such a way
// as to encourage objects allocated around the same time to be near each
// other. The normal way to allocate is intended to be by bumping a 'top'
// pointer until it hits a 'limit' pointer. When the limit is hit we need to
// find a new space to allocate from. This is done with the free list, which
// is divided up into rough categories to cut down on waste. Having finer
// categories would scatter allocation more.
(...skipping 72 matching lines...)
  void RepairLists(Heap* heap);

  intptr_t EvictFreeListItems(Page* p);
  bool ContainsPageFreeListItems(Page* p);

  FreeListCategory* small_list() { return &small_list_; }
  FreeListCategory* medium_list() { return &medium_list_; }
  FreeListCategory* large_list() { return &large_list_; }
  FreeListCategory* huge_list() { return &huge_list_; }

+  PagedSpace* owner() { return owner_; }
+
 private:
  // The size range of blocks, in bytes.
  static const int kMinBlockSize = 3 * kPointerSize;
  static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;

  static const int kSmallListMin = 0x1f * kPointerSize;
  static const int kSmallListMax = 0xff * kPointerSize;
  static const int kMediumListMax = 0x7ff * kPointerSize;
  static const int kLargeListMax = 0x3fff * kPointerSize;
  static const int kSmallAllocationMax = kSmallListMin;
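These constants partition free-block sizes into the four categories above. On a 64-bit build (kPointerSize == 8) the boundaries work out to 2040, 16376, and 131064 bytes; a simplified selector mirroring them (blocks below kSmallListMin are handled separately, cf. non_available_small_blocks_):

    // Worked example of the category boundaries, assuming kPointerSize == 8.
    enum class Category { kSmall, kMedium, kLarge, kHuge };

    Category SelectCategory(int size_in_bytes) {
      const int kSmallListMax = 0xff * 8;    // 2040 bytes
      const int kMediumListMax = 0x7ff * 8;  // 16376 bytes
      const int kLargeListMax = 0x3fff * 8;  // 131064 bytes
      if (size_in_bytes <= kSmallListMax) return Category::kSmall;
      if (size_in_bytes <= kMediumListMax) return Category::kMedium;
      if (size_in_bytes <= kLargeListMax) return Category::kLarge;
      return Category::kHuge;
    }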
(...skipping 276 matching lines...)
  void EvictEvacuationCandidatesFromFreeLists();

  bool CanExpand(size_t size);

  // Returns the number of total pages in this space.
  int CountTotalPages();

  // Return size of allocatable area on a page in this space.
  inline int AreaSize() { return area_size_; }

- void CreateEmergencyMemory();
- void FreeEmergencyMemory();
- void UseEmergencyMemory();
- intptr_t MaxEmergencyMemoryAllocated();
-
- bool HasEmergencyMemory() { return emergency_memory_ != NULL; }
-
  // Merges {other} into the current space. Note that this modifies {other},
  // e.g., removes its bump pointer area and resets statistics.
  void MergeCompactionSpace(CompactionSpace* other);

+  void MoveOverFreeMemory(PagedSpace* other);
+
+  virtual bool is_local() { return false; }
+
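These two hooks are where "Fix accounting for moved free list memory" lands: when a task-local compaction space is merged back into the global space, its free-list bytes must transfer without being double counted. A sketch of the intended post-compaction flow on the main thread (MergeBack is illustrative; the real bodies are in spaces.cc, and whether MergeCompactionSpace invokes MoveOverFreeMemory itself is an implementation detail):

    // Illustrative merge of a task-local space back into its global space.
    void MergeBack(PagedSpace* global, CompactionSpace* local) {
      // Free-list memory moves with balanced accounting: bytes leave
      // local's stats and enter global's, so heap totals are unchanged.
      global->MoveOverFreeMemory(local);
      // Pages, merged statistics, and the bump-pointer area follow.
      global->MergeCompactionSpace(local);
    }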
 protected:
  // PagedSpaces that should be included in snapshots have different, i.e.,
  // smaller, initial pages.
  virtual bool snapshotable() { return true; }

  FreeList* free_list() { return &free_list_; }

  bool HasPages() { return anchor_.next_page() != &anchor_; }

  // Cleans up the space, frees all pages in this space except those belonging
(...skipping 40 matching lines...)

  // The number of free bytes which could be reclaimed by advancing the
  // concurrent sweeper threads.
  intptr_t unswept_free_bytes_;

  // The sweeper threads iterate over the list of pointer and data space pages
  // and sweep these pages concurrently. They will stop sweeping after the
  // end_of_unswept_pages_ page.
  Page* end_of_unswept_pages_;

- // Emergency memory is the memory of a full page for a given space, allocated
- // conservatively before evacuating a page. If compaction fails due to an
- // out-of-memory error, the emergency memory can be used to complete
- // compaction. If not used, the emergency memory is released after compaction.
- MemoryChunk* emergency_memory_;
-
  // Mutex guarding any concurrent access to the space.
  base::Mutex space_mutex_;

  friend class MarkCompactCollector;
  friend class PageIterator;
};


class NumberAndSizeInfo BASE_EMBEDDED {
 public:
(...skipping 673 matching lines...)
 public:
  CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
      : PagedSpace(heap, id, executable) {}

  // Adds external memory starting at {start} of {size_in_bytes} to the space.
  void AddExternalMemory(Address start, int size_in_bytes) {
    IncreaseCapacity(size_in_bytes);
    Free(start, size_in_bytes);
  }

+  virtual bool is_local() { return true; }
+
 protected:
  // The space is temporary and not included in any snapshots.
  virtual bool snapshotable() { return false; }
};

+ // A collection of |CompactionSpace|s used by a single compaction task.
+ class CompactionSpaceCollection : public Malloced {
+  public:
+   explicit CompactionSpaceCollection(Heap* heap)
+       : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
+         code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
+
+   CompactionSpace* Get(AllocationSpace space) {
+     switch (space) {
+       case OLD_SPACE:
+         return &old_space_;
+       case CODE_SPACE:
+         return &code_space_;
+       default:
+         UNREACHABLE();
+     }
+     UNREACHABLE();
+     return nullptr;
+   }
+
+  private:
+   CompactionSpace old_space_;
+   CompactionSpace code_space_;
+ };
+
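The point of the collection is that each parallel compaction task owns one, so the hot allocation path during evacuation needs no locking across tasks. A sketch of the per-task lifecycle (the task function and its body are illustrative; the real wiring is in mark-compact.cc):

    // Illustrative per-task usage of CompactionSpaceCollection.
    void RunCompactionTask(Heap* heap /*, list of claimed pages ... */) {
      CompactionSpaceCollection spaces(heap);
      // Evacuate live objects into the task-local spaces, e.g. via
      //   spaces.Get(OLD_SPACE)  and  spaces.Get(CODE_SPACE).
      // When the task is done, the main thread merges each local
      // CompactionSpace back into the corresponding global space
      // (see MergeCompactionSpace / MoveOverFreeMemory above).
    }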
+
// -----------------------------------------------------------------------------
// Old object space (includes the old space of objects and code space)

class OldSpace : public PagedSpace {
 public:
  // Creates an old space object. The constructor does not allocate pages
  // from OS.
  OldSpace(Heap* heap, AllocationSpace id, Executability executable)
      : PagedSpace(heap, id, executable) {}
};
(...skipping 180 matching lines...)
    count = 0;
  }
  // Must be small, since an iteration is used for lookup.
  static const int kMaxComments = 64;
};
#endif
}
}  // namespace v8::internal

#endif  // V8_HEAP_SPACES_H_