| OLD | NEW | 
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. | 
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be | 
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. | 
| 4 | 4 | 
| 5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ | 
| 6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ | 
| 7 | 7 | 
| 8 #include "src/allocation.h" | 8 #include "src/allocation.h" | 
| 9 #include "src/atomic-utils.h" | 9 #include "src/atomic-utils.h" | 
| 10 #include "src/base/atomicops.h" | 10 #include "src/base/atomicops.h" | 
| (...skipping 250 matching lines...) |
| 261 | 261 | 
| 262 class SkipList; | 262 class SkipList; | 
| 263 class SlotsBuffer; | 263 class SlotsBuffer; | 
| 264 | 264 | 
| 265 // MemoryChunk represents a memory region owned by a specific space. | 265 // MemoryChunk represents a memory region owned by a specific space. | 
| 266 // It is divided into the header and the body. Chunk start is always | 266 // It is divided into the header and the body. Chunk start is always | 
| 267 // 1MB aligned. Start of the body is aligned so it can accommodate | 267 // 1MB aligned. Start of the body is aligned so it can accommodate | 
| 268 // any heap object. | 268 // any heap object. | 
| 269 class MemoryChunk { | 269 class MemoryChunk { | 
| 270  public: | 270  public: | 
|  | 271   // |kCompactingDone|: Initial compaction state of a |MemoryChunk|. |
|  | 272   // |kCompactingInProgress|: Parallel compaction is currently in progress. |
|  | 273   // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to | 
|  | 274   //   be finalized. | 
|  | 275   // |kCompactingAborted|: Parallel compaction has been aborted, which should | 
|  | 276   //   for now only happen in OOM scenarios. | 
|  | 277   enum ParallelCompactingState { | 
|  | 278     kCompactingDone, | 
|  | 279     kCompactingInProgress, | 
|  | 280     kCompactingFinalize, | 
|  | 281     kCompactingAborted, | 
|  | 282   }; | 
|  | 283 | 
| 271   // Only works if the pointer is in the first kPageSize of the MemoryChunk. | 284   // Only works if the pointer is in the first kPageSize of the MemoryChunk. | 
| 272   static MemoryChunk* FromAddress(Address a) { | 285   static MemoryChunk* FromAddress(Address a) { | 
| 273     return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); | 286     return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); | 
| 274   } | 287   } | 
| 275   static const MemoryChunk* FromAddress(const byte* a) { | 288   static const MemoryChunk* FromAddress(const byte* a) { | 
| 276     return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) & | 289     return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) & | 
| 277                                                 ~kAlignmentMask); | 290                                                 ~kAlignmentMask); | 
| 278   } | 291   } | 
| 279 | 292 | 
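
The `FromAddress()` overloads above rely on the documented 1MB chunk alignment: clearing the low bits of any interior address recovers the chunk base. A minimal standalone sketch of that masking arithmetic (the `kAlignment`/`kAlignmentMask` names mirror the header's constants; the concrete addresses are illustrative):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kAlignment = uintptr_t{1} << 20;  // 1MB chunk alignment
  const uintptr_t kAlignmentMask = kAlignment - 1;

  uintptr_t chunk_base = 5 * kAlignment;     // some 1MB-aligned chunk start
  uintptr_t interior = chunk_base + 0x1234;  // address of an object inside

  // Masking off the low 20 bits yields the owning chunk's base address.
  assert((interior & ~kAlignmentMask) == chunk_base);
  return 0;
}
```
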
| 280   // Only works for addresses in pointer spaces, not data or code spaces. | 293   // Only works for addresses in pointer spaces, not data or code spaces. | 
| (...skipping 170 matching lines...) |
| 451 | 464 | 
| 452   ParallelSweepingState parallel_sweeping() { | 465   ParallelSweepingState parallel_sweeping() { | 
| 453     return static_cast<ParallelSweepingState>( | 466     return static_cast<ParallelSweepingState>( | 
| 454         base::Acquire_Load(&parallel_sweeping_)); | 467         base::Acquire_Load(&parallel_sweeping_)); |
| 455   } | 468   } | 
| 456 | 469 | 
| 457   void set_parallel_sweeping(ParallelSweepingState state) { | 470   void set_parallel_sweeping(ParallelSweepingState state) { | 
| 458     base::Release_Store(&parallel_sweeping_, state); | 471     base::Release_Store(&parallel_sweeping_, state); |
| 459   } | 472   } | 
| 460 | 473 | 
|  | 474   AtomicValue<ParallelCompactingState>& parallel_compaction_state() { | 
|  | 475     return parallel_compaction_; | 
|  | 476   } | 
|  | 477 | 
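
The new `parallel_compaction_state()` accessor exposes the state machine introduced above, which tasks presumably drive via atomic transitions. A standalone model of the claiming protocol, with `std::atomic` standing in for V8's `AtomicValue<ParallelCompactingState>` (the exact transition sequence is an assumption for illustration, not taken from this header):

```cpp
#include <atomic>
#include <cstdio>

enum ParallelCompactingState {
  kCompactingDone,
  kCompactingInProgress,
  kCompactingFinalize,
  kCompactingAborted,
};

int main() {
  std::atomic<ParallelCompactingState> state{kCompactingDone};

  // A compaction task claims the chunk only if no other task beat it to it.
  ParallelCompactingState expected = kCompactingDone;
  if (state.compare_exchange_strong(expected, kCompactingInProgress)) {
    // ... evacuate live objects; on OOM the task would store
    // kCompactingAborted instead ...
    state.store(kCompactingFinalize);  // hand the chunk back for finalization
  }
  std::printf("state = %d\n", static_cast<int>(state.load()));
  return 0;
}
```
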
| 461   bool TryLock() { return mutex_->TryLock(); } | 478   bool TryLock() { return mutex_->TryLock(); } | 
| 462 | 479 | 
| 463   base::Mutex* mutex() { return mutex_; } | 480   base::Mutex* mutex() { return mutex_; } | 
| 464 | 481 | 
| 465   // WaitUntilSweepingCompleted only works when concurrent sweeping is in | 482   // WaitUntilSweepingCompleted only works when concurrent sweeping is in | 
| 466   // progress, in particular when we know that right before this call a | 483   // progress, in particular when we know that right before this call a |
| 467   // sweeper thread was sweeping this page. | 484   // sweeper thread was sweeping this page. | 
| 468   void WaitUntilSweepingCompleted() { | 485   void WaitUntilSweepingCompleted() { | 
| 469     mutex_->Lock(); | 486     mutex_->Lock(); | 
| 470     mutex_->Unlock(); | 487     mutex_->Unlock(); | 
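
The Lock()/Unlock() pair above is a synchronization barrier, not a critical section: the sweeper holds the page mutex for the entire sweep, so acquiring and immediately releasing it blocks exactly until sweeping finishes. A standalone model of the idiom, with `std::mutex` standing in for `base::Mutex` (subject to the same caveat as the header comment: it only waits if the sweeper already holds the lock):

```cpp
#include <mutex>
#include <thread>

int main() {
  std::mutex page_mutex;

  std::thread sweeper([&page_mutex] {
    std::lock_guard<std::mutex> hold(page_mutex);  // held for the whole sweep
    // ... sweep the page ...
  });

  // Not a critical section: lock-then-unlock simply blocks until the
  // sweeper has released the mutex.
  page_mutex.lock();
  page_mutex.unlock();

  sweeper.join();
  return 0;
}
```
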
| (...skipping 88 matching lines...) |
| 559       kSlotsBufferOffset + kPointerSize  // SlotsBuffer* slots_buffer_; | 576       kSlotsBufferOffset + kPointerSize  // SlotsBuffer* slots_buffer_; | 
| 560       + kPointerSize;                    // SkipList* skip_list_; | 577       + kPointerSize;                    // SkipList* skip_list_; | 
| 561 | 578 | 
| 562   static const size_t kMinHeaderSize = | 579   static const size_t kMinHeaderSize = | 
| 563       kWriteBarrierCounterOffset + | 580       kWriteBarrierCounterOffset + | 
| 564       kIntptrSize         // intptr_t write_barrier_counter_ | 581       kIntptrSize         // intptr_t write_barrier_counter_ | 
| 565       + kIntSize          // int progress_bar_ | 582       + kIntSize          // int progress_bar_ | 
| 566       + kPointerSize      // AtomicValue high_water_mark_ | 583       + kPointerSize      // AtomicValue high_water_mark_ | 
| 567       + kPointerSize      // base::Mutex* mutex_ | 584       + kPointerSize      // base::Mutex* mutex_ | 
| 568       + kPointerSize      // base::AtomicWord parallel_sweeping_ | 585       + kPointerSize      // base::AtomicWord parallel_sweeping_ | 
|  | 586       + kPointerSize      // AtomicValue parallel_compaction_ | 
| 569       + 5 * kPointerSize  // AtomicNumber free-list statistics | 587       + 5 * kPointerSize  // AtomicNumber free-list statistics | 
| 570       + kPointerSize      // base::AtomicWord next_chunk_ | 588       + kPointerSize      // base::AtomicWord next_chunk_ | 
| 571       + kPointerSize;     // base::AtomicWord prev_chunk_ | 589       + kPointerSize;     // base::AtomicWord prev_chunk_ | 
| 572 | 590 | 
| 573   // We add some more space to the computed header size to account for missing | 591   // We add some more space to the computed header size to account for missing |
| 574   // alignment requirements in our computation. | 592   // alignment requirements in our computation. | 
| 575   // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines. | 593   // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines. | 
| 576   static const size_t kHeaderSize = kMinHeaderSize + kIntSize; | 594   static const size_t kHeaderSize = kMinHeaderSize + kIntSize; | 
| 577 | 595 | 
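
For readers tallying the offsets, here is a hypothetical 64-bit version of the kMinHeaderSize sum above, including the `parallel_compaction_` slot this change adds. The field sizes and zero base offset are illustrative assumptions, not V8's actual values; the point is only that the +kIntSize pad keeps the total pointer-aligned:

```cpp
#include <cstddef>

constexpr std::size_t kPointerSize = 8;
constexpr std::size_t kIntptrSize = 8;
constexpr std::size_t kIntSize = 4;
constexpr std::size_t kWriteBarrierCounterOffset = 0;  // stand-in base

constexpr std::size_t kMinHeaderSize =
    kWriteBarrierCounterOffset + kIntptrSize  // write_barrier_counter_
    + kIntSize                                // progress_bar_
    + kPointerSize                            // high_water_mark_
    + kPointerSize                            // mutex_
    + kPointerSize                            // parallel_sweeping_
    + kPointerSize                            // parallel_compaction_ (new)
    + 5 * kPointerSize                        // free-list statistics
    + kPointerSize                            // next_chunk_
    + kPointerSize;                           // prev_chunk_

// The sum is 100 here; padding with kIntSize gives 104, a multiple of 8.
constexpr std::size_t kHeaderSize = kMinHeaderSize + kIntSize;
static_assert(kHeaderSize % kPointerSize == 0, "header stays pointer-aligned");

int main() { return 0; }
```
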
| 578   static const int kBodyOffset = | 596   static const int kBodyOffset = | 
| (...skipping 140 matching lines...) |
| 719   intptr_t write_barrier_counter_; | 737   intptr_t write_barrier_counter_; | 
| 720   // Used by the incremental marker to keep track of the scanning progress in | 738   // Used by the incremental marker to keep track of the scanning progress in | 
| 721   // large objects that have a progress bar and are scanned in increments. | 739   // large objects that have a progress bar and are scanned in increments. | 
| 722   int progress_bar_; | 740   int progress_bar_; | 
| 723   // Assuming the initial allocation on a page is sequential, | 741   // Assuming the initial allocation on a page is sequential, | 
| 724   // count highest number of bytes ever allocated on the page. | 742   // count highest number of bytes ever allocated on the page. | 
| 725   AtomicValue<intptr_t> high_water_mark_; | 743   AtomicValue<intptr_t> high_water_mark_; | 
| 726 | 744 | 
| 727   base::Mutex* mutex_; | 745   base::Mutex* mutex_; | 
| 728   base::AtomicWord parallel_sweeping_; | 746   base::AtomicWord parallel_sweeping_; | 
|  | 747   AtomicValue<ParallelCompactingState> parallel_compaction_; | 
| 729 | 748 | 
| 730   // PagedSpace free-list statistics. | 749   // PagedSpace free-list statistics. | 
| 731   AtomicNumber<intptr_t> available_in_small_free_list_; | 750   AtomicNumber<intptr_t> available_in_small_free_list_; | 
| 732   AtomicNumber<intptr_t> available_in_medium_free_list_; | 751   AtomicNumber<intptr_t> available_in_medium_free_list_; | 
| 733   AtomicNumber<intptr_t> available_in_large_free_list_; | 752   AtomicNumber<intptr_t> available_in_large_free_list_; | 
| 734   AtomicNumber<intptr_t> available_in_huge_free_list_; | 753   AtomicNumber<intptr_t> available_in_huge_free_list_; | 
| 735   AtomicNumber<intptr_t> non_available_small_blocks_; | 754   AtomicNumber<intptr_t> non_available_small_blocks_; | 
| 736 | 755 | 
| 737   // next_chunk_ holds a pointer of type MemoryChunk* | 756   // next_chunk_ holds a pointer of type MemoryChunk* |
| 738   base::AtomicWord next_chunk_; | 757   base::AtomicWord next_chunk_; | 
| (...skipping 240 matching lines...) |
| 979   // Allocates a chunk of memory from the large-object portion of | 998   // Allocates a chunk of memory from the large-object portion of | 
| 980   // the code range.  On platforms with no separate code range, should | 999   // the code range.  On platforms with no separate code range, should | 
| 981   // not be called. | 1000   // not be called. | 
| 982   MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size, | 1001   MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size, | 
| 983                                             const size_t commit_size, | 1002                                             const size_t commit_size, | 
| 984                                             size_t* allocated); | 1003                                             size_t* allocated); | 
| 985   bool CommitRawMemory(Address start, size_t length); | 1004   bool CommitRawMemory(Address start, size_t length); | 
| 986   bool UncommitRawMemory(Address start, size_t length); | 1005   bool UncommitRawMemory(Address start, size_t length); | 
| 987   void FreeRawMemory(Address buf, size_t length); | 1006   void FreeRawMemory(Address buf, size_t length); | 
| 988 | 1007 | 
| 989   void ReserveEmergencyBlock(); |  | 
| 990   void ReleaseEmergencyBlock(); |  | 
| 991 |  | 
| 992  private: | 1008  private: | 
| 993   // Frees the range of virtual memory, and frees the data structures used to | 1009   // Frees the range of virtual memory, and frees the data structures used to | 
| 994   // manage it. | 1010   // manage it. | 
| 995   void TearDown(); | 1011   void TearDown(); | 
| 996 | 1012 | 
| 997   Isolate* isolate_; | 1013   Isolate* isolate_; | 
| 998 | 1014 | 
| 999   // The reserved range of virtual memory that all code objects are put in. | 1015   // The reserved range of virtual memory that all code objects are put in. | 
| 1000   base::VirtualMemory* code_range_; | 1016   base::VirtualMemory* code_range_; | 
| 1001   // Plain old data class, just a struct plus a constructor. | 1017   // Plain old data class, just a struct plus a constructor. | 
| (...skipping 22 matching lines...) |
| 1024   // Freed blocks of memory are added to the free list.  When the allocation | 1040   // Freed blocks of memory are added to the free list.  When the allocation | 
| 1025   // list is exhausted, the free list is sorted and merged to make the new | 1041   // list is exhausted, the free list is sorted and merged to make the new | 
| 1026   // allocation list. | 1042   // allocation list. | 
| 1027   List<FreeBlock> free_list_; | 1043   List<FreeBlock> free_list_; | 
| 1028 | 1044 | 
| 1029   // Memory is allocated from the free blocks on the allocation list. | 1045   // Memory is allocated from the free blocks on the allocation list. | 
| 1030   // The block at current_allocation_block_index_ is the current block. | 1046   // The block at current_allocation_block_index_ is the current block. | 
| 1031   List<FreeBlock> allocation_list_; | 1047   List<FreeBlock> allocation_list_; | 
| 1032   int current_allocation_block_index_; | 1048   int current_allocation_block_index_; | 
| 1033 | 1049 | 
| 1034   // Emergency block guarantees that we can always allocate a page for |  | 
| 1035   // evacuation candidates when code space is compacted. Emergency block is |  | 
| 1036   // reserved immediately after GC and is released immediately before |  |
| 1037   // allocating a page for evacuation. |  | 
| 1038   FreeBlock emergency_block_; |  | 
| 1039 |  | 
| 1040   // Finds a block on the allocation list that contains at least the | 1050   // Finds a block on the allocation list that contains at least the | 
| 1041   // requested amount of memory.  If none is found, sorts and merges | 1051   // requested amount of memory.  If none is found, sorts and merges | 
| 1042   // the existing free memory blocks, and searches again. | 1052   // the existing free memory blocks, and searches again. | 
| 1043   // If none can be found, returns false. | 1053   // If none can be found, returns false. | 
| 1044   bool GetNextAllocationBlock(size_t requested); | 1054   bool GetNextAllocationBlock(size_t requested); | 
| 1045   // Compares the start addresses of two free blocks. | 1055   // Compares the start addresses of two free blocks. | 
| 1046   static int CompareFreeBlockAddress(const FreeBlock* left, | 1056   static int CompareFreeBlockAddress(const FreeBlock* left, | 
| 1047                                      const FreeBlock* right); | 1057                                      const FreeBlock* right); | 
| 1048   bool ReserveBlock(const size_t requested_size, FreeBlock* block); | 1058   bool ReserveBlock(const size_t requested_size, FreeBlock* block); | 
| 1049   void ReleaseBlock(const FreeBlock* block); | 1059   void ReleaseBlock(const FreeBlock* block); | 
| (...skipping 478 matching lines...) |
| 1528 | 1538 | 
| 1529 // ----------------------------------------------------------------------------- | 1539 // ----------------------------------------------------------------------------- | 
| 1530 // Free lists for old object spaces | 1540 // Free lists for old object spaces | 
| 1531 | 1541 | 
| 1532 // The free list category holds a pointer to the top element and a pointer to | 1542 // The free list category holds a pointer to the top element and a pointer to | 
| 1533 // the end element of the linked list of free memory blocks. | 1543 // the end element of the linked list of free memory blocks. | 
| 1534 class FreeListCategory { | 1544 class FreeListCategory { | 
| 1535  public: | 1545  public: | 
| 1536   FreeListCategory() : top_(0), end_(NULL), available_(0) {} | 1546   FreeListCategory() : top_(0), end_(NULL), available_(0) {} | 
| 1537 | 1547 | 
| 1538   intptr_t Concatenate(FreeListCategory* category); | 1548   intptr_t Concatenate(FreeListCategory* category, | 
|  | 1549                        bool category_is_local = false); | 
| 1539 | 1550 | 
| 1540   void Reset(); | 1551   void Reset(); | 
| 1541 | 1552 | 
| 1542   void Free(FreeSpace* node, int size_in_bytes); | 1553   void Free(FreeSpace* node, int size_in_bytes); | 
| 1543 | 1554 | 
| 1544   FreeSpace* PickNodeFromList(int* node_size); | 1555   FreeSpace* PickNodeFromList(int* node_size); | 
| 1545   FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size); | 1556   FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size); | 
| 1546 | 1557 | 
| 1547   intptr_t EvictFreeListItemsInList(Page* p); | 1558   intptr_t EvictFreeListItemsInList(Page* p); | 
| 1548   bool ContainsPageFreeListItemsInList(Page* p); | 1559   bool ContainsPageFreeListItemsInList(Page* p); | 
| (...skipping 55 matching lines...) |
| 1604 //     spaces are called medium. | 1615 //     spaces are called medium. | 
| 1605 // 2048-16383 words: There is a list of spaces this large.  It is used for top | 1616 // 2048-16383 words: There is a list of spaces this large.  It is used for top |
| 1606 //     and limit when the object we need to allocate is 256-2047 words in size. | 1617 //     and limit when the object we need to allocate is 256-2047 words in size. |
| 1607 //     These spaces are called large. | 1618 //     These spaces are called large. |
| 1608 // At least 16384 words.  This list is for objects of 2048 words or larger. | 1619 // At least 16384 words.  This list is for objects of 2048 words or larger. | 
| 1609 //     Empty pages are added to this list.  These spaces are called huge. | 1620 //     Empty pages are added to this list.  These spaces are called huge. | 
| 1610 class FreeList { | 1621 class FreeList { | 
| 1611  public: | 1622  public: | 
| 1612   explicit FreeList(PagedSpace* owner); | 1623   explicit FreeList(PagedSpace* owner); | 
| 1613 | 1624 | 
| 1614   intptr_t Concatenate(FreeList* free_list); | 1625   intptr_t Concatenate(FreeList* free_list, bool free_list_is_local = false); | 
| 1615 | 1626 | 
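
A plausible reading of the new `free_list_is_local` / `category_is_local` flags is that a compaction-task-private source list needs no locking when concatenated, since no other thread can reach it. A toy model of that idea (the list structure and locking policy are assumptions for illustration, not V8's real FreeList):

```cpp
#include <mutex>

struct FreeNode { FreeNode* next; int size; };

struct ToyFreeList {
  FreeNode* top = nullptr;
  long available = 0;
  std::mutex mutex;

  long Concatenate(ToyFreeList* source, bool source_is_local = false) {
    // A shared source must be locked; a task-local one is unreachable by
    // other threads, so the lock (and its cost) can be skipped.
    if (!source_is_local) source->mutex.lock();
    long moved = source->available;
    // Splice the source list in front of ours, then reset the source.
    FreeNode** tail = &source->top;
    while (*tail != nullptr) tail = &(*tail)->next;
    *tail = top;
    top = source->top;
    available += moved;
    source->top = nullptr;
    source->available = 0;
    if (!source_is_local) source->mutex.unlock();
    return moved;
  }
};

int main() {
  ToyFreeList owner, task_local;
  FreeNode n{nullptr, 64};
  task_local.top = &n;
  task_local.available = 64;
  owner.Concatenate(&task_local, /*source_is_local=*/true);
  return 0;
}
```
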
| 1616   // Clear the free list. | 1627   // Clear the free list. | 
| 1617   void Reset(); | 1628   void Reset(); | 
| 1618 | 1629 | 
| 1619   // Return the number of bytes available on the free list. | 1630   // Return the number of bytes available on the free list. | 
| 1620   intptr_t available() { | 1631   intptr_t available() { | 
| 1621     return small_list_.available() + medium_list_.available() + | 1632     return small_list_.available() + medium_list_.available() + | 
| 1622            large_list_.available() + huge_list_.available(); | 1633            large_list_.available() + huge_list_.available(); | 
| 1623   } | 1634   } | 
| 1624 | 1635 | 
| (...skipping 337 matching lines...) |
| 1962   void EvictEvacuationCandidatesFromFreeLists(); | 1973   void EvictEvacuationCandidatesFromFreeLists(); | 
| 1963 | 1974 | 
| 1964   bool CanExpand(size_t size); | 1975   bool CanExpand(size_t size); | 
| 1965 | 1976 | 
| 1966   // Returns the number of total pages in this space. | 1977   // Returns the number of total pages in this space. | 
| 1967   int CountTotalPages(); | 1978   int CountTotalPages(); | 
| 1968 | 1979 | 
| 1969   // Return size of allocatable area on a page in this space. | 1980   // Return size of allocatable area on a page in this space. | 
| 1970   inline int AreaSize() { return area_size_; } | 1981   inline int AreaSize() { return area_size_; } | 
| 1971 | 1982 | 
| 1972   void CreateEmergencyMemory(); |  | 
| 1973   void FreeEmergencyMemory(); |  | 
| 1974   void UseEmergencyMemory(); |  | 
| 1975   intptr_t MaxEmergencyMemoryAllocated(); |  | 
| 1976 |  | 
| 1977   bool HasEmergencyMemory() { return emergency_memory_ != NULL; } |  | 
| 1978 |  | 
| 1979   // Merges {other} into the current space. Note that this modifies {other}, | 1983   // Merges {other} into the current space. Note that this modifies {other}, | 
| 1980   // e.g., removes its bump pointer area and resets statistics. | 1984   // e.g., removes its bump pointer area and resets statistics. | 
| 1981   void MergeCompactionSpace(CompactionSpace* other); | 1985   void MergeCompactionSpace(CompactionSpace* other); | 
| 1982 | 1986 | 
|  | 1987   void MoveOverFreeMemory(PagedSpace* other, bool other_is_local = false); | 
|  | 1988 | 
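
A minimal model of the MergeCompactionSpace() contract stated above: the receiver takes over {other}'s accounting and {other} is left empty. The two fields here are stand-ins for the real pages, bump pointer area, and statistics:

```cpp
#include <cassert>

struct ToySpace {
  long capacity = 0;
  long allocated = 0;

  void MergeCompactionSpace(ToySpace* other) {
    // Take over everything the compaction space accumulated...
    capacity += other->capacity;
    allocated += other->allocated;
    // ...and reset {other}, as the comment above warns: merging mutates it.
    other->capacity = 0;
    other->allocated = 0;
  }
};

int main() {
  ToySpace owner, compaction;
  compaction.capacity = 1 << 20;
  compaction.allocated = 4096;
  owner.MergeCompactionSpace(&compaction);
  assert(owner.allocated == 4096 && compaction.allocated == 0);
  return 0;
}
```
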
| 1983  protected: | 1989  protected: | 
| 1984   // PagedSpaces that should be included in snapshots have different, i.e., | 1990   // PagedSpaces that should be included in snapshots have different, i.e., | 
| 1985   // smaller, initial pages. | 1991   // smaller, initial pages. | 
| 1986   virtual bool snapshotable() { return true; } | 1992   virtual bool snapshotable() { return true; } | 
| 1987 | 1993 | 
| 1988   FreeList* free_list() { return &free_list_; } | 1994   FreeList* free_list() { return &free_list_; } | 
| 1989 | 1995 | 
| 1990   bool HasPages() { return anchor_.next_page() != &anchor_; } | 1996   bool HasPages() { return anchor_.next_page() != &anchor_; } | 
| 1991 | 1997 | 
| 1992   // Cleans up the space, frees all pages in this space except those belonging | 1998   // Cleans up the space, frees all pages in this space except those belonging | 
| (...skipping 40 matching lines...) |
| 2033 | 2039 | 
| 2034   // The number of free bytes which could be reclaimed by advancing the | 2040   // The number of free bytes which could be reclaimed by advancing the | 
| 2035   // concurrent sweeper threads. | 2041   // concurrent sweeper threads. | 
| 2036   intptr_t unswept_free_bytes_; | 2042   intptr_t unswept_free_bytes_; | 
| 2037 | 2043 | 
| 2038   // The sweeper threads iterate over the list of pointer and data space pages | 2044   // The sweeper threads iterate over the list of pointer and data space pages | 
| 2039   // and sweep these pages concurrently. They will stop sweeping after the | 2045   // and sweep these pages concurrently. They will stop sweeping after the | 
| 2040   // end_of_unswept_pages_ page. | 2046   // end_of_unswept_pages_ page. | 
| 2041   Page* end_of_unswept_pages_; | 2047   Page* end_of_unswept_pages_; | 
| 2042 | 2048 | 
| 2043   // Emergency memory is the memory of a full page for a given space, allocated |  | 
| 2044   // conservatively before evacuating a page. If compaction fails due to out |  | 
| 2045   // of memory error the emergency memory can be used to complete compaction. |  | 
| 2046   // If not used, the emergency memory is released after compaction. |  | 
| 2047   MemoryChunk* emergency_memory_; |  | 
| 2048 |  | 
| 2049   // Mutex guarding any concurrent access to the space. | 2049   // Mutex guarding any concurrent access to the space. | 
| 2050   base::Mutex space_mutex_; | 2050   base::Mutex space_mutex_; | 
| 2051 | 2051 | 
| 2052   friend class MarkCompactCollector; | 2052   friend class MarkCompactCollector; | 
| 2053   friend class PageIterator; | 2053   friend class PageIterator; | 
| 2054 }; | 2054 }; | 
| 2055 | 2055 | 
| 2056 | 2056 | 
| 2057 class NumberAndSizeInfo BASE_EMBEDDED { | 2057 class NumberAndSizeInfo BASE_EMBEDDED { | 
| 2058  public: | 2058  public: | 
| (...skipping 679 matching lines...) |
| 2738     IncreaseCapacity(size_in_bytes); | 2738     IncreaseCapacity(size_in_bytes); | 
| 2739     Free(start, size_in_bytes); | 2739     Free(start, size_in_bytes); | 
| 2740   } | 2740   } | 
| 2741 | 2741 | 
| 2742  protected: | 2742  protected: | 
| 2743   // The space is temporary and not included in any snapshots. | 2743   // The space is temporary and not included in any snapshots. | 
| 2744   virtual bool snapshotable() { return false; } | 2744   virtual bool snapshotable() { return false; } | 
| 2745 }; | 2745 }; | 
| 2746 | 2746 | 
| 2747 | 2747 | 
|  | 2748 // A collection of |CompactionSpace|s used by a single compaction task. | 
|  | 2749 class CompactionSpaceCollection : public Malloced { | 
|  | 2750  public: | 
|  | 2751   explicit CompactionSpaceCollection(Heap* heap) | 
|  | 2752       : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE), | 
|  | 2753         code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {} | 
|  | 2754 | 
|  | 2755   CompactionSpace* Get(AllocationSpace space) { | 
|  | 2756     switch (space) { | 
|  | 2757       case OLD_SPACE: | 
|  | 2758         return &old_space_; | 
|  | 2759       case CODE_SPACE: | 
|  | 2760         return &code_space_; | 
|  | 2761       default: | 
|  | 2762         UNREACHABLE(); | 
|  | 2763     } | 
|  | 2764     UNREACHABLE(); | 
|  | 2765     return nullptr; | 
|  | 2766   } | 
|  | 2767 | 
|  | 2768  private: | 
|  | 2769   CompactionSpace old_space_; | 
|  | 2770   CompactionSpace code_space_; | 
|  | 2771 }; | 
|  | 2772 | 
|  | 2773 | 
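
A sketch of how a parallel compaction task might use one of these collections: each task owns a private collection and routes allocations by space identity, so the hot evacuation path needs no locks. The toy types and per-task array are illustrative assumptions; only the Get(space) routing mirrors the class above:

```cpp
#include <cassert>
#include <cstddef>

enum AllocationSpace { OLD_SPACE, CODE_SPACE };

struct ToySpace { long allocated = 0; };

struct ToyCollection {
  ToySpace old_space, code_space;
  ToySpace* Get(AllocationSpace space) {
    switch (space) {
      case OLD_SPACE: return &old_space;
      case CODE_SPACE: return &code_space;
    }
    return nullptr;  // unreachable for the two supported spaces
  }
};

int main() {
  const std::size_t kTaskCount = 4;
  ToyCollection collections[kTaskCount];  // one private collection per task

  // Each task allocates only through its own collection; results would be
  // merged back into the heap's real spaces after the tasks finish.
  for (std::size_t task = 0; task < kTaskCount; task++) {
    collections[task].Get(OLD_SPACE)->allocated += 128;
  }
  assert(collections[0].Get(CODE_SPACE)->allocated == 0);
  return 0;
}
```
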
| 2748 // ----------------------------------------------------------------------------- | 2774 // ----------------------------------------------------------------------------- | 
| 2749 // Old object space (includes the old space of objects and code space) | 2775 // Old object space (includes the old space of objects and code space) | 
| 2750 | 2776 | 
| 2751 class OldSpace : public PagedSpace { | 2777 class OldSpace : public PagedSpace { | 
| 2752  public: | 2778  public: | 
| 2753   // Creates an old space object. The constructor does not allocate pages | 2779   // Creates an old space object. The constructor does not allocate pages | 
| 2754   // from OS. | 2780   // from OS. | 
| 2755   OldSpace(Heap* heap, AllocationSpace id, Executability executable) | 2781   OldSpace(Heap* heap, AllocationSpace id, Executability executable) | 
| 2756       : PagedSpace(heap, id, executable) {} | 2782       : PagedSpace(heap, id, executable) {} | 
| 2757 }; | 2783 }; | 
| (...skipping 180 matching lines...) |
| 2938     count = 0; | 2964     count = 0; | 
| 2939   } | 2965   } | 
| 2940   // Must be small, since an iteration is used for lookup. | 2966   // Must be small, since an iteration is used for lookup. | 
| 2941   static const int kMaxComments = 64; | 2967   static const int kMaxComments = 64; | 
| 2942 }; | 2968 }; | 
| 2943 #endif | 2969 #endif | 
| 2944 } | 2970 } | 
| 2945 }  // namespace v8::internal | 2971 }  // namespace v8::internal | 
| 2946 | 2972 | 
| 2947 #endif  // V8_HEAP_SPACES_H_ | 2973 #endif  // V8_HEAP_SPACES_H_ | 