| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
| 6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
| 7 | 7 |
| 8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
| 9 #include "src/base/atomicops.h" | 9 #include "src/base/atomicops.h" |
| 10 #include "src/base/bits.h" | 10 #include "src/base/bits.h" |
| (...skipping 73 matching lines...) |
| 84 | 84 |
| 85 #define DCHECK_OBJECT_SIZE(size) \ | 85 #define DCHECK_OBJECT_SIZE(size) \ |
| 86 DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize)) | 86 DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize)) |
| 87 | 87 |
| 88 #define DCHECK_PAGE_OFFSET(offset) \ | 88 #define DCHECK_PAGE_OFFSET(offset) \ |
| 89 DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize)) | 89 DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize)) |
| 90 | 90 |
| 91 #define DCHECK_MAP_PAGE_INDEX(index) \ | 91 #define DCHECK_MAP_PAGE_INDEX(index) \ |
| 92 DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex)) | 92 DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex)) |
| 93 | 93 |
| 94 | 94 class AllocationInfo; |
| 95 class CompactionSpace; |
| 96 class FreeList; |
| 97 class MemoryAllocator; |
| 98 class MemoryChunk; |
| 95 class PagedSpace; | 99 class PagedSpace; |
| 96 class MemoryAllocator; | |
| 97 class AllocationInfo; | |
| 98 class Space; | 100 class Space; |
| 99 class FreeList; | |
| 100 class MemoryChunk; | |
| 101 | 101 |
| 102 class MarkBit { | 102 class MarkBit { |
| 103 public: | 103 public: |
| 104 typedef uint32_t CellType; | 104 typedef uint32_t CellType; |
| 105 | 105 |
| 106 inline MarkBit(CellType* cell, CellType mask) : cell_(cell), mask_(mask) {} | 106 inline MarkBit(CellType* cell, CellType mask) : cell_(cell), mask_(mask) {} |
| 107 | 107 |
| 108 #ifdef DEBUG | 108 #ifdef DEBUG |
| 109 bool operator==(const MarkBit& other) { | 109 bool operator==(const MarkBit& other) { |
| 110 return cell_ == other.cell_ && mask_ == other.mask_; | 110 return cell_ == other.cell_ && mask_ == other.mask_; |
| (...skipping 1348 matching lines...) |
| 1459 size_ -= size_in_bytes; | 1459 size_ -= size_in_bytes; |
| 1460 DCHECK(size_ >= 0); | 1460 DCHECK(size_ >= 0); |
| 1461 } | 1461 } |
| 1462 | 1462 |
| 1463 // Waste free bytes (available -> waste). | 1463 // Waste free bytes (available -> waste). |
| 1464 void WasteBytes(int size_in_bytes) { | 1464 void WasteBytes(int size_in_bytes) { |
| 1465 DCHECK(size_in_bytes >= 0); | 1465 DCHECK(size_in_bytes >= 0); |
| 1466 waste_ += size_in_bytes; | 1466 waste_ += size_in_bytes; |
| 1467 } | 1467 } |
| 1468 | 1468 |
| 1469 // Merge {other} into {this}. |
| 1470 void Merge(const AllocationStats& other) { |
| 1471 capacity_ += other.capacity_; |
| 1472 size_ += other.size_; |
| 1473 waste_ += other.waste_; |
| 1474 if (other.max_capacity_ > max_capacity_) { |
| 1475 max_capacity_ = other.max_capacity_; |
| 1476 } |
| 1477 } |
| 1478 |
| 1469 private: | 1479 private: |
| 1470 intptr_t capacity_; | 1480 intptr_t capacity_; |
| 1471 intptr_t max_capacity_; | 1481 intptr_t max_capacity_; |
| 1472 intptr_t size_; | 1482 intptr_t size_; |
| 1473 intptr_t waste_; | 1483 intptr_t waste_; |
| 1474 }; | 1484 }; |
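The new Merge() folds one space's accounting into another: capacity, size and waste accumulate, while max_capacity_ keeps the larger of the two values. A self-contained toy of that arithmetic (plain struct with mirrored field names, not the actual AllocationStats class):

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>

// Toy stand-in for AllocationStats; fields mirror the header above.
struct Stats {
  int64_t capacity, max_capacity, size, waste;

  void Merge(const Stats& other) {
    capacity += other.capacity;   // totals add up...
    size += other.size;
    waste += other.waste;
    max_capacity = std::max(max_capacity, other.max_capacity);  // ...max does not
  }
};

int main() {
  Stats owner{4096, 4096, 3000, 64};
  Stats compaction{2048, 2048, 1500, 16};
  owner.Merge(compaction);             // owner absorbs the other space's books
  assert(owner.capacity == 6144);
  assert(owner.size == 4500);
  assert(owner.waste == 80);
  assert(owner.max_capacity == 4096);  // maximum, not sum
  return 0;
}
```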
| 1475 | 1485 |
| 1476 | 1486 |
| 1477 // ----------------------------------------------------------------------------- | 1487 // ----------------------------------------------------------------------------- |
| 1478 // Free lists for old object spaces | 1488 // Free lists for old object spaces |
| (...skipping 210 matching lines...) |
| 1689 | 1699 |
| 1690 | 1700 |
| 1691 STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize); | 1701 STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize); |
| 1692 | 1702 |
| 1693 | 1703 |
| 1694 class PagedSpace : public Space { | 1704 class PagedSpace : public Space { |
| 1695 public: | 1705 public: |
| 1696 // Creates a space with an id. | 1706 // Creates a space with an id. |
| 1697 PagedSpace(Heap* heap, AllocationSpace id, Executability executable); | 1707 PagedSpace(Heap* heap, AllocationSpace id, Executability executable); |
| 1698 | 1708 |
| 1699 virtual ~PagedSpace() {} | 1709 virtual ~PagedSpace() { TearDown(); } |
| 1700 | 1710 |
| 1701 // Set up the space using the given address range of virtual memory (from | 1711 // Set up the space using the given address range of virtual memory (from |
| 1702 // the memory allocator's initial chunk) if possible. If the block of | 1712 // the memory allocator's initial chunk) if possible. If the block of |
| 1703 // addresses is not big enough to contain a single page-aligned page, a | 1713 // addresses is not big enough to contain a single page-aligned page, a |
| 1704 // fresh chunk will be allocated. | 1714 // fresh chunk will be allocated. |
| 1705 bool SetUp(); | 1715 bool SetUp(); |
| 1706 | 1716 |
| 1707 // Returns true if the space has been successfully set up and not | 1717 // Returns true if the space has been successfully set up and not |
| 1708 // subsequently torn down. | 1718 // subsequently torn down. |
| 1709 bool HasBeenSetUp(); | 1719 bool HasBeenSetUp(); |
| 1710 | 1720 |
| 1711 // Cleans up the space, frees all pages in this space except those belonging | |
| 1712 // to the initial chunk, uncommits addresses in the initial chunk. | |
| 1713 void TearDown(); | |
| 1714 | |
| 1715 // Checks whether an object/address is in this space. | 1721 // Checks whether an object/address is in this space. |
| 1716 inline bool Contains(Address a); | 1722 inline bool Contains(Address a); |
| 1717 inline bool Contains(HeapObject* o); | 1723 inline bool Contains(HeapObject* o); |
| 1718 // Unlike Contains() methods it is safe to call this one even for addresses | 1724 // Unlike Contains() methods it is safe to call this one even for addresses |
| 1719 // of unmapped memory. | 1725 // of unmapped memory. |
| 1720 bool ContainsSafe(Address addr); | 1726 bool ContainsSafe(Address addr); |
| 1721 | 1727 |
| 1722 // Given an address occupied by a live object, return that object if it is | 1728 // Given an address occupied by a live object, return that object if it is |
| 1723 // in this space, or a Smi if it is not. The implementation iterates over | 1729 // in this space, or a Smi if it is not. The implementation iterates over |
| 1724 // objects in the page containing the address, the cost is linear in the | 1730 // objects in the page containing the address, the cost is linear in the |
| (...skipping 194 matching lines...) |
| 1919 // Return size of allocatable area on a page in this space. | 1925 // Return size of allocatable area on a page in this space. |
| 1920 inline int AreaSize() { return area_size_; } | 1926 inline int AreaSize() { return area_size_; } |
| 1921 | 1927 |
| 1922 void CreateEmergencyMemory(); | 1928 void CreateEmergencyMemory(); |
| 1923 void FreeEmergencyMemory(); | 1929 void FreeEmergencyMemory(); |
| 1924 void UseEmergencyMemory(); | 1930 void UseEmergencyMemory(); |
| 1925 intptr_t MaxEmergencyMemoryAllocated(); | 1931 intptr_t MaxEmergencyMemoryAllocated(); |
| 1926 | 1932 |
| 1927 bool HasEmergencyMemory() { return emergency_memory_ != NULL; } | 1933 bool HasEmergencyMemory() { return emergency_memory_ != NULL; } |
| 1928 | 1934 |
| 1935 // Merges {other} into the current space. Note that this modifies {other}, |
| 1936 // e.g., removes its bump pointer area and resets statistics. |
| 1937 void MergeCompactionSpace(CompactionSpace* other); |
| 1938 |
| 1929 protected: | 1939 protected: |
| 1940 // PagedSpaces that should be included in snapshots have different, i.e., |
| 1941 // smaller, initial pages. |
| 1942 virtual bool snapshotable() { return true; } |
| 1943 |
| 1930 FreeList* free_list() { return &free_list_; } | 1944 FreeList* free_list() { return &free_list_; } |
| 1931 | 1945 |
| 1946 bool HasPages() { return anchor_.next_page() != &anchor_; } |
| 1947 |
| 1948 // Cleans up the space, frees all pages in this space except those belonging |
| 1949 // to the initial chunk, uncommits addresses in the initial chunk. |
| 1950 void TearDown(); |
| 1951 |
| 1952 // Expands the space by allocating a fixed number of pages. Returns false if |
| 1953 // it cannot allocate requested number of pages from OS, or if the hard heap |
| 1954 // size limit has been hit. |
| 1955 bool Expand(); |
| 1956 |
| 1957 // Generic fast case allocation function that tries linear allocation at the |
| 1958 // address denoted by top in allocation_info_. |
| 1959 inline HeapObject* AllocateLinearly(int size_in_bytes); |
| 1960 |
| 1961 // Generic fast case allocation function that tries aligned linear allocation |
| 1962 // at the address denoted by top in allocation_info_. Writes the aligned |
| 1963 // allocation size, which includes the filler size, to size_in_bytes. |
| 1964 inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes, |
| 1965 AllocationAlignment alignment); |
| 1966 |
| 1967 // If sweeping is still in progress try to sweep unswept pages. If that is |
| 1968 // not successful, wait for the sweeper threads and re-try free-list |
| 1969 // allocation. |
| 1970 MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation( |
| 1971 int size_in_bytes); |
| 1972 |
| 1973 // Slow path of AllocateRaw. This function is space-dependent. |
| 1974 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes); |
| 1975 |
| 1932 int area_size_; | 1976 int area_size_; |
| 1933 | 1977 |
| 1934 // Accounting information for this space. | 1978 // Accounting information for this space. |
| 1935 AllocationStats accounting_stats_; | 1979 AllocationStats accounting_stats_; |
| 1936 | 1980 |
| 1937 // The dummy page that anchors the double linked list of pages. | 1981 // The dummy page that anchors the double linked list of pages. |
| 1938 Page anchor_; | 1982 Page anchor_; |
| 1939 | 1983 |
| 1940 // The space's free list. | 1984 // The space's free list. |
| 1941 FreeList free_list_; | 1985 FreeList free_list_; |
| 1942 | 1986 |
| 1943 // Normal allocation information. | 1987 // Normal allocation information. |
| 1944 AllocationInfo allocation_info_; | 1988 AllocationInfo allocation_info_; |
| 1945 | 1989 |
| 1946 // The number of free bytes which could be reclaimed by advancing the | 1990 // The number of free bytes which could be reclaimed by advancing the |
| 1947 // concurrent sweeper threads. | 1991 // concurrent sweeper threads. |
| 1948 intptr_t unswept_free_bytes_; | 1992 intptr_t unswept_free_bytes_; |
| 1949 | 1993 |
| 1950 // The sweeper threads iterate over the list of pointer and data space pages | 1994 // The sweeper threads iterate over the list of pointer and data space pages |
| 1951 // and sweep these pages concurrently. They will stop sweeping after the | 1995 // and sweep these pages concurrently. They will stop sweeping after the |
| 1952 // end_of_unswept_pages_ page. | 1996 // end_of_unswept_pages_ page. |
| 1953 Page* end_of_unswept_pages_; | 1997 Page* end_of_unswept_pages_; |
| 1954 | 1998 |
| 1955 // Emergency memory is the memory of a full page for a given space, allocated | 1999 // Emergency memory is the memory of a full page for a given space, allocated |
| 1956 // conservatively before evacuating a page. If compaction fails due to out | 2000 // conservatively before evacuating a page. If compaction fails due to out |
| 1957 // of memory error the emergency memory can be used to complete compaction. | 2001 // of memory error the emergency memory can be used to complete compaction. |
| 1958 // If not used, the emergency memory is released after compaction. | 2002 // If not used, the emergency memory is released after compaction. |
| 1959 MemoryChunk* emergency_memory_; | 2003 MemoryChunk* emergency_memory_; |
| 1960 | 2004 |
| 1961 // Expands the space by allocating a fixed number of pages. Returns false if | |
| 1962 // it cannot allocate requested number of pages from OS, or if the hard heap | |
| 1963 // size limit has been hit. | |
| 1964 bool Expand(); | |
| 1965 | |
| 1966 // Generic fast case allocation function that tries linear allocation at the | |
| 1967 // address denoted by top in allocation_info_. | |
| 1968 inline HeapObject* AllocateLinearly(int size_in_bytes); | |
| 1969 | |
| 1970 // Generic fast case allocation function that tries aligned linear allocation | |
| 1971 // at the address denoted by top in allocation_info_. Writes the aligned | |
| 1972 // allocation size, which includes the filler size, to size_in_bytes. | |
| 1973 inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes, | |
| 1974 AllocationAlignment alignment); | |
| 1975 | |
| 1976 // If sweeping is still in progress try to sweep unswept pages. If that is | |
| 1977 // not successful, wait for the sweeper threads and re-try free-list | |
| 1978 // allocation. | |
| 1979 MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation( | |
| 1980 int size_in_bytes); | |
| 1981 | |
| 1982 // Slow path of AllocateRaw. This function is space-dependent. | |
| 1983 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes); | |
| 1984 | |
| 1985 friend class PageIterator; | 2005 friend class PageIterator; |
| 1986 friend class MarkCompactCollector; | 2006 friend class MarkCompactCollector; |
| 1987 }; | 2007 }; |
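The AllocateLinearly()/AllocateLinearlyAligned() declarations moved into the protected section above are the bump-pointer fast path: as long as the request fits between allocation_info_'s top and limit, allocation is a pointer increment, and the aligned variant additionally folds the filler into the size it reports back. A minimal standalone sketch of that pattern, using a hypothetical LinearArea class that only tracks fake addresses and does not touch real memory (not V8's implementation):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

// Hypothetical bump-pointer area illustrating the linear fast path.
class LinearArea {
 public:
  LinearArea(uintptr_t top, uintptr_t limit) : top_(top), limit_(limit) {}

  // Returns the start of the block, or 0 if it does not fit; a real space
  // would then fall back to free-list / slow-path allocation.
  uintptr_t AllocateLinearly(size_t size_in_bytes) {
    if (limit_ - top_ < size_in_bytes) return 0;
    uintptr_t result = top_;
    top_ += size_in_bytes;  // bump the top pointer
    return result;
  }

  // Aligned variant: pad up to the alignment (a power of two) and report the
  // total size, filler included, back through size_in_bytes.
  uintptr_t AllocateLinearlyAligned(size_t* size_in_bytes, size_t alignment) {
    uintptr_t aligned_top = (top_ + alignment - 1) & ~(alignment - 1);
    size_t filler = aligned_top - top_;
    size_t total = *size_in_bytes + filler;
    if (limit_ - top_ < total) return 0;
    *size_in_bytes = total;  // caller accounts for filler + object
    top_ += total;
    return aligned_top;      // object starts at the aligned address
  }

 private:
  uintptr_t top_;
  uintptr_t limit_;
};

int main() {
  LinearArea area(/*top=*/1000, /*limit=*/2000);
  assert(area.AllocateLinearly(24) == 1000);          // plain bump
  size_t size = 16;
  uintptr_t aligned = area.AllocateLinearlyAligned(&size, 32);
  assert(aligned % 32 == 0 && size >= 16);            // filler included in size
  return 0;
}
```

Everything that does not fit this fast path is left to the space-dependent slow path (SlowAllocateRaw, free-list allocation, sweeping, Expand) declared alongside it.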
| 1988 | 2008 |
| 1989 | 2009 |
| 1990 class NumberAndSizeInfo BASE_EMBEDDED { | 2010 class NumberAndSizeInfo BASE_EMBEDDED { |
| 1991 public: | 2011 public: |
| 1992 NumberAndSizeInfo() : number_(0), bytes_(0) {} | 2012 NumberAndSizeInfo() : number_(0), bytes_(0) {} |
| 1993 | 2013 |
| 1994 int number() const { return number_; } | 2014 int number() const { return number_; } |
| (...skipping 655 matching lines...) |
| 2650 Address top_on_previous_step_; | 2670 Address top_on_previous_step_; |
| 2651 | 2671 |
| 2652 HistogramInfo* allocated_histogram_; | 2672 HistogramInfo* allocated_histogram_; |
| 2653 HistogramInfo* promoted_histogram_; | 2673 HistogramInfo* promoted_histogram_; |
| 2654 | 2674 |
| 2655 bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment); | 2675 bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment); |
| 2656 | 2676 |
| 2657 friend class SemiSpaceIterator; | 2677 friend class SemiSpaceIterator; |
| 2658 }; | 2678 }; |
| 2659 | 2679 |
| 2680 // ----------------------------------------------------------------------------- |
| 2681 // Compaction space that is used temporarily during compaction. |
| 2682 |
| 2683 class CompactionSpace : public PagedSpace { |
| 2684 public: |
| 2685 CompactionSpace(Heap* heap, AllocationSpace id, Executability executable) |
| 2686 : PagedSpace(heap, id, executable) {} |
| 2687 |
| 2688 protected: |
| 2689 // The space is temporary and not included in any snapshots. |
| 2690 virtual bool snapshotable() { return false; } |
| 2691 }; |
| 2692 |
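CompactionSpace is a PagedSpace that is local to the compactor and never snapshotted; pages evacuated into it only become visible to the rest of the heap once PagedSpace::MergeCompactionSpace() splices its pages and accounting back into the owning space. A toy analogue of that hand-off, using std containers in place of V8's page lists (illustrative only, not V8 code):

```cpp
#include <cstdint>
#include <string>
#include <utility>
#include <vector>

// Toy analogue of a paged space: a list of "pages" plus byte accounting.
struct ToySpace {
  std::vector<std::string> pages;
  int64_t size_in_bytes = 0;

  void AddPage(std::string page, int64_t bytes) {
    pages.push_back(std::move(page));
    size_in_bytes += bytes;
  }

  // Analogue of PagedSpace::MergeCompactionSpace(): take over the other
  // space's pages and accounting, leaving it empty.
  void Merge(ToySpace* other) {
    for (auto& p : other->pages) pages.push_back(std::move(p));
    other->pages.clear();
    size_in_bytes += other->size_in_bytes;
    other->size_in_bytes = 0;
  }
};

int main() {
  ToySpace old_space;
  old_space.AddPage("page-1", 4096);

  // During evacuation, survivors go into a compactor-private space...
  ToySpace compaction_space;
  compaction_space.AddPage("page-2", 4096);

  // ...and are merged back into the owner once compaction is done.
  old_space.Merge(&compaction_space);
  return 0;
}
```

Keeping evacuation allocations in a private space is what allows the bookkeeping to be folded back in a single step, which the AllocationStats::Merge() added earlier in this CL supports.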
| 2660 | 2693 |
| 2661 // ----------------------------------------------------------------------------- | 2694 // ----------------------------------------------------------------------------- |
| 2662 // Old object space (includes the old space of objects and code space) | 2695 // Old object space (includes the old space of objects and code space) |
| 2663 | 2696 |
| 2664 class OldSpace : public PagedSpace { | 2697 class OldSpace : public PagedSpace { |
| 2665 public: | 2698 public: |
| 2666 // Creates an old space object. The constructor does not allocate pages | 2699 // Creates an old space object. The constructor does not allocate pages |
| 2667 // from OS. | 2700 // from OS. |
| 2668 OldSpace(Heap* heap, AllocationSpace id, Executability executable) | 2701 OldSpace(Heap* heap, AllocationSpace id, Executability executable) |
| 2669 : PagedSpace(heap, id, executable) {} | 2702 : PagedSpace(heap, id, executable) {} |
| (...skipping 181 matching lines...) |
| 2851 count = 0; | 2884 count = 0; |
| 2852 } | 2885 } |
| 2853 // Must be small, since an iteration is used for lookup. | 2886 // Must be small, since an iteration is used for lookup. |
| 2854 static const int kMaxComments = 64; | 2887 static const int kMaxComments = 64; |
| 2855 }; | 2888 }; |
| 2856 #endif | 2889 #endif |
| 2857 } | 2890 } |
| 2858 } // namespace v8::internal | 2891 } // namespace v8::internal |
| 2859 | 2892 |
| 2860 #endif // V8_HEAP_SPACES_H_ | 2893 #endif // V8_HEAP_SPACES_H_ |