OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_SPACES_H_ | 5 #ifndef V8_SPACES_H_ |
6 #define V8_SPACES_H_ | 6 #define V8_SPACES_H_ |
7 | 7 |
8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
9 #include "src/base/atomicops.h" | 9 #include "src/base/atomicops.h" |
10 #include "src/base/platform/mutex.h" | 10 #include "src/base/platform/mutex.h" |
(...skipping 1601 matching lines...) |
1612 } | 1612 } |
1613 | 1613 |
1614 // Place a node on the free list. The block of size 'size_in_bytes' | 1614 // Place a node on the free list. The block of size 'size_in_bytes' |
1615 // starting at 'start' is placed on the free list. The return value is the | 1615 // starting at 'start' is placed on the free list. The return value is the |
1616 // number of bytes that have been lost due to internal fragmentation by | 1616 // number of bytes that have been lost due to internal fragmentation by |
1617 // freeing the block. Bookkeeping information will be written to the block, | 1617 // freeing the block. Bookkeeping information will be written to the block, |
1618 // i.e., its contents will be destroyed. The start address should be word | 1618 // i.e., its contents will be destroyed. The start address should be word |
1619 // aligned, and the size should be a non-zero multiple of the word size. | 1619 // aligned, and the size should be a non-zero multiple of the word size. |
1620 int Free(Address start, int size_in_bytes); | 1620 int Free(Address start, int size_in_bytes); |
1621 | 1621 |
| 1622 // Returns how much memory is guaranteed to be allocatable after |
| 1623 // freeing maximum_freed bytes. |
| 1624 static inline int GuaranteedAllocatable(int maximum_freed) { |
| 1625 if (maximum_freed < kSmallListMin) { |
| 1626 return 0; |
| 1627 } else if (maximum_freed <= kSmallListMax) { |
| 1628 return kSmallAllocationMax; |
| 1629 } else if (maximum_freed <= kMediumListMax) { |
| 1630 return kMediumAllocationMax; |
| 1631 } else if (maximum_freed <= kLargeListMax) { |
| 1632 return kLargeAllocationMax; |
| 1633 } |
| 1634 return maximum_freed; |
| 1635 } |
| 1636 |
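
The new GuaranteedAllocatable() helper is a pure step function of its argument: a freed block only guarantees the largest request that every block in its size category can satisfy. A minimal standalone sketch of that mapping, using made-up threshold constants (the real kSmallListMin etc. are private FreeList constants and depend on the word size):

#include <cstdio>

// Stand-in thresholds for illustration only; these are NOT V8's values.
const int kSmallListMin = 24;
const int kSmallListMax = 255;
const int kMediumListMax = 2047;
const int kLargeListMax = 16383;
const int kSmallAllocationMax = kSmallListMin;
const int kMediumAllocationMax = kSmallListMax;
const int kLargeAllocationMax = kMediumListMax;

int GuaranteedAllocatable(int maximum_freed) {
  if (maximum_freed < kSmallListMin) return 0;
  if (maximum_freed <= kSmallListMax) return kSmallAllocationMax;
  if (maximum_freed <= kMediumListMax) return kMediumAllocationMax;
  if (maximum_freed <= kLargeListMax) return kLargeAllocationMax;
  return maximum_freed;
}

int main() {
  const int samples[] = {8, 100, 1000, 10000, 100000};
  for (int i = 0; i < 5; ++i) {
    std::printf("freed %6d -> guaranteed %6d\n", samples[i],
                GuaranteedAllocatable(samples[i]));
  }
  return 0;
}
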
1622 // Allocate a block of size 'size_in_bytes' from the free list. The block | 1637 // Allocate a block of size 'size_in_bytes' from the free list. The block |
1623 // is uninitialized. A failure is returned if no block is available. The | 1638 // is uninitialized. A failure is returned if no block is available. The |
1624 // number of bytes lost to fragmentation is returned in the output parameter | 1639 // number of bytes lost to fragmentation is returned in the output parameter |
1625 // 'wasted_bytes'. The size should be a non-zero multiple of the word size. | 1640 // 'wasted_bytes'. The size should be a non-zero multiple of the word size. |
1626 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes); | 1641 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes); |
1627 | 1642 |
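
To make the Free()/Allocate() contract documented above concrete, here is a deliberately simplified sketch: one flat list instead of V8's small/medium/large/huge categories, with an invented kMinBlockSize. The point it illustrates is the wasted-bytes bookkeeping: blocks too small to carry free-list bookkeeping are dropped entirely, and Allocate() re-frees the unused tail of the block it picks.

#include <vector>

class ToyFreeList {
 public:
  static const int kMinBlockSize = 24;  // illustrative only, not V8's value

  // Returns the bytes lost to internal fragmentation by freeing this block.
  int Free(char* start, int size_in_bytes) {
    if (size_in_bytes < kMinBlockSize) return size_in_bytes;  // all wasted
    Node n = {start, size_in_bytes};
    nodes_.push_back(n);
    return 0;
  }

  // First-fit allocation; returns NULL when no block is large enough. The
  // unused tail of the chosen block goes back through Free(), so
  // *wasted_bytes reports only what Free() could not keep.
  char* Allocate(int size_in_bytes, int* wasted_bytes) {
    *wasted_bytes = 0;
    for (size_t i = 0; i < nodes_.size(); ++i) {
      if (nodes_[i].size >= size_in_bytes) {
        Node n = nodes_[i];
        nodes_.erase(nodes_.begin() + i);
        *wasted_bytes = Free(n.start + size_in_bytes, n.size - size_in_bytes);
        return n.start;
      }
    }
    return 0;
  }

 private:
  struct Node {
    char* start;
    int size;
  };
  std::vector<Node> nodes_;
};
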
1628 bool IsEmpty() { | 1643 bool IsEmpty() { |
1629 return small_list_.IsEmpty() && medium_list_.IsEmpty() && | 1644 return small_list_.IsEmpty() && medium_list_.IsEmpty() && |
1630 large_list_.IsEmpty() && huge_list_.IsEmpty(); | 1645 large_list_.IsEmpty() && huge_list_.IsEmpty(); |
1631 } | 1646 } |
(...skipping 266 matching lines...) |
1898 | 1913 |
1899 // Report code object related statistics | 1914 // Report code object related statistics |
1900 void CollectCodeStatistics(); | 1915 void CollectCodeStatistics(); |
1901 static void ReportCodeStatistics(Isolate* isolate); | 1916 static void ReportCodeStatistics(Isolate* isolate); |
1902 static void ResetCodeStatistics(Isolate* isolate); | 1917 static void ResetCodeStatistics(Isolate* isolate); |
1903 #endif | 1918 #endif |
1904 | 1919 |
1905 bool is_iterable() { return is_iterable_; } | 1920 bool is_iterable() { return is_iterable_; } |
1906 void set_is_iterable(bool b) { is_iterable_ = b; } | 1921 void set_is_iterable(bool b) { is_iterable_ = b; } |
1907 | 1922 |
1908 bool is_swept_concurrently() { return is_swept_concurrently_; } | |
1909 void set_is_swept_concurrently(bool b) { is_swept_concurrently_ = b; } | |
1910 | |
1911 // Evacuation candidates are swept by evacuator. Needs to return a valid | 1923 // Evacuation candidates are swept by evacuator. Needs to return a valid |
1912 // result before _and_ after evacuation has finished. | 1924 // result before _and_ after evacuation has finished. |
1913 static bool ShouldBeSweptBySweeperThreads(Page* p) { | 1925 static bool ShouldBeSweptBySweeperThreads(Page* p) { |
1914 return !p->IsEvacuationCandidate() && | 1926 return !p->IsEvacuationCandidate() && |
1915 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && | 1927 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && |
1916 !p->WasSweptPrecisely(); | 1928 !p->WasSweptPrecisely(); |
1917 } | 1929 } |
1918 | 1930 |
1919 void IncrementUnsweptFreeBytes(intptr_t by) { | 1931 void IncrementUnsweptFreeBytes(intptr_t by) { |
1920 unswept_free_bytes_ += by; | 1932 unswept_free_bytes_ += by; |
(...skipping 64 matching lines...) |
1985 | 1997 |
1986 // The space's free list. | 1998 // The space's free list. |
1987 FreeList free_list_; | 1999 FreeList free_list_; |
1988 | 2000 |
1989 // Normal allocation information. | 2001 // Normal allocation information. |
1990 AllocationInfo allocation_info_; | 2002 AllocationInfo allocation_info_; |
1991 | 2003 |
1992 // This space was swept precisely, hence it is iterable. | 2004 // This space was swept precisely, hence it is iterable. |
1993 bool is_iterable_; | 2005 bool is_iterable_; |
1994 | 2006 |
1995 // This space is currently swept by sweeper threads. | |
1996 bool is_swept_concurrently_; | |
1997 | |
1998 // The number of free bytes which could be reclaimed by advancing the | 2007 // The number of free bytes which could be reclaimed by advancing the |
1999 // concurrent sweeper threads. This is only an estimation because concurrent | 2008 // concurrent sweeper threads. This is only an estimation because concurrent |
2000 // sweeping is done conservatively. | 2009 // sweeping is done conservatively. |
2001 intptr_t unswept_free_bytes_; | 2010 intptr_t unswept_free_bytes_; |
2002 | 2011 |
2003 // The sweeper threads iterate over the list of pointer and data space pages | 2012 // The sweeper threads iterate over the list of pointer and data space pages |
2004 // and sweep these pages concurrently. They will stop sweeping after the | 2013 // and sweep these pages concurrently. They will stop sweeping after the |
2005 // end_of_unswept_pages_ page. | 2014 // end_of_unswept_pages_ page. |
2006 Page* end_of_unswept_pages_; | 2015 Page* end_of_unswept_pages_; |
2007 | 2016 |
2008 // Expands the space by allocating a fixed number of pages. Returns false if | 2017 // Expands the space by allocating a fixed number of pages. Returns false if |
2009 // it cannot allocate the requested number of pages from the OS, or if the | 2018 // it cannot allocate the requested number of pages from the OS, or if the |
2010 // hard heap size limit has been hit. | 2019 // hard heap size limit has been hit. |
2011 bool Expand(); | 2020 bool Expand(); |
2012 | 2021 |
2013 // Generic fast case allocation function that tries linear allocation at the | 2022 // Generic fast case allocation function that tries linear allocation at the |
2014 // address denoted by top in allocation_info_. | 2023 // address denoted by top in allocation_info_. |
2015 inline HeapObject* AllocateLinearly(int size_in_bytes); | 2024 inline HeapObject* AllocateLinearly(int size_in_bytes); |
2016 | 2025 |
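
AllocateLinearly() is the classic bump-pointer fast path. A minimal sketch, assuming a top/limit pair like allocation_info_ (the struct and field names here are illustrative, not V8's):

#include <cstddef>

struct ToyAllocationInfo {
  char* top;    // next free address in the current linear area
  char* limit;  // end of the current linear area
};

inline void* AllocateLinearly(ToyAllocationInfo* info, size_t size_in_bytes) {
  if (static_cast<size_t>(info->limit - info->top) < size_in_bytes) {
    return 0;  // fast path fails; the caller falls back to the free list
  }
  void* result = info->top;
  info->top += size_in_bytes;  // bump past the newly allocated object
  return result;
}
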
2017 // If sweeping is still in progress, try to sweep unswept pages. If that is | 2026 // If sweeping is still in progress, try to sweep unswept pages. If that is |
2018 // not successful, wait for the sweeper threads and re-try free-list | 2027 // not successful, wait for the sweeper threads and re-try free-list |
2019 // allocation. | 2028 // allocation. |
2020 MUST_USE_RESULT HeapObject* EnsureSweepingProgress(int size_in_bytes); | 2029 MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation( |
| 2030 int size_in_bytes); |
2021 | 2031 |
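
The rename makes the slow path's behavior explicit. A control-flow sketch of what the comment describes; every callee below is a hypothetical stub standing in for the real heap/collector interfaces, so this compiles but is not runnable as-is:

struct HeapObject;

bool SweepingInProgress();                             // stub
void SweepUnsweptPages(int size_in_bytes);             // stub: sweep in-line
void WaitForSweeperThreads();                          // stub: block until done
HeapObject* TryFreeListAllocation(int size_in_bytes);  // stub

HeapObject* WaitAndRetrySketch(int size_in_bytes) {
  if (!SweepingInProgress()) return 0;  // nothing left to reclaim by sweeping
  // First, try to make progress in-line by sweeping unswept pages.
  SweepUnsweptPages(size_in_bytes);
  HeapObject* object = TryFreeListAllocation(size_in_bytes);
  if (object != 0) return object;
  // Otherwise block until the sweeper threads finish, then retry once.
  WaitForSweeperThreads();
  return TryFreeListAllocation(size_in_bytes);
}
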
2022 // Slow path of AllocateRaw. This function is space-dependent. | 2032 // Slow path of AllocateRaw. This function is space-dependent. |
2023 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes); | 2033 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes); |
2024 | 2034 |
2025 friend class PageIterator; | 2035 friend class PageIterator; |
2026 friend class MarkCompactCollector; | 2036 friend class MarkCompactCollector; |
2027 }; | 2037 }; |
2028 | 2038 |
2029 | 2039 |
2030 class NumberAndSizeInfo BASE_EMBEDDED { | 2040 class NumberAndSizeInfo BASE_EMBEDDED { |
(...skipping 985 matching lines...) |
3016 } | 3026 } |
3017 // Must be small, since an iteration is used for lookup. | 3027 // Must be small, since an iteration is used for lookup. |
3018 static const int kMaxComments = 64; | 3028 static const int kMaxComments = 64; |
3019 }; | 3029 }; |
3020 #endif | 3030 #endif |
3021 | 3031 |
3022 | 3032 |
3023 } } // namespace v8::internal | 3033 } } // namespace v8::internal |
3024 | 3034 |
3025 #endif // V8_SPACES_H_ | 3035 #endif // V8_SPACES_H_ |