| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_SPACES_H_ | 5 #ifndef V8_SPACES_H_ |
| 6 #define V8_SPACES_H_ | 6 #define V8_SPACES_H_ |
| 7 | 7 |
| 8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
| 9 #include "src/base/atomicops.h" | 9 #include "src/base/atomicops.h" |
| 10 #include "src/base/platform/mutex.h" | 10 #include "src/base/platform/mutex.h" |
| (...skipping 1601 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1612 } | 1612 } |
| 1613 | 1613 |
| 1614 // Place a node on the free list. The block of size 'size_in_bytes' | 1614 // Place a node on the free list. The block of size 'size_in_bytes' |
| 1615 // starting at 'start' is placed on the free list. The return value is the | 1615 // starting at 'start' is placed on the free list. The return value is the |
| 1616 // number of bytes that have been lost due to internal fragmentation by | 1616 // number of bytes that have been lost due to internal fragmentation by |
| 1617 // freeing the block. Bookkeeping information will be written to the block, | 1617 // freeing the block. Bookkeeping information will be written to the block, |
| 1618 // i.e., its contents will be destroyed. The start address should be word | 1618 // i.e., its contents will be destroyed. The start address should be word |
| 1619 // aligned, and the size should be a non-zero multiple of the word size. | 1619 // aligned, and the size should be a non-zero multiple of the word size. |
| 1620 int Free(Address start, int size_in_bytes); | 1620 int Free(Address start, int size_in_bytes); |
| 1621 | 1621 |
| 1622 // This method returns how much memory can be allocated after freeing | |
| 1623 // maximum_freed memory. | |
| 1624 int GuaranteedAllocatable(int maximum_freed) { | |
| 1625 if (maximum_freed < kSmallListMin) { | |
| 1626 return 0; | |
| 1627 } else if (maximum_freed <= kSmallListMax) { | |
| 1628 return kSmallAllocationMax; | |
| 1629 } else if (maximum_freed <= kMediumListMax) { | |
| 1630 return kMediumAllocationMax; | |
| 1631 } else if (maximum_freed <= kLargeListMax) { | |
| 1632 return kLargeAllocationMax; | |
| 1633 } | |
| 1634 return maximum_freed; | |
| 1635 } | |
| 1636 | |
| 1637 // Allocate a block of size 'size_in_bytes' from the free list. The block | 1622 // Allocate a block of size 'size_in_bytes' from the free list. The block |
| 1638 // is uninitialized. A failure is returned if no block is available. The | 1623 // is uninitialized. A failure is returned if no block is available. The |
| 1639 // number of bytes lost to fragmentation is returned in the output parameter | 1624 // number of bytes lost to fragmentation is returned in the output parameter |
| 1640 // 'wasted_bytes'. The size should be a non-zero multiple of the word size. | 1625 // 'wasted_bytes'. The size should be a non-zero multiple of the word size. |
| 1641 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes); | 1626 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes); |
| 1642 | 1627 |
| 1643 bool IsEmpty() { | 1628 bool IsEmpty() { |
| 1644 return small_list_.IsEmpty() && medium_list_.IsEmpty() && | 1629 return small_list_.IsEmpty() && medium_list_.IsEmpty() && |
| 1645 large_list_.IsEmpty() && huge_list_.IsEmpty(); | 1630 large_list_.IsEmpty() && huge_list_.IsEmpty(); |
| 1646 } | 1631 } |
| (...skipping 266 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1913 | 1898 |
| 1914 // Report code object related statistics | 1899 // Report code object related statistics |
| 1915 void CollectCodeStatistics(); | 1900 void CollectCodeStatistics(); |
| 1916 static void ReportCodeStatistics(Isolate* isolate); | 1901 static void ReportCodeStatistics(Isolate* isolate); |
| 1917 static void ResetCodeStatistics(Isolate* isolate); | 1902 static void ResetCodeStatistics(Isolate* isolate); |
| 1918 #endif | 1903 #endif |
| 1919 | 1904 |
| 1920 bool is_iterable() { return is_iterable_; } | 1905 bool is_iterable() { return is_iterable_; } |
| 1921 void set_is_iterable(bool b) { is_iterable_ = b; } | 1906 void set_is_iterable(bool b) { is_iterable_ = b; } |
| 1922 | 1907 |
| 1908 bool is_swept_concurrently() { return is_swept_concurrently_; } |
| 1909 void set_is_swept_concurrently(bool b) { is_swept_concurrently_ = b; } |
| 1910 |
| 1923 // Evacuation candidates are swept by evacuator. Needs to return a valid | 1911 // Evacuation candidates are swept by evacuator. Needs to return a valid |
| 1924 // result before _and_ after evacuation has finished. | 1912 // result before _and_ after evacuation has finished. |
| 1925 static bool ShouldBeSweptBySweeperThreads(Page* p) { | 1913 static bool ShouldBeSweptBySweeperThreads(Page* p) { |
| 1926 return !p->IsEvacuationCandidate() && | 1914 return !p->IsEvacuationCandidate() && |
| 1927 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && | 1915 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && |
| 1928 !p->WasSweptPrecisely(); | 1916 !p->WasSweptPrecisely(); |
| 1929 } | 1917 } |
| 1930 | 1918 |
| 1931 void IncrementUnsweptFreeBytes(intptr_t by) { | 1919 void IncrementUnsweptFreeBytes(intptr_t by) { |
| 1932 unswept_free_bytes_ += by; | 1920 unswept_free_bytes_ += by; |
| (...skipping 64 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1997 | 1985 |
| 1998 // The space's free list. | 1986 // The space's free list. |
| 1999 FreeList free_list_; | 1987 FreeList free_list_; |
| 2000 | 1988 |
| 2001 // Normal allocation information. | 1989 // Normal allocation information. |
| 2002 AllocationInfo allocation_info_; | 1990 AllocationInfo allocation_info_; |
| 2003 | 1991 |
| 2004 // This space was swept precisely, hence it is iterable. | 1992 // This space was swept precisely, hence it is iterable. |
| 2005 bool is_iterable_; | 1993 bool is_iterable_; |
| 2006 | 1994 |
| 1995 // This space is currently swept by sweeper threads. |
| 1996 bool is_swept_concurrently_; |
| 1997 |
| 2007 // The number of free bytes which could be reclaimed by advancing the | 1998 // The number of free bytes which could be reclaimed by advancing the |
| 2008 // concurrent sweeper threads. This is only an estimation because concurrent | 1999 // concurrent sweeper threads. This is only an estimation because concurrent |
| 2009 // sweeping is done conservatively. | 2000 // sweeping is done conservatively. |
| 2010 intptr_t unswept_free_bytes_; | 2001 intptr_t unswept_free_bytes_; |
| 2011 | 2002 |
| 2012 // The sweeper threads iterate over the list of pointer and data space pages | 2003 // The sweeper threads iterate over the list of pointer and data space pages |
| 2013 // and sweep these pages concurrently. They will stop sweeping after the | 2004 // and sweep these pages concurrently. They will stop sweeping after the |
| 2014 // end_of_unswept_pages_ page. | 2005 // end_of_unswept_pages_ page. |
| 2015 Page* end_of_unswept_pages_; | 2006 Page* end_of_unswept_pages_; |
| 2016 | 2007 |
| 2017 // Expands the space by allocating a fixed number of pages. Returns false if | 2008 // Expands the space by allocating a fixed number of pages. Returns false if |
| 2018 // it cannot allocate requested number of pages from OS, or if the hard heap | 2009 // it cannot allocate requested number of pages from OS, or if the hard heap |
| 2019 // size limit has been hit. | 2010 // size limit has been hit. |
| 2020 bool Expand(); | 2011 bool Expand(); |
| 2021 | 2012 |
| 2022 // Generic fast case allocation function that tries linear allocation at the | 2013 // Generic fast case allocation function that tries linear allocation at the |
| 2023 // address denoted by top in allocation_info_. | 2014 // address denoted by top in allocation_info_. |
| 2024 inline HeapObject* AllocateLinearly(int size_in_bytes); | 2015 inline HeapObject* AllocateLinearly(int size_in_bytes); |
| 2025 | 2016 |
| 2026 // If sweeping is still in progress try to sweep unswept pages. If that is | 2017 // If sweeping is still in progress try to sweep unswept pages. If that is |
| 2027 // not successful, wait for the sweeper threads and re-try free-list | 2018 // not successful, wait for the sweeper threads and re-try free-list |
| 2028 // allocation. | 2019 // allocation. |
| 2029 MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation( | 2020 MUST_USE_RESULT HeapObject* EnsureSweepingProgress(int size_in_bytes); |
| 2030 int size_in_bytes); | |
| 2031 | 2021 |
| 2032 // Slow path of AllocateRaw. This function is space-dependent. | 2022 // Slow path of AllocateRaw. This function is space-dependent. |
| 2033 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes); | 2023 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes); |
| 2034 | 2024 |
| 2035 friend class PageIterator; | 2025 friend class PageIterator; |
| 2036 friend class MarkCompactCollector; | 2026 friend class MarkCompactCollector; |
| 2037 }; | 2027 }; |
| 2038 | 2028 |
| 2039 | 2029 |
| 2040 class NumberAndSizeInfo BASE_EMBEDDED { | 2030 class NumberAndSizeInfo BASE_EMBEDDED { |
| (...skipping 985 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3026 } | 3016 } |
| 3027 // Must be small, since an iteration is used for lookup. | 3017 // Must be small, since an iteration is used for lookup. |
| 3028 static const int kMaxComments = 64; | 3018 static const int kMaxComments = 64; |
| 3029 }; | 3019 }; |
| 3030 #endif | 3020 #endif |
| 3031 | 3021 |
| 3032 | 3022 |
| 3033 } } // namespace v8::internal | 3023 } } // namespace v8::internal |
| 3034 | 3024 |
| 3035 #endif // V8_SPACES_H_ | 3025 #endif // V8_SPACES_H_ |
| OLD | NEW |