OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 797 matching lines...)
808 // platforms, we support this using the CodeRange object, which reserves and | 808 // platforms, we support this using the CodeRange object, which reserves and |
809 // manages a range of virtual memory. | 809 // manages a range of virtual memory. |
810 class CodeRange { | 810 class CodeRange { |
811 public: | 811 public: |
812 explicit CodeRange(Isolate* isolate); | 812 explicit CodeRange(Isolate* isolate); |
813 ~CodeRange() { TearDown(); } | 813 ~CodeRange() { TearDown(); } |
814 | 814 |
815 // Reserves a range of virtual memory, but does not commit any of it. | 815 // Reserves a range of virtual memory, but does not commit any of it. |
816 // Can only be called once, at heap initialization time. | 816 // Can only be called once, at heap initialization time. |
817 // Returns false on failure. | 817 // Returns false on failure. |
818 bool Setup(const size_t requested_size); | 818 bool SetUp(const size_t requested_size); |
819 | 819 |
820 // Frees the range of virtual memory, and frees the data structures used to | 820 // Frees the range of virtual memory, and frees the data structures used to |
821 // manage it. | 821 // manage it. |
822 void TearDown(); | 822 void TearDown(); |
823 | 823 |
824 bool exists() { return this != NULL && code_range_ != NULL; } | 824 bool exists() { return this != NULL && code_range_ != NULL; } |
825 bool contains(Address address) { | 825 bool contains(Address address) { |
826 if (this == NULL || code_range_ == NULL) return false; | 826 if (this == NULL || code_range_ == NULL) return false; |
827 Address start = static_cast<Address>(code_range_->address()); | 827 Address start = static_cast<Address>(code_range_->address()); |
828 return start <= address && address < start + code_range_->size(); | 828 return start <= address && address < start + code_range_->size(); |
(...skipping 107 matching lines...)
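A reviewer-side aside, not part of this change: the contains() check above is a plain half-open interval test over the reserved block. A minimal standalone C++ sketch of that test (Address and the parameters are stand-ins invented here for illustration, not the real V8 types):

    #include <cstddef>
    #include <cstdint>

    typedef uint8_t* Address;

    // True iff address lies in [start, start + size); the end address
    // itself is excluded, matching CodeRange::contains() above.
    bool RangeContains(Address start, size_t size, Address address) {
      return start <= address && address < start + size;
    }

The half-open form lets adjacent ranges tile without overlap, which is why the end address is deliberately excluded.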
936 // pages for large object space. | 936 // pages for large object space. |
937 // | 937 // |
938 // Each space has to manage its own pages. | 938 // Each space has to manage its own pages. |
939 // | 939 // |
940 class MemoryAllocator { | 940 class MemoryAllocator { |
941 public: | 941 public: |
942 explicit MemoryAllocator(Isolate* isolate); | 942 explicit MemoryAllocator(Isolate* isolate); |
943 | 943 |
944 // Initializes its internal bookkeeping structures. | 944 // Initializes its internal bookkeeping structures. |
945 // Max capacity of the total space and executable memory limit. | 945 // Max capacity of the total space and executable memory limit. |
946 bool Setup(intptr_t max_capacity, intptr_t capacity_executable); | 946 bool SetUp(intptr_t max_capacity, intptr_t capacity_executable); |
947 | 947 |
948 void TearDown(); | 948 void TearDown(); |
949 | 949 |
950 Page* AllocatePage(PagedSpace* owner, Executability executable); | 950 Page* AllocatePage(PagedSpace* owner, Executability executable); |
951 | 951 |
952 LargePage* AllocateLargePage(intptr_t object_size, | 952 LargePage* AllocateLargePage(intptr_t object_size, |
953 Executability executable, | 953 Executability executable, |
954 Space* owner); | 954 Space* owner); |
955 | 955 |
956 void Free(MemoryChunk* chunk); | 956 void Free(MemoryChunk* chunk); |
(...skipping 455 matching lines...)
1412 intptr_t max_capacity, | 1412 intptr_t max_capacity, |
1413 AllocationSpace id, | 1413 AllocationSpace id, |
1414 Executability executable); | 1414 Executability executable); |
1415 | 1415 |
1416 virtual ~PagedSpace() {} | 1416 virtual ~PagedSpace() {} |
1417 | 1417 |
1418 // Set up the space using the given address range of virtual memory (from | 1418 // Set up the space using the given address range of virtual memory (from |
1419 // the memory allocator's initial chunk) if possible. If the block of | 1419 // the memory allocator's initial chunk) if possible. If the block of |
1420 // addresses is not big enough to contain a single page-aligned page, a | 1420 // addresses is not big enough to contain a single page-aligned page, a |
1421 // fresh chunk will be allocated. | 1421 // fresh chunk will be allocated. |
1422 bool Setup(); | 1422 bool SetUp(); |
1423 | 1423 |
1424 // Returns true if the space has been successfully set up and not | 1424 // Returns true if the space has been successfully set up and not |
1425 // subsequently torn down. | 1425 // subsequently torn down. |
1426 bool HasBeenSetup(); | 1426 bool HasBeenSetUp(); |
1427 | 1427 |
1428 // Cleans up the space, frees all pages in this space except those belonging | 1428 // Cleans up the space, frees all pages in this space except those belonging |
1429 // to the initial chunk, uncommits addresses in the initial chunk. | 1429 // to the initial chunk, uncommits addresses in the initial chunk. |
1430 void TearDown(); | 1430 void TearDown(); |
1431 | 1431 |
1432 // Checks whether an object/address is in this space. | 1432 // Checks whether an object/address is in this space. |
1433 inline bool Contains(Address a); | 1433 inline bool Contains(Address a); |
1434 bool Contains(HeapObject* o) { return Contains(o->address()); } | 1434 bool Contains(HeapObject* o) { return Contains(o->address()); } |
1435 | 1435 |
1436 // Given an address occupied by a live object, return that object if it is | 1436 // Given an address occupied by a live object, return that object if it is |
(...skipping 25 matching lines...)
1462 // The bytes in the linear allocation area are not included in this total | 1462 // The bytes in the linear allocation area are not included in this total |
1463 // because updating the stats would slow down allocation. New pages are | 1463 // because updating the stats would slow down allocation. New pages are |
1464 // immediately added to the free list so they show up here. | 1464 // immediately added to the free list so they show up here. |
1465 intptr_t Available() { return free_list_.available(); } | 1465 intptr_t Available() { return free_list_.available(); } |
1466 | 1466 |
1467 // Allocated bytes in this space. Garbage bytes that were not found due to | 1467 // Allocated bytes in this space. Garbage bytes that were not found due to |
1468 // lazy sweeping are counted as being allocated! The bytes in the current | 1468 // lazy sweeping are counted as being allocated! The bytes in the current |
1469 // linear allocation area (between top and limit) are also counted here. | 1469 // linear allocation area (between top and limit) are also counted here. |
1470 virtual intptr_t Size() { return accounting_stats_.Size(); } | 1470 virtual intptr_t Size() { return accounting_stats_.Size(); } |
1471 | 1471 |
1472 // As size, but the bytes in the current linear allocation area are not | 1472 // As size, but the bytes in lazily swept pages are estimated and the bytes |
1473 // included. | 1473 // in the current linear allocation area are not included. |
1474 virtual intptr_t SizeOfObjects() { return Size() - (limit() - top()); } | 1474 virtual intptr_t SizeOfObjects() { |
| 1475 ASSERT(!IsSweepingComplete() || (unswept_free_bytes_ == 0)); |
| 1476 return Size() - unswept_free_bytes_ - (limit() - top()); |
| 1477 } |
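For review context: the new accounting treats both the estimated free bytes hiding on unswept pages and the untouched tail of the linear allocation area as not-yet-allocated, so SizeOfObjects() now shrinks as lazy sweeping retires its estimate. A hedged sketch of the arithmetic (the helper and its inputs are invented names, not V8 API):

    #include <cstdint>

    // Mirrors the expression in SizeOfObjects() above. For example,
    // size = 1048576, unswept_free_bytes = 65536, linear_area = 16384
    // yields 966656 object bytes.
    intptr_t EstimatedObjectBytes(intptr_t size,
                                  intptr_t unswept_free_bytes,
                                  intptr_t linear_area) {
      return size - unswept_free_bytes - linear_area;
    }

The ASSERT ties the estimate to the sweeper state: once sweeping is complete, the counter must have drained back to zero.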
1475 | 1478 |
1476 // Wasted bytes in this space. These are just the bytes that were thrown away | 1479 // Wasted bytes in this space. These are just the bytes that were thrown away |
1477 // due to being too small to use for allocation. They do not include the | 1480 // due to being too small to use for allocation. They do not include the |
1478 // free bytes that were not found at all due to lazy sweeping. | 1481 // free bytes that were not found at all due to lazy sweeping. |
1479 virtual intptr_t Waste() { return accounting_stats_.Waste(); } | 1482 virtual intptr_t Waste() { return accounting_stats_.Waste(); } |
1480 | 1483 |
1481 // Returns the allocation pointer in this space. | 1484 // Returns the allocation pointer in this space. |
1482 Address top() { | 1485 Address top() { return allocation_info_.top; } |
1483 return allocation_info_.top; | |
1484 } | |
1485 Address limit() { return allocation_info_.limit; } | 1486 Address limit() { return allocation_info_.limit; } |
1486 | 1487 |
1487 // Allocate the requested number of bytes in the space if possible, return a | 1488 // Allocate the requested number of bytes in the space if possible, return a |
1488 // failure object if not. | 1489 // failure object if not. |
1489 MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes); | 1490 MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes); |
1490 | 1491 |
1491 virtual bool ReserveSpace(int bytes); | 1492 virtual bool ReserveSpace(int bytes); |
1492 | 1493 |
1493 // Give a block of memory to the space's free list. It might be added to | 1494 // Give a block of memory to the space's free list. It might be added to |
1494 // the free list or accounted as waste. | 1495 // the free list or accounted as waste. |
(...skipping 55 matching lines...)
1550 | 1551 |
1551 // Evacuation candidates are swept by evacuator. Needs to return a valid | 1552 // Evacuation candidates are swept by evacuator. Needs to return a valid |
1552 // result before _and_ after evacuation has finished. | 1553 // result before _and_ after evacuation has finished. |
1553 static bool ShouldBeSweptLazily(Page* p) { | 1554 static bool ShouldBeSweptLazily(Page* p) { |
1554 return !p->IsEvacuationCandidate() && | 1555 return !p->IsEvacuationCandidate() && |
1555 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && | 1556 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && |
1556 !p->WasSweptPrecisely(); | 1557 !p->WasSweptPrecisely(); |
1557 } | 1558 } |
1558 | 1559 |
1559 void SetPagesToSweep(Page* first) { | 1560 void SetPagesToSweep(Page* first) { |
| 1561 ASSERT(unswept_free_bytes_ == 0); |
1560 if (first == &anchor_) first = NULL; | 1562 if (first == &anchor_) first = NULL; |
1561 first_unswept_page_ = first; | 1563 first_unswept_page_ = first; |
1562 } | 1564 } |
1563 | 1565 |
| 1566 void MarkPageForLazySweeping(Page* p) { |
| 1567 unswept_free_bytes_ += (Page::kObjectAreaSize - p->LiveBytes()); |
| 1568 } |
| 1569 |
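The per-page estimate added here is the page's object area minus its marked live bytes. A sketch of the counter's intended life cycle, assuming (not visible in this hunk) that AdvanceSweeper() retires the same quantity when it actually sweeps a page; all names are illustrative, not V8 API:

    #include <cstdint>

    struct LazySweepCounter {
      intptr_t unswept_free_bytes;

      LazySweepCounter() : unswept_free_bytes(0) {}

      // Page queued for lazy sweeping: count its dead bytes as
      // reclaimable (an estimate, since sweeping is conservative).
      void OnPageQueued(intptr_t object_area_size, intptr_t live_bytes) {
        unswept_free_bytes += object_area_size - live_bytes;
      }

      // Page actually swept: retire the matching estimate.
      void OnPageSwept(intptr_t object_area_size, intptr_t live_bytes) {
        unswept_free_bytes -= object_area_size - live_bytes;
      }
    };

This pairing is what would make the ASSERTs in SetPagesToSweep() and SizeOfObjects() hold: every addition must eventually be matched by a subtraction.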
1564 bool AdvanceSweeper(intptr_t bytes_to_sweep); | 1570 bool AdvanceSweeper(intptr_t bytes_to_sweep); |
1565 | 1571 |
1566 bool IsSweepingComplete() { | 1572 bool IsSweepingComplete() { |
1567 return !first_unswept_page_->is_valid(); | 1573 return !first_unswept_page_->is_valid(); |
1568 } | 1574 } |
1569 | 1575 |
1570 Page* FirstPage() { return anchor_.next_page(); } | 1576 Page* FirstPage() { return anchor_.next_page(); } |
1571 Page* LastPage() { return anchor_.prev_page(); } | 1577 Page* LastPage() { return anchor_.prev_page(); } |
1572 | 1578 |
1573 // Returns zero for pages that have so little fragmentation that it is not | 1579 // Returns zero for pages that have so little fragmentation that it is not |
(...skipping 66 matching lines...)
1640 AllocationInfo allocation_info_; | 1646 AllocationInfo allocation_info_; |
1641 | 1647 |
1642 // Bytes of each page that cannot be allocated. Possibly non-zero | 1648 // Bytes of each page that cannot be allocated. Possibly non-zero |
1643 // for pages in spaces with only fixed-size objects. Always zero | 1649 // for pages in spaces with only fixed-size objects. Always zero |
1644 // for pages in spaces with variable sized objects (those pages are | 1650 // for pages in spaces with variable sized objects (those pages are |
1645 // padded with free-list nodes). | 1651 // padded with free-list nodes). |
1646 int page_extra_; | 1652 int page_extra_; |
1647 | 1653 |
1648 bool was_swept_conservatively_; | 1654 bool was_swept_conservatively_; |
1649 | 1655 |
| 1656 // The first page to be swept when the lazy sweeper advances. Is set |
| 1657 // to NULL when all pages have been swept. |
1650 Page* first_unswept_page_; | 1658 Page* first_unswept_page_; |
1651 | 1659 |
| 1660 // The number of free bytes which could be reclaimed by advancing the |
| 1661 // lazy sweeper. This is only an estimation because lazy sweeping is |
| 1662 // done conservatively. |
| 1663 intptr_t unswept_free_bytes_; |
| 1664 |
1652 // Expands the space by allocating a fixed number of pages. Returns false if | 1665 // Expands the space by allocating a fixed number of pages. Returns false if |
1653 // it cannot allocate requested number of pages from OS, or if the hard heap | 1666 // it cannot allocate requested number of pages from OS, or if the hard heap |
1654 // size limit has been hit. | 1667 // size limit has been hit. |
1655 bool Expand(); | 1668 bool Expand(); |
1656 | 1669 |
1657 // Generic fast case allocation function that tries linear allocation at the | 1670 // Generic fast case allocation function that tries linear allocation at the |
1658 // address denoted by top in allocation_info_. | 1671 // address denoted by top in allocation_info_. |
1659 inline HeapObject* AllocateLinearly(int size_in_bytes); | 1672 inline HeapObject* AllocateLinearly(int size_in_bytes); |
1660 | 1673 |
1661 // Slow path of AllocateRaw. This function is space-dependent. | 1674 // Slow path of AllocateRaw. This function is space-dependent. |
(...skipping 139 matching lines...)
1801 // Constructor. | 1814 // Constructor. |
1802 SemiSpace(Heap* heap, SemiSpaceId semispace) | 1815 SemiSpace(Heap* heap, SemiSpaceId semispace) |
1803 : Space(heap, NEW_SPACE, NOT_EXECUTABLE), | 1816 : Space(heap, NEW_SPACE, NOT_EXECUTABLE), |
1804 start_(NULL), | 1817 start_(NULL), |
1805 age_mark_(NULL), | 1818 age_mark_(NULL), |
1806 id_(semispace), | 1819 id_(semispace), |
1807 anchor_(this), | 1820 anchor_(this), |
1808 current_page_(NULL) { } | 1821 current_page_(NULL) { } |
1809 | 1822 |
1810 // Sets up the semispace using the given chunk. | 1823 // Sets up the semispace using the given chunk. |
1811 bool Setup(Address start, int initial_capacity, int maximum_capacity); | 1824 bool SetUp(Address start, int initial_capacity, int maximum_capacity); |
1812 | 1825 |
1813 // Tear down the space. Heap memory was not allocated by the space, so it | 1826 // Tear down the space. Heap memory was not allocated by the space, so it |
1814 // is not deallocated here. | 1827 // is not deallocated here. |
1815 void TearDown(); | 1828 void TearDown(); |
1816 | 1829 |
1817 // True if the space has been set up but not torn down. | 1830 // True if the space has been set up but not torn down. |
1818 bool HasBeenSetup() { return start_ != NULL; } | 1831 bool HasBeenSetUp() { return start_ != NULL; } |
1819 | 1832 |
1820 // Grow the semispace to the new capacity. The new capacity | 1833 // Grow the semispace to the new capacity. The new capacity |
1821 // requested must be larger than the current capacity and less than | 1834 // requested must be larger than the current capacity and less than |
1822 // the maximum capacity. | 1835 // the maximum capacity. |
1823 bool GrowTo(int new_capacity); | 1836 bool GrowTo(int new_capacity); |
1824 | 1837 |
1825 // Shrinks the semispace to the new capacity. The new capacity | 1838 // Shrinks the semispace to the new capacity. The new capacity |
1826 // requested must be more than the amount of used memory in the | 1839 // requested must be more than the amount of used memory in the |
1827 // semispace and less than the current capacity. | 1840 // semispace and less than the current capacity. |
1828 bool ShrinkTo(int new_capacity); | 1841 bool ShrinkTo(int new_capacity); |
(...skipping 218 matching lines...)
2047 public: | 2060 public: |
2048 // Constructor. | 2061 // Constructor. |
2049 explicit NewSpace(Heap* heap) | 2062 explicit NewSpace(Heap* heap) |
2050 : Space(heap, NEW_SPACE, NOT_EXECUTABLE), | 2063 : Space(heap, NEW_SPACE, NOT_EXECUTABLE), |
2051 to_space_(heap, kToSpace), | 2064 to_space_(heap, kToSpace), |
2052 from_space_(heap, kFromSpace), | 2065 from_space_(heap, kFromSpace), |
2053 reservation_(), | 2066 reservation_(), |
2054 inline_allocation_limit_step_(0) {} | 2067 inline_allocation_limit_step_(0) {} |
2055 | 2068 |
2056 // Sets up the new space using the given chunk. | 2069 // Sets up the new space using the given chunk. |
2057 bool Setup(int reserved_semispace_size_, int max_semispace_size); | 2070 bool SetUp(int reserved_semispace_size_, int max_semispace_size); |
2058 | 2071 |
2059 // Tears down the space. Heap memory was not allocated by the space, so it | 2072 // Tears down the space. Heap memory was not allocated by the space, so it |
2060 // is not deallocated here. | 2073 // is not deallocated here. |
2061 void TearDown(); | 2074 void TearDown(); |
2062 | 2075 |
2063 // True if the space has been set up but not torn down. | 2076 // True if the space has been set up but not torn down. |
2064 bool HasBeenSetup() { | 2077 bool HasBeenSetUp() { |
2065 return to_space_.HasBeenSetup() && from_space_.HasBeenSetup(); | 2078 return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp(); |
2066 } | 2079 } |
2067 | 2080 |
2068 // Flip the pair of spaces. | 2081 // Flip the pair of spaces. |
2069 void Flip(); | 2082 void Flip(); |
2070 | 2083 |
2071 // Grow the capacity of the semispaces. Assumes that they are not at | 2084 // Grow the capacity of the semispaces. Assumes that they are not at |
2072 // their maximum capacity. | 2085 // their maximum capacity. |
2073 void Grow(); | 2086 void Grow(); |
2074 | 2087 |
2075 // Shrink the capacity of the semispaces. | 2088 // Shrink the capacity of the semispaces. |
(...skipping 378 matching lines...)
2454 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset). | 2467 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset). |
2455 // A large object always starts at Page::kObjectStartOffset to a page. | 2468 // A large object always starts at Page::kObjectStartOffset to a page. |
2456 // Large objects do not move during garbage collections. | 2469 // Large objects do not move during garbage collections. |
2457 | 2470 |
2458 class LargeObjectSpace : public Space { | 2471 class LargeObjectSpace : public Space { |
2459 public: | 2472 public: |
2460 LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id); | 2473 LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id); |
2461 virtual ~LargeObjectSpace() {} | 2474 virtual ~LargeObjectSpace() {} |
2462 | 2475 |
2463 // Initializes internal data structures. | 2476 // Initializes internal data structures. |
2464 bool Setup(); | 2477 bool SetUp(); |
2465 | 2478 |
2466 // Releases internal resources, frees objects in this space. | 2479 // Releases internal resources, frees objects in this space. |
2467 void TearDown(); | 2480 void TearDown(); |
2468 | 2481 |
2469 static intptr_t ObjectSizeFor(intptr_t chunk_size) { | 2482 static intptr_t ObjectSizeFor(intptr_t chunk_size) { |
2470 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0; | 2483 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0; |
2471 return chunk_size - Page::kPageSize - Page::kObjectStartOffset; | 2484 return chunk_size - Page::kPageSize - Page::kObjectStartOffset; |
2472 } | 2485 } |
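For reviewers, the arithmetic in ObjectSizeFor() peels one page of alignment slack plus the object start offset off the chunk size. A worked example with invented constants (the real kPageSize and kObjectStartOffset values are build-dependent and not shown in this hunk):

    // Invented constants for illustration only:
    //   kPageSize          = 0x10000   (64 KB)
    //   kObjectStartOffset = 0x20
    // A chunk of 0x30000 bytes can then hold an object of
    //   0x30000 - 0x10000 - 0x20 = 0x1FFE0 bytes,
    // while any chunk of at most 0x10020 bytes holds none (returns 0).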
2473 | 2486 |
2474 // Shared implementation of AllocateRaw, AllocateRawCode and | 2487 // Shared implementation of AllocateRaw, AllocateRawCode and |
(...skipping 155 matching lines...)
2630 } | 2643 } |
2631 // Must be small, since an iteration is used for lookup. | 2644 // Must be small, since an iteration is used for lookup. |
2632 static const int kMaxComments = 64; | 2645 static const int kMaxComments = 64; |
2633 }; | 2646 }; |
2634 #endif | 2647 #endif |
2635 | 2648 |
2636 | 2649 |
2637 } } // namespace v8::internal | 2650 } } // namespace v8::internal |
2638 | 2651 |
2639 #endif // V8_SPACES_H_ | 2652 #endif // V8_SPACES_H_ |