| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #include "src/base/platform/platform.h" | 7 #include "src/base/platform/platform.h" |
| 8 #include "src/full-codegen.h" | 8 #include "src/full-codegen.h" |
| 9 #include "src/macro-assembler.h" | 9 #include "src/macro-assembler.h" |
| 10 #include "src/mark-compact.h" | 10 #include "src/mark-compact.h" |
| (...skipping 49 matching lines...) |
| 60 size_func); | 60 size_func); |
| 61 ASSERT(page->WasSweptPrecisely()); | 61 ASSERT(page->WasSweptPrecisely()); |
| 62 } | 62 } |
| 63 | 63 |
| 64 | 64 |
| 65 void HeapObjectIterator::Initialize(PagedSpace* space, | 65 void HeapObjectIterator::Initialize(PagedSpace* space, |
| 66 Address cur, Address end, | 66 Address cur, Address end, |
| 67 HeapObjectIterator::PageMode mode, | 67 HeapObjectIterator::PageMode mode, |
| 68 HeapObjectCallback size_f) { | 68 HeapObjectCallback size_f) { |
| 69 // Check that we actually can iterate this space. | 69 // Check that we actually can iterate this space. |
| 70 ASSERT(!space->was_swept_conservatively()); | 70 ASSERT(space->is_iterable()); |
| 71 | 71 |
| 72 space_ = space; | 72 space_ = space; |
| 73 cur_addr_ = cur; | 73 cur_addr_ = cur; |
| 74 cur_end_ = end; | 74 cur_end_ = end; |
| 75 page_mode_ = mode; | 75 page_mode_ = mode; |
| 76 size_func_ = size_f; | 76 size_func_ = size_f; |
| 77 } | 77 } |
| 78 | 78 |
| 79 | 79 |
| 80 // We have hit the end of the page and should advance to the next block of | 80 // We have hit the end of the page and should advance to the next block of |
| (...skipping 847 matching lines...) |
| 928 | 928 |
| 929 // ----------------------------------------------------------------------------- | 929 // ----------------------------------------------------------------------------- |
| 930 // PagedSpace implementation | 930 // PagedSpace implementation |
| 931 | 931 |
| 932 PagedSpace::PagedSpace(Heap* heap, | 932 PagedSpace::PagedSpace(Heap* heap, |
| 933 intptr_t max_capacity, | 933 intptr_t max_capacity, |
| 934 AllocationSpace id, | 934 AllocationSpace id, |
| 935 Executability executable) | 935 Executability executable) |
| 936 : Space(heap, id, executable), | 936 : Space(heap, id, executable), |
| 937 free_list_(this), | 937 free_list_(this), |
| 938 was_swept_conservatively_(false), | 938 is_iterable_(true), |
| | 939 is_swept_concurrently_(false), |
| 939 unswept_free_bytes_(0), | 940 unswept_free_bytes_(0), |
| 940 end_of_unswept_pages_(NULL) { | 941 end_of_unswept_pages_(NULL) { |
| 941 if (id == CODE_SPACE) { | 942 if (id == CODE_SPACE) { |
| 942 area_size_ = heap->isolate()->memory_allocator()-> | 943 area_size_ = heap->isolate()->memory_allocator()-> |
| 943 CodePageAreaSize(); | 944 CodePageAreaSize(); |
| 944 } else { | 945 } else { |
| 945 area_size_ = Page::kPageSize - Page::kObjectStartOffset; | 946 area_size_ = Page::kPageSize - Page::kObjectStartOffset; |
| 946 } | 947 } |
| 947 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) | 948 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) |
| 948 * AreaSize(); | 949 * AreaSize(); |
| (...skipping 201 matching lines...) |
| 1150 } | 1151 } |
| 1151 | 1152 |
| 1152 | 1153 |
| 1153 #ifdef DEBUG | 1154 #ifdef DEBUG |
| 1154 void PagedSpace::Print() { } | 1155 void PagedSpace::Print() { } |
| 1155 #endif | 1156 #endif |
| 1156 | 1157 |
| 1157 #ifdef VERIFY_HEAP | 1158 #ifdef VERIFY_HEAP |
| 1158 void PagedSpace::Verify(ObjectVisitor* visitor) { | 1159 void PagedSpace::Verify(ObjectVisitor* visitor) { |
| 1159 // We can only iterate over the pages if they were swept precisely. | 1160 // We can only iterate over the pages if they were swept precisely. |
| 1160 if (was_swept_conservatively_) return; | 1161 if (!is_iterable_) return; |
| 1161 | 1162 |
| 1162 bool allocation_pointer_found_in_space = | 1163 bool allocation_pointer_found_in_space = |
| 1163 (allocation_info_.top() == allocation_info_.limit()); | 1164 (allocation_info_.top() == allocation_info_.limit()); |
| 1164 PageIterator page_iterator(this); | 1165 PageIterator page_iterator(this); |
| 1165 while (page_iterator.has_next()) { | 1166 while (page_iterator.has_next()) { |
| 1166 Page* page = page_iterator.next(); | 1167 Page* page = page_iterator.next(); |
| 1167 CHECK(page->owner() == this); | 1168 CHECK(page->owner() == this); |
| 1168 if (page == Page::FromAllocationTop(allocation_info_.top())) { | 1169 if (page == Page::FromAllocationTop(allocation_info_.top())) { |
| 1169 allocation_pointer_found_in_space = true; | 1170 allocation_pointer_found_in_space = true; |
| 1170 } | 1171 } |
| (...skipping 1368 matching lines...) |
| 2539 // This counter will be increased for pages which will be swept by the | 2540 // This counter will be increased for pages which will be swept by the |
| 2540 // sweeper threads. | 2541 // sweeper threads. |
| 2541 unswept_free_bytes_ = 0; | 2542 unswept_free_bytes_ = 0; |
| 2542 | 2543 |
| 2543 // Clear the free list before a full GC---it will be rebuilt afterward. | 2544 // Clear the free list before a full GC---it will be rebuilt afterward. |
| 2544 free_list_.Reset(); | 2545 free_list_.Reset(); |
| 2545 } | 2546 } |
| 2546 | 2547 |
| 2547 | 2548 |
| 2548 intptr_t PagedSpace::SizeOfObjects() { | 2549 intptr_t PagedSpace::SizeOfObjects() { |
| 2549 ASSERT(heap()->mark_compact_collector()->IsConcurrentSweepingInProgress() || | 2550 ASSERT(heap()->mark_compact_collector()-> |
| 2550 (unswept_free_bytes_ == 0)); | 2551 IsConcurrentSweepingInProgress(this) || (unswept_free_bytes_ == 0)); |
| 2551 return Size() - unswept_free_bytes_ - (limit() - top()); | 2552 return Size() - unswept_free_bytes_ - (limit() - top()); |
| 2552 } | 2553 } |
| 2553 | 2554 |
| 2554 | 2555 |
| 2555 // After we have booted, we have created a map which represents free space | 2556 // After we have booted, we have created a map which represents free space |
| 2556 // on the heap. If there was already a free list then the elements on it | 2557 // on the heap. If there was already a free list then the elements on it |
| 2557 // were created with the wrong FreeSpaceMap (normally NULL), so we need to | 2558 // were created with the wrong FreeSpaceMap (normally NULL), so we need to |
| 2558 // fix them. | 2559 // fix them. |
| 2559 void PagedSpace::RepairFreeListsAfterBoot() { | 2560 void PagedSpace::RepairFreeListsAfterBoot() { |
| 2560 free_list_.RepairLists(heap()); | 2561 free_list_.RepairLists(heap()); |
| (...skipping 14 matching lines...) |
| 2575 allocation_info_.set_limit(NULL); | 2576 allocation_info_.set_limit(NULL); |
| 2576 } | 2577 } |
| 2577 } | 2578 } |
| 2578 | 2579 |
| 2579 | 2580 |
| 2580 HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation( | 2581 HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation( |
| 2581 int size_in_bytes) { | 2582 int size_in_bytes) { |
| 2582 MarkCompactCollector* collector = heap()->mark_compact_collector(); | 2583 MarkCompactCollector* collector = heap()->mark_compact_collector(); |
| 2583 | 2584 |
| 2584 // If sweeper threads are still running, wait for them. | 2585 // If sweeper threads are still running, wait for them. |
| 2585 if (collector->IsConcurrentSweepingInProgress()) { | 2586 if (collector->IsConcurrentSweepingInProgress(this)) { |
| 2586 collector->WaitUntilSweepingCompleted(); | 2587 collector->WaitUntilSweepingCompleted(); |
| 2587 | 2588 |
| 2588 // After waiting for the sweeper threads, there may be new free-list | 2589 // After waiting for the sweeper threads, there may be new free-list |
| 2589 // entries. | 2590 // entries. |
| 2590 return free_list_.Allocate(size_in_bytes); | 2591 return free_list_.Allocate(size_in_bytes); |
| 2591 } | 2592 } |
| 2592 return NULL; | 2593 return NULL; |
| 2593 } | 2594 } |
| 2594 | 2595 |
| 2595 | 2596 |
| 2596 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { | 2597 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { |
| 2597 // Allocation in this space has failed. | 2598 // Allocation in this space has failed. |
| 2598 | 2599 |
| 2599 // If sweeper threads are active, try to re-fill the free-lists. | 2600 // If sweeper threads are active, try to re-fill the free-lists. |
| 2600 MarkCompactCollector* collector = heap()->mark_compact_collector(); | 2601 MarkCompactCollector* collector = heap()->mark_compact_collector(); |
| 2601 if (collector->IsConcurrentSweepingInProgress()) { | 2602 if (collector->IsConcurrentSweepingInProgress(this)) { |
| 2602 collector->RefillFreeList(this); | 2603 collector->RefillFreeList(this); |
| 2603 | 2604 |
| 2604 // Retry the free list allocation. | 2605 // Retry the free list allocation. |
| 2605 HeapObject* object = free_list_.Allocate(size_in_bytes); | 2606 HeapObject* object = free_list_.Allocate(size_in_bytes); |
| 2606 if (object != NULL) return object; | 2607 if (object != NULL) return object; |
| 2607 } | 2608 } |
| 2608 | 2609 |
| 2609 // Free list allocation failed and there is no next page. Fail if we have | 2610 // Free list allocation failed and there is no next page. Fail if we have |
| 2610 // hit the old generation size limit that should cause a garbage | 2611 // hit the old generation size limit that should cause a garbage |
| 2611 // collection. | 2612 // collection. |
| (...skipping 143 matching lines...) |
| 2755 } | 2756 } |
| 2756 | 2757 |
| 2757 | 2758 |
| 2758 void PagedSpace::ReportStatistics() { | 2759 void PagedSpace::ReportStatistics() { |
| 2759 int pct = static_cast<int>(Available() * 100 / Capacity()); | 2760 int pct = static_cast<int>(Available() * 100 / Capacity()); |
| 2760 PrintF(" capacity: %" V8_PTR_PREFIX "d" | 2761 PrintF(" capacity: %" V8_PTR_PREFIX "d" |
| 2761 ", waste: %" V8_PTR_PREFIX "d" | 2762 ", waste: %" V8_PTR_PREFIX "d" |
| 2762 ", available: %" V8_PTR_PREFIX "d, %%%d\n", | 2763 ", available: %" V8_PTR_PREFIX "d, %%%d\n", |
| 2763 Capacity(), Waste(), Available(), pct); | 2764 Capacity(), Waste(), Available(), pct); |
| 2764 | 2765 |
| 2765 if (was_swept_conservatively_) return; | 2766 if (!is_iterable_) return; |
| 2766 ClearHistograms(heap()->isolate()); | 2767 ClearHistograms(heap()->isolate()); |
| 2767 HeapObjectIterator obj_it(this); | 2768 HeapObjectIterator obj_it(this); |
| 2768 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) | 2769 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) |
| 2769 CollectHistogramInfo(obj); | 2770 CollectHistogramInfo(obj); |
| 2770 ReportHistogram(heap()->isolate(), true); | 2771 ReportHistogram(heap()->isolate(), true); |
| 2771 } | 2772 } |
| 2772 #endif | 2773 #endif |
| 2773 | 2774 |
| 2774 | 2775 |
| 2775 // ----------------------------------------------------------------------------- | 2776 // ----------------------------------------------------------------------------- |
| (...skipping 360 matching lines...) |
| 3136 object->ShortPrint(); | 3137 object->ShortPrint(); |
| 3137 PrintF("\n"); | 3138 PrintF("\n"); |
| 3138 } | 3139 } |
| 3139 printf(" --------------------------------------\n"); | 3140 printf(" --------------------------------------\n"); |
| 3140 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3141 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 3141 } | 3142 } |
| 3142 | 3143 |
| 3143 #endif // DEBUG | 3144 #endif // DEBUG |
| 3144 | 3145 |
| 3145 } } // namespace v8::internal | 3146 } } // namespace v8::internal |
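For readers skimming the diff, here is a minimal, self-contained C++ sketch of the state this change moves to: the old `was_swept_conservatively_` flag becomes a per-space `is_iterable_` bit plus an `is_swept_concurrently_` bit, and the collector's `IsConcurrentSweepingInProgress()` query now takes the space in question. The class names `Space` and `Collector` below are hypothetical stand-ins for `PagedSpace` and `MarkCompactCollector`, not the real V8 types.

```cpp
#include <cstddef>

class Collector;

// Hypothetical stand-in for v8::internal::PagedSpace.
class Space {
 public:
  explicit Space(Collector* collector)
      : collector_(collector),
        is_iterable_(true),
        is_swept_concurrently_(false) {}

  bool is_iterable() const { return is_iterable_; }

  // Allocation slow path, mirroring PagedSpace::SlowAllocateRaw above:
  // if *this* space is still being swept, refill its free list and retry.
  void* SlowAllocate(size_t size_in_bytes);

 private:
  friend class Collector;
  Collector* collector_;
  bool is_iterable_;            // replaces was_swept_conservatively_
  bool is_swept_concurrently_;  // set while sweeper threads own this space
};

// Hypothetical stand-in for MarkCompactCollector.
class Collector {
 public:
  // The query is per space now, matching IsConcurrentSweepingInProgress(this).
  bool IsConcurrentSweepingInProgress(Space* space) const {
    return space->is_swept_concurrently_;
  }
  void RefillFreeList(Space* space) {
    // The real code moves blocks freed by the sweeper threads into the
    // space's free list; elided here.
  }
};

void* Space::SlowAllocate(size_t size_in_bytes) {
  if (collector_->IsConcurrentSweepingInProgress(this)) {
    collector_->RefillFreeList(this);
    // The real code retries free_list_.Allocate(size_in_bytes) here.
  }
  return nullptr;  // falls through to GC or space expansion in the real code
}
```

In terms of usage, callers that previously consulted one collector-wide flag now pass in the space they are allocating into, which is exactly what the `SizeOfObjects`, `WaitForSweeperThreadsAndRetryAllocation`, and `SlowAllocateRaw` hunks above do.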